% (non-LaTeX dataset-extraction header removed)
\begin{document}
\title{Reconstructing Jacobi Matrices from Three Spectra}
\author{Johanna Michor}
\address{Institut f\"ur Mathematik\\
Strudlhofgasse 4\\ 1090 Wien\\ Austria\\ and International Erwin Schr\"odinger
Institute for Mathematical Physics, Boltzmanngasse 9\\ 1090 Wien\\ Austria}
\email{[email protected]}
\author{Gerald Teschl}
\address{Institut f\"ur Mathematik\\
Strudlhofgasse 4\\ 1090 Wien\\ Austria\\ and International Erwin Schr\"odinger
Institute for Mathematical Physics, Boltzmanngasse 9\\ 1090 Wien\\ Austria}
\email{[email protected]}
\urladdr{http://www.mat.univie.ac.at/\~{}gerald/}
\keywords{Jacobi matrices, spectral theory, trace formulas,
Hochstadt's theorem}
\subjclass{Primary 36A10, 39A70; Secondary 34B24, 34L05}
\title{Reconstructing Jacobi Matrices from Three Spectra}
\begin{abstract}
Cut a Jacobi matrix into two pieces by removing the $n$-th column and $n$-th
row. We give necessary and sufficient conditions for the spectra of the
original matrix plus the spectra of the two submatrices to uniquely determine
the original matrix. Our result contains Hochstadt's theorem as a special case.
\end{abstract}
\section{Introduction}
The topic of this paper is inverse spectral theory for Jacobi matrices, that
is, matrices of the form
\begin{equation}
H = \left( \begin{array}{ccccc}
b_1 & a_1 & & & \\
a_1 & b_2 & a_2 & & \\
& \ddots & \ddots & \ddots & \\
& & a_{N - 2} & b_{N - 1} & a_{N - 1} \\
& & & a_{N - 1} & b_N \\
\end{array} \right)
\end{equation}
This is an old problem closely related to the moment problem (see
\cite{simp} and the references therein), which has attracted considerable interest recently
(see, e.g., \cite{gsfj} and the references therein, \cite{gla}, \cite{gibs}, \cite{shieh}).
In this note we want to investigate the following question: Remove the
$n$-th row and the $n$-th column from $H$ and denote the resulting submatrices
by $H_-$ (from $b_1$ to $b_{n-1}$) respectively $H_+$ (from $b_{n+1}$ to $b_N$).
When do the spectra of these three matrices determine the original matrix $H$?
We will show that this is the case if and only if $H_-$ and $H_+$ have no
eigenvalues in common.
{}From a physical point of view such a model describes a chain of $N$
particles coupled via springs and fixed at both end points
(see \cite{tjac}, Section~1.5). Determining the eigenfrequencies
of this system and the one obtained by keeping one particle fixed, one
can uniquely reconstruct the masses and spring constants. Moreover,
these results can be applied to completely integrable systems, in particular the
Toda lattice (see e.g., \cite{tjac}).
\section{Main result}
To set the stage let us introduce some further notation. We denote the spectra
of the matrices introduced in the previous section by
\begin{equation}
\sigma(H) = \{ \lambda_j \}_{j = 1}^{N}, \quad
\sigma(H_-) = \{ \mu^-_k \}_{k = 1}^{n - 1}, \quad
\sigma(H_+) = \{ \mu^+_l \}_{l = 1}^{N - n}.
\end{equation}
Moreover, we denote by $( \mu_j )_{j=1}^{N-1}$ the ordered eigenvalues of
$H_-$ and $H_+$ (listing common eigenvalues twice) and recall
the well-known formula
\begin{equation} \label{gzn}
g(z,n) = \frac{\prod_{j=1}^{N-1} (z - \mu_j)}{\prod_{j=1}^{N-1} (z - \lambda_j)} =
\frac{-1}{z-b_n + a_n^2 m_+(z,n) + a_{n-1}^2 m_-(z,n)},
\end{equation}
where $g(z,n)$ are the diagonal entries of the resolvent $(H-z)^{-1}$ and $m_\pm(z,n)$
are the Weyl $m$-functions corresponding to $H_-$ and $H_+$. The Weyl functions
$m_\pm(z,n)$ are Herglotz and hence have a representation of the following form
\begin{eqnarray}
m_-(z, n) & = & \sum_{k = 1}^{n - 1} \frac {\alpha_k^-}{\mu_k^- - z},
\qquad \alpha_k^- > 0, \quad \sum_{k = 1}^{n - 1} \alpha_k^- =1,\\
m_+(z, n) & = & \sum_{l = 1}^{N - n} \frac {\alpha_l^+}{\mu_l^+ - z},
\qquad \alpha_l^+ > 0, \quad \sum_{l = 1}^{N - n} \alpha_l^+=1.
\end{eqnarray}
With this notation our main result reads as follows
\begin{thm}
To each Jacobi matrix $H$ we can associate spectral data
\begin{equation}
\{ \lambda_j \}_{j = 1}^{N}, \quad ( \mu_j, \sigma_j )_{j = 1}^{N - 1},
\end{equation}
where $\sigma_j = +1$ if $\mu_j \in \sigma(H_+) \backslash \sigma(H_-)$,
$\sigma_j = -1$ if $\mu_j \in \sigma(H_-) \backslash \sigma(H_+)$, and
\begin{equation}
\sigma_j = \frac {a_n^2 \alpha_l^+ - a_{n - 1}^2 \alpha_k^-}
{a_n^2 \alpha_l^+ + a_{n - 1}^2 \alpha_k^-}
\end{equation}
if $\mu_j=\mu_k^- = \mu_l^+$.
Then these spectral data satisfy
\begin{enumerate}
\item[(i)] $\lambda_1 < \mu_1 \leq \lambda_2 \leq \mu_2 \leq \dots < \lambda_N$,
\item[(ii)] $\sigma_j=\sigma_{j+1} \in(-1,1)$ if $\mu_j = \mu_{j+1}$ and
$\sigma_j \in \{ \pm 1\}$ if $\mu_j \ne \mu_i$ for $i\ne j$
\end{enumerate}
and uniquely determine $H$. Conversely, for every given set of spectral data
satisfying $(i)$ and $(ii)$, there is a corresponding Jacobi matrix $H$.
\end{thm}
\begin{proof}
We first consider the case where $H_-$ and $H_+$ have no eigenvalues in common.
The interlacing property (i) is equivalent to the Herglotz property of $g(z,n)$.
Furthermore, the residues $\alpha^-_i$ can be computed from (\ref{gzn})
\begin{eqnarray} \nonumber
\frac {\prod_{j = 1}^N (z - \lambda_j)}
{\prod_{k = 1}^{n - 1} (z - \mu_k^-) \prod_{l = 1}^{N - n} (z - \mu_l^+)}
& = & z - b_n - a_n^2 \sum_{l = 1}^{N - n} \frac {\alpha_l^+}{z -
\mu_l^+} \\ \label{inserting our ansatz}
& & -\, a_{n - 1}^2 \sum_{k = 1}^{n - 1}
\frac {\alpha_k^-}{z - \mu_k^-}.
\end{eqnarray}
and are given
by $ \alpha_i^-= a_{n - 1}^{-2} \beta_i^-$, where
\begin{equation} \label{reconstruct betaim}
\beta_i^- = -\, \frac
{\prod_{j = 1}^N (\mu_i^- - \lambda_j)}
{\prod_{l \neq i} (\mu_i^- - \mu_l^-) \prod_{l = 1}^{N - n}
(\mu_i^- - \mu_l^+)}, \qquad
a_{n - 1}^2 = \sum_{i = 1}^{n - 1} \beta_i^-.
\end{equation}
Similarly, $\alpha_l^+ = a_n^{-2} \beta_l^+$, where
\begin{equation} \label{reconstruct betaip}
\beta_l^+ = -\, \frac {\prod_{j = 1}^N (\mu_l^+ - \lambda_j)}
{\prod_{k = 1}^{n - 1} (\mu_l^+ - \mu_k^-) \prod_{p \neq l}
(\mu_l^+ - \mu_p^+)}, \qquad
a_n^2 = \sum_{l = 1}^{N - n} \beta_l^+.
\end{equation}
Hence $m_\pm(z,n)$ are uniquely determined and thus $H_\pm$ by standard
results from the moment problem. The only remaining coefficient $b_n$
follows from the well-known trace formula
\begin{equation} \label{reconstruct bn}
b_n = \mathrm{tr}(H) - \mathrm{tr}(H_-) - \mathrm{tr}(H_+) = \sum_{j = 1}^N \lambda_j
- \sum_{k = 1}^{n - 1} \mu_k^- - \sum_{l = 1}^{N - n} \mu_l^+.
\end{equation}
Conversely, suppose we have the spectral data given.
Then we can define $a_n$, $a_{n-1}$, $b_n$, $\alpha_k^-$, $\alpha_l^+$
as above. By (i), $\alpha_k^-$ and $\alpha_l^+$ are positive and hence give rise
to $H_\pm$. Together with $a_n$, $a_{n-1}$, $b_n$ we have thus defined a
Jacobi matrix $H$. By construction, the eigenvalues $\mu_k^-$, $\mu_l^+$
are the right ones and also (\ref{gzn}) holds for $H$. Thus $\lambda_j$ are the
eigenvalues of $H$, since they are the poles of $g(z,n)$.
Next we come to the general case where $\mu_{j_0} = \mu_{k_0}^- = \mu_{l_0}^+\,\, ( = \lambda_{j_0})$ for at least one $j_0$. Now some factors in the
left hand side of (\ref{inserting our ansatz}) will cancel and we can no longer
compute $\beta_{k_0}^-$, $\beta_{l_0}^+$, but only
$\gamma_{j_0}= \beta_{k_0}^- + \beta_{l_0}^+$. However, by definition of
$\sigma_{j_0}$ we have
\begin{equation} \label{distribute sum}
\beta_{k_0}^- = \frac {1 - \sigma_{j_0}}{2}\, \gamma_{j_0}, \quad \quad
\beta_{l_0}^+ = \frac {1 + \sigma_{j_0}}{2}\, \gamma_{j_0}.
\end{equation}
Now we can proceed as before to see that $H$ is uniquely determined by the spectral data.
Conversely, we can also construct a matrix $H$ from given spectral data, but it
is no longer clear that $\lambda_j$ is an eigenvalue of $H$ unless it is a pole
of $g(z,n)$. However, in the case $\lambda_{j_0}= \mu_{k_0}^- = \mu_{l_0}^+$ we
can glue the eigenvectors of $H_-$ and $H_+$ to give an eigenvector
corresponding to $\lambda_{j_0}$ of $H$.
\end{proof}
The special case where we remove the first row and the first column (in which case $H_-$ is not present) corresponds to Hochstadt's theorem \cite{hspec}. Similar
results for (quasi-)periodic Jacobi operators can be found in \cite{ttr}.
\end{document}
% (dataset-extraction separator removed; a second document follows)
\begin{document}
\begin{abstract}
We prove a dimension-free $L^p(\mathbb{R}^d)$, $1<p<\infty$, estimate for the vector of maximal Riesz transforms of odd order in terms of the corresponding Riesz transforms. This implies a dimension-free $L^p(\mathbb{R}^d)$ estimate for the vector of maximal Riesz transforms in terms of the input function. We also give explicit estimates for the dependencies of the constants on $p$ when the order is fixed. Analogous dimension-free estimates are also obtained for single Riesz transforms of odd orders with an improved estimate of the constants. These results are a dimension-free extension of the work of J. Mateu, J. Orobitg, C. P\'erez, and J. Verdera. Our proof consists of factorization and averaging procedures, followed by a non-obvious application of the method of rotations.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:Int}
Fix a positive integer $k$ and denote by $\mathcal{H}_k$ the space of spherical harmonics of degree $k$ on $\mathbb{R}^d.$ Then, for $P \in \mathcal{H}_k$ the Riesz transform $R=R_P$ is defined by the kernel
\begin{equation}
\label{eq:KP}
K_P(x)=K(x) = \gamma_k \frac{P(x)}{\abs{x}^{k+d}} \qquad\textrm{ with } \qquad \gamma_k = \frac{\Gamma\left( \frac{k+d}{2}\right)}{\pi^{d/2}\Gamma\left( \frac{k}{2}\right)},
\end{equation}
more precisely,
\begin{equation} \label{eq:R}
R_P f(x) = \lim_{t \to 0} R_P^t f(x), \qquad\textrm{ where } \qquad R_P^t f(x) = \gamma_k \int_{\abs{y}>t} \frac{P(y)}{\abs{y}^{k+d}} f(x-y) dy.
\end{equation}
The operator $R_P^t$ is called the \emph{truncated Riesz transform}. In the particular case of $k=1$ it coincides with the classical first order Riesz transforms $R_j:=R_{x_j}.$ It is well known, see \cite[p. 73]{stein}, that the Fourier multiplier associated with the Riesz transform $R_P$ equals
\begin{equation} \label{eq:m}
m_P(\xi) = (-i)^k\frac{P(\xi)}{\abs{\xi}^{k}}.
\end{equation}
Since $P$ is homogeneous of degree $k$, by the above formula $m_P$ is bounded and
Plancherel's theorem implies the $L^2(\mathbb{R}^d)$ boundedness of $R_P.$
The $L^p(\mathbb{R}^d)$ boundedness of the single Riesz transforms $R_P$ for $1<p<\infty$ follows from the Calder\'on--Zygmund method of rotations \cite{CZ2}.
The systematic study of the dimension-free $L^p$ bounds
for the Riesz transforms has begun in the seminal paper of
E. M. Stein \cite{stein_riesz}. He has proved the $\ell^2$ vector-valued estimates
for the vector of the first order Riesz transforms
$(R_1f,\ldots,R_df)$. More precisely,
\begin{equation}
\label{eq:Riesz0}
\norm{\bigg(\sum_{j=1}^d |R_jf|^2\bigg)^{1/2}}_{L^p(\mathbb{R}^d)}\leqslant C_p\, \|f\|_{L^p(\mathbb{R}^d)},\qquad 1<p<\infty,\end{equation}
where $C_p$ is independent of the dimension $d.$
This result has been extended to many other settings. The
analogue of the dimension-free inequality \eqref{eq:Riesz0} has also been proved for higher order Riesz transforms, see \cite[Th\'{e}or\`{e}me 2]{duo_rubio}. The optimal constant $C_p$ in \eqref{eq:Riesz0} remains unknown when $d\geqslant 2;$ however the best results to date given in \cite{BW1} (see also \cite{DV}) established the correct order of the dependence on $p$. We note that the explicit values of $L^p(\mathbb{R}^d)$ norms of the single first order Riesz transforms $R_j,$ $j=1,\ldots,d,$ were obtained by Iwaniec and Martin \cite{iwaniec_martin} based on the complex method of rotations.
In this paper we study the relation between $R_P$ and the \emph{maximal Riesz transform} defined by
\begin{equation*}
R_P^* f(x) = \sup_{t > 0} \abs{R_P^t f(x)}.
\end{equation*}
There is an obvious pointwise inequality $R_P f(x)\leqslant R_P^*f(x).$
In a series of papers \cite[Theorem 1]{mateu_verdera} (first order Riesz transforms), \cite[Section 4]{mopv} (odd order higher Riesz transforms), and \cite[Section 2]{mov1} (even order higher Riesz transforms), J. Mateu, J. Orobitg, C. P\'erez, and J. Verdera proved that also a reverse inequality holds in the $L^p(\mathbb{R}^d)$ norm.
Namely, together the results of \cite{mateu_verdera,mopv,mov1} imply that for each $1<p<\infty$ there exists a constant $C(p,k,d)$ such that
\begin{equation} \label{eq:mat_ver}
\norm{R_P^* f}_{L^p(\mathbb{R}^d)}\leqslant C(p,k,d)\norm{R_P f}_{L^p(\mathbb{R}^d)}
\end{equation}
for all $f\in L^p(\mathbb{R}^d)$.
The estimate \eqref{eq:mat_ver} as presented in \cite{mateu_verdera,mopv,mov1} has been proved for general singular integral operators with
even kernels \cite{mov1} or with odd kernels \cite{mopv}. The cost of this generality is that the values of $C(p,k,d)$ grow exponentially with the dimension. In view of \cite{Jan}, the question about improved rate arises naturally.
Recently, the first and the second named author proved that when $p=2$, in \eqref{eq:mat_ver} one may take an explicit dimension-free constant $C(2,1,d)\leqslant 2\cdot 10^8,$ see \cite[Theorem 1.1]{kw}. The arguments applied in \cite{kw} relied on Fourier transform estimates together with
square function techniques developed by Bourgain \cite{bou1} for studying dimension-free estimates for maximal functions associated with symmetric convex bodies. Extension of this approach to other $p$ seems to be delicate due to the lack of the necessary $L^1$ behaviour of the operators $M^t$ defined in \cite[eq.\ (3.5)]{kw}.
In this paper we prove that the dimension-free estimate from \cite{kw} still holds
for odd order Riesz transforms and for $1<p<\infty$.
The main result of our paper is the following square function estimate of the vector of maximal Riesz transforms in terms of the Riesz transforms.
\begin{theorem} \thlabel{thm1}
Take $p \in (1, \infty)$ and let $k\leqslant d$ be a non-negative odd integer. Let $\mathcal{P}_k$ be a subset of $\mathcal{H}_k.$ Then there is a constant $A(p,k)$ independent of the dimension $d$ and such that
\begin{equation*}
\norm{\left(\sum_{P\in \mathcal{P}_k} |R_P^* f|^2\right)^{1/2}}_{L^p(\mathbb{R}^d)} \leqslant A(p,k) \norm{ \left(\sum_{P\in \mathcal{P}_k} |R_P f|^2\right)^{1/2}}_{L^p(\mathbb{R}^d)},
\end{equation*}
where $f\in L^p(\mathbb{R}^d).$ Moreover, for fixed $k$ we have $A(p,k)=O(p^{3+k/2})$ as $p\to \infty$ and $A(p,k)=O((p-1)^{-3-k/2})$ as $p\to 1.$
\end{theorem}
In particular, if $\mathcal{P}_k$ contains one element $P,$ then \thref{thm1} immediately gives
\begin{equation*}
\norm{ R_P^* f}_{L^p(\mathbb{R}^d)} \leqslant A(p,k) \norm{ R_P f}_{L^p(\mathbb{R}^d)}.
\end{equation*}
In this case however, we can slightly improve the constant $A(p,k).$
\begin{theorem} \thlabel{thm2}
Take $p \in (1, \infty)$ and let $k\leqslant d$ be a non-negative odd integer. Let $P$ be a spherical harmonic of degree $k.$ Then there is a constant $ B(p,k)$ independent of the dimension $d$ and such that
\begin{equation*}
\norm{ R_P^* f}_{L^p(\mathbb{R}^d)} \leqslant B(p,k) \norm{ R_P f}_{L^p(\mathbb{R}^d)},
\end{equation*}
where $f\in L^p(\mathbb{R}^d).$ Moreover, for fixed $k$ we have $B(p,k)=O(p^{2+k/2})$ as $p\to \infty$ and $B(p,k)=O((p-1)^{-2-k/2})$ as $p\to 1.$
\end{theorem}
Our last main result follows from a combination of \thref{thm1} with a result of Duoandikoetxea and Rubio de Francia \cite[Th\'eor\`eme 2]{duo_rubio}. Denote by $a(d,k)$ the dimension of $\mathcal{H}_k$ and let $\{Y_j\}_{j=1,\ldots,a(d,k)}$ be an orthogonal basis of $\mathcal{H}_k$ normalized by the condition
\[
\frac{1}{\sigma(S^{d-1})}\int_{S^{d-1}} |Y_j(\theta)|^2\,d\sigma(\theta) =\frac{1}{a(d,k)};
\]
here $d\sigma$ denotes the (unnormalized) spherical measure.
\begin{cor}
\thlabel{cor:dr}
Take $p \in (1, \infty)$ and let $k\leqslant d$ be a non-negative odd integer. Then there is a constant $G(p,k)$ independent of the dimension $d$ and such that
\begin{equation*}
\norm{\left(\sum_{j=1}^{a(d,k)} |R_{Y_j}^* f|^2\right)^{1/2}}_{L^p(\mathbb{R}^d)} \leqslant G(p,k) \|f\|_{L^p(\mathbb{R}^d)},
\end{equation*}
where $f\in L^p(\mathbb{R}^d).$ Moreover, for fixed $k$ we have $G(p,k)=O(p^{4+k/2})$ as $p\to \infty$ and $G(p,k)=O((p-1)^{-4-k})$ as $p\to 1.$
\end{cor}
\subsection{Structure of the paper and our methods}
There are three main ingredients used in the proofs of \thref{thm1,thm2}.
Firstly, we need a factorization of the truncated Riesz transform $R_P^t=M^t_k(R_P)$. Here, $M^t_k,$ $t>0,$ is a family of radial Fourier multiplier operators. In the case $k=1$ this factorization has been one of the key steps in establishing the main results of \cite{kw}. In particular the operator $M_1^t$ considered here coincides with $M^t$ defined in \cite[eq.\ (3.5)]{kw}. For general values of $k$ the factorization is also implicitly contained in \cite[Section 2]{mateu_verdera} ($k=1$), \cite[Section 2]{mov1} ($k$ even), and \cite[Section 4]{mopv} ($k$ odd). Note that for the first order Riesz transforms the formulas $R_j^t=M^t_1(R_j),$ $j=1,\ldots,d,$ together with the identity $I=-\sum_{j=1}^d R_j^2$ imply that
\begin{equation}
\label{eq:MtR1}
M^t_1=-\sum_{j=1}^d M_1^t R_j^2=-\sum_{j=1}^d R_j^t R_j.\end{equation}
Details of the factorization procedure are given in Section \ref{sec:fa}.
The second ingredient we need is an averaging procedure. It turns out that a useful analogue of \eqref{eq:MtR1} is not directly available for Riesz transforms of orders higher than one. The reason behind it is the fact that not all compositions of first-order Riesz transforms are higher order Riesz transforms according to our definition. For instance, in the case $k=3$ the multiplier symbol of $R_1^3=R_1 R_1 R_1$ on $L^2(\mathbb{R}^2)$ equals $\xi_1^3/|\xi|^3$ and $P(\xi)=\xi_1^3$ is not a spherical harmonic. However, the formula $$I=-\sum_{j_1=1}^d \sum_{j_2=1}^d \sum_{j_3=1}^d R_{j_1}^2 R_{j_2}^2 R_{j_3}^2,$$
includes squares of all compositions of Riesz transforms including $R_1^6=(R_1^3)^2$. Therefore the above formula does not directly lead to an expression of $M^t$ in terms of $R^t_P$ and $R_P.$ To overcome this problem we average over the special orthogonal group $SO(d).$ Then we obtain
\begin{equation}
\label{eq:lemAp}
M^t_kf(x) = C(d,k)\int_{SO(d)} \sum_{j \in I} (R_j^t R_j f)_U(x)\, d\mu(U),
\end{equation}
see \thref{pro:av}. Here $T_U$ is the conjugation of an operator $T$ by $U\in SO(d),$ see \eqref{conj_U_def}, $d\mu$ denotes the normalized Haar measure on $SO(d),$ while $C(d,k)$ is a constant. The symbol $I$ denotes the set of distinct indices $j=(j_1,\ldots,j_k)$ while $R_j^t$ and $R_j$ are the truncated Riesz transforms and the Riesz transforms \eqref{eq:R} corresponding to the monomials $P_j(x)=x_{j_1}\cdots x_{j_k}.$ Note that since $j\in I$ the polynomials $P_j$ are spherical harmonics and thus the operators $R_j$ are indeed higher order Riesz transforms. In view of \eqref{eq:lemAp}, if we demonstrate that $C(d,k)$ is bounded by a universal constant, we are left with estimating the maximal function corresponding to
$\sum_{j \in I} (R_j^t R_j).$ The reduction via the averaging procedure is described in detail in Section \ref{sec:av}. It is noteworthy that in order for the averaging approach to work it is essential that for each order $k$ the multiplier symbols of $M^t_k$ are radial functions.
The third main ingredient of our argument is the method of rotations. We use it to estimate the maximal function corresponding to $\sum_{j \in I} (R_j^t R_j)$ and thus the first two ingredients described above are crucial in reaching this point. In the context of dimension-free estimates for Riesz transforms this method has been first employed by Duoandikoetxea and Rubio de Francia \cite{duo_rubio}. However, a direct application of their techniques seems not well suited for our problem. Indeed, it only allows one to prove a weaker variant of \thref{cor:dr}, with the supremum taken outside of the $\ell^2$ norm, cf.\ \cite[Theorem 1.3]{kw}. In order to make the method of rotations work in our problem we need several duality arguments, Khintchine's inequality, and some specific computations. All of it reflects the size of the constants $A(p,k)$ in \thref{thm1} and $B(p,k)$ in \thref{thm2}. The application of the method of rotations to our problem is described in detail in Section \ref{sec:mr}.
At the first reading it might be helpful to skip the explicit values of constants in terms of $k$ and $p$ and only focus on these constants being independent of the dimension $d.$ An interested reader may trace the exact dependencies of the constants in terms of $k$ and $p$ in the paper.
\subsection{Notation}
\label{sec:not}
We finish the introduction with a description of the notation and conventions used in the rest of the paper.
\begin{enumerate}
\item The letters $d$ and $k$ stand for the dimension and for the order of the Riesz transforms, respectively. In particular we always have $k\leqslant d,$ even if this is not stated explicitly.
\item The symbol $\mathbb{N}$ represents the set of positive integers. We write $\mathbb{N}o$ for the set of odd elements of $\mathbb{N}.$ Throughout the rest of the paper we always assume that $k\in \mathbb{N}o$.
\item For an exponent $p\in [1,\infty]$ we let $q$ be its conjugate exponent satisfying
$$1=\frac1p+\frac1q.$$
When $p\in(1,\infty)$ we set
$$p^*:=\max(p,(p-1)^{-1}).$$
\item We abbreviate $L^p(\mathbb{R}^d)$ to $L^p$ and $\norm{\cdot}_{L^p}$ to $\norm{\cdot}_p$. For a sublinear operator $T$ on $L^p$ we denote by $\|T\|_{p\to p}$ its norm. We let $\mathcal S$ be the space of Schwartz functions on $\mathbb{R}^d.$ Slightly abusing the notation we say that a sublinear operator $T$ is bounded on $L^p$ if it is bounded on $\mathcal S$ in the $L^p$ norm. For $k\in \mathbb{N}$ we let $\mathcal{D}(k)$ be the linear span of $\{R_P(f)\colon P\in \mathcal{H}_k, f\in \mathcal{S}\}.$ Since $R_P$ is bounded on $L^p$ for $1<p<\infty$ the space $\mathcal{D}(k)$ is then a subspace of each of the $L^p$ spaces.
\item For a Banach space $X$ the symbol $L^p(\mathbb{R}^d;E)$ stands for the space of weakly measurable functions $f\colon \mathbb{R}^d\to E$ equipped with the norm $\|f\|_{L^p(\mathbb{R}^d;E)}=(\int_{\mathbb{R}^d}\|f(x)\|_E^p\,dx)^{1/p}.$ Similarly, for a finite set $F$ by $\ell^p(F;E)$ we denote the Banach space of $E$-valued sequences $\{f_s\}_{s\in F}$ equipped with the norm $\|f\|_{\ell^p(F;E)}=(\sum_{s\in F}\|f_s\|_E^p)^{1/p}.$
\item The symbol $C_{\Delta}$ stands for a constant that possibly depends on $\Delta>0.$ We write $C$ without a subscript when the constant is universal in the sense that it may depend only on $k$ but not on the dimension $d$ nor on any other quantity.
\item For two quantities $X$ and $Y$ we write $X\lesssim_{\Delta} Y$ if $X \leqslant C_{\Delta} Y$ for some constant $C_{\Delta}>0$ that depends only on $\Delta.$ We abbreviate $X\lesssim Y$ when $C$ is a universal constant. We also write $X\sim Y$ if both $X\lesssim Y$ and $Y \lesssim X$ hold simultaneously. By $X\lesssim^{\Delta} Y$ we mean that $X\leqslant C^{\Delta} Y$ with a universal constant $C.$ Note that in this case $X^{1/\Delta}\lesssim Y^{1/\Delta}.$
\item The symbol $S^{d-1}$ stands for the $(d-1)$-dimensional unit sphere in $\mathbb{R}^d$ and by $\omega$ we denote the uniform measure on $S^{d-1}$ normalized by the condition $\omega(S^{d-1})=1.$ We also write
\begin{equation}
\label{eq:Sd-1}
S_{d-1} = \frac{2\pi^{d/2}}{\Gamma\left( \frac{d}{2} \right)}
\end{equation} to denote the unnormalized surface area of $S^{d-1}.$
\item The Fourier transform is defined for $f\in L^1$ and $\xi\in\mathbb{R}^d$ by the formula
\[
\widehat{f}(\xi) = \int_{\mathbb{R}^d} f(x) e^{-2 \pi i x \cdot \xi} dx.
\]
\item The Gamma function is defined for $s>0$ by the formula
\[
\Gamma(s) = \int_0^\infty t^{s-1}e^{-t}dt.
\]
We shall often use Stirling's approximation for $\Gamma(s)$
\begin{equation}
\label{StirF}
\Gamma(s)\sim\sqrt{2\pi}s^{s-\frac12}e^{-s},\qquad s\to \infty.
\end{equation}
\end{enumerate}
\section{Factorization}
\label{sec:fa}
The first goal of this section is to show that a factorization formula for $R_P^t$ in terms of $R_P$ is feasible. The proposition below is implicit in \cite[Section 4]{mopv}. \begin{pro}
\thlabel{pro:fact}
Let $k\in \mathbb{N}o$. Then there exists a family of operators $M_k^t,$ $t>0$,
which are bounded on $L^p,$ $1<p<\infty,$ and such that for all $P\in \mathcal{H} _k$ we have
\begin{equation}
\label{eq:fact}
R_P^t f=M_k^t (R_P f),
\end{equation}
where $f\in L^p.$ Each $M^t_k$ is a convolution operator with a radial convolution kernel $b^t_k.$ Moreover, when $P\in \mathcal{H}_k$ and $f\in \mathcal{S},$ then for a.e.\ $x\in \mathbb{R}^d,$ the function $t\mapsto M_k^t (R_P f)(x)$ is continuous on $(0,\infty).$
\end{pro}
\begin{proof}
Let $c_d=\frac{\Gamma((d-1)/2)}{2\pi^{d/2}\Gamma(1/2)},$ $N=(k-1)/2,$ and denote by $B$ the open Euclidean ball of radius $1$ in $\mathbb{R}^d.$
It is justified in \cite[pp.\ 3674--3675]{mopv} that the function
\begin{equation}
\label{eq:bdef}
b(x)=b_{k,d}(x):=\sum_{j=1}^d R_j\left[y_j\cdot h(y)\right](x),
\end{equation}
where
$$h(y)=c_d(1-d)\frac{1}{|y|^{d+1}}\ind{B^c}(y)+(\beta_1 +\beta_2|y|^2+\cdots+\beta_N |y|^{2N-2})\ind{B}(y),$$
satisfies the formula
\begin{equation}
\label{eq:RKB}
R_P(b)(x)=K_P(x)\ind{B^c}.
\end{equation}
Here $\beta_1,\ldots,\beta_N$ are constants which depend only on $k$ and $d$ and whose exact value is irrelevant for our considerations, and $K_P,$ $R_P$ have been defined in \eqref{eq:KP}, \eqref{eq:R}, respectively. The important point is that \eqref{eq:RKB} remains true for any $P\in \mathcal{H}_k.$
Denote by $H$ the radial profile of the Fourier transform of $h$, i.e.\ $H(|\xi|)=\widehat{h}(\xi)$ for $\xi\in \mathbb{R}^d.$ By taking the Fourier transform of \eqref{eq:bdef} it is straightforward to see that $b$ is a radial function. This follows since the multiplier symbol of $R_j$ is $-i\xi_j/|\xi|$ and $$\widehat {(y_j h(y))}(\xi)=\frac{\xi_j}{-2\pi i|\xi|}\,H'(|\xi|),$$ so that
\begin{align*}
\mathcal{F} b(\xi)=\sum_{j=1}^d \frac{\xi_j^2}{2\pi |\xi|^2}\cdot H'(|\xi|)=\frac{1}{2\pi}H'(|\xi|)
\end{align*}
is indeed radial and so is $b.$
Let $b^t(x)=b^t_k(x):=t^{-d}b(x/t)$ be the $L^1$ dilation of $b;$ clearly $b^t$ is still radial. The dilation invariance of $R_P$ together with \eqref{eq:RKB} leads us to the expression
\begin{equation}
\label{eq:RKBt}
K_P(x)\ind{B^c}(x/t)=R_P(b^t)(x).
\end{equation}
Let $M^t_k$ be the convolution operator
\begin{equation*}
M^t_k f(x)=b^t*f(x).
\end{equation*}
It follows from \cite[Section 4]{mopv} that $M_k^t$ is bounded on $L^p$ spaces whenever $1<p<\infty.$
Moreover, in view of \eqref{eq:RKBt} we see that
\begin{equation*}
R_P^t f= R_P(b^t)*f=b^t * R_P(f)=M_k^t (R_P f).
\end{equation*}
Finally, for $f\in \mathcal{S},$ $P\in \mathcal{H}_k,$ and $x\in \mathbb{R}^d$ the mapping $t\mapsto R_P^t f(x)$ is continuous on $(0,\infty).$ Thus, also $M_k^t (R_P f)(x)$ is a continuous function of $t>0$ for a.e.\ $x.$ This completes the proof of the proposition.
\end{proof}
As a corollary of \thref{pro:fact} we see that in order to justify \thref{thm1,thm2} it suffices to control vector and scalar valued maximal functions corresponding to the operators $M^t_k.$ In what follows, for $f\in \mathcal{D}(k)$ we set
\begin{equation*}
M^*f(x)=\sup_{t>0}|M^t_k f(x)|.
\end{equation*}
Note that by \thref{pro:fact}
for $f\in \mathcal{D}(k)$ we have
\begin{equation}
\label{eq:MtmaxQ}
M^*f(x)=\sup_{t\in \mathbb{Q}_+ }|M^t_k f(x)|,
\end{equation}
where $\mathbb{Q}_+$ denotes the set of positive rational numbers; hence
the maximal function $M^*f(x)$ is measurable, although possibly being infinite for some $x.$
\begin{theorem}
\thlabel{thm1'}
Let $k\in \mathbb{N}o.$ For each $p\in (1,\infty)$ there is a constant $A(p,k)$ independent of the dimension $d$ and such that for any $S\in\mathbb{N}$ we have
\begin{equation*}
\norm{\left(\sum_{s=1}^S |M^* f_s|^2\right)^{1/2}}_{p} \leqslant A(p,k) \norm{ \left(\sum_{s=1}^S | f_s|^2\right)^{1/2}}_{p},
\end{equation*}
whenever $f_1,\ldots,f_S \in \mathcal{D}(k).$
Moreover, for fixed $k$ we have $A(p,k)=O(p^{3+k/2})$ as $p\to \infty$ and $A(p,k)=O((p-1)^{-3-k/2})$ as $p\to 1.$
\end{theorem}
\begin{theorem} \thlabel{thm2'}
Let $k\in \mathbb{N}o.$ For each $p\in (1,\infty)$ there is a constant $ B(p,k)$ independent of the dimension $d$ and such that
\begin{equation*}
\norm{ M^* f}_{p} \leqslant B(p,k) \norm{ f}_{p},
\end{equation*}
whenever $f\in \mathcal{D}(k).$ Moreover, for fixed $k$ we have $B(p,k)\lesssim (p^*)^{2+k/2}. $
\end{theorem}
\thref{thm1',thm2'} together with \thref{pro:fact} imply \thref{thm1,thm2} with the same values of constants $A(p,k)$ and $B(p,k).$ This is done first for $f\in \mathcal{S},$ and then by density for all $f\in L^p.$ Therefore, from now on we focus on proving \thref{thm1',thm2'}.
\section{Averaging}
\label{sec:av}
In this section we describe the averaging procedure. This will allow us to pass from $M^*$ to another maximal function that is better suited for an application of the method of rotations in Section \ref{sec:mr}. Before moving on, we establish some notation. For a multi-index $j = (j_1, \dots, j_k) \in \{1, \dots, d\}^k$ by $R_j$ we denote the Riesz transform associated with the monomial $P_j(x) = x_{j_1} \cdots x_{j_k}$; the truncated transform $R_j^t$ and the maximal transform $R_j^*$ are defined analogously. We will also abbreviate
\[
x_j = x_{j_1} \cdots x_{j_k} \quad \text{and} \quad x_j^n = x_{j_1}^n \cdots x_{j_k}^n.
\]
As we will be mainly interested in multi-indices with different components, we define $I = \{j \in \{1, \dots, d\}^k: j_m \neq j_l \text{ for } m \neq l \}$.
The averaging procedure will provide an expression for $M^t$ in terms of the Riesz transforms $R$ and $R^t$ postulated in \eqref{eq:lemAp}.
For $f\in L^p,$ $1<p<\infty,$ denote
\begin{equation}
\label{eq:Rt}
R^tf:=\sum_{j \in I} R_j^t R_jf\qquad\textrm{and let}\qquad R^* f:=\sup_{t\in \mathbb{Q}_+}\left|R^t f\right|.
\end{equation}
Note that both $R^t$ and $R^*$ are well defined on all $L^p$ spaces. Indeed, $R_j^t$ and $R_j$ are $L^p$ bounded and the supremum in the definition of $R^*$ runs over a countable set thus defining a measurable function.
Let $SO(d)$ be the special orthogonal group in dimension $d.$ Since it is compact, it has a bi-invariant Haar measure $\mu$ such that $\mu(SO(d))=1.$ For $U\in SO(d)$ and a sublinear operator $T$ on $L^2$ we denote by $T_U$ the conjugation by $U,$ i.e. the operator acting via \begin{equation}\label{conj_U_def}
T_Uf(x)=T(f (U^{-1}\cdot))(Ux).
\end{equation}
\begin{pro} \thlabel{pro:av}
Let $k\in \mathbb{N}o.$ Then there is a constant $C(d,k)$ such that
\begin{equation} \label{eq:lemAplem}
M^t_k f(x)=C(d,k)\int_{SO(d)} [(R^t)_U f](x) d\mu(U)
\end{equation}
for all $t>0$ and $f\in \mathcal{D}(k).$ Moreover, $C(d,k)$ has an estimate from above by a constant that depends only on $k$ but not on the dimension $d,$ so that
\begin{equation} \label{eq:lemA}
\left(\sum_{s=1}^S |M^* f_s(x)|^2\right)^{1/2} \lesssim \int_{SO(d)} \left(\sum_{s=1}^S \left|[(R^*)_U f_s](x)\right|^2\right)^{1/2} \, d\mu(U),
\end{equation}
for $S\in \mathbb{N}$ and $f_1,\ldots,f_S\in \mathcal{D}(k).$
\end{pro}
\begin{proof}
Let $A$ be the operator
\begin{equation}
\label{eq:Adef}
A = \sum_{j \in I} R_j^2 ,
\end{equation}
which by \eqref{eq:m} means that its multiplier symbol equals
\[
a(\xi) =(-i)^{2k} \sum_{j \in I} \frac{\xi_j^2}{\abs{\xi}^{2k}}=-\sum_{j \in I} \frac{\xi_j^2}{\abs{\xi}^{2k}}.
\]
Let $\widetilde{A}$ be the operator with the multiplier symbol
\begin{equation}
\label{eq:mtil}
\widetilde{a}(\xi) := \int_{SO(d)} a(U\xi) d\mu(U)=- \sum_{j \in I} \int_{SO(d)} \frac{\left( (U\xi)_j \right)^2}{\abs{\xi}^{2k}} d\mu(U).
\end{equation}
Then
$\widetilde{a}$ being
radial and homogeneous of order $0$ is constant.
The first step in the proof of the proposition is to show that
\begin{equation}
\label{eq:lemA1}
|\widetilde{a}|\sim 1
\end{equation}
uniformly in the dimension $d.$ Note that each of the integrals on the right hand side of \eqref{eq:mtil} has the same value independently of $j\in I,$
so that
\[
\widetilde{a}(\xi)= -\abs{I} \int_{SO(d)} \frac{\left( (U\xi)_{(1,\ldots,k)} \right)^2}{\abs{\xi}^{2k}} d\mu(U);
\]
here $|I|$ stands for the number of elements in $I.$ Since $\tilde{a}$ is radial, integrating the above expression over the unit sphere $S^{d-1}$ with respect to the normalized surface measure $d\omega$ we obtain
\begin{equation} \label{eq0}
\widetilde{a} = -\abs{I} \int_{S^{d-1}} \omega_1^2 \cdots \omega_k^2 \ d\omega.
\end{equation}
Since $k$ is fixed, by an elementary argument we get $|I|=d!/(d-k)!\sim d^k$. Thus it remains to show that
\begin{equation}
\label{eq:lemA1'}
\int_{S^{d-1}} \omega_1^2 \cdots \omega_k^2 \ d\omega \sim d^{-k}.
\end{equation}
Formula \eqref{eq:lemA1'} is given in \cite[(10)]{sykora}. It can also be easily
computed by the method from \cite[Chapter 3.4]{Ho}; for the sake of completeness we provide a brief argument.
Consider the integral $J=\int_{\mathbb{R}^d}x_1^2\cdots x_k^2e^{-|x|^2}\,dx$. Since $J$ is a product of one-dimensional integrals, we calculate $J=\Gamma \left(\frac{3}{2} \right)^k \Gamma \left(\frac{1}{2}\right)^{d-k},$ while using polar coordinates
gives $J=S_{d-1}\int_{S^{d-1}} \omega_1^2 \cdots \omega_k^2 \ d\omega\int_0^\infty r^{2k+d-1}e^{-r^2}dr$, where $S_{d-1}$ is defined by \eqref{eq:Sd-1}.
Altogether we have justified that
\[
\int_{S^{d-1}} \omega_1^2 \cdots \omega_k^2 \ d\omega\sim \frac{ \Gamma \left(\frac{1}{2} \right)^{d-k}}{S_{d-1}\Gamma\left( k+\frac{d}{2} \right)}.
\]
Since $k$ is fixed and $d$ is arbitrarily large, using \eqref{eq:Sd-1}, Stirling's formula for the $\Gamma$ function \eqref{StirF}
and the known identity $\Gamma(1/2)=\sqrt{\pi}$ we obtain
\begin{align*}
\int_{S^{d-1}} \omega_1^2 \cdots \omega_k^2 \ d\omega &\sim \frac{ \sqrt{k+\frac{d}{2}} \left( \frac{d}{2e} \right)^{d/2} }{\sqrt{\frac{d}{2}} \left( \frac{k+\frac{d}{2}}{e} \right)^{k+d/2}} \\
&\sim \frac{ e^{-d/2}}{e^{-k-d/2}} \left( \frac{k+\frac{d}{2}}{d/2} \right)^{-d/2} \left( k+\frac{d}{2} \right)^{-k} \\
&\sim d^{-k}
\end{align*}
This gives \eqref{eq:lemA1'} and concludes the proof of \eqref{eq:lemA1}.
Let now $m^t$ be the multiplier symbol of $M^t_k.$ Then, from \thref{pro:fact} we see that $m^t=\hat{b^t}$ is radial, so that
\begin{align*}
m^t(\xi)&=\tilde{a}^{-1} \tilde{a}\, m^t(\xi)=\tilde{a}^{-1} \int_{SO(d)} m^t(\xi)a(U\xi) d\mu(U)\\
&=\tilde{a}^{-1} \int_{SO(d)} m^t(U\xi)a(U\xi) d\mu(U).
\end{align*}
Using properties of the Fourier transform the above equality implies that
\begin{align*}
M^t f(x)=\tilde{a}^{-1}\int_{SO(d)}\, [(M^t A)_U](f)(x)\,d\mu(U).
\end{align*}
Recalling \eqref{eq:Adef} we apply \eqref{eq:fact} from \thref{pro:fact} and obtain
\[
M^t A=\sum_{j\in I} M^t R_j R_j=\sum_{j\in I}R_j^t R_j=R^t;
\]
here an application of \eqref{eq:fact} is allowed since each $R_j$ corresponds to the monomial $x_j$ which is in $\mathcal{H}_k$ when $j\in I.$ In summary, we justified that
\begin{equation}
\label{eq:Mtexpp}
M^t f(x)=\tilde{a}^{-1}\int_{SO(d)}\, [(R^t)_U](f)(x)\,d\mu(U),\qquad f\in\mathcal{D}(k),
\end{equation}
which is \eqref{eq:lemAplem} with $C(d,k)=\tilde{a}^{-1}.$
It remains to justify \eqref{eq:lemA}. This follows from \eqref{eq:MtmaxQ}, \eqref{eq:Mtexpp}, and \eqref{eq:lemA1}, together with the norm inequality
\[
\norm{\int_{SO(d)}\, F_{s,t}(U)\,d\mu(U)}_X\leqslant \int_{SO(d)}\,\norm{ F_{s,t}(U)}_X\,d\mu(U);
\]
on the Banach space $X=\ell^2(\{1,\ldots,S\};\ell^{\infty}(\mathbb{Q}_+)),$ with $F_{s,t}(U)=(R^t)_U(f_s)(x)$ and $x$ being fixed.
The proof of \thref{pro:av} is thus completed.
\end{proof}
Since conjugation by $U\in SO(d)$ is an isometry on all $L^p$ spaces, $1\leqslant p\leqslant \infty,$ we have, for $f_s\in \mathcal{D}(k)$
\[
\norm{\left(\sum_{s=1}^S \left|[(R^*)_U f_s]\right|^2\right)^{1/2} }_p=\norm{\left(\sum_{s=1}^S [R^* f_s]^2\right)^{1/2} }_p.
\]
Thus, in view of $\mu(SO(d))=1$ and Minkowski's integral inequality, \thref{pro:av} eq.\ \eqref{eq:lemA} allows us to deduce \thref{thm1',thm2'} from the two theorems below. Note that, by our convention, the implicit constants from \thref{thm1'',thm2''} transfer to $A(p,k)\leqslant C(k)(p^*)^{3+k/2}$ and $B(p,k)\leqslant C(k)(p^*)^{2+k/2}$ in \thref{thm1',thm2'}, hence also in \thref{thm1,thm2}; here $C(k)$ denotes a constant that depends only on $k.$
\begin{theorem}
\thlabel{thm1''}
Let $k\in \mathbb{N}$ and take $p \in (1, \infty).$ Then, for $f_1,\ldots,f_S \in L^p$ it holds
\begin{equation*}
\norm{\left(\sum_{s=1}^S |R^* f_s|^2\right)^{1/2}}_{p} \lesssim (p^*)^{3+k/2} \norm{ \left(\sum_{s=1}^S | f_s|^2\right)^{1/2}}_{p}.
\end{equation*}
\end{theorem}
\begin{theorem} \thlabel{thm2''}
Let $k\in \mathbb{N}$ and take $p \in (1, \infty).$ Then for $f\in L^p$ it holds
\begin{equation*}
\norm{ R^* f}_{p} \lesssim (p^*)^{2+k/2} \norm{ f}_{p}.
\end{equation*}
\end{theorem}
\section{The method of rotations --- bounds for $R^*$}
\label{sec:mr}
The goal of this section is to prove \thref{thm1'',thm2''}. This will be done by the method of rotations together with a number of duality arguments. In proving \thref{thm1''} we shall also need Khintchine's inequality.
Before going further we need a lemma on the explicit $L^p$ bounds for the square function corresponding to the Riesz transforms $R_j,$ $j \in I.$ This will be derived from \cite[Th\'eor\`eme 2]{duo_rubio}. The key observation in the proof of \thref{lem:Rjduru} is that $|I|\sim \dim \mathcal{H}_k$ (more precisely, $|I|\sim k! \dim \mathcal{H}_k$). We provide details for the convenience of the reader. A version of \thref{lem:Rjduru} can also be deduced from \eqref{eq:Riesz0} together with an iterative application of Khintchine's inequality. However, such an approach produces worse constants than \cite[Th\'eor\`eme 2]{duo_rubio}.
\begin{lemma}
\thlabel{lem:Rjduru}
Let $1<p<\infty.$ Then, for $f\in L^p$ we have
\begin{equation*}
\norm{\left( \sum_{j \in I} \left( R_jf \right)^2 \right)^{1/2}}_p \lesssim \max(p,(p-1)^{-1-k/2})\|f\|_p.
\end{equation*}
\end{lemma}
\begin{proof}
By \eqref{eq:lemA1} and \eqref{eq0} we see that
$\int_{S^{d-1}} (x_j)^2\,d\omega\sim \frac{1}{|I|}.$ Additionally, since
\begin{align*}
\dim \mathcal{H}_k&=\binom{d+k-1}{k}-\binom{d+k-3}{k-2}\\
&=\frac{(d+k-3)!}{(k-2)!(d-1)!}\left(\frac{(d+k-2)(d+k-1)}{(k-1)k}-1\right)\end{align*}
we see that $\dim \mathcal{H}_k=a(d,k)\sim d^{k},$ with an implicit constant depending on $k$ but not on the dimension $d.$ Since $|I|=d!/(d-k)!$ we thus have $|I|\sim d^k\sim a(d,k),$ so that
\begin{equation*}
\|x_j\|_{L^2(S^{d-1},d\omega)}\sim \frac{1}{\sqrt{a(d,k)}}.
\end{equation*}
Defining
\[
Y_j(x)=\frac{1}{\sqrt{a(d,k)}\|x_j\|_{L^2}} x_j,\qquad j \in I,
\]
we thus see that
\begin{equation}
\label{eq:wereach}
Y_j(x)=c(d,k)x_j,\qquad R_{Y_j}=c(d,k)R_j,
\end{equation}
where $c(d,k)\sim 1.$ Moreover, $Y_j,$ $j \in I$, are pairwise orthogonal and satisfy
\[
\int_{S^{d-1}}|Y_j(\omega)|^2\,d\omega=\frac{1}{a(d,k)}.
\]
Completing the set $\{Y_j\}_{j \in I}$ to an orthonormal basis of $\mathcal{H}_k$ we obtain a larger set $\{Y_j\}_{j\in J},$ where $I\subseteq J$ and $|J|=a(d,k).$
Therefore, from \cite[Th\'eor\`eme 2]{duo_rubio} we reach
\begin{equation*}
\norm{\left( \sum_{j \in J} \left( R_{Y_j}f \right)^2 \right)^{1/2}}_p \lesssim \max(p,(p-1)^{-1-k/2})\|f\|_p,
\end{equation*}
and an application of \eqref{eq:wereach} completes the proof of the lemma.
\end{proof}
Having justified \thref{lem:Rjduru} we move on to the proof of \thref{thm2''}.
\begin{proof}[Proof of \thref{thm2''}]
From Lebesgue's monotone convergence theorem it follows that we may restrict the supremum in the definition \eqref{eq:Rt} of $R^*$ to a finite set, say $\{t_1,\ldots,t_N\},$ as long as our final estimate is independent of $N.$
Let $q$ be the conjugate exponent to $p,$ i.e.\ such that $1/p+1/q=1.$ Using duality between the spaces $L^p(\mathbb{R}^d;\ell^\infty(\{t_1,\ldots,t_N\}))$ and $L^q(\mathbb{R}^d;\ell^1(\{t_1,\ldots,t_N\}))$ our task is reduced to the following equivalent inequality
\begin{equation*}
\abs{\int_{\mathbb{R}^d} \sum_{n=1}^N \sum_{j \in I} R^{t_n}_j R_j f(x) g_n(x) dx} \lesssim \max(p^{2+k/2},(p-1)^{-2-k/2}) \norm{f}_p \norm{\sum_{n=1}^N \abs{g_n}}_q,
\end{equation*}
where $\{g_n\}\in L^q(\mathbb{R}^d;\ell^1(\{t_1,\ldots,t_N\})).$
Since for each $j \in I$ the monomial $P_j(x)=x_j$ is a real-valued function that satisfies $P_j(-x)=-P_j(x)$, by \eqref{eq:R} the operators $R_j^t,$ $j \in I$ are skew-adjoint, i.e.\ $(R_j^t)^*=-R_j^t,$ $j \in I$. Hence, by Cauchy--Schwarz inequality, H\"{o}lder's inequality, and \thref{lem:Rjduru} we get
\begin{align*}
&\abs{\int_{\mathbb{R}^d} \sum_{n=1}^N \sum_{j \in I} R_j^{t_n} R_j f(x) g_n(x) dx} = \abs{\int_{\mathbb{R}^d} \sum_{j \in I} R_j f(x) \cdot \sum_{n=1}^N R_j^{t_n}g_n(x) dx} \nonumber \\
&\leqslant \int_{\mathbb{R}^d} \left( \sum_{j \in I} \left( R_jf(x) \right)^2 \right)^{1/2} \left( \sum_{j \in I} \left( \sum_{n=1}^N R_j^{t_n} g_n(x) \right)^2 \right)^{1/2} dx \nonumber \\
&\leqslant \norm{\left( \sum_{j \in I} \left( R_jf \right)^2 \right)^{1/2}}_p \norm{\left( \sum_{j \in I} \left( \sum_{n=1}^N R_j^{t_n} g_n \right)^2 \right)^{1/2}}_q\\
&\lesssim \max(p,(p-1)^{-1-k/2})\|f\|_p\cdot \norm{\left( \sum_{j \in I} \left( \sum_{n=1}^N R_j^{t_n} g_n \right)^2 \right)^{1/2}}_q.
\end{align*}
Therefore we can focus on proving that
\begin{equation}
\label{eq:st}
\norm{\left( \sum_{j \in I} \left( \sum_{n=1}^N R_j^{t_n} g_n \right)^2 \right)^{1/2}}_q\lesssim \max(p^{1+k/2},(p-1)^{-1})\cdot \norm{\sum_{n=1}^N \abs{g_n}}_q.
\end{equation}
Here we apply the method of rotations, specifically \cite[5.2.20]{grafakos}, to the truncated Riesz transforms $R_j^t,$ obtaining
\begin{equation}
\label{eq:rot-1}
R_j^t f(x) = \gamma_k' \int_{S^{d-1}} \theta_j H_\theta^t f(x) d\theta.
\end{equation}
Here $\gamma_k' = \frac{\pi}{2}\gamma_k$, $d\theta$ is the unnormalized surface measure on $S^{d-1}$ (i.e. $\int_{S^{d-1}} d\theta = S_{d-1} = \frac{2\pi^{d/2}}{\Gamma\left( \frac{d}{2} \right)}$), and $H_\theta^t$ is the truncated directional Hilbert transform (see \cite[Section 5.2.3]{grafakos} for more details). Recall that $\theta_j=\theta_{j_1}\cdots \theta_{j_k}.$ In terms of the normalized surface measure $d\omega$ on $S^{d-1}$ equality \eqref{eq:rot-1} becomes
\begin{equation} \label{eq:rot}
R_j^t f(x) = \frac{\pi \Gamma((k+d)/2)}{\Gamma(k/2)\Gamma(d/2)}\int_{S^{d-1}} \omega_j H_\omega^t f(x) d\omega.
\end{equation}
Note that since $k$ is fixed and $d$ is large, in view of \eqref{StirF} we have
\begin{equation}
\label{eq:approxGkd}
\frac{\pi \Gamma((k+d)/2)}{\Gamma(k/2)\Gamma(d/2)}\sim d^{k/2}.
\end{equation}
Now, take numbers $\lambda_j(x),$ $j\in I$, such that
\begin{equation} \label{eq7}
\left( \sum_{j \in I} \left( \sum_{n=1}^N R_j^{t_n} g_n(x) \right)^2 \right)^{1/2} = \sum_{j \in I} \lambda_j(x) \sum_{n=1}^N R_j^{t_n} g_n(x), \qquad \sum_{j \in I} \lambda_j^2 = 1.
\end{equation}
Using \eqref{eq7}, \eqref{eq:rot}, and \eqref{eq:approxGkd} followed by H\"older's inequality we obtain
\begin{align} \label{eq8}
&\norm{\left( \sum_{j \in I} \left( \sum_{n=1}^N R_j^{t_n} g_n \right)^2 \right)^{1/2}}_q^q = \int_{\mathbb{R}^d} \abs{\sum_{j \in I} \lambda_j(x) \sum_{n=1}^N R_j^{t_n} g_n(x)}^q dx \nonumber \\
&\lesssim^q d^{kq/2}\int_{\mathbb{R}^d} \abs{\int_{S^{d-1}} \sum_{j \in I} \lambda_j(x) \omega_j \sum_{n=1}^N H_\omega^{t_n} g_n(x) d\omega }^q dx \nonumber \\
&\leqslant d^{kq/2} \int_{\mathbb{R}^d} \left(\int_{S^{d-1}} \abs{\sum_{j \in I} \lambda_j(x) \omega_j}^p d\omega \right)^{q/p} \int_{S^{d-1}} \abs{\sum_{n=1}^N H_\omega^{t_n} g_n(x)}^q d\omega dx;
\end{align}
with the meaning of $\lesssim^q$ being explained in Section \ref{sec:not} item (7).
We deal with the first inner integral in \eqref{eq8}. Using \cite[Lemme, p. 195]{duo_rubio}, the formula $\sum_{j\in I}\lambda_j(x)^2=1,$ and \eqref{eq:lemA1'} we get
\begin{equation}
\label{eq:calcu}
\begin{split}
&\left(\int_{S^{d-1}} \abs{\sum_{j \in I} \lambda_j(x) \omega_j}^p d\omega \right)^{1/p} \lesssim p^{k/2} \left(\int_{S^{d-1}} \abs{\sum_{j \in I} \lambda_j(x) \omega_j}^2 d\omega \right)^{1/2} \\
&= p^{k/2} \left(\int_{S^{d-1}} \sum_{j \in I} \lambda_j(x)^2 \omega_j^2 d\omega \right)^{1/2} = p^{k/2} \left(\sum_{j \in I} \lambda_j(x)^2 \int_{S^{d-1}} \omega_{(1,\dots,k)}^2 d\omega \right)^{1/2} \\
&= p^{k/2} \left(\int_{S^{d-1}} \omega_1^2 \cdots \omega_k^2 \ d\omega \right)^{1/2} \sim p^{k/2} d^{-k/2},
\end{split}
\end{equation}
where the first equality above follows from the observation that if we expand the squared sum, then only the diagonal terms contribute non-zero integrals over $S^{d-1}$. Note that an application of \cite[Lemme, p. 195]{duo_rubio} is permitted here, since for each fixed $x\in \mathbb{R}^d$ the function $\sum_{j \in I} \lambda_j(x) \omega_j$ belongs to $\mathcal{H}_k.$ At this point it is again important that $j\in I.$ Inequality \eqref{eq:calcu} implies
\begin{equation}
\label{eq:calcu'}
\left(\int_{S^{d-1}} \abs{\sum_{j \in I} \lambda_j(x) \omega_j}^p d\omega \right)^{q/p}\lesssim^q p^{kq/2}d^{-kq/2}.
\end{equation}
Collecting \eqref{eq8} and \eqref{eq:calcu'},
we see that the proof of \eqref{eq:st} will be finished if we show that
\begin{equation*}
\norm{\sum_{n=1}^N H_\omega^{t_n} g_n}_q \lesssim p^* \norm{\sum_{n=1}^N \abs{g_n}}_q,
\end{equation*}
uniformly in $\omega \in S^{d-1}.$
To this end we use duality between the spaces $L^q$ and $L^p$ which lets us write the following equivalent inequality
\begin{equation} \label{eq11}
\abs{\int_{\mathbb{R}^d} \sum_{n=1}^N H_\omega^{t_n} g_n(x) h(x) dx} \lesssim p^* \norm{\sum_{n=1}^N \abs{g_n}}_q \norm{h}_p, \quad h \in L^p.
\end{equation}
Using H\"{o}lder's inequality on the left-hand side of \eqref{eq11} we arrive at
\begin{align*}
&\abs{\int_{\mathbb{R}^d} \sum_{n=1}^N H_\omega^{t_n} g_n(x) h(x) dx} \leqslant \int_{\mathbb{R}^d} \sum_{n=1}^N \abs{g_n(x) H_\omega^{t_n} h(x)} dx \\
&\leqslant \int_{\mathbb{R}^d} \max_{1 \leqslant n \leqslant N} \abs{H_\omega^{t_n} h(x)} \sum_{n=1}^N \abs{g_n(x) } dx \leqslant \norm{H^*_\omega h}_p \norm{\sum_{n=1}^N \abs{g_n}}_q \\
&\leqslant \norm{H^*}_{p} \norm{h}_p \norm{\sum_{n=1}^N \abs{g_n}}_q,
\end{align*}
where $H^*_\omega$ is the maximal directional Hilbert transform on $\mathbb{R}^d$ and $H^*$ is the maximal Hilbert transform on $\mathbb{R}$. In the last inequality we used the fact that for $\omega\in S^{d-1}$ it holds $\norm{H^*_\omega}_{L^p(\mathbb{R}^d)} = \norm{H^*}_{L^p(\mathbb{R})}$. Finally $H^*$ is bounded on $L^p$ and $\|H^*\|_p\lesssim p^*,$ see \cite[Theorem 4.2.4, eq.\ (4.2.4)]{grafakos_modern}. This completes the proof of \eqref{eq11} and hence also the proof of \thref{thm2''}.
\end{proof}
We shall now prove \thref{thm1''}. The main idea is similar to the one used in the proof of \thref{thm2''}. The computations, however, are more involved, mainly because of a need for extra (Khintchine's) inequalities.
\begin{proof}[Proof of \thref{thm1''}]
As in the proof of \thref{thm2''} we reduce the supremum to a finite sequence of positive numbers $t_1, \dots, t_N$, which leaves us with the goal to prove
\begin{equation} \label{eq22}
\norm{\left(\sum_{s=1}^S \sup_{1 \leqslant n \leqslant N} \abs{ \sum_{j \in I} R_j^{t_n} R_j f_s}^2 \right)^{1/2}}_p \lesssim \max(p^{3+k/2},(p-1)^{-3-k/2}) \norm{\left( \sum_{s=1}^S f_s^2 \right)^{1/2}}_p.
\end{equation}
We use duality between the spaces $L^p(\mathbb{R}^d;E_{\infty})$ and $L^q(\mathbb{R}^d;E_1)$, with $$E_{\infty}=\ell^2(\{1,\ldots,S\};\ell^{\infty}(\{t_1,\ldots,t_N\})),\quad E_1=\ell^2(\{1,\ldots,S\};\ell^{1}(\{t_1,\ldots,t_N\})),$$ and $p$ and $q$ being conjugate exponents. This allows us to write \eqref{eq22} in the following equivalent form
\begin{equation} \label{eq23}
\begin{aligned}
&\abs{\int_{\mathbb{R}^d} \sum_{n=1}^N \sum_{s=1}^S \sum_{j \in I} R_j^{t_n} R_j f_s(x) g_{s,n}(x) dx} \\
&\lesssim \max(p^{3+k/2},(p-1)^{-3-k/2}) \norm{\left( \sum_{s=1}^S f_s^2 \right)^{1/2}}_p \norm{\left( \sum_{s=1}^S \left(\sum_{n=1}^N \abs{g_{s,n}}\right)^2 \right)^{1/2}}_q
\end{aligned}
\end{equation}
for any $g_{s,n} \in L^q(\mathbb{R}^d;E_1)$. Since $(R_j^{t_n})^*=-R_j^{t_n}$ for $j\in I,$ an application of Cauchy--Schwarz inequality and H\"{o}lder's inequality gives
\begin{align*}
&\abs{\int_{\mathbb{R}^d} \sum_{s=1}^S \sum_{n=1}^N \sum_{j \in I} R_j^{t_n} R_j f_s(x) g_{s,n}(x) dx} = \abs{\int_{\mathbb{R}^d} \sum_{j \in I} \sum_{s=1}^S R_j f_s(x) \cdot \sum_{n=1}^N R_j^{t_n}g_{s,n}(x) dx} \nonumber \\
&\leqslant \int_{\mathbb{R}^d} \left( \sum_{j \in I} \sum_{s=1}^S \left( R_j f_s(x) \right)^2 \right)^{1/2} \left( \sum_{j \in I} \sum_{s=1}^S \left( \sum_{n=1}^N R_j^{t_n} g_{s,n}(x) \right)^2 \right)^{1/2} dx \nonumber \\
&\leqslant \norm{\left( \sum_{j \in I} \sum_{s=1}^S \left( R_j f_s \right)^2 \right)^{1/2}}_p \norm{\left( \sum_{j \in I} \sum_{s=1}^S \left( \sum_{n=1}^N R_j^{t_n} g_{s,n} \right)^2 \right)^{1/2}}_q.
\end{align*}
Hence in order to prove \eqref{eq23} it is enough to show that
\begin{equation} \label{eq25}
\norm{\left( \sum_{j \in I} \sum_{s=1}^S \left( R_j f_s \right)^2 \right)^{1/2}}_p \lesssim\max(p^{\frac32},(p-1)^{-\frac32-k/2}) \norm{\left( \sum_{s=1}^S f_s^2 \right)^{1/2}}_p
\end{equation}
and
\begin{equation} \label{eq26}
\begin{split}
&\norm{\left( \sum_{j \in I} \sum_{s=1}^S \left( \sum_{n=1}^N R_j^{t_n} g_{s,n} \right)^2 \right)^{1/2}}_q \\
&\lesssim \max(p^{\frac32+k/2},(p-1)^{-\frac32}) \norm{\left( \sum_{s=1}^S \left(\sum_{n=1}^N \abs{g_{s,n}}\right)^2 \right)^{1/2}}_q,
\end{split}
\end{equation}
uniformly in $t_1,\ldots,t_N.$
It turns out that \eqref{eq26} implies \eqref{eq25}. Indeed, switching the roles of $p$ and $q$ and taking $g_{s,1} = f_s$ and all other $g_{s,n} = 0$ in \eqref{eq26} we obtain a variant of \eqref{eq25} with $R_j$ replaced by $R_j^{t_1},$ namely
\begin{equation} \label{eq25'}
\norm{\left( \sum_{j \in I} \sum_{s=1}^S \left( R_j^{t_1} f_s \right)^2 \right)^{1/2}}_p \lesssim \max(p^\frac32,(p-1)^{-\frac32-k/2}) \norm{\left( \sum_{s=1}^S f_s^2 \right)^{1/2}}_p.
\end{equation}
Now, since $\lim_{t_1\to 0} R_j^{t_1}f_s=R_j f_s$, an application of Fatou's lemma shows that \eqref{eq25'}, being uniform in $t_1>0,$ implies \eqref{eq25} with the same constants.
Therefore, in what follows we will focus on establishing \eqref{eq26}.
Similarly to the proof of \thref{thm2''} we take numbers $\lambda_{s,j}(x),$ $s\in \{1,\ldots,S\},$ $j\in I,$ such that
\begin{equation*}
\left( \sum_{j \in I} \sum_{s=1}^S \left( \sum_{n=1}^N R_j^{t_n} g_{s,n}(x) \right)^2 \right)^{1/2} = \sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \sum_{n=1}^N R_j^{t_n} g_{s,n}(x)
\end{equation*}
and $\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}^2 = 1$ and we use the method of rotations \eqref{eq:rot}. Together with \eqref{eq:approxGkd} this gives
\begin{align} \label{eq28}
&\norm{\left( \sum_{j \in I} \sum_{s=1}^S \left( \sum_{n=1}^N R_j^{t_n} g_{s,n} \right)^2 \right)^{1/2}}_q^q = \int_{\mathbb{R}^d} \abs{\sum_{j \in I} \sum_{s=1}^S \sum_{n=1}^N \lambda_{s,j}(x) R_j^{t_n} g_{s,n}(x)}^q dx \nonumber \\
&\lesssim^q d^{kq/2} \int_{\mathbb{R}^d} \abs{\int_{S^{d-1}} \sum_{j \in I} \sum_{s=1}^S \sum_{n=1}^N \lambda_{s,j}(x) \omega_j H_\omega^{t_n} g_{s,n}(x) d\omega }^q dx;
\end{align}
recall here the meaning of $\lesssim^q$ in Section \ref{sec:not} item (7).
At this point we need to use Khintchine's inequality. Let $\{r_s\},$ $s=1,2,\ldots,$ be the Rademacher functions, see \cite[Appendix C]{grafakos}. These form an orthonormal set on $L^2([0,1])$.
Moreover we have Khintchine's inequality (\cite[Appendix C.2]{grafakos})
\begin{equation}
\label{eq:chinczyn}
\norm{\sum_{j=1}^\infty a_j r_j}_{L^p([0,1])} \lesssim p^{\frac12} \left( \sum_{j=1}^\infty a_j^2 \right)^{1/2},
\end{equation}
for any real sequence $(a_s)_{s=1}^\infty$ and $1 \leqslant p < \infty.$ The explicit bounds on constants in \eqref{eq:chinczyn} follow from explicit values of the optimal constants established by Haagerup \cite{Ha} together with Stirling's formula \eqref{StirF}. Using the orthonormality of $\{r_s\}$ we rewrite the right-hand side of \eqref{eq28} as
\begin{equation}
\label{eq29'}
\begin{split}
& d^{kq/2} \int_{\mathbb{R}^d} \abs{\int_{S^{d-1}} \sum_{j \in I} \sum_{s=1}^S \sum_{n=1}^N \lambda_{s,j}(x) \omega_j H_\omega^{t_n} g_{s,n}(x) d\omega }^q dx \\
&= d^{kq/2}\int_{\mathbb{R}^d} \left|\int_{S^{d-1}} \int_0^1 \left( \sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi) \right)\right.\\
&\left.\hspace{3.4cm}\times \left(\sum_{s=1}^S \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) r_s(\xi) \right) d\xi d\omega \right|^q dx
\end{split}
\end{equation}
and estimate it using H\"{o}lder's inequality by
\begin{equation}
\label{eq29}
\begin{split}
&d^{kq/2}\int_{\mathbb{R}^d} \left( \int_{S^{d-1}} \int_0^1 \abs{\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi)}^p d\xi d\omega \right)^{q/p} \\
&\hspace{3em}\times \int_{S^{d-1}} \int_0^1 \abs{\sum_{s=1}^S \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) r_s(\xi)}^q d\xi d\omega \ dx.
\end{split}
\end{equation}
We shall now estimate the inner integral in the first line of \eqref{eq29}. Here the proof splits into two cases.
If $p \geqslant 2$, we apply Khintchine's inequality \eqref{eq:chinczyn}, Minkowski's inequality and \cite[Lemme, p.\ 195]{duo_rubio},
obtaining
\begin{equation*}
\begin{aligned}
\int_{S^{d-1}} &\int_0^1 \abs{\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi)}^p d\xi d\omega \lesssim^p p^\frac{p}{2} \int_{S^{d-1}} \left( \sum_{s=1}^S \left( \sum_{j \in I} \lambda_{s,j}(x) \omega_j \right)^2 \right)^{p/2} d\omega \\
&\leqslant p^\frac{p}{2} \left( \sum_{s=1}^S \left( \int_{S^{d-1}} \left| \sum_{j \in I} \lambda_{s,j}(x) \omega_j \right|^p d\omega \right)^{2/p} \right)^{p/2} \\
&\lesssim^p p^\frac{p}{2} p^{kp/2} \left( \sum_{s=1}^S \int_{S^{d-1}} \left( \sum_{j \in I} \lambda_{s,j}(x) \omega_j \right)^2 d\omega \right)^{p/2}.
\end{aligned}
\end{equation*}
Here an application of \cite[Lemme, p.\ 195]{duo_rubio} is justified since $\omega_j\in \mathcal{H}_k$ for $j\in I$ and thus also the sum $\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi)$ belongs to $\mathcal{H}_k$ for each fixed $x\in \mathbb{R}^d$ and $\xi \in[0,1].$ Now, using the orthogonality of $\omega_j,$ $j\in I$ we see that
\begin{equation}
\label{eq:intst}
\begin{split}
&\int_{S^{d-1}} \int_0^1 \abs{\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi)}^p d\xi d\omega \\
&\lesssim^p p^\frac{p}{2} p^{kp/2}\left( \sum_{s=1}^S \int_{S^{d-1}} \sum_{j \in I} \lambda_{s,j}(x)^2 \omega_j^2 \, d\omega \right)^{p/2} = p^ \frac{p}{2}p^{kp/2} \left(\int_{S^{d-1}} \omega_{(1,\dots,k)}^2 \, d\omega \right)^{p/2}.
\end{split}
\end{equation}
If on the other hand $1 < p < 2$, an application of H\"older's inequality together with \eqref{eq:intst} in the case $p=2$ shows that
\begin{align*}
&\int_{S^{d-1}} \int_0^1 \abs{\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi)}^p d\xi d\omega \\
&\leqslant \left( \int_{S^{d-1}} \int_0^1 \abs{\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi)}^2 d\xi d\omega \right)^{p/2} \lesssim \left(\int_{S^{d-1}} \omega_{(1,\dots,k)}^2 \, d\omega \right)^{p/2}.
\end{align*}
Altogether \eqref{eq:intst} remains true for all $p\in(1,\infty).$
Thus, using \eqref{eq:lemA1'} we arrive at
\begin{align*}
d^{kq/2} &\left( \int_{S^{d-1}} \int_0^1 \abs{\sum_{j \in I} \sum_{s=1}^S \lambda_{s,j}(x) \omega_j r_s(\xi)}^p d\xi d\omega \right)^{q/p} \lesssim^q p^\frac{q}{2} p^{kq/2}.
\end{align*}
Returning to \eqref{eq29'} and \eqref{eq29} we have thus proved
\begin{align*}
& \left(d^{kq/2} \int_{\mathbb{R}^d} \abs{\int_{S^{d-1}} \sum_{j \in I} \sum_{s=1}^S \sum_{n=1}^N \lambda_{s,j}(x) \omega_j H_\omega^{t_n} g_{s,n}(x) d\omega }^q dx\right)^{1/q}\\
&\lesssim p^{\frac12+k/2} \left(\int_{\mathbb{R}^d}\int_{S^{d-1}} \int_0^1 \abs{\sum_{s=1}^S \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) r_s(\xi)}^q d\xi d\omega \ dx\right)^{1/q}.
\end{align*}
In view of \eqref{eq28} we now see that \eqref{eq26} will follow if we establish
\begin{equation} \label{eq31}
\begin{aligned}
&\left( \int_{\mathbb{R}^d} \int_{S^{d-1}} \int_0^1 \abs{\sum_{s=1}^S \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) r_s(\xi)}^q d\xi d\omega \ dx \right)^{1/q} \\
&\lesssim \max(p,(p-1)^{-\frac32})\norm{\left( \sum_{s=1}^S \left(\sum_{n=1}^N \abs{g_{s,n}}\right)^2 \right)^{1/2}}_q.
\end{aligned}
\end{equation}
In the remainder of the proof we thus focus on justifying \eqref{eq31}. We use Khintchine's inequality \eqref{eq:chinczyn} on the left-hand side of \eqref{eq31} and get
\begin{align*}
&\left(\int_{\mathbb{R}^d} \int_{S^{d-1}} \int_0^1 \abs{\sum_{s=1}^S \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) r_s(\xi)}^q d\xi d\omega \ dx \right)^{1/q} \\
&\lesssim q^\frac12\left(\int_{S^{d-1}}\int_{\mathbb{R}^d} \left( \sum_{s=1}^S \left( \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) \right)^2 \right)^{q/2} dx \, d\omega \right)^{1/q}.
\end{align*}
Since $q \lesssim \max(1,(p-1)^{-1})$ our goal is now to prove the uniform in $\omega \in S^{d-1}$ estimate
\begin{equation} \label{eq32}
\begin{aligned}
\norm{\left( \sum_{s=1}^S \left( \sum_{n=1}^N H_\omega^{t_n} g_{s,n} \right)^2 \right)^{1/2}}_q \lesssim p^* \, \norm{\left( \sum_{s=1}^S \left(\sum_{n=1}^N \abs{g_{s,n}}\right)^2 \right)^{1/2}}_q.
\end{aligned}
\end{equation}
Using duality between $L^q(\mathbb{R}^d;\ell^2(\{1,\ldots,S\}))$ and $L^p(\mathbb{R}^d;\ell^2(\{1,\ldots,S\}))$ we write an equivalent inequality
\begin{equation} \label{eq33}
\begin{aligned}
&\abs{\int_{\mathbb{R}^d} \sum_{s=1}^S \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) k_s(x) dx}\\
&\lesssim p^* \norm{\left( \sum_{s=1}^S \left(\sum_{n=1}^N \abs{g_{s,n}} \right)^2 \right)^{1/2} }_q \norm{ \left( \sum_{s=1}^S \abs{k_s}^2 \right)^{1/2}}_p
\end{aligned}
\end{equation}
where $k_s \in L^p(\mathbb{R}^d;\ell^2(\{1,\ldots,S\}))$. Since $(H_\omega^{t_n})^*=-H_{\omega}^{t_n}$, Cauchy--Schwarz inequality and H\"{o}lder's inequality give
\begin{align} \label{eq34}
&\abs{\int_{\mathbb{R}^d} \sum_{s=1}^S \sum_{n=1}^N H_\omega^{t_n} g_{s,n}(x) k_s(x) dx} = \abs{\int_{\mathbb{R}^d} \sum_{s=1}^S \sum_{n=1}^N g_{s,n}(x) H_\omega^{t_n} k_s(x) dx} \nonumber \\
&\leqslant \int_{\mathbb{R}^d} \sum_{s=1}^S \sum_{n=1}^N \abs{g_{s,n}(x)} H_\omega^* k_s(x) dx \nonumber \\
&\leqslant \int_{\mathbb{R}^d} \left(\sum_{s=1}^S \left( \sum_{n=1}^N \abs{g_{s,n}(x)} \right)^2 \right)^{1/2} \left(\sum_{s=1}^S \left( H_\omega^* k_s(x) \right)^2 \right)^{1/2} dx \nonumber \\
&\leqslant \norm{\left( \sum_{s=1}^S \left(\sum_{n=1}^N \abs{g_{s,n}} \right)^2 \right)^{1/2} }_q \norm{ \left( \sum_{s=1}^S \abs{H_\omega^* k_s}^2 \right)^{1/2}}_p.
\end{align}
Comparing \eqref{eq33} and \eqref{eq34} we see that \eqref{eq32} will follow if we justify
\begin{equation*}
\norm{ \left( \sum_{s=1}^S \abs{H_\omega^* k_s}^2 \right)^{1/2}}_p \lesssim p^* \norm{ \left( \sum_{s=1}^S \abs{k_s}^2 \right)^{1/2}}_p.
\end{equation*}
By rotational invariance the above inequality reduces to its one-dimensional case
\begin{equation}
\label{eq:Hmvod}
\norm{ \left( \sum_{s=1}^S \abs{H^* k_s}^2 \right)^{1/2}}_{L^p(\mathbb{R})} \lesssim p^*\norm{ \left( \sum_{s=1}^S \abs{k_s}^2 \right)^{1/2}}_{L^p(\mathbb{R})}.
\end{equation}
Inequality \eqref{eq:Hmvod} can be deduced along the lines of \cite[Section 5.6]{grafakos}. We sketch the argument for the convenience of the reader.
Let $\varphi\colon \mathbb{R}\to \mathbb{R}$ be a smooth even function which satisfies $\varphi(x)=1$ for $|x|<2$ and $\varphi(x)=0$ for $|x|>4.$ Denoting $\varphi_t(x)=\varphi(x/t),$ $\chi_t(x)=\ind{(t,\infty)}(|x|)x^{-1}$ we have the pointwise estimate
\begin{equation}
\label{eq:H*split}
\begin{split}
H^*f(x)&\leqslant \frac{1}{\pi}\sup_{t>0}|(\varphi_t\chi_t *f)(x)|+\frac{1}{\pi}\sup_{t>0}|((1-\varphi_t)\chi_t) *f(x)|\\
&=:H^*_{\varphi}f(x)+H^*_{1-\varphi}f(x)\\
&\lesssim \mathcal M f(x)+ H^*_{1-\varphi}f(x),
\end{split}
\end{equation}
where $\mathcal M$ denotes the Hardy--Littlewood maximal operator on $\mathbb{R}$. Thus, from \cite[Theorem 5.6.6]{grafakos} we obtain
\begin{equation*}
\norm{ \left( \sum_{s=1}^S \abs{H^*_{\varphi} k_s}^2 \right)^{1/2}}_{L^p(\mathbb{R})} \lesssim p^*\norm{ \left( \sum_{s=1}^S \abs{k_s}^2 \right)^{1/2}}_{L^p(\mathbb{R})}.
\end{equation*}
Hence, \eqref{eq:Hmvod} will be justified if we show that
\begin{equation}
\label{eq:Hmvod'}
\norm{ \left( \sum_{s=1}^S \abs{H^*_{1-\varphi} k_s}^2 \right)^{1/2}}_{L^p(\mathbb{R})} \lesssim p^*\norm{ \left( \sum_{s=1}^S \abs{k_s}^2 \right)^{1/2}}_{L^p(\mathbb{R})}.
\end{equation}
We will obtain \eqref{eq:Hmvod'} from \cite[Theorem 5.6.1]{grafakos} applied to
$$
\mathcal{B}_1 = \ell^2\left( \{1, \dots, S\} \right)\qquad\textrm{and} \qquad\mathcal{B}_2 = \ell^2\left( \{1, \dots, S\}; L^\infty{(0,\infty)} \right)
$$ and
\begin{equation} \label{eq:kernelK}
\vec{K}(x)(u) = \left( (1-\varphi_{t}) \chi_{t}(x) \cdot u_1 ,\ldots, (1-\varphi_{t}) \chi_{t}(x) \cdot u_S\right) \in \mathcal{B}_2,
\end{equation}
for any sequence $u=(u_s)_{s=1}^S \in \mathcal{B}_1$. Then, taking $e_s = (0, \dots, 1, \dots, 0)$, with $1$ on the $s$-th coordinate, we can see that the operator $\vec{T}$ defined in \cite[5.6.4]{grafakos} satisfies
\begin{equation} \label{eq:def_T}
\vec{T}\left( \sum_{s=1}^S f_s e_s \right)(x) = \left(H^{t}_{1-\varphi} f_1(x),\ldots,H^{t}_{1-\varphi} f_S(x)\right)
\end{equation}
and
\[
\norm{\vec{T}\left( \sum_{s=1}^S f_s e_s \right)(x)}_{{\mathcal{B}_2}} = \left(\sum_{s=1}^S \abs{H^*_{1-\varphi} f_s(x)}^2 \right)^{1/2};
\]
for any sequence $(f_s)_{s=1}^S$ of smooth functions that vanish at infinity.
In order to use \cite[Theorem 5.6.1]{grafakos} we need to verify conditions (5.6.1), (5.6.2) and (5.6.3) from \cite{grafakos} and check that $\vec{T}$ is bounded from $L^2(\mathbb{R}, \mathcal{B}_1)$ to $L^2(\mathbb{R}, \mathcal{B}_2)$.
The condition (5.6.1) is a straightforward consequence of \eqref{eq:kernelK}, and the fact that $\vec{K}$ is an odd function implies that the condition (5.6.3) is also satisfied with $\vec{K}_0=0$.
To check the condition (5.6.2) denote $g_t(x) = (1-\varphi_{t}) \chi_t(x)$ and observe that
\[
\norm{\vec{K}(x-y) - \vec{K}(x)}_{\mathcal{B}_1 \to \mathcal{B}_2} = \sup_{t>0} \abs{g_t(x-y) - g_t(x)}.
\]
In view of the above equality and the fact that $g_t(x)=(1-\varphi_{t}(x))x^{-1}$ a short calculation shows that
$$ \norm{\vec{K}(x-y) - \vec{K}(x)}_{\mathcal{B}_1 \to \mathcal{B}_2}\lesssim\frac{|y|}{|x-y|^2},\qquad |x|\geqslant 2|y|,$$
so that (5.6.2) follows.
It remains to justify the boundedness of $\vec{T}$ from $L^2(\mathbb{R}, \mathcal{B}_1)$ to $L^2(\mathbb{R}, \mathcal{B}_2).$ A reasoning similar to \eqref{eq:H*split} gives the pointwise bound
\[
H^*_{1-\varphi}f(x)\lesssim \mathcal M f(x)+H^* f(x).
\]
Therefore the desired $L^2$ boundedness of $\vec{T}$ is a consequence of \eqref{eq:def_T} and the $L^2(\mathbb{R})$ boundedness of $\mathcal M$ and $H^*$. This allows us to use \cite[Theorem 5.6.1]{grafakos} and completes the proof of \eqref{eq:Hmvod'} hence also the proof of \thref{thm1''}.
\end{proof}
\end{document}
|
\begin{document}
\title{Conversion of a general quantum stabilizer code to
an entanglement distillation protocol\thanks{
One page abstract of this paper will appear in
the Proceedings of 2003 IEEE International Symposium
on Information Theory.}}
\author{Ryutaroh Matsumoto\\
Dept.\ of Communications and Integrated Systems\\
Tokyo Institute of Technology, 152-8552 Japan\\
Email: \texttt{[email protected]}}
\date{April 4, 2003}
\maketitle
\begin{abstract}
We show how to convert a quantum stabilizer code
to a one-way or two-way entanglement distillation protocol.
The proposed conversion
method is a generalization of those of Shor-Preskill and
Nielsen-Chuang.
The recurrence protocol and the quantum privacy amplification protocol are
equivalent to the protocols converted
from $[[2,1]]$ stabilizer codes.
We also give an example of a two-way protocol converted from a
stabilizer better than the recurrence protocol and the quantum
privacy amplification protocol.
The distillable entanglement by the class of one-way protocols converted from
stabilizer codes for a certain class of states
is equal to or greater than the achievable rate of stabilizer codes
over the channel corresponding to the distilled state,
and they can distill asymptotically more entanglement from
a very noisy Werner state than the hashing protocol.
\end{abstract}
\section{Introduction}
In many applications of quantum mechanics to communication,
the sender and the receiver have to share a maximally entangled
quantum state of two particles.
When there is a noiseless quantum communication channel,
the sender can send one of two particles in a maximally entangled
state to the receiver and sharing of it
is easily accomplished.
However, the quantum communication channel is usually noisy,
that is, the quantum state of the received particle
changes probabilistically from the original state of a particle.
Entanglement distillation protocols \cite{bennett96a}
and quantum error-correcting codes \cite{shor95,steane96b}
are the schemes for sharing a maximally entangled state over
a noisy communication channel.
A distillation protocol is said to be \emph{two-way}
(resp.\ \emph{one-way}) if it involves two-way (resp.\ one-way)
classical communication.
Two-way protocols have larger distillation ability than
one-way protocols.
However, few two-way protocols have been proposed so far,
namely the recurrence protocol \cite{bennett96a}
and the quantum privacy amplification protocol
(QPA protocol) \cite{deutsch96}.
There may be many two-way protocols better than existing ones,
and the discovery of better protocols has been awaited.
Immediately after the proposal
of those schemes,
Bennett et~al.\ discovered that one
can construct a \emph{one-way} entanglement
distillation protocol from a quantum code
\cite[Section V.C]{bennett96}, which requires
$2n$ additional qubits where $n$ is the number of noisy
entangled states
to be distilled.
Nielsen and Chuang
\cite[Exercise 12.34]{chuangnielsen}
observed a construction method of a one-way protocol without extra qubits
from a real binary quantum stabilizer code
as a generalization of the idea
in \cite{shor00}.
By a conversion method from a quantum code to
a distillation protocol,
we can solve problems of distillation protocols
from results in quantum codes.
For example, we can construct a good distillation protocol
from a good quantum code.
Thus such a conversion method deserves further investigation.
It is not known how one can convert a quantum error-correcting
code to a \emph{two-way} entanglement distillation protocol.
We shall propose a conversion method from
an arbitrary quantum stabilizer code to both one-way
and two-way entanglement distillation protocols
as a generalization of Shor, Preskill \cite{shor00}, Nielsen,
and Chuang \cite{chuangnielsen}.
Benefits of the proposed conversion methods
are
\begin{itemize}
\item We can construct infinitely many two-way protocols.
One can easily construct a two-way protocol
better than the recurrence protocol and the QPA protocol
from a simple stabilizer code (see Section \ref{sec42}).
\item It is known that one-way protocols and quantum error-correcting
codes without classical communication have the same ability
for sharing maximally entangled states over a noisy quantum
channel \cite{bennett96}.
The proposed protocols might be used for further clarification
of the relation between distillation protocols
and quantum error-correcting codes.
\end{itemize}
This paper is organized as follows:
In Section \ref{sec2},
basic notation is introduced.
In Section \ref{sec3},
we present a construction of entanglement distillation protocols
from quantum stabilizer codes.
In Section \ref{sec4},
we give examples of converted protocols equivalent to
the recurrence protocol and the QPA protocol,
and an example better than them.
In Section \ref{sec5}, we evaluate the distillable entanglement by the
class of one-way protocols converted from stabilizer codes,
and show that the converted protocols can distill asymptotically
more entanglement from a noisy Werner state than the hashing protocol
\cite{bennett96}.
In Section \ref{generalfidelity},
we derive a lower bound on fidelity with a general initial state
of protocols.
\section{Notation}\label{sec2}
In this section we fix notation and
the problem formulation.
Let $H_A$ and $H_B$ be $p$-dimensional complex linear spaces
with orthonormal bases $\{\ket{0_A}$, \ldots, $\ket{(p-1)_A}\}$
and $\{\ket{0_B}$, \ldots, $\ket{(p-1)_B}\}$, respectively,
where $p$ is a prime number.
We shall restrict ourselves to $p$-ary stabilizer codes
because an $m$-ary stabilizer code can be constructed as
a tensor product of $p_i$-ary stabilizer codes \cite[p.1831, Remarks]{rains97},
where $p_i$ are prime divisors of $m$,
and extension of the proposed conversion method to the
$m$-ary case is straightforward.
We define the maximally entangled states in $H_A \otimes H_B$ by
\[
\ket{\beta(a,b)} = I\otimes X^a Z^b
\frac{1}{\sqrt{p}}\sum_{i=0}^{p-1}\ket{i_A i_B}
\]
where $a$, $b \in \{0$, \ldots, $p-1\}$,
and matrices $X$ and $Z$ are defined by
\[
X\ket{i} = \ket{i+1 \bmod p}, \;
Z\ket{i} = \omega^i \ket{i}
\]
with a complex primitive $p$-th root $\omega$ of $1$.
The matrices $X$, $Z$ and their commutation relation
were first applied to quantum mechanics by Weyl \cite[Section 4.15]{weyl31}.
Suppose that Charlie prepares $n$ pairs of particles
in the state $\ket{\beta(0,0)}$,
sends the particles corresponding to $H_A$ to Alice,
and sends the other particles corresponding to $H_B$ to Bob.
The quantum channels between Alice and Charlie and between
Bob and Charlie are noisy in general,
and Alice and Bob share a mixed state $\rho
\in \mathcal{S}(H_A^{\otimes n} \otimes H_B^{\otimes n})$,
where $\mathcal{S}(H_A^{\otimes n} \otimes H_B^{\otimes n})$
is the set of density operators on $H_A^{\otimes n} \otimes H_B^{\otimes n}$.
The state $\rho$ can be an arbitrary density operator.
The goal of an entanglement distillation protocol is
to extract as many pairs of particles with state close to $\ket{\beta(0,0)}$
as possible from $n$ pairs of particles in the state $\rho$.
\section{Protocol}\label{sec3}
In this section we shall describe how to make an entanglement
distillation protocol from a quantum stabilizer code.
In the protocol we extract a state $\tau \in \mathcal{S}(H_A^{\otimes k}
\otimes H_B^{\otimes k})$ from $\rho \in \mathcal{S}(H_A^{\otimes n}
\otimes H_B^{\otimes n})$.
The proposed protocol will be constructed from
the nonbinary generalization \cite{knill96a,rains97}
of quantum
stabilizer codes \cite{calderbank97,calderbank98,gottesman96}.
We assume that the reader is familiar with the formalism of the
nonbinary stabilizer code. Let us introduce notation of stabilizer codes.
Let $E = \{ \omega^i X^{a_1} Z^{b_1} \otimes \cdots
\otimes X^{a_n} Z^{b_n} \,:\,$
$a_1$, $b_1$, \ldots, $a_n$, $b_n$, $i$ are integers $\}$,
and $S$ a commutative subgroup of $E$.
The subgroup $S$ is called a stabilizer.
Let $\mathbf{Z}_p = \{0$, \ldots, $p-1\}$ with addition and multiplication taken
modulo $p$.
For a vector $\vec{a} = (a_1$, $b_1$, \ldots, $a_n$, $b_n) \in \mathbf{Z}_p^{2n}$,
let
\[
\mathsf{XZ}(\vec{a}) = X^{a_1}Z^{b_1} \otimes
\cdots \otimes X^{a_n}Z^{b_n}.
\]
Suppose that $\{\mathsf{XZ}(\vec{g}_1)$,
\ldots, $\mathsf{XZ}(\vec{g}_{n-k})$
(and possibly some power of $\omega I$) $\}$
is a generating set of the group $S$,
where $\vec{g}_1$, \ldots, $\vec{g}_{n-k}$
are linearly independent over $\mathbf{Z}_p$.
Let $H$ be a complex linear space with the orthonormal basis
$\{\ket{0}$, \ldots, $\ket{p-1}\}$,
and hereafter we shall identify $H$ with $H_A$ and $H_B$
by linear maps $\ket{i} \mapsto \ket{i_A}$ and
$\ket{i} \mapsto \ket{i_B}$.
Let $Q$ be a stabilizer code defined by $S$,
that is, a joint eigenspace of $S$ in $H^{\otimes n}$.
There are many joint eigenspaces of $S$ and
we can distinguish an eigenspace by its eigenvalue
of $\mathsf{XZ}(\vec{g}_i)$ for $i=1$, \ldots, $n-k$.
Hereafter we fix a joint eigenspace $Q$ of $S$ and
suppose that $Q$ belongs to the eigenvalue $\lambda_i$
of $\mathsf{XZ}(\vec{g}_i)$ for $i=1$, \ldots, $n-k$.
Suppose that we sent $\ket{\varphi} \in Q$, and received
$\mathsf{XZ}(\vec{e}) \ket{\varphi}$.
We can tell which eigenspace of $S$ contains
the state $\mathsf{XZ}(\vec{e}) \ket{\varphi}$ by
measuring
an observable whose eigenspaces are the same
as those of $\mathsf{XZ}(\vec{g}_i)$.
Then the measurement outcome always indicates
that the measured state $\mathsf{XZ}(\vec{e}) \ket{\varphi}$
belongs to the eigenspace with eigenvalue $\lambda_i \omega^{\langle \vec{g}_i,
\vec{e}\rangle}$, where
$\langle \vec{g}_i,
\vec{e}\rangle$ is the symplectic inner product
defined by
\begin{equation}
\langle \vec{g}_i,
\vec{e}\rangle
= \sum_{j=1}^n (b_j c_j - a_j d_j),\label{symplectic}
\end{equation}
for $\vec{g}_i = (a_1$, $b_1$, \ldots, $a_n$, $b_n)$
and $\vec{e} = (c_1$, $d_1$, \ldots, $c_n$, $d_n)$.
We define $\vec{g}_i^{\star} = (a_1$, $-b_1$, \ldots, $a_n$, $-b_n)$.
Since the complex conjugate of $\omega$ is $\omega^{-1}$,
we can see that $\mathsf{XZ}(\vec{g}_i^{\star})$ is a
componentwise complex conjugated matrix of $\mathsf{XZ}(\vec{g}_i)$.
Let $S^\star$ be a subgroup of $E$ generated by
$\{\mathsf{XZ}(\vec{g}_1^\star)$,
\ldots, $\mathsf{XZ}(\vec{g}_{n-k}^\star)\}$.
Easy computation shows that $S^\star$ is again commutative.
So we can consider joint eigenspaces of $S^\star$.
There exists a joint eigenspace $Q^\star$ of $S^\star$
whose eigenvalue of $\mathsf{XZ}(\vec{g}_i^{\star})$ is
$\bar{\lambda}_i$ (the complex conjugate of $\lambda_i$).
With this notation,
our protocol is executed as follows:
\begin{enumerate}
\item\label{step1} Alice measures an observable corresponding
to $\mathsf{XZ}(\vec{g}_{i}^\star)$ for each $i$,
and let $\bar{\lambda}_i \omega^{-a_i}$ be the eigenvalue
of an eigenspace of $S^\star$ containing the state
after measurement.
In what follows we refer to $(a_1$, \ldots, $a_{n-k}) \in \mathbf{Z}_p^{n-k}$
as a \emph{measurement outcome}.
\item\label{step2} Bob measures an observable corresponding
to $\mathsf{XZ}(\vec{g}_{i})$ for each $i$,
and let $\lambda_i \omega^{b_i}$ be the eigenvalue
of an eigenspace of $S$ containing the state
after measurement.
In what follows we also refer to $(b_1$, \ldots, $b_{n-k}) \in \mathbf{Z}_p^{n-k}$
as a \emph{measurement outcome}.
\item Alice sends $(a_1$, \ldots, $a_{n-k})$ to Bob.
\item\label{step4} Bob performs the error correction process
according to $b_1-a_1$, \ldots, $b_{n-k}-a_{n-k}$
as described below.
\item\label{step5} Alice and Bob apply the inverse of encoding operators
of the quantum stabilizer codes.
\item\label{step6} Alice and Bob discard the last $n-k$ particles.
\item\label{step7} If the difference of the measurement outcomes
$(b_1-a_1$, \ldots, $b_{n-k}-a_{n-k})$ indicates that
the fidelity between the remaining $k$ particles
and $\ket{\beta(0,0)}^{\otimes k}$ is low,
Bob discards all of his particles and he tells Alice the disposal
of particles.
\end{enumerate}
We shall introduce some notation.
For a vector $\vec{u}\in \mathbf{Z}_p^{2n}$
let
\[
\ket{\beta(\vec{u})} = (I\otimes \mathsf{XZ}(\vec{u}))
\ket{\beta(0,0)}^{\otimes n}.
\]
Let $Q(\vec{x})$ [resp.\ $Q^\star(\vec{x})$]
$\subset H^{\otimes n} \simeq H_A^{\otimes n} \simeq
H_B^{\otimes n}$
be the quantum stabilizer code of $S$ (resp.\ $S^\star$)
belonging to the eigenvalue $\lambda_i \omega^{x_i}$ (resp.\
$\bar{\lambda}_i \omega^{-x_i}$) of $\mathsf{XZ}(\vec{g}_i)$
[resp.\ $\mathsf{XZ}(\vec{g}_i^\star)$]
for a vector $\vec{x} =(x_1$, \ldots, $x_{n-k})\in \mathbf{Z}_p^{n-k}$,
and $P(\vec{x})$ [resp.\ $P^\star(\vec{x})$]
be the projection onto $Q(\vec{x})$ [resp.\ $Q^\star(\vec{x})$].
\begin{lemma}
We have
\begin{equation}
\{P^\star(\vec{x})\otimes I\} \ket{\beta(\vec{0})} =
\{P^\star(\vec{x})\otimes P(\vec{x})\} \ket{\beta(\vec{0})} \label{form}
\end{equation}
for any $\vec{x} \in \mathbf{Z}_p^{n-k}$.
\end{lemma}
\noindent\emph{Proof.}
Let $\{\ket{0}$, \ldots, $\ket{p^n-1}\}$ be an orthonormal basis
of $H^{\otimes n}$ consisting of tensor products of
$\{ \ket{0}$, \ldots, $\ket{p-1}\} \subset H$, and we have
\[
\sqrt{p^n}\ket{\beta(\vec{0})} = \sum_{i=0}^{p^n-1} \ket{i}\otimes \ket{i}.
\]
For $\vec{x} \in \mathbf{Z}_p^{n-k}$,
let $\{ \ket{\vec{x},0}$, \ldots,
$\ket{\vec{x},p^k-1}\}$ be an orthonormal basis of $Q(\vec{x})$.
For a state
\[
\ket{\varphi} = \alpha_0 \ket{0} + \cdots
+\alpha_{p^n-1} \ket{p^n-1} \in H^{\otimes n},
\]
we define
\[
\overline{\ket{\varphi}} = \bar{\alpha}_0 \ket{0} + \cdots
+\bar{\alpha}_{p^n-1} \ket{p^n-1},
\]
where $\bar{\alpha}_i$ is the complex conjugate of $\alpha_i$.
With this notation,
$\{ \overline{\ket{\vec{x},0}}$, \ldots,
$\overline{\ket{\vec{x},p^k-1}}\}$
is an orthonormal basis of $Q^\star(\vec{x})$.
The set $\{ \ket{\vec{x},i} \,:\, \vec{x} \in \mathbf{Z}_p^{n-k}$,
$i = 0$, \ldots, $p^k-1\}$ is an orthonormal basis of $H^{\otimes n}$
and there exists a unitary matrix $U$ on $H^{\otimes n}$ that
transforms the basis $\{\ket{0}$, \ldots, $\ket{p^n-1}\}$
to $\{ \ket{\vec{x},i} \,:\, \vec{x} \in \mathbf{Z}_p^{n-k}$,
$i = 0$, \ldots, $p^k-1\}$.
Let $\bar{U}$ be the componentwise complex conjugate of $U$,
that is, $\bar{U}$ transforms $\{\ket{0}$, \ldots, $\ket{p^n-1}\}$
to $\{ \overline{\ket{\vec{x},i}} \,:\, \vec{x} \in \mathbf{Z}_p^{n-k}$,
$i = 0$, \ldots, $p^k-1\}$.
We have $\bar{U}\otimes U \ket{\beta(\vec{0})} = \ket{\beta(\vec{0})}$
\cite{horodecki99}.
Therefore
\[
\sqrt{p^n}\ket{\beta(\vec{0})} = \sum_{\vec{x}\in \mathbf{Z}_p^{n-k}}
\sum_{i=0}^{p^k-1} \overline{\ket{\vec{x},i}} \otimes\ket{\vec{x},i}.
\]
Since
\[
P^\star(\vec{x}) = \sum_{i=0}^{p^k-1} \overline{\ket{\vec{x},i}}\,
\overline{\bra{\vec{x},i}},
\]
we have
\begin{eqnarray}
\sqrt{p^n}\{P^\star(\vec{x})\otimes I\} \ket{\beta(\vec{0})} &= &
\left[\sum_{i=0}^{p^k-1} \overline{\ket{\vec{x},i}}\,
\overline{\bra{\vec{x},i}} \otimes I \right]
\sum_{\vec{x}\in \mathbf{Z}_p^{n-k}}
\sum_{i=0}^{p^k-1} \overline{\ket{\vec{x},i}} \otimes\ket{\vec{x},i} \nonumber\\
&=&
\sum_{i=0}^{p^k-1} \overline{\ket{\vec{x},i}} \otimes\ket{\vec{x},i} \nonumber\\
&=&
\sqrt{p^n}\{P^\star(\vec{x})\otimes P(\vec{x})\} \ket{\beta(\vec{0})}\label{form2}
\end{eqnarray}
\qed
Suppose that we perform the protocol above to the state
$\ket{\beta(\vec{u})} = \{ I\otimes \mathsf{XZ}(\vec{u})\}
\ket{\beta(\vec{0})}$.
After we get $\vec{a} = (a_1$, \ldots, $a_{n-k})\in \mathbf{Z}_p^{n-k}$
as a measurement outcome in Step \ref{step1},
the state is
\begin{eqnarray*}
&&\{ P^\star(\vec{a})\otimes I\} \{ I\otimes \mathsf{XZ}(\vec{u})\}
\ket{\beta(\vec{0})}\\
&=& \{ I\otimes \mathsf{XZ}(\vec{u})\} \{ P^\star(\vec{a})\otimes I\}
\ket{\beta(\vec{0})}\\
&=& \{ I\otimes \mathsf{XZ}(\vec{u})\} \{ P^\star(\vec{a})\otimes P(\vec{a})\}
\ket{\beta(\vec{0})} \mbox{ [by Eq.\ (\ref{form})]}.
\end{eqnarray*}
Observe that the vector $\{ I\otimes \mathsf{XZ}(\vec{u})\} \{ P^\star(\vec{a})\otimes P(\vec{a})\}
\ket{\beta(\vec{0})}$
belongs to $Q^\star(\vec{a})\otimes Q(\vec{b})$, where
\[
\vec{b} = \vec{a} + (\langle \vec{g}_1, \vec{u}\rangle,
\ldots, \langle \vec{g}_{n-k}, \vec{u}\rangle).
\]
Thus the measurement outcome in Step \ref{step2} must be $\vec{b}$.
For the simplicity of presentation,
we assume that the state $\rho
\in \mathcal{S}(H_A^{\otimes n}
\otimes H_B^{\otimes n})$ can be written as
\begin{equation}
\rho = \sum_{\vec{u}\in\mathbf{Z}_p^{2n}}
\alpha(\vec{u}) \ket{\beta(\vec{u})}\bra{\beta(\vec{u})},\label{restrictedstate}
\end{equation}
where
$\{\alpha(\vec{u}) \,:\, \vec{u} \in \mathbf{Z}_p^{2n}\}$ is
a probability distribution.
A general case will be treated in Section \ref{generalfidelity}.
After performing Step \ref{step1} in the proposed protocol
to state (\ref{restrictedstate})
and getting $\vec{a} \in \mathbf{Z}_p^{n-k}$ as a
measurement outcome,
the state is
\[
\sum_{\vec{u}\in\mathbf{Z}_p^{2n}}
\alpha(\vec{u})
\{ I\otimes \mathsf{XZ}(\vec{u})\}
P(\vec{a},\vec{a}) \rho(\vec{0})P(\vec{a},\vec{a})\{ I\otimes \mathsf{XZ}(\vec{u})^*\}
,
\]
where $P(\vec{a}$, $\vec{a}) = P^\star(\vec{a})\otimes P(\vec{a})$ and
$\rho(\vec{0}) = \ket{\beta(\vec{0})}\bra{\beta(\vec{0})}$.
Suppose that we get $\vec{b}$ as a measurement outcome in Step \ref{step2},
and denote $(b_1-a_1$, \ldots, $b_{n-k}-a_{n-k})$
by $\vec{s}$.
The state $\{ I\otimes \mathsf{XZ}(\vec{u})\}
P(\vec{a}, \vec{a}) \ket{\beta(\vec{0})}$ belongs
to $Q^\star(\vec{a})\otimes Q[\vec{a} + (\langle \vec{g}_1$,
$\vec{u}\rangle$, \ldots, $\langle \vec{g}_{n-k}$,
$\vec{u}\rangle)]$.
Thus
the state after Step \ref{step2} is
\begin{eqnarray*}
&&\sum_{\vec{u}\in\mathbf{Z}_p^{2n}}
\alpha(\vec{u})
P(\vec{a},\vec{b}) \{ I\otimes \mathsf{XZ}(\vec{u})\}
P(\vec{a}, \vec{a}) \rho(\vec{0})
P(\vec{a}, \vec{a}) \{ I\otimes \mathsf{XZ}(\vec{u})^*\}
P(\vec{a},\vec{b}) \\
&=&
\sum_{\vec{u} \in D(\vec{s})}
\alpha(\vec{u})
P(\vec{a}, \vec{b}) \{ I\otimes \mathsf{XZ}(\vec{u})\}
P(\vec{a}, \vec{a}) \rho(\vec{0})
P(\vec{a}, \vec{a}) \{ I\otimes \mathsf{XZ}(\vec{u})^*\}
P(\vec{a}, \vec{b})
\\
&=&\sum_{\vec{u} \in D(\vec{s})}
\alpha(\vec{u})
\{ I\otimes \mathsf{XZ}(\vec{u})\}
P(\vec{a},\vec{a}) \rho(\vec{0})P(\vec{a},\vec{a})
\{ I\otimes \mathsf{XZ}(\vec{u})^*\}
,
\end{eqnarray*}
where
\[
D(\vec{s}) =
\{ \vec{u}\in \mathbf{Z}_p^{2n} \,:\,
\langle\vec{g}_i ,\vec{u}\rangle = b_i - a_i,
\mbox{ for each } i\}.
\]
Let $C$ be the linear subspace of $\mathbf{Z}_p^{2n}$
spanned by $\vec{g}_1$, \ldots, $\vec{g}_{n-k}$,
and $C^{\perp}$ be the orthogonal space of $C$
with respect to the symplectic inner product (\ref{symplectic}).
For vectors $\vec{u}$, $\vec{v}$ such that $\vec{u}-\vec{v} \in C$,
$\mathsf{XZ}(\vec{u})$ and $\mathsf{XZ}(\vec{v})$ have the same effect on
states in $Q(\vec{a})$ for any $\vec{a}$,
and we can identify errors $\mathsf{XZ}(\vec{u})$ and
$\mathsf{XZ}(\vec{v})$ if $\vec{u}-\vec{v} \in C$,
which is equivalent to $\vec{v} \in \vec{u}+C$.
Thus, among errors $\mathsf{XZ}(\vec{u})$
corresponding to $D(\vec{s})$,
the most likely error vector $\vec{u}$ is one having maximum
\[
\sum_{\vec{v} \in \vec{u}+C} \alpha(\vec{v})
\]
in the set $D(\vec{s})$.
Let $\vec{e}$ be the most likely error vector in $D(\vec{s})$.
The set $D(\vec{s})$ is equal to
\[
\vec{e}+C^{\perp} = \{\vec{e}+\vec{u} \,:\,
\vec{u} \in C^{\perp}\}.
\]
Bob applies $\mathsf{XZ}(\vec{e})^{-1}$ to his particles.
This is Step \ref{step4}.
After applying $\mathsf{XZ}(\vec{e})^{-1}$ to Bob's particles,
the joint state of particles of Alice and Bob is
\begin{equation}
\sum_{\vec{u} \in \vec{e}+C^{\perp}}
\alpha(\vec{u}) \{I\otimes \mathsf{XZ}(\vec{u}-\vec{e})\}
P(\vec{a},\vec{a})
\rho(\vec{0})P(\vec{a},\vec{a})
\{I\otimes \mathsf{XZ}(\vec{u}-\vec{e})^*\}. \label{step4state1}
\end{equation}
Recall that $\mathsf{XZ}(\vec{u}-\vec{e})$ does not change
a state in $Q(\vec{a})$ if $\vec{u}-\vec{e} \in C$.
Therefore the state (\ref{step4state1}) is equal to
\begin{eqnarray}
\lefteqn{\sum_{\vec{u} \in \vec{e}+C}
\alpha(\vec{u})
P(\vec{a},\vec{a})
\rho(\vec{0})P(\vec{a}, \vec{a})+}\nonumber\\*
&&
\sum_{\vec{u} \in \vec{e}+(C^{\perp}\setminus C)}
\alpha(\vec{u})
[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})]P(\vec{a},\vec{a})
\rho(\vec{0})P(\vec{a}, \vec{a})
[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})^*].
\label{step4state2}
\end{eqnarray}
We shall explain how to use an encoding operator in Step \ref{step5}
to extract $\ket{\beta(0,0)}^{\otimes k}$ from the above state.
Let $\ket{\mathrm{a}} \in H^{\otimes n-k}$ be an ancillary state.
Consider an encoding operator $U_\mathrm{e}$ on $H^{\otimes n}$
sending $\ket{i} \otimes \ket{\mathrm{a}}\in H^{\otimes n}$
to $\ket{\vec{a},i}$ for $i=0$, \ldots, $p^k-1$, where
$\{ \ket{\vec{a},0}$, \ldots,
$\ket{\vec{a},p^k-1}\}$ is an orthonormal basis of $Q(\vec{a})$
defined above.
Observe that $\overline{U_\mathrm{e}}$
is an encoding operator for $Q^\star(\vec{a})$ sending
$\ket{i} \otimes \ket{\mathrm{a}}\in H^{\otimes n}$
to $\overline{\ket{\vec{a},i}}$ for $i=0$, \ldots, $p^k-1$.
Applying $\overline{U_\mathrm{e}}^{-1}\otimes U_\mathrm{e}^{-1}$
to state (\ref{step4state1}) yields
\begin{eqnarray}
&&\sum_{\vec{u} \in \vec{e}+C^{\perp}}
\alpha(\vec{u})
(\overline{U_\mathrm{e}}^{-1}\otimes U_\mathrm{e}^{-1})[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})]P(\vec{a},\vec{a})
\rho(\vec{0})\nonumber\\*
&&\mbox{ }P(\vec{a}, \vec{a})
[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})^*]
(\overline{U_\mathrm{e}}\otimes U_\mathrm{e}) \nonumber\\
& = &
\sum_{\vec{u} \in \vec{e}+C}
\alpha(\vec{u})
(\overline{U_\mathrm{e}}^{-1}\otimes U_\mathrm{e}^{-1}) P(\vec{a},\vec{a})
\rho(\vec{0})
P(\vec{a}, \vec{a})
(\overline{U_\mathrm{e}}\otimes U_\mathrm{e})
\mbox{ [by Eq.\ (\ref{step4state2})]} \nonumber\\*
&&\mbox{}+
\sum_{\vec{u} \in \vec{e}+(C^{\perp}\setminus C)}
\alpha(\vec{u})
(\overline{U_\mathrm{e}}^{-1}\otimes U_\mathrm{e}^{-1})[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})]P(\vec{a},\vec{a})
\rho(\vec{0})\nonumber\\*
&&\mbox{ }P(\vec{a}, \vec{a})
[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})^*]
(\overline{U_\mathrm{e}}\otimes U_\mathrm{e})\nonumber\\
& = &
\sum_{\vec{u} \in \vec{e}+C}
\alpha(\vec{u})
(\overline{U_\mathrm{e}}^{-1}\otimes U_\mathrm{e}^{-1})
\left[\frac{1}{p^n}
\left\{ \sum_{i=0}^{p^k-1}
\overline{\ket{\vec{a},i}}\otimes \ket{\vec{a},i}\right\}
\left\{
\sum_{i=0}^{p^k-1}
\overline{\bra{\vec{a},i}}\otimes \bra{\vec{a},i}
\right\}\right]\nonumber\\*
&&\mbox{ }
(\overline{U_\mathrm{e}}\otimes U_\mathrm{e})
\mbox{ [by Eq.\ (\ref{form2})]} \nonumber\\*
&&+
\sum_{\vec{u} \in \vec{e}+(C^{\perp}\setminus C)}
\alpha(\vec{u})
(\overline{U_\mathrm{e}}^{-1}\otimes U_\mathrm{e}^{-1})[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})]P(\vec{a},\vec{a})
\rho(\vec{0})\nonumber\\*
&&\mbox{ }P(\vec{a}, \vec{a})
[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})^*]
(\overline{U_\mathrm{e}}\otimes U_\mathrm{e})\nonumber\\
& = &
\frac{1}{p^n}\sum_{\vec{u} \in \vec{e}+C}
\alpha(\vec{u})
\left\{ \ket{\beta(0,0)}^{\otimes k}\otimes\ket{\mathrm{a}}^{\otimes 2}\right\}
\left\{ \bra{\beta(0,0)}^{\otimes k}\otimes \bra{\mathrm{a}}^{\otimes 2}
\right\}
\mbox{ [by definition of $U_\mathrm{e}$]}\nonumber\\*
&&+
\sum_{\vec{u} \in \vec{e}+(C^{\perp}\setminus C)}
\alpha(\vec{u})
(\overline{U_\mathrm{e}}^{-1}\otimes U_\mathrm{e}^{-1})[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})]P(\vec{a},\vec{a})
\rho(\vec{0})\nonumber\\*
&&P(\vec{a}, \vec{a})
[I\otimes \mathsf{XZ}(\vec{u}-\vec{e})^*]
(\overline{U_\mathrm{e}}\otimes U_\mathrm{e}) \label{step5state}
\end{eqnarray}
Taking partial trace of the first term over the last $n-k$ qubits
yields $\ket{\beta(0,0)}^{\otimes k}$,
which is Step \ref{step6}.
Let $\tau_5$ be the final state of Step \ref{step5},
that is, state (\ref{step5state}),
and $\tau_6$ be the state after Step \ref{step6}.
In Step \ref{step7},
Bob computes the fidelity between the state
$\ket{\beta(0,0)}^{\otimes k}$ and $\tau_6$
by using knowledge of $\vec{s}$ and $\{\alpha(\vec{u}) \,:\,
\vec{u} \in \mathbf{Z}_p^{2n} \}$.
$\mathrm{Tr}[\tau_5]$ is not $1$ because
$\tau_5$ is a state after projection.
We have
\begin{eqnarray*}
\mathrm{Tr}[\tau_6] =\mathrm{Tr}[\tau_5]
&=& \mathrm{Tr}\left[P(\vec{a},\vec{a})
\rho(\vec{0})P(\vec{a}, \vec{a})\right]
\sum_{\vec{u}\in \vec{e}+C^\perp} \alpha(\vec{u})\\
&=&
\bra{\beta(\vec{0})} P^\star(\vec{a})\otimes I
\ket{\beta(\vec{0})} \sum_{\vec{u}\in \vec{e}+C^\perp} \alpha(\vec{u})
\mbox{ [by Eq.~(\ref{form})]}\\
&=&\frac{1}{p^{n-k}} \sum_{\vec{u}\in \vec{e}+C^\perp} \alpha(\vec{u})
\mbox{ [by Eq.~(\ref{form2})]}
\end{eqnarray*}
\sloppy
If the initial state is $\ket{\beta(\vec{u})}$ such that
$\vec{u} \in \vec{e}+C$, we can get $(1/p^{n-k})\ket{\beta(0,0)}^{\otimes k}
\bra{\beta(0,0)}^{\otimes k}$
as $\tau_6$. Therefore we have
\[
\bra{\beta(0,0)}^{\otimes k} \tau_6 \ket{\beta(0,0)}^{\otimes k}
\geq
\frac{1}{p^{n-k}}
\sum_{\vec{u}\in \vec{e}+C} \alpha(\vec{u}).
\]
Thus Bob estimates that the fidelity between $\ket{\beta(0,0)}^{\otimes k}$
and the normalized state of $\tau_6$ is at least
\begin{equation}
\frac{\sum_{\vec{u}\in \vec{e}+C} \alpha(\vec{u})}
{\sum_{\vec{u}\in \vec{e}+C^{\perp}} \alpha(\vec{u})}.
\label{estimatedfidelity}
\end{equation}
The value (\ref{estimatedfidelity}) varies according to
$\vec{s} = (b_1-a_1$, \ldots, $b_{n-k}-a_{n-k})$.
If obtained difference $\vec{s}$ implies low fidelity,
Bob discards all the particles and tells Alice of the disposal.
\fussy
Note that if we include Step \ref{step7} then
the whole protocol needs two-way classical communication, but
if we exclude Step \ref{step7} then it needs only
one-way classical communication.
When Alice and Bob do not execute Step~\ref{step7},
the average of fidelity (\ref{estimatedfidelity}) should be
considered instead of respective values of Eq.~(\ref{estimatedfidelity})
for each difference $\vec{s}$ of measurement outcomes.
The average of Eq.~(\ref{estimatedfidelity}) is at least
\begin{equation}
\sum_{\vec{s}\in\mathbf{Z}_p^{n-k}}\sum_{\vec{u}\in \vec{e}(\vec{s})+C}
\alpha(\vec{u})\label{averagefidelity},
\end{equation}
where $\vec{e}(\vec{s})$ is the guessed error vector for a given
difference $\vec{s}$ of measurement outcomes.
This average fidelity (\ref{averagefidelity}) will be studied in
Sections \ref{sec4} and \ref{generalfidelity}.
\section{Examples}\label{sec4}
In this section we show how one can construct the well-known
recurrence protocol and the QPA protocol from stabilizer codes,
and give a two-way protocol constructed from a stabilizer
better than the recurrence protocol and the QPA protocol.
\subsection{The recurrence protocol and the QPA protocol}
The recurrence protocol
without twirling \cite[Step (A2)]{bennett96a}
has the same effect on any density operator
on $H_A^{\otimes 2} \otimes H_B^{\otimes 2}$
as the proposed protocol
with $p=2$, $n=2$, $k=1$, the stabilizer $S$
generated by $Z\otimes Z$,
encoding operators $U_\mathrm{e}(+1):$
$(\alpha_0\ket{0}+\alpha_1\ket{1})\ket{\mathrm{a}} \mapsto
\alpha_0 \ket{00} + \alpha_1\ket{11}$
for the code belonging to eigenvalue $+1$ of $Z\otimes Z$,
$U_\mathrm{e}(-1):$
$(\alpha_0\ket{0}+\alpha_1\ket{1})\ket{\mathrm{a}} \mapsto
\alpha_0 \ket{01} + \alpha_1\ket{10}$
for the code belonging to eigenvalue $-1$ of $Z\otimes Z$,
and
discarding particles in Step \ref{step7} if
$\vec{s} = (1) \in\mathbf{Z}_2^1$.
This can be seen by a tedious but straightforward computation.
The QPA protocol \cite{deutsch96}
has the same effect as the protocol converted from
the stabilizer $S$
generated by $XZ\otimes XZ$,
encoding operators $U_\mathrm{e}(+1):$
$(\alpha_0\ket{0}+\alpha_1\ket{1})\ket{\mathrm{a}} \mapsto
\alpha_0 (\ket{0}-i\ket{1})(\ket{0}+i\ket{1}) + \alpha_1
(\ket{0}+i\ket{1})(\ket{0}-i\ket{1})$
for the code belonging to eigenvalue $+1$ of $XZ\otimes XZ$,
$U_\mathrm{e}(-1):$
$(\alpha_0\ket{0}+\alpha_1\ket{1})\ket{\mathrm{a}} \mapsto
\alpha_0 (\ket{0}-i\ket{1})(\ket{0}-i\ket{1}) + \alpha_1
(\ket{0}+i\ket{1})(\ket{0}+i\ket{1})$
for the code belonging to eigenvalue $-1$ of $XZ\otimes XZ$,
and
discarding particles in Step \ref{step7} if
$\vec{s} = (1) \in\mathbf{Z}_2^1$.
\subsection{A better protocol}\label{sec42}
We shall compare the protocol constructed from
the stabilizer generated by
$\{X\otimes X\otimes X\otimes X$,
$Z\otimes Z\otimes Z\otimes Z\}$ ($p = 2$)
with the recurrence protocol and the QPA protocol
in a similar way to \cite[Fig.~8]{bennett96}.
We discard particles in the protocol unless the measurement outcomes
completely agree, i.e., $\vec{s} = (0,0)$.
Encoding operators for the stabilizer codes
belonging to the eigenvalue $(-1)^{s_1}$ of
$X\otimes X\otimes X\otimes X$ and
$(-1)^{s_2}$ of
$Z\otimes Z\otimes Z\otimes Z$ are
described in Table \ref{tab1}.
Suppose that we have many copies of noisy entangled state
\[
F \ket{\beta(0,0)}\bra{\beta(0,0)}
+
\frac{1-F}{3}
(\ket{\beta(0,1)}\bra{\beta(0,1)}+
\ket{\beta(1,0)}\bra{\beta(1,0)}+
\ket{\beta(1,1)}\bra{\beta(1,1)}),
\]
and we want to distill as many copies of the Bell state
$\ket{\beta(0,0)}\bra{\beta(0,0)}$ as possible
by using the hashing protocol and a two-way protocol
chosen from
the recurrence protocol \emph{without twirling},
the QPA protocol, and the protocol
constructed from $\{X\otimes X\otimes X\otimes X$,
$Z\otimes Z\otimes Z\otimes Z\}$.
We use the hashing protocol to distill the perfect Bell state $\ket{\beta(0,0)}\bra{\beta(0,0)}$ after a suitable number of iterations of a two-way
protocol
as described in \cite[Section III.B.1]{bennett96}.
The numbers of perfect Bell states distillable by the three
two-way protocols are compared in Figure~\ref{fig1}.
Observe that an example of the proposed protocol has
larger distillable entanglement for the range of $F$
between $0.75$ and $0.87$.
\begin{table}
\caption{Encoding Maps}
\label{tab1}
\[
\begin{array}{|c|c|}
\hline
\mbox{eigenvalues}&\mbox{encoding map}\\\hline
\begin{array}{c}
(s_1,s_2)=(0,0)
\end{array}&
\begin{array}{ccc}
\ket{00}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0000}+\ket{1111})\\
\ket{01}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0011}+\ket{1100})\\
\ket{10}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0101}+\ket{1010})\\
\ket{11}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0110}+\ket{1001})
\end{array}\\\hline
\begin{array}{c}
(s_1,s_2)=(0,1)
\end{array}&
\begin{array}{ccc}
\ket{00}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0001}+\ket{1110})\\
\ket{01}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0010}+\ket{1101})\\
\ket{10}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0100}+\ket{1011})\\
\ket{11}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{1000}+\ket{0111})
\end{array}\\\hline
\begin{array}{c}
(s_1,s_2)=(1,0)
\end{array}&
\begin{array}{ccc}
\ket{00}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0000}-\ket{1111})\\
\ket{01}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0011}-\ket{1100})\\
\ket{10}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0101}-\ket{1010})\\
\ket{11}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0110}-\ket{1001})
\end{array}\\\hline
\begin{array}{c}
(s_1,s_2)=(1,1)
\end{array}&
\begin{array}{ccc}
\ket{00}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0001}-\ket{1110})\\
\ket{01}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0010}-\ket{1101})\\
\ket{10}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{0100}-\ket{1011})\\
\ket{11}\ket{\mathrm{a}}&\mapsto&\frac{1}{\sqrt{2}}(\ket{1000}-\ket{0111})
\end{array}\\\hline
\end{array}
\]
\end{table}
\begin{figure}
\caption{Comparison of two-way protocols}
\label{fig1}
\end{figure}
\section{Distillable entanglement by the converted protocols}\label{sec5}
In this section,
we evaluate the distillable entanglement by
one-way protocols constructed from stabilizers.
Distillable entanglement is the most important measure of the performance
of a class of protocols.
We mean by an $[[n,k]]$ entanglement distillation protocol
a protocol always
leaving $k$ pairs of particles out of given $n$ pairs of particles.
Let $\mathcal{D}$ be a class of $[[n,k]]$ entanglement distillation protocols
for $n=1$, $2$, \ldots, and $k=1$, \ldots, $n$.
Let $\rho_n$ be a density operator on $H^{\otimes 2n}$.
The distillable entanglement by the protocol $\mathcal{D}$
for the sequence of states $\{\rho_n\}$
is the maximum of a real number $R$ such that
for any $R' < R$ and any $\epsilon > 0$ there exists
an $[[n,k]]$ ($k \geq nR'$) protocol in $\mathcal{D}$ such that the protocol
extracts a state $\tau \in \mathcal{S}(H^{\otimes 2k})$ from $\rho_n$
such that the fidelity between $\tau$ and a maximally entangled state
in $H^{\otimes 2k}$ is at least $1-\epsilon$.
Roughly speaking, the distillable entanglement by $\mathcal{D}$
is the largest number of maximally entangled pairs in $H^{\otimes 2}$
distillable from one pair of particles.
Our definition imposes on protocols the restriction that
a protocol always produces the same number of pairs of particles.
A general definition without this restriction was given by
Rains \cite{rains99}.
Let $\{ \alpha(i,j) \,:\, (i,j) \in \mathbf{Z}_p^2 \}$
be a probability distribution, and consider the density operator
\[
\rho = \sum_{(i,j) \in \mathbf{Z}_p^2}
\alpha(i,j) \ket{\beta(i,j)}\bra{\beta(i,j)}
\]
on $H_A \otimes H_B$. We shall estimate the distillable entanglement
by the proposed protocol for the sequence of states
$\{ \rho_n=\rho^{\otimes n} \,:\, n=1$, \ldots $\}$,
and show the distillable entanglement is at least as large as
the achievable rate of quantum stabilizer codes over
the quantum channel $\Gamma$
on $H$ over which an error $X^i Z^j$ occurs
with probability $\alpha(i,j)$.
The achievable rate by quantum stabilizer codes
over $\Gamma$ is the maximum of a real number $R$
such that
for any $R' < R$ and any $\epsilon > 0$ there exists
an $[[n,k]]$ ($k \geq nR'$) stabilizer code $Q$ such that
any state $\ket{\varphi} \in Q$ can be transmitted over
$\Gamma$ with fidelity at least $1-\epsilon$.
\begin{proposition}
We assume that the decoding of a quantum stabilizer code
is implemented as follows:
First measure an observable whose eigenspaces are the
same as the stabilizer of the code,
determine the most likely error of the form
$X^{i_1}Z^{j_1} \otimes \cdots \otimes X^{i_n}Z^{j_n}$,
and apply the inverse of the guessed error to the codeword.
Under this assumption,
the distillable entanglement by the proposed protocol
without
Step \ref{step7}
for $\{ \rho_n=\rho^{\otimes n} \,:\, n=1$, \ldots $\}$
is at least as large as
the achievable rate by quantum stabilizer codes
over $\Gamma$.
\end{proposition}
\noindent\emph{Proof.}
Let $R$ be the achievable rate by quantum stabilizer codes
over $\Gamma$.
Then for any $R'<R$ and $\epsilon'>0$
there exists an $[[n,k]]$ ($k \geq nR'$)
quantum stabilizer code $Q$ with stabilizer $S$
such that any state $\ket{\varphi}\in Q$
can be transmitted over $\Gamma$ with fidelity at least $1-\epsilon'$.
Let $S$ be generated by $\{\mathsf{XZ}(\vec{g}_1)$,
\ldots, $\mathsf{XZ}(\vec{g}_{n-k})$
(and possibly some power of $\omega I$) $\}$,
and $Q$ belong to the eigenvalue $\lambda_i$ of $\mathsf{XZ}(\vec{g}_i)$.
Suppose that the decoder guesses the error as $\mathsf{XZ}(\vec{e}(\vec{s}))$
when the measurement outcomes indicate that the received state
belongs to eigenvalue
$\lambda_i \omega^{s_i}$ of $\mathsf{XZ}(\vec{g}_i)$
for $i=1$, \ldots, $n-k$,
where $\vec{s} = (s_1$, \ldots, $s_{n-k})$.
Then the decoder can correct any error $\mathsf{XZ}(\vec{u})$
if
\begin{equation}
\vec{u} \in \{ \vec{e}(\vec{s}) + C \,:\, \vec{s} \in \mathbf{Z}_p^{n-k}\},
\label{correctable}
\end{equation}
where $C$ is a linear subspace of $\mathbf{Z}_p^{2n}$
spanned by $\vec{g}_1$, \ldots, $\vec{g}_{n-k}$.
By Lemma \ref{badcodeword} (see Appendix \ref{sec:badcodeword}),
there exists a codeword $\ket{\varphi} \in Q$ such that
if $\ket{\varphi}$ is transmitted and $\mathsf{XZ}(\vec{u})\ket{\varphi}$
is received with $\vec{u}$ not in the set~(\ref{correctable})
then the fidelity between $\ket{\varphi}$ and the decoded state
is at most $9/16$,
because the set~(\ref{correctable}) is equal to the set of correctable
errors by $Q$ in Lemma \ref{badcodeword}.
Since $\ket{\varphi}$ can be transmitted through $\Gamma$ with fidelity
at least $1-\epsilon'$, the probability of the correctable error~(\ref{correctable})
over $\Gamma^{\otimes n}$
is at least $1-16\epsilon'/9$.
Suppose that we apply the proposed protocol to $\rho^{\otimes n}$ such that
if the difference $\vec{s}$ of measurement outcomes is observed
then $\mathsf{XZ}(\vec{e}(\vec{s}))^{-1}$ is applied in Step \ref{step4}.
Then the average (\ref{averagefidelity}) of the fidelity
is at least $1-16\epsilon'/9$,
because the errors in the set (\ref{correctable}) are
also correctable by the proposed protocol [see Eq.\ (\ref{step4state2})].
For given $\epsilon > 0$ set $\epsilon' = 9\epsilon/16$ in the above
argument, and we can see that the distillable entanglement
is at least as large as the achievable rate of quantum stabilizer
codes over $\Gamma$. \qed
The best known lower bound
on the achievable rate by quantum stabilizer codes
over $\Gamma$ is given by Hamada \cite{hamada02}, and his lower bound
gives the true value for the depolarizing channels.
Let us compare the distillable entanglement by the converted protocols
and that by the hashing protocol \cite{bennett96} for
the Werner state of fidelity $F$,
which is given by
$\alpha(1,1) =F$, $\alpha(0,1) = \alpha(1,0) = \alpha(0,0) =
(1-F)/3$ and $p=2$. The Werner state is converted to
\begin{eqnarray}
&&F \ket{\beta(0,0)}\bra{\beta(0,0)} +
\frac{1-F}{3}( \ket{\beta(0,1)}\bra{\beta(0,1)} + \nonumber\\
&& \ket{\beta(1,0)}\bra{\beta(1,0)} +
\ket{\beta(1,1)}\bra{\beta(1,1)}) \label{convertedwerner}
\end{eqnarray}
by applying $XZ$ on Bob's particle.
The distillable entanglement of state (\ref{convertedwerner})
by the hashing protocol
is estimated as
\begin{equation}
1 - H_2(F, (1-F)/3, (1-F)/3, (1-F)/3)\label{binaryhashing}
\end{equation}
where $H_b$ is the Shannon entropy with base $b$.
The distillable entanglement of state (\ref{convertedwerner})
by the converted protocols
is strictly larger than Eq.~(\ref{binaryhashing})
for a certain range of $F$,
because the achievable rate of
the Shor-Smolin concatenated codes is strictly larger than
Eq.~(\ref{binaryhashing}) over the depolarizing channel
of fidelity $F$ \cite{divincenzo98}
and they can be written as stabilizer codes \cite{hamada02}.
Let us consider the case of $p=3$, $\alpha(0,0) = F$,
and $\alpha(i,j) = (1-F)/8$ for $(i,j) \neq (0,0)$.
The distillable entanglement by
the nonbinary generalization \cite{vollbrecht02}
of the hashing protocol is
estimated as
\begin{equation}
1 - H_3(\{\alpha(i,j)\}). \label{ternaryhashing}
\end{equation}
The achievable rate by the quantum stabilizer codes
is strictly greater than Eq.~(\ref{ternaryhashing})
for $0.2552 \leq F \leq 0.2557$ \cite[Section VI.C]{hamada02},
and so is the distillable entanglement by the converted protocols.
\section{Fidelity calculation in general case}\label{generalfidelity}
In the preceding argument we assumed that the initial state
shared by Alice and Bob was in the form of Eq.~(\ref{restrictedstate}).
In this section we remove this restriction.
Let $\rho$ be an arbitrary density operator in
$H_A^{\otimes n}\otimes H_B^{\otimes n}$.
We shall consider applying the proposed protocol without Step
\ref{step7} to $\rho$ and calculate the fidelity
between the distilled state and $\ket{\beta(0,0)}^{\otimes k}$.
Precisely speaking,
we shall calculate the fidelity between
$\ket{\beta(0,0)}^{\otimes k} \otimes \ket{\mathrm{a}}^{\otimes 2}$
and the state after Step \ref{step5},
which is equal to that between $\ket{\beta(0,0)}^{\otimes k}$
and the state after Step \ref{step6}.
The idea of the following argument is borrowed from
Section 7.4 of \cite{preskill98}.
Since there is no selection of particles in
Steps \ref{step1}--\ref{step6} by a measurement,
the whole process of Steps \ref{step1}--\ref{step6}
can be written as a completely positive trace-preserving map
$\Lambda$ on the density operators on
$H_A^{\otimes n}\otimes H_B^{\otimes n}$.
Let $\ket{\psi}\in H_A^{\otimes n}\otimes H_B^{\otimes n}\otimes
H_\mathrm{env}$ be a purification of $\rho$.
Since $\{ \ket{\beta(\vec{x})} \,:\, \vec{x}\in \mathbf{Z}_p^{2n}\}$
is an orthonormal basis of $H_A^{\otimes n}\otimes H_B^{\otimes n}$,
we can write $\ket{\psi}$ as
\begin{equation}
\ket{\psi} = \sum_{\vec{x}\in\mathbf{Z}_p^{2n}}
\ket{\beta(\vec{x})} \otimes \ket{\mathrm{env}(\vec{x})},
\label{expansion}
\end{equation}
where $\ket{\mathrm{env}(\vec{x})}$ is a vector in
$H_\mathrm{env}$.
In Step~\ref{step4}, the inverse error operator
$\mathsf{XZ}(\vec{e})^{-1}$ is determined from
the difference $\vec{s}$ of measurement outcomes and knowledge
of $\{\alpha(\vec{u}) \,:\, \vec{u} \in\mathbf{Z}_p^{2n}\}$.
When we deal with an arbitrary but known density operator
$\rho$, determine $\vec{e}$ from $\vec{s}$ so that
the lower bound (\ref{generalbound}) below on fidelity
becomes large.
Once we fix a determination rule of $\vec{e}$ from $\vec{s}$,
we can define $\mathsf{Good} = \{
\vec{u} \in \mathbf{Z}_p^{2n} \,:\,$
the protocol can perfectly distill $\ket{\beta(0,0)}^{\otimes k}$
from $\ket{\beta(\vec{u})} \}$.
Equation~(\ref{expansion}) can be written as
\begin{equation}
\sum_{\vec{x}\in\mathsf{Good}}
\ket{\beta(\vec{x})} \otimes \ket{\mathrm{env}(\vec{x})}+
\sum_{\vec{x}\in\mathbf{Z}_p^{2n}\setminus \mathsf{Good}}
\ket{\beta(\vec{x})} \otimes \ket{\mathrm{env}(\vec{x})}.
\label{expansion2}
\end{equation}
Almost the same argument as in Section 7.4 of \cite{preskill98}
shows that the fidelity between
$\ket{\beta(0,0)}^{\otimes k}$ and the state after
Step~\ref{step6} is at least
\begin{equation}
1 - \left\|
\sum_{\vec{x}\in\mathbf{Z}_p^{2n}\setminus \mathsf{Good}}
\ket{\beta(\vec{x})} \otimes \ket{\mathrm{env}(\vec{x})}
\right\|^2. \label{generalbound}
\end{equation}
\section*{Acknowledgment}
The author would like to thank Prof.\ Tomohiko Uyematsu
and Mr.\ Toshiyuki Morita
for helpful discussions.
This research was supported by
the Japan Society for the Promotion of Science
under contract No.\ 14750278.
\appendix
\section{Bad codeword lemma}\label{sec:badcodeword}
We consider a quantum channel over which
an error of the form $\mathsf{XZ}(\vec{e})$ occurs
with the probability $\alpha(\vec{e})$ for $\vec{e} \in \mathbf{Z}_p^{2n}$,
and we also consider the following decoding method:
Measure the observable of $H^{\otimes n}$
whose eigenspaces are the same as those of $S$,
and apply an operator $\mathsf{XZ}(\vec{r}_e)$
($\vec{r}_e\in \mathbf{Z}_p^{2n}$)
determined by the measurement
outcome and some deterministic criterion.
With this decoding method,
we can correct at most $p^{2n-2k}$ errors
among all the $p^{2n}$ errors for
an $[[n,k]]$ quantum stabilizer code.
\begin{lemma}\label{badcodeword}
Let $Q$ be an $[[n,k]]$ quantum stabilizer code.
Suppose that we have a fixed decoding method
as described above.
There exists a codeword $\ket{\varphi} \in Q$
such that
\[
|\bra{\varphi}\mathsf{XZ}(\vec{r}_e)\mathsf{XZ}(\vec{e})\ket{\varphi}| \leq \frac{3}{4}
\]
for every uncorrectable error $\mathsf{XZ}(\vec{e})$,
where an error $\mathsf{XZ}(\vec{e})$ is said to be
\emph{correctable} if a received state $\mathsf{XZ}(\vec{e})\ket{\varphi}$
is decoded to $\ket{\varphi}$ for all $\ket{\varphi} \in Q$ and
\emph{uncorrectable} otherwise.
\end{lemma}
\noindent\emph{Proof.}
Consider the following map
\[
f:
\left\{
\begin{array}{ccc}
E & \longrightarrow&
\mathbf{Z}_p^{2n}\\
\omega^i X^{a_1}Z^{b_1}\otimes
\cdots \otimes X^{a_n}Z^{b_n} &\longmapsto&
(a_1,b_1,\ldots,a_n,b_n)
\end{array}\right..
\]
Let $C = f(S) \subset \mathbf{Z}_p^{2n}$.
Since $S$ is commutative,
we have $C \subseteq C^\perp$.
Let $C_\mathrm{max}$ be a subspace of
$\mathbf{Z}_p^{2n}$ such that
\begin{eqnarray*}
C_\mathrm{max} &=& C_\mathrm{max}^\perp,\\
C \subseteq & C_\mathrm{max}& \subseteq C^\perp.
\end{eqnarray*}
Such a space $C_\mathrm{max}$ always exists by
the Witt theorem (see Sec.\ 20 of Ref.\ \cite{aschbacher00}).
Since $C_\mathrm{max} = C_\mathrm{max}^\perp$,
we have $\dim C_\mathrm{max} = n$.
The set $f^{-1}(C_\mathrm{max})$ is a commutative subgroup
of $E$, so we can consider a quantum stabilizer code
$Q_\mathrm{min} \subset Q$ defined by $f^{-1}(C_\mathrm{max})$.
We have $\dim Q_\mathrm{min} = p^{n - \dim C_\mathrm{max}} = 1$.
Let $\ket{\psi_1} \in Q_\mathrm{min}$ be a normalized state vector.
We shall construct the desired codeword $\ket{\varphi}$
in Lemma \ref{badcodeword} from $\ket{\psi_1}$.
By the property of stabilizer codes,
if $\vec{x} + C_\mathrm{max} \neq \vec{y} + C_\mathrm{max}$
then
\begin{equation}
\bra{\psi_1} \mathsf{XZ}(\vec{x})^*\; \mathsf{XZ}(\vec{y})\ket{\psi_1} = 0.
\label{orth}
\end{equation}
Let $R \subset C^\perp$ be a set of coset representatives
of $C_\mathrm{max}$ in $C^\perp$, that is,
$R$ has the same number of elements as
$C^\perp/C_\mathrm{max}$, and if $\vec{x}, \vec{y} \in R$
and $\vec{x} \neq \vec{y}$
then $\vec{x} + C_\mathrm{max} \neq \vec{y} + C_\mathrm{max}$.
We assume $\vec{0} \in R$.
Define
\[
\ket{\psi_2} = \frac{1}{\sqrt{p^k}}\sum_{\vec{x}\in R} \mathsf{XZ}(\vec{x}) \ket{\psi_1},
\]
which is a normalized state vector in $Q$
by Eq.~(\ref{orth}).
We want to take $\ket{\varphi}$ in Lemma \ref{badcodeword}
as a multiple of $\ket{\psi_1 + \psi_2}$,
so let us compute
\begin{eqnarray*}
\langle \psi_1 | \psi_2 \rangle &=&
\frac{1}{\sqrt{p^k}}
\sum_{\vec{x}\in R} \langle \psi_1 | \mathsf{XZ}(\vec{x}) | \psi_1 \rangle\\
&=& \frac{1}{\sqrt{p^k}}
\langle \psi_1 | \psi_1 \rangle \mbox{ by Eq.\ (\ref{orth}) and }
\vec{0}\in R.
\end{eqnarray*}
By Eq.~(\ref{orth}) we also have $\langle \psi_2 | \psi_2 \rangle
= \langle \psi_1 | \psi_1 \rangle$.
Therefore $\langle \psi_1 + \psi_2 | \psi_1 + \psi_2 \rangle = (2 +
2/\sqrt{p^k}) \langle \psi_1 | \psi_1 \rangle$.
Define $\ket{\varphi}$ by
\[
\frac{1}{\sqrt{2 + 2/\sqrt{p^k}}}\ket{\psi_1 + \psi_2},
\]
which is a normalized state vector in $Q$.
We shall show that $\ket{\varphi}$ has the desired property.
Suppose that an error $\mathsf{XZ}(\vec{e'})$ occurred and
we applied $\mathsf{XZ}(\vec{r}_{e'})$ as the recovery operator.
If $\vec{e} = \vec{e'} - \vec{r}_{e'} \in C$, then
the error $\vec{e'}$ is correctable,
otherwise $\vec{e'}$ is uncorrectable.
If $\vec{e} \notin C^\perp$,
the decoded state is orthogonal to any transmitted state,
so we may assume $\vec{e} \in C^\perp \setminus C$ hereafter.
For $\vec{e} \in C_\mathrm{max}\setminus C$,
\begin{eqnarray*}
&&p^k \bra{\psi_2} \mathsf{XZ}(\vec{e}) \ket{\psi_2}\\
&=&
\sum_{\vec{x},\vec{y}\in R} \bra{\psi_1}\mathsf{XZ}(\vec{x})^*\mathsf{XZ}(\vec{e})
\mathsf{XZ}({\vec{y}}) \ket{\psi_1}\\
&=&
\sum_{\substack{\vec{x},\vec{y}\in R\\
\vec{x}+C_\mathrm{max} = \vec{e}+\vec{y} + C_\mathrm{max}}}
\bra{\psi_1}\mathsf{XZ}(\vec{x})^*\mathsf{XZ}(\vec{e})
\mathsf{XZ}({\vec{y}}) \ket{\psi_1} \mbox{ by Eq.\ (\ref{orth})}\\
&=&
\sum_{\vec{x}\in R} \bra{\psi_1}\mathsf{XZ}(\vec{x})^*\mathsf{XZ}(\vec{e})
\mathsf{XZ}({\vec{x}}) \ket{\psi_1}\\
&=&
\sum_{\vec{x}\in R} \omega^{\langle \vec{e},\vec{x}\rangle}
\bra{\psi_1}\mathsf{XZ}(\vec{x})^*\mathsf{XZ}({\vec{x}}) \mathsf{XZ}(\vec{e})
\ket{\psi_1}\\
&=&
\bra{\psi_1}\mathsf{XZ}(\vec{e})\ket{\psi_1}\sum_{\vec{x}\in R} \omega^{\langle \vec{e},\vec{x}\rangle}.
\end{eqnarray*}
Consider the linear map $L_{\vec{e}}$
from $C^\perp$ to $\mathbf{Z}_p$ defined by
\[
L_{\vec{e}}(\vec{x}) = \langle \vec{e},\vec{x}\rangle.
\]
Then the kernel of $L_{\vec{e}}$ contains $C_\mathrm{max}$
because $\vec{e} \in C_\mathrm{max}$,
and $\vec{e} \notin C$ implies that $L_{\vec{e}}$ is not a zero
linear map.
Hence we can partition $R$ into cosets of $\ker (L_{\vec{e}})$
in $C^\perp$.
Each coset of $\ker (L_{\vec{e}})$
in $C^\perp$ contains exactly $p^{k-1}$ elements of $R$,
and each element in a coset has the same value under $L_{\vec{e}}$.
Therefore
\begin{eqnarray*}
\sum_{\vec{x}\in R} \omega^{\langle \vec{e},\vec{x}\rangle}
&=&\sum_{\vec{x}\in R} \omega^{L_{\vec{e}}(\vec{x})}\\
&=& p^{k-1} \sum_{i=0}^{p-1} \omega^i\\
&=& 0.
\end{eqnarray*}
Summarizing these results we have
\begin{eqnarray*}
\vec{e} \in C^\perp\setminus C_\mathrm{max} &\Longrightarrow&
\bra{\psi_1} \mathsf{XZ}(\vec{e}) \ket{\psi_1} =0 \mbox{ by Eq.\ (\ref{orth})},\\
\vec{e} \in C_\mathrm{max}\setminus C &\Longrightarrow&
\bra{\psi_2} \mathsf{XZ}(\vec{e}) \ket{\psi_2} =0,
\end{eqnarray*}
and by Eq.~(\ref{orth}) we have for $\vec{e}\in C^\perp$
\[
|\bra{\psi_1}\mathsf{XZ}(\vec{e})\ket{\psi_2}| = \frac{1}{\sqrt{p^k}}.
\]
Thus we have for $\vec{e} \in C^\perp\setminus C$
\begin{eqnarray*}
&&|\langle \psi_1 + \psi_2 |\mathsf{XZ}(\vec{e})| \psi_1 + \psi_2 \rangle|\\
&\leq&
\frac{1}{2+2/\sqrt{p^k}}
(\underbrace{|\bra{\psi_1}\mathsf{XZ}(\vec{e})\ket{\psi_1}|
+|\bra{\psi_2}\mathsf{XZ}(\vec{e})\ket{\psi_2}|}_{\leq 1} \\
&&\mbox{} +
\underbrace{2 | \bra{\psi_1}\mathsf{XZ}(\vec{e})\ket{\psi_2}|}_{=2/\sqrt{p^k}})\\
&\leq& \frac{1+2/\sqrt{p^k}}{2+2/\sqrt{p^k}}\\
&\leq& 3/4,
\end{eqnarray*}
which completes the proof of Lemma \ref{badcodeword}.
\qed
\end{document}
|
\begin{document}
\title{Diffusion-Convolutional Neural Networks}
\begin{abstract}
We present diffusion-convolutional neural networks (DCNNs), a new model for graph-structured data. Through the introduction of a diffusion-convolution operation, we show how diffusion-based representations can be learned from graph-structured data and used as an effective basis for node classification. DCNNs have several attractive qualities, including a latent representation for graphical data that is invariant under isomorphism, as well as polynomial-time prediction and learning that can be represented as tensor operations and efficiently implemented on the GPU. Through several experiments with real structured datasets, we demonstrate that DCNNs are able to outperform probabilistic relational models and kernel-on-graph methods at relational node classification tasks.
\end{abstract}
\section{Introduction}
Working with structured data is challenging. On one hand, finding the right way to express and exploit structure in data can lead to improvements in predictive performance; on the other, finding such a representation may be difficult, and adding structure to a model can dramatically increase the complexity of prediction and learning.
The goal of this work is to design a flexible model for a general class of structured data that offers improvements in predictive performance while avoiding an increase in complexity. To accomplish this, we extend convolutional neural networks (CNNs) to general graph-structured data by introducing a `diffusion-convolution' operation. Briefly, rather than scanning a `square' of parameters across a grid-structured input like the standard convolution operation, the diffusion-convolution operation builds a latent representation by scanning a diffusion process across each node in a graph-structured input.
This model is motivated by the idea that a representation that encapsulates graph diffusion can provide a better basis for prediction than a graph itself. Graph diffusion can be represented as a matrix power series, providing a straightforward mechanism for including contextual information about entities that can be computed in polynomial time and efficiently implemented on the GPU.
In this paper, we present diffusion-convolutional neural networks (DCNNs) and explore their performance at various classification tasks on graphical data. Many techniques include structural information in classification tasks, such as probabilistic relational models and kernel methods; DCNNs offer a complementary approach that provides a significant improvement in predictive performance at node classification tasks.
As a model class, DCNNs offer several advantages:
\begin{itemize}
\item \textbf{Accuracy:} In our experiments, DCNNs significantly outperform alternative methods for node classification tasks and offer comparable performance to baseline methods for graph classification tasks.
\item \textbf{Flexibility:} DCNNs provide a flexible representation of graphical data that encodes node features, edge features, and purely structural information with little preprocessing. DCNNs can be used for a variety of classification tasks with graphical data, including node classification, edge classification, and whole-graph classification.
\item \textbf{Speed:} Prediction from a DCNN can be expressed as a series of polynomial-time tensor operations, allowing the model to be implemented efficiently on a GPU using existing libraries.
\end{itemize}
The remainder of this paper is organized as follows. In Section \ref{sec:model}, we present a formal definition of the model, including descriptions of prediction and learning procedures. This is followed by several experiments in Section \ref{sec:experiments} that explore the performance of DCNNs at node and graph classification tasks. We briefly describe the limitations of the model in Section \ref{sec:limitations}, then, in Section \ref{sec:relatedwork}, we present related work and discuss the relationship between DCNNs and other methods. Finally, conclusions and future work are presented in Section \ref{sec:conclusion}.
\section{Model}
\label{sec:model}
\begin{figure}
\caption{Node classification}
\label{fig:tensormodelnode}
\caption{Graph classification}
\label{fig:tensormodelgraph}
\caption{Edge classification}
\label{fig:tensormodeledges}
\caption{DCNN model definition for node, graph, and edge classification tasks.}
\label{fig:tensormodel}
\end{figure}
Consider a situation where we have a set of $T$ graphs $\mathcal{G} = \left\{G_t | t \in 1 ... T \right\}$. Each graph $G_t = (V_t, E_t)$ is composed of vertices $V_t$ and edges $E_t$. The vertices are collectively described by an $N_t \times F$ design matrix $X_t$ of features\footnote{Without loss of generality, we assume that the features are real-valued.}, where $N_t$ is the number of nodes in $G_t$, and the edges $E_t$ are encoded by an $N_t \times N_t$ adjacency matrix $A_t$, from which we can compute a degree-normalized transition matrix $P_t$ that gives the probability of jumping from node $i$ to node $j$ in one step. No constraints are placed on the form $G_t$; the graph can be weighted or unweighted, directed or undirected. Either the nodes, edges, or graphs have labels $Y$ associated with them, with the dimensionality of $Y$ differing in each case.
We are interested in learning to predict $Y$; that is, to predict a label for each of the nodes in each graph, or a label for each of the edges in each graph, or a label for each graph itself. In each case, we have access to some labeled entities (be they nodes, graphs, or edges), and our task is to predict the values of the remaining unlabeled entities.
This setting is capable of representing several well-studied machine learning tasks. If $T=1$ (i.e. there is only one input graph) and the labels $Y$ are associated with the nodes or edges, this reduces to the problem of \emph{semisupervised classification}; if there are no edges present in the input graph, this reduces further to standard \emph{supervised classification}. If $T>1$ and the labels $Y$ are associated with each graph, then this represents the problem of \emph{supervised graph classification}.
DCNNs were designed to perform any task that can be represented within this formulation. A DCNN takes $\mathcal{G}$ as input and returns either a hard prediction for $Y$ or a conditional distribution $\mathbb{P}(Y|X)$. Each entity of interest (be it a node, a graph, or an edge) is transformed to a diffusion-convolutional representation, which is an $H \times F$ real matrix defined by $H$ hops of graph diffusion over $F$ features, computed via an $H \times F$ real-valued weight tensor $W^c$ and a nonlinear differentiable function $f$ that computes the activations. So, for node classification tasks, the diffusion-convolutional representation of graph $t$, $Z_t$, will be an $N_t \times H \times F$ tensor, as illustrated in Figure \ref{fig:tensormodelnode}; for graph or edge classification tasks, $Z_t$ will be an $H \times F$ matrix or an $M_t \times H \times F$ tensor respectively, as illustrated in Figures \ref{fig:tensormodelgraph} and \ref{fig:tensormodeledges}.
The term `diffusion-convolution' is meant to evoke the ideas of feature learning, parameter tying, and invariance that are characteristic of convolutional neural networks. The core operation of a DCNN is a mapping from nodes and their features to the results of a diffusion process that begins at that node. In contrast with standard CNNs, DCNN parameters are tied according to search depth rather than their position in a grid. The diffusion-convolutional representation is invariant with respect to node index rather than position; in other words, the diffusion-convolutional activations of two isomorphic input graphs will be the same\footnote{A proof is given in the appendix.}. Unlike standard CNNs, DCNNs have no pooling operation.
\paragraph{Node Classification}
Consider a node classification task where a label $Y$ is predicted for each input node in a graph. If we let $P_t^{*}$ be an $N_t \times H \times N_t$ tensor containing the power series of $P_t$, the diffusion-convolutional activation $Z_{tijk}$ for node $i$, hop $j$, and feature $k$ of graph $t$ is given by
\begin{align}
Z_{tijk} &= f\left(W^c_{jk} \cdot \sum\limits_{l=1}^{N_t} P^*_{tijl} X_{tlk}\right)
\end{align}
The activations can be expressed more concisely using tensor notation as
\begin{align}
Z_t &= f\left(W^c \odot P_t^* X_t\right)
\label{eqn:activations}
\end{align}
where the $\odot$ operator represents element-wise multiplication; see Figure \ref{fig:tensormodelnode}. The model only entails $O(H \times F)$ parameters, making the size of the latent diffusion-convolutional representation independent of the size of the input.
The model is completed by a dense layer that connects $Z$ to $Y$. A hard prediction for $Y$, denoted $\hat{Y}$, can be obtained by taking the maximum activation and a conditional probability distribution $\mathbb{P}(Y|X)$ can be found by applying the softmax function:
\begin{align}
\hat{Y} &= \arg \max \left(f\left(W^d \odot Z\right)\right) \\
\mathbb{P}(Y|X) &= \text{softmax}\left(f\left(W^d \odot Z\right)\right)
\label{eqn:top}
\end{align}
This keeps the same form in the following extensions.
\paragraph{Graph Classification}
DCNNs can be extended to graph classification by simply taking the mean activation over the nodes
\begin{align}
Z_t &= f\left(W^c \odot 1_{N_t}^T P_t^* X_t / N_t\right)
\label{eqn:graphactivations}
\end{align}
where $1_{N_t}$ is an $N_t \times 1$ vector of ones, as illustrated in Figure \ref{fig:tensormodelgraph}.
\paragraph{Edge Classification and Edge Features}
Edge features and labels can be included by converting each edge to a node that is connected to the nodes at the tail and head of the edge. This graph can be constructed efficiently by augmenting the adjacency matrix with the incidence matrix:
\begin{align}
A_t' &= \left( \begin{array}{cc}
A_t & B_t^T \\
B_t & 0 \end{array} \right)
\end{align}
$A_t'$ can then be used to compute $P_t'$ and used in place of $P_t$ to classify nodes and edges.
\paragraph{Purely Structural DCNNs}
DCNNs can be applied to input graphs with no features by associating a `bias feature' with value 1.0 with each node. Richer structure can be encoded by adding additional structural node features such as Pagerank or clustering coefficient, although this does introduce some hand-engineering and pre-processing.
\paragraph{Learning}
DCNNs are learned via stochastic minibatch gradient descent on backpropagated error. At each epoch, node indices are randomly grouped into several batches. The error of each batch is computed by taking slices of the graph definition power series and propagating the input forward to predict the output, then setting the weights by gradient ascent on the back-propagated error. We also make use of windowed early stopping; training is ceased if the validation error of a given epoch is greater than the average of the last few epochs.
\section{Experiments}
\label{sec:experiments}
In this section we present several experiments to investigate how well DCNNs perform at node and graph classification tasks. In each case we compare DCNNs to other well-known and effective approaches to the task.
In each of the following experiments, we use the AdaGrad algorithm \cite{duchi2011adaptive} for gradient ascent with a learning rate of $0.05$. All weights are initialized by sampling from a normal distribution with mean zero and variance $0.01$. We choose the hyperbolic tangent for the nonlinear differentiable function $f$ and use the multiclass hinge loss between the model predictions and ground truth as the training objective. The model was implemented in Python using Lasagne and Theano \cite{bergstra+al:2010-scipy}.
\subsection{Node classification}
\begin{table}[b]
\centering
\begin{tabular}{c|c|c|c||c|c|c|}
\cline{2-7}
& \multicolumn{3}{c||}{Cora} & \multicolumn{3}{c|}{Pubmed} \\
\cline{2-7}
Model & Accuracy & F (micro) & F (macro) & Accuracy & F (micro) & F (macro) \\
\hline
l1logistic & 0.7087 & 0.7087 & 0.6829 & 0.8718 & 0.8718 & 0.8698 \\
l2logistic & 0.7292 & 0.7292 & 0.7013 & 0.8631 & 0.8631 & 0.8614 \\
KED & 0.8044 & 0.8044 & 0.7928 & 0.8125 & 0.8125 & 0.7978 \\
KLED & 0.8229 & 0.8229 & 0.8117 & 0.8228 & 0.8228 & 0.8086 \\
CRF-LBP & 0.8449 & -- & 0.8248 & -- & -- & -- \\
2-hop DCNN & \textbf{0.8677} & \textbf{0.8677} & \textbf{0.8584} & \textbf{0.8976} & \textbf{0.8976} & \textbf{0.8943} \\
\hline
\end{tabular}
\caption{A comparison of the performance between baseline $\ell 1$ and $\ell 2$-regularized logistic regression models, exponential diffusion and Laplacian exponential diffusion kernel models, loopy belief propagation (LBP) on a partially-observed conditional random field (CRF), and a two-hop DCNN on the Cora and Pubmed datasets. The DCNN offers the best performance according to each measure, and the gain is statistically significant in each case. The CRF-LBP result is quoted from \cite{Sen:2007wh}, which follows the same experimental protocol.}
\label{tab:cora}
\end{table}
\begin{figure}
\caption{Search Breadth}
\label{fig:nodesearchbreadth}
\caption{Cora Learning Curve}
\label{fig:nodeclasscoraprop}
\caption{Pubmed Learning Curve}
\label{fig:nodeclasspubmedprop}
\caption{The effect of search breadth (\ref{fig:nodesearchbreadth}) and learning curves for the Cora (\ref{fig:nodeclasscoraprop}) and Pubmed (\ref{fig:nodeclasspubmedprop}) node classification datasets.}
\label{fig:nodeclass}
\end{figure}
We ran several experiments to investigate how well DCNNs can classify nodes within a single graph. The graphs were constructed from the Cora and Pubmed datasets, which each consist of scientific papers (nodes), citations between papers (edges), and subjects (labels).
\paragraph{Protocol} In each experiment, the set $\mathcal{G}$ consists of a single graph $G$. During each trial, the input graph's nodes are randomly partitioned into training, validation, and test sets, with each set having the same number of nodes. During training, all node features $X$, all edges $E$, and the labels $Y$ of the training and validation sets are visible to the model. We report classification accuracy as well as micro-- and macro--averaged F1; each measure is reported as a mean and confidence interval computed from several trials.
We also provide learning curves for the CORA and Pubmed datasets. In this experiment, the validation and test set each contain 10\% of the nodes, and the amount of training data is varied between 10\% and 100\% of the remaining nodes.
\paragraph{Baseline Methods} `l1logistic' and `l2logistic' indicate $\ell 1$ and $\ell 2$-regularized logistic regression, respectively. The inputs to the logistic regression models are the node features alone (e.g. the graph structure is not used) and the regularization parameter is tuned using the validation set. `KED' and `KLED' denote the exponential diffusion and Laplacian exponential diffusion kernels-on-graphs, respectively, which have previously been shown to perform well on the Cora dataset \cite{Fouss:2012bf}. These kernel models take the graph structure as input (e.g. node features are not used) and the validation set is used to determine the kernel hyperparameters. `CRF-LBP' indicates a partially-observed conditional random field that uses loopy belief propagation for inference. Results for this model are quoted from prior work \cite{Sen:2007wh} that uses the same dataset and experimental protocol.
\paragraph{Node Classification Data} The Cora corpus \cite{sen:aimag08} consists of 2,708 machine learning papers and the 5,429 citation edges that they share. Each paper is assigned a label drawn from seven possible machine learning subjects, and each paper is represented by a bit vector where each feature corresponds to the presence or absence of a term drawn from a dictionary with 1,433 unique entries. We treat the citation network as an undirected graph.
The Pubmed corpus \cite{sen:aimag08} consists of 19,717 scientific papers from the Pubmed database on the subject of diabetes. Each paper is assigned to one of three classes. The citation network that joins the papers consists of 44,338 links, and each paper is represented by a Term Frequency Inverse Document Frequency (TFIDF) vector drawn from a dictionary with 500 terms. As with the CORA corpus, we construct an adjacency-based DCNN that treats the citation network as an undirected graph.
\paragraph{Results Discussion} Table \ref{tab:cora} compares the performance of a two-hop DCNN with several baselines. The DCNN offers the best performance according to different measures including classification accuracy and micro- and macro-averaged F1, and the gain is statistically significant in each case with negligible p-values. For all models except the CRF, we assessed this via a one-tailed two-sample Welch's t-test. The CRF result is quoted from prior work, so we used a one-tailed one-sample test.
Figures \ref{fig:nodeclasscoraprop} and \ref{fig:nodeclasspubmedprop} show the learning curves for the Cora and Pubmed datasets. The DCNN generally outperforms the baseline methods on the Cora dataset regardless of the amount of training data available, although the Laplacian exponential diffusion kernel does offer comparable performance when the entire training set is available. Note that the kernel methods were prohibitively slow to run on the Pubmed dataset, so we do not include them in the learning curve.
Finally, the impact of diffusion breadth on performance is shown in Figure \ref{fig:nodeclass}. Most of the performance is gained as the diffusion breadth grows from zero to three hops, then levels out as the diffusion process converges.
\subsection{Graph Classification}
\begin{figure}
\caption{Search Breadth}
\label{fig:graphclasssearch}
\caption{MUTAG Learning Curve}
\label{fig:graphclassmutag}
\caption{ENZYMES Learning Curve}
\label{fig:graphclassenzymes}
\caption{The effect of search breadth (\ref{fig:graphclasssearch}), as well as learning curves for the MUTAG (\ref{fig:graphclassmutag}) and ENZYMES (\ref{fig:graphclassenzymes}) datasets.}
\label{fig:graphclass}
\end{figure}
We also ran experiments to investigate how well DCNNs can learn to label whole graphs.
\begin{table}[h]
\begin{tabular}{c|c|c|c||c|c|c|}
\cline{2-7}
& \multicolumn{3}{c||}{NCI1} & \multicolumn{3}{c|}{NCI109} \\
\cline{2-7}
Model & Accuracy & F (micro) & F (macro) & Accuracy & F (micro) & F (macro) \\
\hline
l1logistic & 0.5728 & 0.5728 & 0.5711 & 0.5555 & 0.5555 & 0.5411 \\
l2logistic & 0.5688 & 0.5688 & 0.5641 & 0.5586 & 0.5568 & 0.5402 \\
deepwl & 0.6215 & \textbf{0.6215} & 0.5821 & 0.5801 & 0.5801 & 0.5178 \\
2-hop DCNN & 0.6250 & 0.5807 & 0.5807 & 0.6275 & 0.5884 & 0.5884 \\
5-hop DCNN & \textbf{0.6261} & 0.5898 & \textbf{0.5898} & \textbf{0.6286} & \textbf{0.5950} & \textbf{0.5899} \\
\hline
\end{tabular}
\begin{tabular}{c|c|c|c||c|c|c|}
\cline{2-7}
& \multicolumn{3}{c||}{MUTAG} & \multicolumn{3}{c|}{PTC} \\
\cline{2-7}
Model & Accuracy & F (micro) & F (macro) & Accuracy & F (micro) & F (macro) \\
\hline
l1logistic & \textbf{0.7190} & 0.7190 & 0.6405 & 0.5470 & 0.5470 & 0.4272 \\
l2logistic & 0.7016 & 0.7016 & 0.5795 & 0.5565 & \textbf{0.5565} & \textbf{0.4460} \\
deepwl & 0.6563 & 0.6563 & 0.5942 & 0.5113 & 0.5113 & 0.4444 \\
2-hop DCNN & 0.6635 & 0.7975 & 0.7975 & \textbf{0.5660} & 0.0500 & 0.0531 \\
5-hop DCNN & 0.6698 & \textbf{0.8013} & \textbf{0.8013} & 0.5530 & 0.0 & 0.0526 \\
\hline
\end{tabular}
\begin{tabular}{c|c|c|c|}
\cline{2-4}
& \multicolumn{3}{c|}{ENZYMES} \\
\cline{2-4}
Model & Accuracy & F (micro) & F (macro) \\
\hline
l1logistic & 0.1640 & 0.1640 & 0.0904 \\
l2logistic & 0.2030 & 0.2030 & 0.1110 \\
deepwl & \textbf{0.2155} & \textbf{0.2155} & \textbf{0.1431} \\
2-hop DCNN & 0.1590 & 0.1590 & 0.0809 \\
5-hop DCNN & 0.1810 & 0.1810 & 0.0991 \\
\hline
\end{tabular}
\caption{A comparison of the performance between baseline methods and two and five-hop DCNNs on several graph classification datasets.}
\label{tab:graphclassification}
\end{table}
\paragraph{Protocol} At the beginning of each trial, input graphs are randomly assigned to training, validation, or test, with each set having the same number of graphs. During the learning phase, the training and validation graphs, their node features, and their labels are made visible; the training set is used to determine the parameters and the validation set to determine hyperparameters. At test time, the test graphs and features are made visible and the graph labels are predicted and compared with ground truth. Table \ref{tab:graphclassification} reports the mean accuracy, micro-averaged F1, and macro-averaged F1 over several trials.
We also provide learning curves for the MUTAG (Figure \ref{fig:graphclassmutag}) and ENZYMES (Figure \ref{fig:graphclassenzymes}) datasets. In these experiments, the validation and test sets each contain 10\% of the graphs, and we report the performance of each model as a function of the proportion of the remaining graphs that are made available for training.
\paragraph{Baseline Methods} As a simple baseline, we apply linear classifiers to the average feature vector of each graph; `l1logistic' and `l2logistic' indicate $\ell 1$ and $\ell 2$-regularized logistic regression applied as described. `deepwl' indicates the Weisfeiler-Lehman (WL) subtree deep graph kernel. Deep graph kernels decompose a graph into substructures, treat those substructures as words in a sentence, and fit a word-embedding model to obtain a vectorization \cite{Yanardag:2015fm}.
\paragraph{Graph Classification Data} We apply DCNNs to a standard set of graph classification datasets that consists of NCI1, NCI109, MUTAG, PTC, and ENZYMES. The NCI1 and NCI109 \cite{Wale:2007ec} datasets consist of 4100 and 4127 graphs that represent chemical compounds. Each graph is labeled with whether it has the ability to suppress or inhibit the growth of a panel of human tumor cell lines, and each node is assigned one of 37 (for NCI1) or 38 (for NCI109) possible labels. MUTAG \cite{debnath1991structure} contains 188 nitro compounds that are labeled as either aromatic or heteroaromatic with seven node features. PTC \cite{toivonen2003statistical} contains 344 compounds labeled with whether they are carcinogenic in rats with 19 node features. Finally, ENZYMES \cite{borgwardt2005protein} is a balanced dataset containing 600 proteins with three node features.
\paragraph{Results Discussion}
In contrast with the node classification experiments, there is no clear best model choice across the datasets or evaluation measures. In fact, according to Table \ref{tab:graphclassification}, the only clear choice is the `deepwl' graph kernel model on the ENZYMES dataset, which significantly outperforms the other methods in terms of accuracy and micro- and macro-averaged F measure. Furthermore, as shown in Figure \ref{fig:graphclass}, there is no clear benefit to broadening the search breadth $H$. These results suggest that, while diffusion processes are an effective representation for \emph{nodes}, they do a poor job of summarizing \emph{entire graphs}. It may be possible to improve these results by finding a more effective way to aggregate the node operations than a simple mean, but we leave this as future work.
\section{Limitations}
\label{sec:limitations}
\paragraph{Scalability} DCNNs are realized as a series of operations on dense tensors. Storing the largest tensor ($P^*$, the transition matrix power series) requires $O(N_t^2 H)$ memory, which can lead to out-of-memory errors on the GPU for very large graphs in practice. As such, DCNNs can be readily applied to graphs of tens to hundreds of thousands of nodes, but not to graphs with millions to billions of nodes.
\paragraph{Locality} The model is designed to capture local behavior in graph-structured data. As a consequence of constructing the latent representation from diffusion processes that begin at each node, we may fail to encode useful long-range spatial dependencies between individual nodes or other non-local graph behavior.
\section{Related Work}
\label{sec:relatedwork}
In this section we describe existing approaches to the problems of semi-supervised learning, graph classification, and edge classification, and discuss their relationship to DCNNs.
\paragraph{Other Graph-Based Neural Network Models} Other researchers have investigated how CNNs can be extended from grid-structured to more general graph-structured data. \cite{DBLP:journals/corr/BrunaZSL13} propose a spatial method with ties to hierarchical clustering, where the layers of the network are defined via a hierarchical partitioning of the node set. In the same paper, the authors propose a spectral method that extends the notion of convolution to graph spectra. Later, \cite{Henaff:2015uw} applied these techniques to data where a graph is not immediately present but must be inferred. DCNNs, which fall within the spatial category, are distinct from this work because their parameterization makes them transferable; a DCNN learned on one graph can be applied to another. A related branch of work has focused on extending convolutional neural networks to domains where the structure of the graph itself is of direct interest \cite{Scarselli:ku, Micheli:bn, Duvenaud:2015ww}. For example, \cite{Duvenaud:2015ww} construct a deep convolutional model that learns real-valued fingerprint representation of chemical compounds.
\paragraph{Probabilistic Relational Models} DCNNs also share strong ties to probabilistic relational models (PRMs), a family of graphical models that are capable of representing distributions over relational data \cite{Koller:2009:PGM:1795555}. In contrast to PRMs, DCNNs are deterministic, which allows them to avoid the exponential blowup in learning and inference that hampers PRMs.
Our results suggest that DCNNs outperform partially-observed conditional random fields, the state-of-the-art probabilistic relational model for semi-supervised learning. Furthermore, DCNNs offer this performance at considerably lower computational cost. Learning the parameters of both DCNNs and partially-observed CRFs involves numerically minimizing a nonconvex objective -- the backpropagated error in the case of DCNNs and the negative marginal log-likelihood for CRFs. In practice, the marginal log-likelihood of a partially-observed CRF is computed using a contrast-of-partition-functions approach that requires running loopy belief propagation twice; once on the entire graph and once with the observed labels fixed \cite{Verbeek:2007tb}. This algorithm, and thus each step in the numerical optimization, has exponential time complexity $\mathcal{O}(E_tN_t^{C_t})$ where $C_t$ is the size of the maximal clique in $G_t$ \cite{Cohn:2006cv}. In contrast, the learning subroutine for a DCNN requires only one forward and backward pass for each instance in the training data. The complexity is dominated by the matrix multiplication between the graph definition matrix $A$ and the design matrix $V$, giving an overall polynomial complexity of $\mathcal{O}(N_t^2 F)$.
\paragraph{Kernel Methods} Kernel methods define similarity measures either between nodes (so-called kernels on graphs) \cite{Fouss:2012bf} or between graphs (graph kernels) and these similarities can serve as a basis for prediction via the kernel trick. Note that `kernels on graphs', which are concerned with nodes, should not be confused with `graph kernels', which are concerned with whole graphs. The performance of graph kernels can be improved by decomposing a graph into substructures, treating those substructures as words in a sentence, and fitting a word-embedding model to obtain a vectorization \cite{Yanardag:2015fm}.
DCNNs share ties with the exponential diffusion family of kernels on graphs. The exponential diffusion graph kernel $K_{ED}$ is a sum of a matrix power series:
\begin{align}
K_{ED} = \sum\limits_{j = 0}^{\infty} \frac{\alpha^j A^j}{j!} = \exp(\alpha A)
\end{align}
The diffusion-convolution activation given in (\ref{eqn:activations}) is also constructed from a power series. However, the representations have several important differences. First, the weights in (\ref{eqn:activations}) are learned via backpropagation, whereas the kernel representation is not learned from data. Second, the diffusion-convolutional representation is built from both node features and the graph structure, whereas the exponential diffusion kernel is built from the graph structure alone. Finally, the representations have different dimensions: $K_{ED}$ is an $N_t \times N_t$ kernel matrix, whereas $Z_t$ is a $N_t \times H \times F$ tensor that does not conform to the definition of a kernel.
\section{Conclusion and Future Work}
\label{sec:conclusion}
By learning a representation that encapsulates the results of graph diffusion, diffusion-convolutional neural networks offer performance improvements over probabilistic relational models and kernel methods at node classification tasks. We intend to investigate methods for a) improving DCNN performance at graph classification tasks and b) making the model scalable in future work.
\section{Appendix: Representation Invariance for Isomorphic Graphs}
\label{sec:appendix}
If two graphs $G_1$ and $G_2$ are isomorphic, then their diffusion-convolutional activations are the same. Proof by contradiction; assume that $G_1$ and $G_2$ are isomorphic and that their diffusion-convolutional activations are different. The diffusion-convolutional activations can be written as
\begin{align*}
Z_{1jk} &= f\left(W^c_{jk} \odot \sum\limits_{v \in V_1} \sum\limits_{v' \in V_1} P^{*}_{1vjv'} X_{1v'k} / N_1\right) \\
Z_{2jk} &= f\left(W^c_{jk} \odot \sum\limits_{v \in V_2} \sum\limits_{v' \in V_2} P^{*}_{2vjv'} X_{2v'k} / N_2\right)
\end{align*}
Note that
\begin{align*}
&V_1 = V_2 = V \\
&X_{1vk} = X_{2vk} = X_{vk} \;\forall\; v \in V, k \in \left[1,F\right] \\
&P^{*}_{1vjv'} = P^{*}_{2vjv'} = P^{*}_{vjv'} \; \forall \; v,v' \in V, j \in \left[0,H\right] \\
&N_1 = N_2 = N
\end{align*}
by isomorphism, allowing us to rewrite the activations as
\begin{align*}
Z_{1jk} &= f\left(W^c_{jk} \odot \sum\limits_{v \in V} \sum\limits_{v' \in V} P^{*}_{vjv'} X_{v'k} / N\right) \\
Z_{2jk} &= f\left(W^c_{jk} \odot \sum\limits_{v \in V} \sum\limits_{v' \in V} P^{*}_{vjv'} X_{v'k} / N\right)
\end{align*}
This implies that $Z_1 = Z_2$ which presents a contradiction and completes the proof.
\subsubsection*{Acknowledgments}
We would like to thank Bruno Ribeiro, Pinar Yanardag, and David Belanger for their feedback on drafts of this paper.
\end{document}
|
\begin{document}
\title{Conflict-free (vertex)-connection numbers of graphs with small diameters\footnote{Supported by NSFC No.11871034, 11531011 and NSFQH No.2017-ZJ-790.}}
\author{
\small Xueliang Li$^{1,2}$, Xiaoyu Zhu$^1$\\
\small $^1$Center for Combinatorics and LPMC\\
\small Nankai University, Tianjin 300071, China\\
\small Email: [email protected]; [email protected]\\
\small $^2$School of Mathematics and Statistics\\
\small Qinghai Normal University, Xining, Qinghai 810008, China
}
\date{}
\maketitle
\begin{abstract}
A path in an(a) edge(vertex)-colored graph is called a conflict-free path if there exists a color used on only one of its edges(vertices). An(A) edge(vertex)-colored graph is called conflict-free (vertex-)connected if for each pair of distinct vertices, there is a conflict-free path connecting them. For a connected graph $G$, the conflict-free (vertex-)connection number of $G$, denoted by $cfc(G)(\text{or}~vcfc(G))$, is defined as the smallest number of colors that are required to make $G$ conflict-free (vertex-)connected. In this paper, we first give the exact value $cfc(T)$ for any tree $T$ with diameters $2,3$ and $4$. Based on this result, the conflict-free connection number is determined for any graph $G$ with $diam(G)\leq 4$ except for those graphs $G$ with diameter $4$ and $h(G)=2$. In this case, we give some graphs with conflict-free connection number $2$ and $3$, respectively. For the conflict-free vertex-connection number, the exact value $vcfc(G)$ is determined for any graph $G$ with $diam(G)\leq 4$.
{\flushleft\bf Keywords}: conflict-free (vertex-)connection coloring; conflict-free (vertex-)connection number; diameter
{\flushleft\bf AMS subject classification 2010}: 05C15, 05C40, 05C05.
\end{abstract}
\section{Introduction}
In this paper, all graphs considered are simple, finite and undirected. We refer to book \cite{BM} for notation and terminology in graph theory not defined here. Among all subjects of graph theory, chromatic theory is no doubt the most arresting. In this paper, we mainly deal with the conflict-free (vertex-) connection coloring of graphs.
In \cite{ELRS}, Even et al. first introduced the hypergraph version of conflict-free (vertex-)coloring. Actually, this coloring emerged as the requirement of the times. It was motivated to solve the problem of assigning frequencies to different base stations in cellular networks. Since then, this coloring has received wide attention due to its practical application value.
Afterwards, Czap et al. introduced the concept of conflict-free connection coloring in \cite{CJV}. In an edge-colored graph, a path is called \emph{conflict-free} if there is at least one color used on exactly one of its edges. This edge-colored graph is said to be \emph{conflict-free connected} if any pair of distinct vertices of the graph are connected by a conflict-free path, and the coloring is called a {\it conflict-free connection coloring}. The \emph{conflict-free connection number} of a connected graph $G$, denoted by $cfc(G)$, is defined as the smallest number of colors required to make $G$ conflict-free connected. There are many results on this topic, for more details, please refer to \cite{CDHJLS,CHL,CJLZ,CJV,DLLMZ}. It is easy to see that $1\leq cfc(G)\leq n-1$ for a connected graph $G$.
Motivated by the above concept, Li et al. \cite{LZZMZJ} introduced the concept of \emph{conflict-free vertex-connection}. A path in a vertex-colored graph is called \emph{conflict-free} if there is a color used on exactly one of its vertices. This vertex-colored graph is said to be \emph{conflict-free vertex-connected} if any two distinct vertices of the graph are connected by a conflict-free path, and the coloring is called a {\it conflict-free vertex-connection coloring}. The \emph{conflict-free vertex-connection number} of a connected graph $G$, denoted by $vcfc(G)$, is defined as the smallest number of colors required to make $G$ conflict-free vertex-connected. In \cite{DS,LZZMZJ,LW}, various results were given in respect of this concept. It has already been obtained that $2\leq vcfc(G)\leq \lceil \log_2(n+1)\rceil$.
We use $S_n$ to denote the \emph{star graph} on $n$ vertices and denote by $T(n_1,n_2)$ the \emph{double star} in which the degrees of its two (adjacent) center vertices are $n_1+1$ and $n_2+1$, respectively. For a connected graph $G$, the \emph{distance} between two vertices $u$ and $v$ is the minimum length of all paths between them, and we write it as $d_G(u,v)$. The \emph{eccentricity} of a vertex $v$ of $G$ is defined by $ecc_G(v)=\max_{u\in V(G)} d_G(u,v)$. The {\it diameter} of $G$ is defined by $diam(G)=\max_{v\in V(G)} ecc_G(v)$ while the {\it radius} of $G$ is defined by $rad(G)=\min_{v\in V(G)} ecc_G(v)$. These parameters have much to do with graph structures and are very significant in the field of graph study. So it stimulates our interest to research on the conflict-free (vertex-)connections of graphs with small diameters.
In this paper, we first give the exact value $cfc(T)$ for any tree $T$ with diameters $2,3$ and $4$. Based on this result, the conflict-free connection number is determined for any graph $G$ with $diam(G)\leq 4$ except for those graphs $G$ with diameter $4$ and $h(G)=2$. In this case, we give some graphs with conflict-free connection numbers $2$ and $3$, respectively. For the conflict-free vertex-connection number, the exact value $vcfc(G)$ is determined for any graph $G$ with $diam(G)\leq 4$.
\section{$cfc$-values for trees with diameters $2,3$ and $4$}\label{tree}
For a connected graph $G$, let $X$ denote the set of cut-edges of $G$, and let $C(G)$ denote the subgraph induced by the set $X$. It is easy to see that every component of $C(G)$ is a tree and $C(G)$ is a forest. Let $h(G)=\max\,\{cfc(T):T ~\text{is a component of}~ C(G)\}$. In \cite{CJV} and \cite{CHL}, the authors showed the following result.
\begin{lem}\cite{CJV}\label{cfcbound}
For a connected graph $G$, we have $h(G)\leq cfc(G)\leq h(G) + 1.$
Moreover, the bounds are sharp.
\end{lem}
So, $h(G)$ is a crucial parameter to determine the conflict-free connection number of a connected graph $G$. Nevertheless, from the definition of $h(G)$,
determining the value of $h(G)$ depends on determining the conflict-free connection numbers of trees. Therefore, in this section we first give the exact values of the
conflict-free connection numbers of trees with diameters $2,3$ and $4$.
\begin{thm}
For a tree $T$ with diameter $2$ or $3$, we have $cfc(T)=\Delta(T)$.
\end{thm}
\noindent {\it Proof.} It is easy to see that $T$ is a star $S_n$ if and only if it has diameter $2$, and a double star $T(n_1,n_2)(n_1\geq n_2)$ if and only if it has diameter $3$. For the former case, any two edges of $T$ must be colored differently in any conflict-free connection coloring, and thus $cfc(T)=\Delta(T)$. While in the latter case, we can obtain that $cfc(T)=n_1+1=\Delta(T)$ by a similar analysis.\qed
For a tree $T$ of diameter $4$, we denote by $u$ the unique vertex with eccentricity two. The neighbors of $u$ are pendent vertices $w_1,w_2,\cdots,w_\ell$ and $v_1,v_2,\cdots,v_k$ with degrees $p_1\geq p_2\geq\cdots\geq p_k$. Certainly, $k+\ell=d(u)$. In every conflict-free connection coloring $c$ of $T$, the incident edges of every vertex must receive different colors \ding{172}. Without loss of generality, set $c(uv_i)=i(1\leq i\leq k)$ and $c(uw_j)=k+j(1\leq j\leq \ell)$. Observe that if one incident edge of $v_i$ is assigned with color $j$, then color $i$ can not appear on any edge incident with $v_j(1\leq i,j\leq k)$ \ding{173}. Actually, we are seeking for the minimum number of colors satisfying \ding{172} and \ding{173}.
Next we define a vector class $S_r(r\in N^{+})$. We say that an $r$-tuple $(s_1,s_2,s_3,\cdots,s_r)(s_i(1\leq i\leq r)\in N)$ belongs to $S_r$ if we can find a sequence of distinct $2$-tuples $(1,i_{1,1}),(1,i_{1,2}),\cdots,(1,i_{1,s_1}),(2,i_{2,1}),\cdots,(2,i_{2,s_2}), \cdots,$\\ $(r,i_{r,s_r})$ the components of which are all from $[r]$ satisfying that: (1) the two components of every $2$-tuple are different, (2) $(i,j)$ and $(j,i)(1\leq i,j\leq r)$ can not both appear. Note that if $(s_1,s_2,s_3,\cdots,s_r)\in S_r$ then any permutation of its components also belongs to $S_r$. Thus we may suppose $s_1\geq s_2\geq s_3\geq\cdots\geq s_r$.
\begin{lem}\label{tuple}
An $r$-tuple $(s_1,s_2,s_3,\cdots,s_r)(s_i(1\leq i\leq r)\in N)$ belongs to $S_r$ if and only if $\sum\limits_{i=1}^{j}s_i\leq \frac{(2r-1-j)j}{2}(1\leq j\leq r)$.
\end{lem}
\noindent {\it Proof.} First we show the necessity. If $(s_1,s_2,s_3,\cdots,s_r)\in S_r$, then accordingly there is a sequence of $2$-tuples for them according to the definition. Suppose both $(i,j)$ and $(j,i)(1\leq i,j\leq r,i\neq j)$ do not appear. Then, randomly add one of them to the sequence. Repeat this operation until nothing can be added. Finally there are $\frac{(r-1)r}{2}$ $2$-tuples and the corresponding $r$-tuple is $(s'_1,s'_2,s'_3,\cdots,s'_r)$. Assume, to the contrary, there exists a $j$ such that $\sum_{i=1}^{j}s_i> \frac{(2r-1-j)j}{2}(1\leq j\leq r)$. Then $\sum_{i=1}^{j}s'_i\geq \sum_{i=1}^{j}s_i> \frac{(2r-1-j)j}{2}$. Obviously, $j\neq r$. Besides, we have $\sum_{i=j+1}^{r}s'_i\geq \frac{(r-j)(r-j-1)}{2}$ simply by checking the sequence. However, this implies that $\frac{(r-1)r}{2}=\sum_{i=1}^{r}s'_i=\sum_{i=1}^{j}s'_i+\sum_{i=j+1}^{r}s'_i>\frac{(2r-1-j)j}{2}+\frac{(r-j)(r-j-1)}{2}=\frac{(r-1)r}{2}$, a contradiction. Thus the necessity holds.
For the sufficiency, we prove it by applying induction on $r$. When $r=0,1,2$, it is easy to check that if $\sum_{i=1}^{j}s_i\leq \frac{(2r-1-j)j}{2}(1\leq j\leq r)$ for $(s_1,s_2,s_3,$ $\cdots, s_r)$, then this $r$-tuple belongs to $S_r$. Assume that the sufficiency holds for $r=p$. Consider the case $r=p+1$. For $(s_1,s_2,s_3,\cdots,s_{p+1})$, suppose $s_1=p-q$. We distinguish two cases to clarify.
\textbf{Case 1.} $s_{q+1}>s_{q+2}$. In this case, we prove that $(s_2-1,s_3-1,\cdots,s_{q+1}-1, s_{q+2},\cdots,s_{p+1})\in S_p$. When $2\leq j\leq q+1$, we have $\sum_{i=2}^{j}(s_i-1)\leq (j-1)(p-q-1)< \frac{(2p-j)(j-1)}{2}$. When $q+2\leq j\leq p+1$, $\sum_{i=2}^{q+1}(s_i-1)+\sum_{i=q+2}^{j}s_i=\sum_{i=1}^{j}s_i-p\leq \frac{(2p-j+1)j}{2}-p=\frac{(2p-j)(j-1)}{2}$. Therefore, $(s_2-1,s_3-1,\cdots,s_{q+1}-1, s_{q+2},\cdots,s_{p+1})\in S_p$, and so there exists a sequence for it. By adding $(1,p+1),(2,p+1),\cdots,(q,p+1),(p+1,q+1),(p+1,q+2),\cdots,(p+1,p)$ to this sequence, we get a sequence satisfying (1), (2) for $(s_2,\cdots,s_{p+1},s_1)$, implying that $(s_1,s_2,\cdots,s_{p+1})$ belongs to $S_{p+1}$.
\textbf{Case 2.} $s_{q+1}=s_{q+2}$. Suppose $s_r>s_{r+1}=s_{r+2}=\cdots=s_{q+1}=s_{q+2}=\cdots=s_t>s_{t+1}$. Again, we prove that $s'=(s'_1,s'_2,\cdots,s'_p)=(s_2-1,s_3-1,\cdots,s_r-1,s_{r+1},\cdots,s_{t-q+r-1},s_{t-q+r}-1,\cdots,s_t-1,s_{t+1},\cdots,s_{p+1})\in S_p$. Similar to the discussion in \textbf{Case 1}, for $1\leq j\leq r-1$ or $t-1\leq j\leq p$, $\sum_{i=1}^{j}s'_i\leq \frac{(2p-1-j)j}{2}$. Thus if $s'\notin S_p$, the first $j$ such that $\sum_{i=1}^{j-1}s'_i\leq \frac{(2p-j)(j-1)}{2}$ and $\sum_{i=1}^{j}s'_i> \frac{(2p-j-1)j}{2}$ must appear between $r$ and $t-2$. Then we also deduce that $s'_j>p-j$, so $s'_i\geq p-j(j+1\leq i\leq t-1)$. However, this leads to $\frac{(2(p+1)-1-t)t}{2}\geq \sum_{i=1}^{t}s_i=\sum_{i=1}^{t-1}s'_i+p=\sum_{i=1}^{j}s'_i+\sum_{i=j+1}^{t-1}s'_i+p>\frac{(2p-j-1)j}{2}+(t-1-j)(p-j)+p>\frac{(2(p+1)-1-t)t}{2}$,
a contradiction. Thus $s'\in S_p$. By a similar analysis as in \textbf{Case 1}, we can check that $(s_1,s_2,\cdots,s_{p+1})\in S_{p+1}$. The proof is thus complete.\qed
We call the colors from $[k]$ the old colors. In any conflict-free connection coloring of $T$, we denote by $h_i(1\leq i\leq k)$ the number of old colors used on the edges incident with $v_i$ except $uv_i$. Obviously $(h_1,h_2,\cdots,h_k)\in S_k$. In order to add new colors as few as possible, we are actually seeking the number $a=\max(\min\{\max\{p_i-1-h_i:1\leq i\leq k\}:(h_1,h_2,\cdots,h_k)\in S_k\},0)$.
Let $c_i=p_i-k+i-1(1\leq i\leq k)$, $b=\max\{\lceil \max\{\sum_{i=1}^{j}\frac{c_i}{j}:1\leq j\leq k\}\rceil,0\}$. Suppose that $\max\{\sum_{i=1}^{j}\frac{c_i}{j}:1\leq j\leq k\}$ is obtained by $j=t$. Assume $a<b$. Then $a<\sum_{i=1}^{t}\frac{c_i}{t}$. Thus there exists a $k$-tuple $(h_1,h_2,\cdots,h_k)\in S_k$ such that $h_i\geq p_i-1-a>p_i-1-\sum_{i=1}^{t}\frac{c_i}{t}$. However, this implies that $\sum_{i=1}^{t}h_i>\sum_{i=1}^{t}(p_i-1)-\sum_{i=1}^{t}c_i=\sum_{i=1}^{t}(k-i)=\frac{(2k-1-t)t}{2}$, a contradiction to $(h_1,h_2,\cdots,h_k)\in S_k$ by Lemma \ref{tuple}. Thus $a\geq b$. Next, we only need to construct $(h_1,h_2,\cdots,h_k)\in S_k$ with $b=\max\{p_i-1-h_i:1\leq i\leq k\}$. Letting $h_i=\max\{p_i-1-b,0\}$, it can be easily verified that $(h_1,h_2,\cdots,h_k)$ satisfies our demand. As a result, $a=b$.
Combining Lemma \ref{tuple} with the above analysis, we get the following result.
\begin{thm}
Let $T$ be a tree with diameter $4$, and denote by $u$ its unique vertex with eccentricity two. The neighbors of $u$ are pendent vertices $w_1,w_2,\cdots,w_\ell$ and $v_1,v_2,\cdots,v_k$ with degrees $p_1\geq p_2\geq\cdots\geq p_k$. Then $cfc(T)=\max\{k+b,d(u)\}$ where $b=\max\{\lceil \max\{\sum_{i=1}^{j}\frac{c_i}{j}:1\leq j\leq k\}\rceil,0\}$ and $c_i=p_i-k+i-1(1\leq i\leq k)$.
\end{thm}
\section{Results for graphs with diameters $2,3$ and $4$}
Based on the results in the above section for trees with diameters 2,3, and 4, we are now ready to determine the
$cfc(G)$ and $vcfc(G)$ for graphs with diameters $2,3$ and $4$. At first, we present some auxiliary lemmas that will be used in the sequel.
\begin{lem}\cite{LZZMZJ}\label{vcfc=2}
For a connected graph $G$ of order at least $3$, we have that $vcfc(G)=2$
if and only if $G$ is $2$-connected or $G$ has only one cut-vertex.
\end{lem}
\begin{lem}\cite{LZZMZJ}
For a connected graph $G$, we have $vcfc(G)\leq rad(G)+1$.
\end{lem}
For the conflict-free connection of graphs, the following results have already been obtained.
\begin{lem}\cite{CJV}
For a noncomplete $2$-connected graph $G$, we have $cfc(G)=2$.
\end{lem}
\begin{lem}\cite{CHL}\label{2-edge-connected}
For a noncomplete $2$-edge-connected graph $G$, we have $cfc(G)=2$.
\end{lem}
\begin{lem}\cite{CJV}\label{order2}
If $G$ is a connected graph and $C(G)$ is a linear forest whose each component has an order $2$, then $cfc(G)=2$.
\end{lem}
\begin{lem}\cite{CHL}\label{unique}
Let $G$ be a connected graph with $h(G)\geq 2$. If there exists a unique component $T$ of $C(G)$ such that $cfc(T)=h(G)$, then $cfc(G)=h(G)$.
\end{lem}
\begin{rem}
We have calculated the exact value $cfc(T)$ for any tree $T$ with $diam(T)\leq 4$ in \textbf{Section \ref{tree}}. If $G$ is a connected graph with $diam(G)\leq 4$,
then any component of $C(G)$ must be a tree with diameter no more than four. Thus we can calculate $h(G)$ according to the theorems in \textbf{Section \ref{tree}}.
\end{rem}
For graphs with diameter $2$, we have the following result.
\begin{thm}\label{diameter2}
For a connected graph $G$ with diameter $2$, we have $vcfc(G)=2$ and $cfc(G)=\max\{2,h(G)\}$.
\end{thm}
\noindent {\it Proof.} Since $G$ has diameter $2$, it is easy to find that $G$ has at most one cut-vertex. According to Lemma \ref{vcfc=2}, $vcfc(G)=2$. If $G$ is $2$-edge-connected, then $cfc(G)=2$ by Lemma \ref{2-edge-connected}. Otherwise, $C(G)$ must be a star, and thus $cfc(G)=\max\{2,h(G)\}$ by Lemmas \ref{order2} and \ref{unique}.\qed
For graphs with diameter $3$, we have the following result. Recall that a vertex in a block of a graph $G$ is called an \emph{internal vertex} if it is not a cut-vertex of $G$.
\begin{thm}\label{diameter3}
For a connected graph $G$ with diameter $3$, we have that $vcfc(G)\leq3$ and $cfc(G)=\max\{2,h(G)\}$ except for the graph depicted in \textbf{Figure 1} which has conflict-free connection number $h(G)+1=3$.
\end{thm}
\noindent {\it Proof.} Removing all internal vertices of end blocks of $G$, it is easy to check that at most one block is left. Since otherwise if there are two blocks $B_1,B_2$, we can always find two other blocks $C_1,C_2$ such that $V(B_i)\cap V(C_i)\neq \emptyset$ and $V(B_i)\cap V(C_{3-i})= \emptyset\,(i=1,2)$ and for any two internal vertices $u\in V(C_1),v\in V(C_2)$, every $u$-$v$ path is a $u$-$C_1$-$B_1$-$B_2$-$C_2$-$v$ path. However, this implies that the distance between $u$ and $v$ is at least four, contradicting the fact that $diam(G)=3$. Suppose that $G$ contains no more than one cut-vertex. Then $vcfc(G)=2$ according to Lemma \ref{vcfc=2}. Otherwise for the left block $B_1$, it is bound to contain all cut-vertices of $G$, and we assign the color $3$ to one of them and the color $2$ to all remaining vertices of $V(B_1)$. Other unmentioned vertices of $G$ share the color $1$. It is easy to check that $G$ is conflict-free vertex-connected under this coloring. As a result, $vcfc(G)\leq 3$.
The conflict-free connection number of $G$ has been determined by Lemmas \ref{2-edge-connected}, \ref{order2} and \ref{unique} when $h(G)\leq 1$ or $h(G)\geq 2$ and there exists a unique component $T$ such that $cfc(T)=h(G)$. Thus we only need to consider the remaining cases. This implies that $B_1$ exists and it is nontrivial. Besides, every component of $C(G)$ is a star with its center attached to $B_1$.
Let $h(G)=k$. If $k\geq3$, since $cfc(G)\geq k$, to prove $cfc(G)=k$, we only need to provide a conflict-free connection $k$-coloring of $G$. For each component of $C(G)$, give it a conflict-free connection coloring from $[k]$. As for each nontrivial block, give two of its edges the colors $2$ and $3$ respectively and all others the color $1$. It can be verified that $G$ is conflict-free connected in this way.
When $k=2$, we denote by $n_1$ the number of vertices of $B_1$ and $\ell$ the number of components of $C(G)$. If $\ell<n_1$, then there exists a vertex $v$ of $B_1$ not attached by any component of $C(G)$. Note that since $diam(G)=3$, the subgraph induced by the vertices each of which is attached by some component of $C(G)$ is complete. We only need to give a conflict-free connection $2$-coloring of $G$: The edges of each component of $C(G)$ receive different colors from $[2]$. Randomly choose an edge $e$ of $B_1$ incident with $v$ and each edge for each of other nontrivial blocks, then assign to them the color $2$. The remaining edges are given the color $1$. The checking process is omitted.
For the case $\ell=n_1$, certainly $B_1$ is complete with vertices $v_1,v_2,\cdots,v_{n_1}$. Since $diam(G)=3$, for any end block of $G$, all its internal vertices are adjacent to the cut-vertex it contains. If $n_1\geq4$, we offer a conflict-free connection $2$-coloring of $G$: Assign different colors to the edges of each component of $C(G)$ from $[2]$; give color $2$ to all edges of the path $v_1v_2\cdots v_{n_1}$ and color $1$ to the remaining edges of $B_1$. Observe that each edge of $B_1$ with color $i(i\in[2])$ is contained in a triangle the other two edges of which receive distinct colors. Then pick one edge for each end block and give it color $2$. Other edges are given color $1$. The verification is similar.
Suppose $n_1=3$ with at least one component of $C(G)$ being $P_2$. Choose one of such and give its edge the color $1$. Without loss of generality, assume that this edge is incident with $v_1\in V(B_1)$. Pick one edge of $B_1$ incident with $v_1$ and give it the color $2$, again, other edges of $B_1$ share the same color $1$. We color the edges of other components of $C(G)$ and nontrivial blocks the way as we did in the last paragraph. Obviously, this is a conflict-free connection $2$-coloring for $G$.
If $n_1=3$ and any component of $C(G)$ is a $P_3$, we show that two colors are not enough. Note that there are always two adjacent edges of $B_1$ sharing the same color if only two colors are used. Without loss of generality, suppose that the edges $v_3v_1,v_3v_2$ both have color $1$. Let $v_1u_1,v_2u_2$ have color $1$ and $v_1w_1,v_2w_2$ have color $2$ where these edges are all cut-edges. It is easy to check that there is no conflict-free path between $u_1$ and $u_2$ or $w_1$ and $w_2$ no matter what color the edge $v_1v_2$ is assigned, a contradiction. Thus according to Lemma \ref{cfcbound}, $cfc(G)=h(G)+1=3$.\qed
Finally, we study the conflict-free (vertex-)connection number of graphs with diameter $4$ in the next two results.
\begin{thm}\label{diameter4}
For a connected graph $G$ with diameter $4$, we have that $vcfc(G)\leq 3$, and $cfc(G)=2$ if $h(G)\leq 1$; $cfc(G)=h(G)$ if $h(G)\geq3$.
\end{thm}
\noindent {\it Proof.} Since $G$ has diameter $4$, then after removing all internal vertices of end blocks, the resulting graph has at most one cut-vertex. If there is none, we can give colors as we did in the proof of Theorem \ref{diameter3}. Otherwise, give color $3$ to this cut-vertex $v_1$ and color $2$ to all vertices of blocks incident with $v_1$ except for $v_1$. Finally, assign color $1$ to all remaining vertices. Surely, $G$ is conflict-free vertex-connected under this coloring.
Let $h(G)=k$. If $k\leq 1$, the result follows from Lemmas \ref{2-edge-connected} and \ref{order2}. If $k\geq 3$, we assign to $E(G)$ $k$ colors as we did in the third paragraph of the proof of Theorem \ref{diameter3}. For every pair of distinct vertices $u,v\in V(G)$, any path between them contains the same set of cut-edges. If they belong to the same component of $C(G)$, the conflict-free path is clear. Otherwise, since $diam(G)=4$, there are at most three cut-edges on the path. Thus at least one color of $2$ and $3$ (say $2$) appears at most once. If it does not appear, then we can choose a $u$-$v$ path passing the $2$-colored edge of a nontrivial block and evading all other such edges of the nontrivial blocks it goes through. Else, the desired path is one avoiding all $2$-colored edges of the nontrivial blocks it passes. Thus, $k$ colors are enough in this case.\qed
\begin{cor}
For a connected graph $G$ with $diam(G)\leq4$, we have that $vcfc(G)=3$ if and only if $G$ has more than one cut-vertex.
\end{cor}
\noindent {\it Proof.} The result is an immediate corollary of Lemma \ref{vcfc=2}, Theorems \ref{diameter2}, \ref{diameter3} and \ref{diameter4}.\qed
\begin{rem}
If $k=2$, according to Lemma \ref{cfcbound}, we have $2\leq cfc(G)\leq3$. The situation in this case is complicated. Suppose there are exactly $\ell$ components of $C(G)$ with conflict-free connection number 2. Then for each $\ell\geq2$, we give some graphs of diameter $4$ with conflict-free connection numbers 2 and 3, respectively.
\begin{figure}
\caption{The graph $G_\ell$ with $cfc(G_\ell)=2$ $(\ell\geq2)$.}
\end{figure}
See \textbf{Figure 2} for the graph $G_\ell$ with $cfc(G_\ell)=2$ $(\ell\geq2)$. Each $v_i(2\leq i\leq \ell+1)$ of $G_\ell$ is attached to a $P_3$. We give the two edges of each such $P_3$ the colors $1$ and $2$, respectively. Besides, give color $1$ to $u_1v_i$ and $2$ to $u_2v_i(3\leq i\leq \ell+1)$. The colorings of the other edges are labelled in \textbf{Figure 2}. It is easy to check that this is a conflict-free connection $2$-coloring for $G_\ell$.
\begin{figure}
\caption{The graph $H_\ell$ with $cfc(H_\ell)=3$ $(\ell\geq2)$.}
\end{figure}
The graph $H_\ell$ with $cfc(H_\ell)=3(\ell\geq2)$ is depicted in \textbf{Figure 3}. Suppose, to the contrary, there exists a conflict-free connection $2$-coloring $c$ for $H_\ell$. When $\ell=2$, without loss of generality, let $c(x_1x_2)=c(x_3x_4)=c(x_6x_7)=1, c(x_2x_3)=c(x_7x_8)=2$. Then if $c(x_3x_7)=1$, to ensure a conflict-free path between $x_4$ and $x_6$, there must be $c(x_3x_5)\neq c(x_5x_7)$. However, there is no conflict-free path between $x_1$ and $x_8$, a contradiction. The case when $c(x_3x_7)=2$ can be dealt with similarly. Thus $cfc(H_2)=3$. With the same method, we can deduce that $cfc(H_3)=3$.
For $H_\ell(\ell\geq4)$, without loss of generality, set $c(v_1w_1)=c(v_2w_3)=1, c(v_1w_2)=c(v_2w_4)=2$. Suppose there exist two monochromatic paths (say $u_1v_1u_2$ and $u_1v_2u_2$) with the same color between $u_1$ and $u_2$. Then there is no conflict-free path between $w_1$ and $w_3$ or $w_2$ and $w_4$, contradicting our assumption. If these two monochromatic paths receive different colors, then there is no $w_1$-$w_4$ conflict-free path, a contradiction. Assume that $c(u_1v_1)=c(u_1v_2)\neq c(u_2v_1)=c(u_2v_2)$. For the sake of the existence of conflict-free paths between $w_1$ and $w_3$, and between $w_2$ and $w_4$, there must be two monochromatic $u_1$-$u_2$ paths with different colors, a contradiction to our above analysis. Therefore, $u_1$ and $u_2$ are connected by at most three distinct paths, contradicting $\ell\geq4$. As a result, $cfc(H_\ell)=3(\ell\geq4)$.
\end{rem}
\end{document}
|
\begin{document}
\title{
\normalsize \bfseries QKD Quantum Channel Authentication}
\author{\small J.T. Kosloski
\\%\affiliation{
\emph{\footnotesize
National Security Agency, 9800 Savage Road, Fort George G. Meade, Maryland 20755}
\thanks{[email protected]}}
\date{\footnotesize (Dated: \today)}
\maketitle
\begin{abstract}Several simple yet secure protocols to authenticate the quantum channel of
various QKD schemes, by coupling the photon sender's knowledge of a shared secret and the
QBER Bob observes, are presented. It is shown that Alice can encrypt certain portions of
the information needed for the QKD protocols, using a sequence whose security is based on
computational-complexity, without compromising all of the sequence's entropy. It is then
shown that after a Man-in-the-Middle attack on the quantum and classical channels, there is
still enough entropy left in the sequence for Bob to detect the presence of Eve by
monitoring the QBER.
Finally, it is shown that the principles presented can be implemented to authenticate the quantum channel associated with any
type of QKD scheme, and they can also be used for Alice to authenticate Bob.
\end{abstract}
\begin{multicols}{2}
\section{Introduction}
Quantum Key Distribution (QKD) has gained considerable interest in the academic and
commercial sectors in recent years because of its ability to offer absolute security against
all attacks that can be carried out on classical and quantum computers. This is in stark
contrast to current classical public-key schemes that have been shown to be vulnerable to
attacks on a quantum computer \cite{shor}. However, these same classical schemes do have a
significant advantage in that they can be used to authenticate messages and eliminate
Man-in-the-Middle attacks, at least when Eve (the adversary) is limited to a classical
computer. In the absence of an authenticated public channel, most QKD protocols, such as
BB84 \cite{BB84}, are not secure against Man-in-the-Middle attacks.
The current method to secure commercially viable QKD protocols against such an attack is to
authenticate the classical communications between Alice and Bob. This prevents Eve from
establishing key with either one because she would not be able to carry out the classical
communications necessary for the protocols, and she would be limited to attacks that
increase Bob's observed quantum bit error rate (QBER). The Wegman-Carter authentication
scheme \cite{w-c_authen} and variations thereof \cite{zielinger_authen} seem to be the most
commonly implemented methods to authenticate QKD public channels. They also seem to be
sufficient to protect against Man-in-the-Middle attacks. However, these schemes do not
actually authenticate the users of a quantum channel, and there could be situations where
this is desired.
There have been several quantum authentication protocols developed for the purpose of
authenticating quantum messages \cite{quant_authen_1} \cite{quant_authen_2}
\cite{quant_authen_3}, with much of the focus being on the use of entanglement. A quantum
message is a normal message sent over a quantum channel using quantum codes. On the other
hand, only random bits are transmitted over the quantum channel in QKD, and all messages are
sent over the classical channel. In many of the quantum message authentication schemes, a
shared secret is used to encrypt a message that is transmitted using one of several quantum
codes. An imposter is then detected by monitoring the errors in the code words. One problem
these schemes have is the inherent structure in the codes and Eve's ability to take
advantage of possible correlations between two sequential bits, resulting from the
structures of quantum codes.
However, in QKD, there are no bit-to-bit correlations, assuming a perfectly random raw bit
sequence, so it seems reasonable that QKD could be simpler to authenticate than a quantum
message.
In this article, it is shown that the quantum-based
security of entanglement-based authentication may not be necessary, and that
computational-complexity-based schemes are sufficient to authenticate the quantum channel of
a QKD system.
Four protocols are presented, each of which requires only a shared secret and a
key-expansion function, in addition to
the standard QKD protocols, to detect an imposter. Through examples of Man-in-the-Middle
attacks, it is shown that even though information about the shared secret will be leaked to
Eve during a QKD session, as long as determining the shared secret (given the expanded key)
requires more computation than is possible in a few seconds, there is enough entropy
remaining in the expanded key for Bob to detect the presence of an imposter by monitoring
the QBER. Finally, it is shown that the basic principles used for these protocols can be
implemented to authenticate the quantum channel associated with any type of QKD scheme, and
that these protocols can also be used for Alice to authenticate Bob.
\section{Protocols}Consider the situation where Alice and Bob are going to generate key
using BB84 and have an $n$-bit shared secret $K$, where $n$ is chosen based on the level of
desired security. Also suppose that Alice and Bob have agreed on a key expansion function
$F()$, which need not be kept secret, that they consider secure from time-limited
cryptographic attacks on both classical and quantum computers. The time it takes to
determine $K$ given $F(K)$ needs to be longer than the time it takes to perform a QKD
session.
For notational purposes, let $F(K)^i$ be the $i^{th}$ bit of the expanded sequence, which is
synchronized with clocks at Alice and Bob. Let $x^t$ and $y^t$ be Alice's bit and basis
choice at time $t$, and let $z^t$ be Bob's basis choice. Let the observable being used
(phase, polarization, orbital angular momentum, ...) be represented by $\Gamma$, and let the
two conjugate bases be denoted by $\Gamma_0$ and $\Gamma_1$. To put the notation into
context, the quantum portion of BB84 is carried out when Alice sends $\Gamma_{y^t}=x^t$ and
Bob measures $\Gamma_{z^t}$.
Each of the protocols below allow Bob, after Error Correction (EC), to conclude whether or
not the photons originated with an impersonator, as well as whether or not he communicated
with an impersonator during either sifting or EC, depending on the protocol. That is not to
say that these protocols protect against the possibility that Eve is intercepting
information, which is the purpose of the actual QKD protocol, but it does say that the
information did not originate with Eve.
Note that $F(K)$, which is a pseudo-random bit sequence, will not be
available to Eve for analysis until she has recovered the random bit stream with which it is
combined, such as Alice's bit or basis choices, because a random stream XORed with any
stream produces a random stream. So, Eve will not even be able to begin working on the
recovery of $K$ until after EC. It should also be noted that in the protocols below, $1a$
and $2a$ have timing limitations that $1b$ and $2b$ do not. Namely, $1a$ and $2a$ are only
secure if Eve does not have the opportunity to complete the entire Alice-Eve session before
starting Eve-Bob because Eve can simply omit sending a photon to Bob for the times that
correspond to $j\in\{t\}$ for which he did not learn $F(K)^j$, and Bob would attribute a
lack of detection events to attenuation of the photons. Conversely, $1b$ and $2b$ force Eve
to use a continuous stream of $F(K)$ starting at $F(K)^0$, so she cannot avoid times for
which she does not know $F(K)^t$. Also note that the timing requirement for the first two
protocols is not unreasonable and can easily be met.
\subsection*{Protocol 1.a}
\begin{enumerate}
\item
\begin{enumerate}
\item Alice sends a photon with $\Gamma_{y^t}=x^t \oplus F(K)^t$.
\item Bob measures $\Gamma_{z^t}$, and records\\ $x'^t=\Gamma_{z^t}\oplus F(K)^t$.
\item This step continues until enough photons have been sent for Bob to accurately calculate
the QBER.
\end{enumerate}
\item Alice and Bob perform bit distillation. Alice publicly discloses the set of her basis
choices, $\{y\}$. Bob then compares $\{y\}$ to $\{z\}$ and publicly discloses a list of the
times that have valid bits. (Alice $\rightarrow$ Bob Sifting using $\{y\}$ and $\{z\}$)
\item Alice and Bob perform EC on the bits of $\{x\}$ and $\{x'\}$ retained after sifting,
using some agreed-upon scheme such as CASCADE \cite{cascade}. There is a possibility that
the error correction scheme used does not correct all of the errors, but corrects for some
maximum error rate, $\Delta$, with a high degree of certainty. $\Delta$ could either be a
limitation of the correction scheme or Alice's unwillingness to correct more than a certain
number of errors. For simplicity, suppose that $QBER \le \Delta$ implies there will be no
errors left after EC (with some degree of certainty) and $QBER>\Delta$ implies there will be
about a $(QBER - \Delta )$ error rate after EC.
\item Bob makes a conclusion about the security of the error-corrected bits. If the QBER is
too high, Bob concludes that either Eve has gained too much information concerning the key
that he established with Alice (standard BB84 conclusion) or that Alice did not send the
original photons (conclusion concerning the authenticity of the photons).
\item Either Alice and Bob perform privacy amplification to create final keys, or they start
over.
\item Alice and Bob create a new K. Alice and Bob take $n$ secure bits, either pre-placed or established during a QKD
session, and create a new $K$ to authenticate the next QKD session.\\
\end{enumerate}
\subsection*{Protocol 1.b}
\begin {enumerate}
\item
\begin {enumerate}
\item Alice sends a photon with $\Gamma_{y^t}=x^t$.
\item Bob measures $\Gamma_{z^t}$, and records $x'^t=\Gamma_{z^t}$.
\item This step continues until enough photons have been sent for Bob to accurately calculate
the QBER.
\end{enumerate}
\item Alice $\rightarrow$ Bob Sifting using $\{y\}$ and $\{z\}$. Bob and Alice then apply the stream $F(K)$ to the
bits retained after sifting with a bit-wise XOR.
\item Alice and Bob perform EC on the bits of $F(K)$ applied to the bits of $\{x\}$ and $\{x'\}$
retained after sifting.
\item Bob makes a conclusion about the security of the error-corrected bits.
\item Either Alice and Bob perform privacy amplification to create final keys, or they start
over.
\item Alice and Bob create a new $K$.
\end {enumerate}
\subsection*{Security of Protocol 1}
The QBER for these protocols is a function of $\Gamma$ being measured and any tampering that
may occur on the quantum channel as well as the original sender's knowledge of the $F(K)$
sequence. If the established QBER is sufficiently low, Bob concludes that the person he is
communicating with for the EC, over the classical channel, either knows $(\Gamma_{y^t}$ and
$F(K)^t)$ or $(x^t$ and $F(K)^t)$, for the half of the time slots that correspond to his
detection events. This doesn't directly guarantee that the photon was originally sent by
Alice, but rather guarantees that the person Bob is communicating with for the EC, over the
classical channel, has knowledge that only the sender of the photons would have as well as
knowledge of $F(K)$, which only Alice has. Put another way, this protocol guarantees that
Bob is communicating classically with the sender of the photons for the EC, and that the
sender knows $F(K)$. Therefore, the
original sender must be Alice.
To understand the security of these protocols, consider the following Man-in-the-Middle
attack against Protocol 1.a, assuming the timing restrictions for Protocol 1.a, as noted
above, have been met.
\begin{enumerate}
\item
\begin{enumerate}
\item Alice sends a photon with $\Gamma_{y^t}=x^t \oplus F(K)^t$.
\item Eve measures $\Gamma_{\mu^t}$, and records $\chi^t=\Gamma_{\mu^t}.$
\item Eve sends a photon with $\Psi_{\nu^t}=\xi^t$, where $\Psi$ and $\Gamma$ are the same
observable.
\item Bob measures $\Psi_{z^t}$, and records \\$x'^t=\Psi_{z^t}\oplus F(K)^t$.
\item This step continues until enough photons have been sent for Bob to accurately calculate
the QBER.
\end{enumerate}
\item Alice and Eve perform bit distillation. Alice sends Eve the set of her basis choices,
$\{y\}$. Eve then compares $\{y\}$ to $\{\mu\}$ and sends Alice a list of which bits to
include, with about half of them being discarded. (Alice $\rightarrow$ Eve sifting)
\item Alice and Eve perform EC. Eve didn't know $F(K)$, so she has a $.50$ error rate in
her key, relative to Alice. After EC, Eve still has an $\alpha = \max\{0,(.50-\Delta)\}$
error rate for the bits retained after sifting. If $\Delta$ is sufficiently small, this
could prevent Eve from establishing perfect keys with Alice, and could allow Alice to detect
an imposter while Alice and Eve are communicating with the keys.
After EC, Eve's $F(K)$, for the bits corresponding to events retained after sifting, has an
error rate of $\alpha$, and her error rate for the complete $F(K)$ is then $(.25
+\frac{\alpha}{2})$.
\item Eve and Bob perform bit distillation. Eve sends Bob the set of her basis choices,
$\{\nu\}$. Bob then compares $\{\nu\}$ to $\{z\}$ and sends Eve a list of which bits to
include, with about half of them being discarded (Eve $\rightarrow$ Bob Sifting). As long
as Eve did not know $\{y\}$ prior to (Alice $\rightarrow$ Eve Sifting) and did not know
$\{z\}$ prior to (Eve $\rightarrow$ Bob Sifting), about half of the bits retained by Bob and
Eve will correspond to bits retained by Eve and Alice.
\item Eve and Bob perform EC. Eve's total error rate of $F(K)$ is $(.25+\frac{\alpha}{2})$,
so her key will have a $(.25+\frac{\alpha}{2})$ error rate relative to Bob's key.
\item Bob will calculate a $.25 \le (QBER = .25+\frac{\alpha}{2}) \le .50 $ and conclude
that Eve must be involved.
\end{enumerate}
An analogous Man-in-the-Middle attack carried out against 1.b would have similar results in
practice, but without the timing restriction. Against 1.b the attack would, in theory,
induce a $QBER=\alpha$ which implies $0\le QBER \le .5$. However, keeping in mind that the
most trivial attacks against QKD produce a $QBER=.25$, it is unlikely that Alice would allow
$\Delta
> .25$ and therefore, in practice, Bob will also calculate $.25 < (QBER = \alpha) \le .5$
with protocol 1.b. Also note that during attacks on 1.a and 1.b, through interactive EC with
Bob, Eve can take advantage of some of the information she gains during the interaction to
ensure that the QBER appears to be a little lower than it actually is. This threat can be
eliminated by using forward error correction, during which no information is leaked by Bob
back to Eve.
\subsection*{Protocol 2.a}
\begin{enumerate}
\item
\begin{enumerate}
\item Alice sends a photon with $\Gamma_{y^t\oplus F(K)^t}=x^t$.
\item Bob measures $\Gamma_{z^t\oplus F(K)^t}$, and records \\$x'^t=\Gamma_{z^t\oplus
F(K)^t}$.
\item This step continues until enough photons have been sent for Bob to accurately calculate
the QBER.
\end{enumerate}
\item Alice $\rightarrow$ Bob Sifting using $\{y\}$ and $\{z\}$
\item Alice and Bob perform EC on the bits of $\{x\}$ and $\{x'\}$ retained after sifting.
\item Bob makes a conclusion about the security of the error-corrected bits.
\item Either Alice and Bob perform privacy amplification, or they start over.
\item Alice and Bob create a new $K$.
\end{enumerate}
\subsection*{Protocol 2.b}
\begin{enumerate}
\item
\begin{enumerate}
\item Alice sends a photon with $\Gamma_{y^t}=x^t$.
\item Bob measures $\Gamma_{z^t}$, and records $x'^t=\Gamma_{z^t}$.
\item This step continues until enough photons have been sent for Bob to accurately calculate
the QBER.
\end{enumerate}
\item Bob publicly discloses a list of the times for which he had a detection event. Alice and
Bob remove their basis choices for times that do not correspond to detection events to
create the sets $\{y'\}$ and $\{z'\}$ respectively. Alice $\rightarrow$ Bob Sifting using
$\{y'\}\oplus F(K)$ and $\{z'\}\oplus F(K)$.
\item Alice and Bob perform EC on the bits of $\{x\}$ and $\{x'\}$ retained after sifting.
\item Bob makes a conclusion about the security of the error-corrected bits.
\item Either Alice and Bob perform privacy amplification, or they start over.
\item Alice and Bob create a new $K$.
\end{enumerate}
\subsection*{Security of Protocol 2}
These protocols offer similar assurances to Bob
as Protocols 1.a and 1.b, except that they guarantee, after EC, that the person with which
he performed sifting is someone that knows information that only the sender of the photon
and Alice could know. In particular, after EC, Bob knows that he performed the sifting with
someone who knew both $\{y\}$ and $F(K)$, otherwise, he would have randomly selected which
bits to use for the EC and would have a substantial error rate. Therefore, the original
sender must be Alice.
The QBER is a function of $\Gamma$ being measured and any tampering that may occur on the
quantum channel, in addition to the original sender not knowing the correct $F(K)$ sequence
that was XORed with Alice's basis stream. When Bob is trying to perform EC with a user that
does not know $F(K)$, the error rate will be inflated because Bob would have randomly
selected his bits from all of the bits, roughly half of which are in the wrong basis. Note
that, unlike in Protocol 1, the knowledge Eve can gain during interactive EC will not help
her reduce the QBER induced by her not knowing the correct basis during the sifting. So, for
protocols $2.a$ and $2.b$, Eve does not gain an advantage by performing interactive EC with
Bob as opposed to forward EC.
To understand the security of these two protocols, consider the following Man-in-the-Middle
attack against Protocol 2.a, assuming that the timing restrictions for Protocol 2.a, as
noted above, have been met (similar security when carried out against Protocol 2.b, but
without the timing restrictions).
Allow for the possibility that Alice-Eve EC is completed after Eve-Bob photon transmission,
but before Eve-Bob sifting.
\begin{enumerate}
\item Eve creates a set of times $\{\tau\}$ that correspond to bits of $F(K)$ she intends to
learn.
\item
\begin{enumerate}
\item Alice sends a photon with $\Gamma_{y^t\oplus F(K)^t}=x^t$.
\item Eve measures $\Gamma_{\mu^t}$, and records $\chi^t=\Gamma_{\mu^t}$.
\item Eve sends a photon with $\Psi_{\nu^t}=\xi^t$ if $t\in \{\tau\}$, where $\Psi$ and
$\Gamma$ are the same observable.
\item Bob measures $\Psi_{z^t\oplus F(K)^t}$, and records \\$x'^t=\Psi_{z^t\oplus F(K)^t}$
if $t\in \{\tau\}$.
\item This step continues until enough bits have been sent for Bob to accurately calculate
the QBER.
\end{enumerate}
\item Alice and Eve perform bit distillation. Alice sends Eve the set $\{y\}$.
Eve tells Alice that they agreed on the basis selection for the times $t \in \{\tau\}$.
\item Alice and Eve perform EC. Eve didn't know $F(K)$, so she has a $.25$ error rate in
her key, relative to Alice. After EC, Eve still has a $( .25-\Delta)$ error rate for the
bits retained after sifting. Again, $\Delta$ could be chosen to prevent Eve from
establishing perfect keys with Alice, and could allow Alice to detect the imposter while
Alice and Eve are communicating with the keys as input to their encryption systems.
To understand what Eve knows after EC with Alice, consider the fact that Eve knows $y^t$ and
$\mu^t$ for all $t$. Through EC she learns $y^t\oplus F(K)^t \not= \mu^t$, $t\in \{\tau\}$,
for some number of errors, which is sufficient to calculate $F(K)^t$ for these times. For
the times that she had the correct bit value, Eve doesn't know if $y^t\oplus F(K)^t =\mu^t$
or if $y^t \oplus F(K)^t \not=\mu^t$. Therefore, Eve's copy of $\{F(K)^\tau \}$, the bits of
$F(K)$ that correspond to possible detection events at Bob, has a $(\gamma =
\max\{\frac{.75}{2}, \frac{1-\Delta}{2}\})$ error rate.
\item Eve and Bob perform bit distillation. Eve sends Bob the set $\{\nu\} \oplus F(K')$,
where $F(K')$ is Eve's flawed version of $F(K)$. Bob then compares $\{\nu\} \oplus F(K')$ to
$\{z\}$ and sends Eve a list of which bits to include. This set of events will be about
half of the events included by Eve and Alice.
\item Eve and Bob perform EC. Eve's error rate of $\{F(K')^\tau\}$ is $\gamma$, so her key
will have a $(\frac{\gamma}{2})$ error rate relative to Bob's key.
\item Bob will calculate a $.1675 \le (QBER = \frac{\gamma}{2} )\le .25$ and conclude that Eve
must be involved.
\end{enumerate}
\section{Conclusions}
The security of the shared-secret authentication lies in Eve's inability to predict the
secret bits, so it is imperative that the secret bits be well protected until Bob has a
chance to verify the sender's identity. In each of the above protocols Alice leaks
information about $F(K)$ to the person with whom she is performing EC, so $F(K)$ is not
completely secure. However, as long as determining $K$ from $F(K)$ is a relatively
computationally-intensive process, then there is enough entropy in the shared secret during
the QKD session to prevent Eve from successfully carrying out a Man-in-the-Middle attack.
The significance of these protocols is that each of them could easily be implemented in
current QKD systems and would only require minor software modifications. Each of the
protocols can be used to authenticate the quantum channel of prepare-and-measure QKD
systems, such as BB84. However, note that in Protocol 2.b, Alice only has to know her basis
choice when she performs sifting and not when actually sending the photons. This feature
allows 2.b to actually be used for any 2-Basis QKD schemes that require bit distillation and
EC, even entanglement schemes. Similarly, Protocol 1.b only relies on Alice and Bob having a
bit stream with errors and a shared secret, implying that it can be used with all QKD
schemes, even no-switching QKD \cite{no-switch}, as long as the QKD schemes require EC.
Suppose that the roles in the sifting and EC were reversed, such that Bob's key prior to EC
was assumed to be correct, and Bob helped Alice correct her bits that differed from Bob's.
Alice would then calculate the QBER, and they could use the protocols for Alice to verify
that the photons were detected by someone who knows $F(K)$, Bob, and that she communicated
with him for the sifting and EC. For example, Protocol 2.b would only differ in that Bob
would send Alice $\{z\}\oplus F(K)$ for sifting, and they would change roles for EC.
Therefore, Alice could also authenticate Bob's identity, and they could adjust the protocols
so that they can authenticate each other for every QKD session.
It should be noted that the protocols presented in this article belong to a more general
class of authentication protocols that use a shared secret, a symmetric-key algorithm, and
monitoring of the QBER to detect a Man-in-the-Middle attack. These four were chosen to
represent the versatility and utility of the protocols, but were certainly not inclusive of
all of the ways to use classical cryptography to authenticate a QKD quantum channel.
Alternative protocols could be created by replacing the $F(K)\oplus (Information)$ step by
running the information through an algorithm such as $AES$, or varying where the
encrypt/decrypt takes place, among other options. As was shown, the QBER induced by a
Man-in-the-Middle attack would vary between the different protocols, but many of the
possible protocols in this class are more than sufficient for authentication purposes.
I would like to thank my colleagues at LLL, LTS, and NSA for their insightful comments.
\end{multicols}
\begin{multicols}{2}
\end{multicols}
\end{document}
|
\begin{document}
\title[Einstein metrics via torsion]{Einstein metrics via intrinsic\\ or
parallel torsion}
\author{Richard Cleyton}
\address[Cleyton]{Department of Mathematics and Computer Science\\
University of Southern Denmark\\
Campusvej 55\\
DK-5230 Odense M\\
Denmark}
\email{[email protected]}
\author{Andrew Swann}
\address[Swann]{Department of Mathematics and Computer Science\\
University of Southern Denmark\\
Campusvej 55\\
DK-5230 Odense M\\
Denmark}
\email{[email protected]}
\begin{abstract}
The classification of Riemannian manifolds by the holonomy group of their
Levi-Civita connection picks out many interesting classes of structures,
several of which are solutions to the Einstein equations. The
classification has two parts. The first consists of isolated examples:
the Riemannian symmetric spaces. The second consists of geometries that
can occur in continuous families: these include the Calabi-Yau structures
and Joyce manifolds of string theory. One may ask how one can weaken the
definitions and still obtain similar classifications. We present two
closely related suggestions. The classifications for these give isolated
examples that are isotropy irreducible spaces, and known families that
are the nearly Kähler manifolds in dimension 6 and Gray's weak holonomy
G$_2$ structures in dimension 7.
\end{abstract}
\subjclass[2000]{Primary 53C10; Secondary 17B10, 53C25, 53C29}
\maketitle
\tableofcontents
\section{Introduction}
Linear connections and the equivalent notion of $G$-structures are
fundamental areas of interest in differential geometry. Their equivalence
goes through the holonomy of the connection and it is a well-known Theorem
by Hano and Ozeki that any subgroup $G$ of the general linear group may be
realised as the holonomy of a connection on some
manifold~\cite{Hano-Ozeki:holonomy}. On the other hand, one has the much
more restrictive class of torsion-free connections and their holonomy. The
classification of the irreducible holonomy representations of torsion-free
connections was completed by Merkulov and
Schwachh\"ofer~\cite{Merkulov-Schwachhoefer:irreducible} and recently
Schwachh\"ofer has given a more algebraic
proof~\cite{Schwachhoefer:Berger}. This article concerns itself exclusively
with Riemannian manifolds and therefore the particular case of Riemannian
holonomy will have special significance. Riemannian holonomies have
yielded geometric structures such as Calabi-Yau manifolds, Joyce manifolds,
hyperK\"ahler and quaternionic K\"ahler manifolds of great interest in both
mathematics and physics.
If $M^n$ is a Riemannian manifold, the holonomy algebra $\lie g$ acts on
tangent spaces via a representation $V$. This induces actions of $\lie g$
on the spaces of trace-less symmetric two-tensors $S^2_0V$ and algebraic
curvature tensors $\mathcal K(\lie g)$ with values in $\lie g$. Apart from the generic
holonomy representation of $\lie{so}(n)$ on $\mathbb{R}^n$ and that of $\lie{u}(n)$
on $\mathbb{C}^n$ of K\"ahler geometry, all holonomy representations satisfy
that the representations $\mathcal K(\lie g)$ and $S^2_0V$ have no irreducible summands in
common. Schur's Lemma shows that this happens precisely when the equation
$(\mathcal K(\lie g){\otimes} S^2_0V)^G = \{0\}$ is satisfied. As a consequence, a
Riemannian manifold is Einstein as soon as the Lie algebra of its holonomy
group is a proper subalgebra of $\lie{so}(n)$ not equal to $\lie{u}(n/2)$. Our aim
is to generalise this type of statement to the more general framework of
metric connections and their holonomy.
Among metric connections, the connections that give rise to the Riemannian
holonomy groups may be viewed as precisely those with vanishing intrinsic
torsion. The torsion of a metric $G$-connection is a tensor taking values
in $\Lambda^2V^*{\otimes}V$, where $V$ represents tangent space as a $G$
module. If $\lie g^\bot$ denotes the orthogonal complement of the Lie algebra $\lie
g$ of $G$ in $\lie{so}(n)$ with respect to the metric then the projection of the
torsion to the image of $V\otimes\lie g^\bot$ under the anti-symmetrising map
$\delta \colon V^*{\otimes}\lie{so}(n) \to \Lambda^2V^*{\otimes} V$ is
independent of the chosen $G$-connection. The tensor thus defined is called
the intrinsic torsion of the associated $G$-structure and encodes its
differential geometric data.
Using the decomposition of $V{\otimes} \lie g^\bot$ into irreducible $G$ modules
one may classify the geometries induced by a $G$-structure according to
where the intrinsic torsion takes its values, an approach first taken by
Gray and Hervella~\cite{Gray-H:16} for almost Hermitian manifolds and since
used by many others.
Interesting examples of connections with non-trivial intrinsic torsion have
arisen from Gray's definition of weak holonomy~\cite{Gray:weak}. These
include the six-dimensional nearly K\"ahler geometry with structure group
$\Lie{SU}(3)$ and weak holonomy $G_2$ in dimension~$7$. Both geometries give
Einstein metrics.
In this article we take the following approach. We consider all
$G$-structures on Riemannian manifolds with non-trivial intrinsic torsion.
If we consider all metric connections with torsion the Hano-Ozeki Theorem
tells us that any Lie subgroup of $\Lie{O}(n)$ can be realised as the
holonomy group of a metric connection on some Riemannian manifold. We
therefore impose various extra conditions on the $G$-structure and its
intrinsic torsion with the specific aim of obtaining Einstein metrics. The
results are classifications akin to Berger's in the sense that we arrive at
a discrete family of manifolds made up by the non-symmetric isotropy
irreducible homogeneous spaces and continuous families made up of manifolds
of weak holonomy $G_2$ and nearly K\"ahler six-manifolds.
The contents may be outlined as follows: In section~\ref{sec:2}, we give
some of the basic facts and definitions we will need. In
section~\ref{sec:3}, Theorem~\ref{thm:1} gives three conditions on the
intrinsic torsion and tangent space representation of a $G$-structure that
guarantee a solution to the Einstein equations. The first two of these
conditions have as consequences that the intrinsic torsion must be
skew-symmetric and that $G$ acts irreducibly on tangent spaces. Given that
$G$ acts irreducibly on $V$, we investigate the consequences of the third
condition: $(\mathcal K(\lie g)\otimes S^2_0V)^G=\{0\}$ in the fourth section. These are
listed in~Theorem~\ref{thm:6}. As a by-product of this investigation, we
obtain an algorithm for computing the space of algebraic curvature tensors
for an arbitrary representation $\lie g\to\lie{so}(n)$. Using the results of
section~\ref{sec:4}, Theorem~\ref{thm:3} gives a classification of the
holonomy groups of metric connections with parallel torsion when the
holonomy group acts irreducibly on tangent spaces. Finally, in
Theorem~\ref{thm:8}, we make a similar classification of manifolds with a
$G$-structure with invariant skew-symmetric intrinsic torsion and $G$
acting irreducibly on tangent spaces. The crucial fact used here is that
under these conditions we may conclude that the intrinsic torsion is
parallel.
Gray-Hervella type classifications of spaces with skew-symmetric intrinsic
torsion have been made by A.~Fino~\cite{Fino:torsion}, while the case of
skew-symmetric torsion have been considered by, among others, Friedrich and
Ivanov~\cite{Friedrich:non-integrable, Friedrich-I:skew, Ivanov:Spin7}.
\section{Riemannian Geometry and Metric Connections}
\label{sec:2}
Let $(M^n,g)$ be a Riemannian manifold. The space $\Lie{O}(M)$ of frames on
the tangent bundle orthonormal with respect to the metric $g$ forms a
principal $\Lie{O}(n)$-bundle, where $\Lie{O}(n)$ acts on the right by
change of basis. Write $\mathbb{R}^n$ for the standard representation of
$\Lie{O}(n)$. Then the tangent bundle can be identified with the associated
vector bundle $\Lie{O}(M) \times_{\Lie{O}(n)} \mathbb{R}^n$ and, similarly,
the bundles of $(p,q)$-tensors on $M$ may be identified with
$\Lie{O}(M)\times_{\Lie{O}(n)} (\mathbb{R}^n)^{(p,q)}$. Sections of bundles
of tensors may be identified with equivariant maps $\sigma \colon
\Lie{O}(M) \to (\mathbb{R}^n)^{(p,q)}$ for which
$\sigma(pg)=g^{-1}\sigma(p)$ for $g\in\Lie{O}(n)$ and these sections or
maps may be decomposed according to the decomposition of
$(\mathbb{R}^n)^{(p,q)}$ into irreducible $\Lie{O}(n)$-submodules. In what
follows we will write $\Lambda^p$ for the $p$-th exterior power of $\mathbb
R^n$ as an $\Lie{O}(n)$-representation.
\begin{definition}
Assume that a Riemannian manifold $(M,g)$ has a structure reduction $P
  \subset \Lie{O}(M)$ to a $G$-structure. Let $V$ be the induced
representation of $G$ on tangent spaces. When $M$ has such a structure
reduction we will say that the triple $(M,g,V)$ is a \emph{$G$-manifold}.
If the representation $V$ is irreducible we say that $M$ or, more
precisely, $(M,g,V)$ is \emph{$G$-irreducible}.
If the structure reduction is given by the holonomy $G$ of a metric
connection $\hat\nabla$ on $(M,g)$ we will say that $(M,g,V)$ is the
$G$-manifold \emph{determined by} $\hat\nabla$.
A \emph{$G$-connection} $\hat\nabla$ on a $G$-manifold $(M,g,V)$ is a connection
for which the holonomy group acts on tangent spaces as a subgroup of
$G$. A \emph{metric connection} on a Riemannian manifold $(M,g)$ is
connection with holonomy contained in $\Lie{O}(n)$.
\end{definition}
When $(M,g,V)$ is a $G$-manifold the decompositions of
$\Lie{O}(n)$-re\-pre\-sen\-ta\-tions into $G$-modules let us decompose the
tensors of $M$ even further. In this case we have vector bundle isomorphisms
$T^{(p,q)}M \cong \Lie{O}(M) \times_{\Lie{O}(n)} (\mathbb{R}^n)^{(p,q)} \cong
P\times_G V^{(p,q)}$ and sections may be thought of as $G$-equivariant maps
$\sigma \colon P \to V^{(p,q)}$. Let $\tau$ be a section of the tensor
bundle $T^{(p,q)}M$ and assume that $\tau$ via the identification with a
$G$-equivariant map $P\to V^{(p,q)}$ actually takes its values in some
submodule $W\subset V^{(p,q)}$. When this is the case we use the convenient
notation $\tau\in W$.
\begin{remark}
Note that representations are real unless we state otherwise and all Lie
groups and Lie algebras are subgroups and subalgebras of the orthogonal
ones. Therefore we are free to identify representations with their duals
and will do so.
\end{remark}
As an important example, let ${\hat\nabla}$ be a metric connection on
a Riemannian manifold $(M,g)$ and let $(M,g,V)$ be the $G$-manifold
determined by $\hat\nabla$. Since $\hat\nabla$ is a metric connection its holonomy
algebra is a subalgebra of $\lie g\leqslant\lie{so}(n)$. The difference
between the metric connection and the Levi-Civita connection $\nabla^g$
therefore forms a tensor taking values in $V {\otimes} \lie{so}(n)$:
\begin{equation*}
\eta:=\nabla^g-\hat\nabla\in V {\otimes} \lie{so}(n)
\end{equation*}
The map $\delta\colon V {\otimes} \lie{so}(n)\to\Lambda^2V{\otimes} V$ given by
$(\delta\alpha)_XY=\alpha_XY-\alpha_YX$ is an isomorphism mapping
$\eta$ to the torsion $\hat T$ of $\hat\nabla$. This justifies using the terms
torsion or torsion tensor for either $\eta$ or $\hat T$ interchangeably.
Let ${\hat R}$ be the curvature of $\hat\nabla$:
\[
{\hat R}_{X,Y} = [{\hat\nabla}_X,{\hat\nabla}_Y]-{\hat\nabla}_{[X,Y]}.
\]
Then the Ambrose-Singer Theorem~\cite{Ambrose-Singer:holonomy} tells us
that ${\hat R} \in \Lambda^2V {\otimes} \lie g$.
On $M$ the Levi-Civita connection $\nabla^g$ is singled out by requiring
that it is a metric connection which is torsion-free:
\begin{equation*}
T^g_XY:=\nabla^g_XY-\nabla^g_YX-[X,Y]=0.
\end{equation*}
A consequence of this is that the Riemannian curvature tensor $R^g$
satisfies the first and second Bianchi identities:
\begin{gather*}
R^g_{X,Y}Z+R^g_{Y,Z}X+R^g_{Z,X}Y=0\\
\nabla^g_XR^g_{Y,Z}+\nabla^g_YR^g_{Z,X}+\nabla^g_ZR^g_{X,Y}=0.
\end{gather*}
We write $\textrm{b}_1$ for the map $\Lambda^2 {\otimes} \lie{so}(n) \to \Lambda^3
{\otimes} \Lambda^1$ given by
\begin{equation*}
(\textrm{b}_1 R)(X,Y,Z)=R(X,Y,Z)+R(Y,Z,X)+R(Z,X,Y)
\end{equation*}
and $\textrm{b}_2$ for the map $\Lambda^1 {\otimes} \Lambda^2 {\otimes} \lie{so}(n) \to
\Lambda^3 {\otimes} \lie{so}(n)$ given by
\begin{equation*}
(\textrm{b}_2 R')(X,Y,Z,W)=R'(X,Y,Z,W)+R'(Y,Z,X,W)+R'(Z,X,Y,W).
\end{equation*}
Write $\mathcal K(\lie{so}(n))$ for the kernel of $\textrm{b}_1$ and $\mathcal K'(\lie{so}(n))$
for the kernel of $\textrm{b}_2$. Then the first and second Bianchi
identities say, respectively, that:
\begin{gather*}
R^g \in \mathcal K(\lie{so}(n)) \quad\text{and}\quad \nabla^gR^g \in
\mathcal K'(\lie{so}(n)).
\end{gather*}
Note that when we identify $\Lambda^2\cong\lie{so}(n)$, we have $\mathcal
K(\lie{so}(n)) \subset S^2(\lie{so}(n))$.
If the Riemannian holonomy of $g$ is contained in $G$ then, by the
Ambrose-Singer Theorem, $R^g$ takes values in $\Lambda^2V{\otimes}\lie
g$. But $R^g$ also satisfies the first Bianchi identity whence
$R\in\mathcal K(\lie{so}(n))$. Thus
\begin{equation*}
R\in \mathcal K(\lie{so}(n))\cap(\Lambda^2V{\otimes}\lie g)=\ker\{\textrm{b}_1\colon \Lambda^2V{\otimes}\lie g\to\Lambda^3V{\otimes} V\}.
\end{equation*}
\begin{definition}
The representation
\begin{equation}
\label{eq:11}
\mathcal K(\lie g) :=\ker\{\textrm{b}_1\colon \Lambda^2V{\otimes}\lie g\to\Lambda^3V{\otimes} V\}
\end{equation}
is called the \emph{space of algebraic curvature tensors with values
in $\lie g$}.
\end{definition}
In the presence of a metric connection $\hat\nabla$ with torsion $\eta=\nabla^g-\hat\nabla$ we
may write the Riemannian curvature as
\begin{equation}
\label{eq:1}
R^g=\hat R+(\hat\nabla\eta)+(\eta^2),
\end{equation}
where $(\hat\nabla\eta)$ is the anti-symmetrisation of the covariant derivative of
$\eta$ with respect to $\hat\nabla$:
\[(\hat\nabla\eta)_{X,Y}Z:=(\hat\nabla_X\eta)_YZ-(\hat\nabla_Y\eta)_XZ\]
and
\[(\eta^2)_{X,Y}Z:=[\eta_X,\eta_Y]Z-\eta_{\eta_XY-\eta_YX}Z.\]
Since $R^g\in\mathcal K(\lie{so}(n))$ and $\nabla^gR^g\in\mathcal K'(\lie{so}(n))$ we have
\begin{gather}
\label{eq:2}
\textrm{b}_1\hat R=-\textrm{b}_1(\hat\nabla\eta)-\textrm{b}_1(\eta^2)
\intertext{and}\label{eq:4}
\textrm{b}_2\hat\nabla\hat R=-\textrm{b}_2\hat\nabla(\hat\nabla\eta)-\textrm{b}_2\hat\nabla(\eta^2).
\end{gather}
We will refer to equations~(\ref{eq:2}) and~(\ref{eq:4}) as,
respectively, the first and second \emph{Bianchi relations}.
\section{Intrinsic Torsion and Einstein Manifolds}
\label{sec:3}
Let $(M,g,V)$ be the Riemannian $G$-manifold and let $\hat\nabla$ be a
$G$-connection on $(M,g)$. Write $\lie g^\bot$ for the orthogonal complement of
$\lie g<\lie{so}(n)$ and $\eta^{\lie g}$ for the component of the torsion tensor
$\eta$ in $V {\otimes} \lie g$. Then the tensor
\begin{equation*}
\xi:=\eta-\eta^{\lie g}\in V {\otimes} \lie g^\bot
\end{equation*}
is independent of the choice of $G$-connection on $(M,g,V)$. Corresponding
to $\xi$ is a connection $\tilde\nabla:=\nabla^g-\xi$.
\begin{definition}
When $(M,g,V)$ is a $G$-manifold we call the $G$-con\-nec\-tion $\tilde\nabla$ the
\emph{minimal connection} and $\xi$ the \emph{intrinsic torsion} of the
$G$-structure.
\end{definition}
This definition is justified by the fact that $\xi$ may be identified with
the intrinsic torsion of the $G$-structure as defined in the introduction
via the isomorphism $\delta$. The argument above proves
\begin{proposition}
Let $(M,g,V)$ be a $G$-manifold. Then the minimal connection $\tilde\nabla$ is the
unique $G$-connection $\tilde\nabla$ on $M$ such that the torsion tensor
\begin{equation*}
\xi=\nabla^g-\tilde\nabla\in V{\otimes} \lie g^\bot.
\end{equation*}
Among the $G$-connections on $(M,g,V)$ the connection $\tilde\nabla$ has the
property that it minimises the $L^2$-norm of torsion tensors (on compact
subsets of $M$).\qed
\end{proposition}
The curvature $\tilde R$ of the minimal connection is of course related to the
Riemannian curvature precisely as in equation~\eqref{eq:1} for general
metric connections:
\begin{equation}
\label{eq:3}
R^g=\tilde R+(\tilde\nabla\xi)+(\xi^2)
\end{equation}
and the first Bianchi relation for $\tilde R$ is
\begin{equation*}
\textrm{b}_1\tilde R=-\textrm{b}_1(\tilde\nabla\xi)-\textrm{b}_1(\xi^2).
\end{equation*}
Write $\mathcal K(\lie g)$ for the kernel of the restriction $\textrm{b}_1\colon\Lambda^2V
{\otimes} \lie g \to \Lambda^3V{\otimes} V$ and $\mathcal K(\lie g)^{\perp}$ for its
orthogonal complement in $\Lambda^2V {\otimes} \lie g$. Then we may split
$\tilde R$ into the respective components $\tilde R=\tilde R_0+\tilde R_1$ and conclude that
since $\textrm{b}_1$ is injective on $\mathcal K(\lie g)^{\perp}$, $\tilde R_1$ is completely
determined by the intrinsic torsion and its covariant derivative. This
observation forms the central idea in
\begin{theorem}\label{thm:1}
Let $(M,g,V)$ be a Riemannian $G$-manifold. Assume that the
  intrinsic torsion takes values in the $G$-submodule $W\subset
V{\otimes}\lie g^\bot$. Then $g$ is Einstein when the following conditions are
satisfied:
\begin{enumerate}[\upshape (a)]
\item
$(V{\otimes} W{\otimes} S^2_0V)^G=\{0\}$,
\label{cond:A}
\item
$(S^2W{\otimes} S^2_0V)^G=\{0\}$,
\label{cond:B}
\item
$(\mathcal K(\lie g){\otimes} S^2_0V)^G=\{0\}$.
\label{cond:C}
\end{enumerate}
\end{theorem}
\begin{proof}
Write the curvature tensor of $\nabla^g$ as in formula~\eqref{eq:3}. As
$\xi\in W$ the tensors $(\tilde\nabla\xi)$ and $(\xi^2)$ satisfy
\begin{gather*}
(\tilde\nabla\xi)\in V{\otimes} W,\\(\xi^2)\in S^2W.
\end{gather*}
Since the component $\tilde R_1$ of the curvature of the minimal connection is
determined by these two tensors we also have $\tilde R_1\in V{\otimes} W +
S^2W$. Therefore the three conditions together, through Schur's Lemma,
imply that no component of the Riemannian curvature can contribute to the
trace-free Ricci-tensor, whence $g$ is Einstein.
\end{proof}
The conditions are very strong. Condition~(a) implies, first of
all, that no irreducible $G$-module can occur in the decomposition of both
$V$ and $W$, i.e., that $(V{\otimes} W)^G=\{0\}$. Therefore $(W {\otimes} (V
{\otimes} S^2V))^G=\{0\}$. By exactness of the sequence of $G$-modules
\begin{equation*}
0 \longrightarrow \Lambda^3V \longrightarrow
V {\otimes} \Lambda^2V \longrightarrow S^2V {\otimes} V \longrightarrow
S^3V \longrightarrow 0
\end{equation*}
this implies that $W \subset \Lambda^3V$.
Condition~(b) implies that $V$ is irreducible. We therefore have the
following
\begin{corollary}\label{cor:1}
Let $(M,g,V)$ be a Riemannian $G$-manifold for which the intrinsic
  torsion takes values in $W\subset V{\otimes}\lie g^\bot$. If $V$ and $W$
satisfy conditions $\textrm{(a)}$ and $\textrm{(b)}$ of
Theorem~\ref{thm:1} then $(M,g,V)$ is $G$-irreducible and the
intrinsic torsion is a three-form.\qed
\end{corollary}
In particular, Corollary~\ref{cor:1} ensures that it is not restrictive
to assume that $V$ is irreducible when conditions~(a) and~(b) of
Theorem~\ref{thm:1} are satisfied. Furthermore, when $V$ is an
irreducible representation of a Lie group $G$ it is possible to say
precisely when condition~(c) of Theorem~\ref{thm:1} is satisfied. This
will be the main result of the next section.
\section{Berger Algebras and Algebraic Curvature Tensors}
\label{sec:4}
The question we wish to address now is: Given that $G$ is a Lie group
acting irreducibly on a real vector space $V$, when does the space of
algebraic curvature tensors consist of Einstein tensors only? We will
obtain an answer to this at the end of the section. This will be obtained
using tools of the Riemannian holonomy classification, most notably the
concept of the Berger algebra $\underline{\lie g}$ of the Lie algebra $\lie g$. Note
that the Lie algebra $\lie g$ of $G$ may act reducibly on $V$ even though
$G$ acts irreducibly. Therefore we need to be able to calculate Berger
algebras and the space of algebraic curvature tensors for reducible as well
as irreducible representations.
\subsection{Facts and Definitions}
First a few words on notation: we write $\lie{so}(V)$ for the representation of
$\lie{so}(n)$ on $V$ whenever $V$ is a real $n$-dimensional vector space.
\begin{definition}
Let $\lie g$ be a Lie algebra and $V$ be a faithful representation
of $\lie g$ as a subalgebra of $\lie{so}(V)$. We then call the pair
$(\lie g,V)$ a \emph{metric representation}. If the representation
$V$ of $\lie g$ is irreducible we say that $(\lie g,V)$ is
irreducible.
The Berger algebra $\underline{\lie g}$ of a metric representation $(\lie g,V)$ is the
smallest subspace $\lie p$ of $\lie g$ such that $\mathcal K(\lie
p)=\mathcal K(\lie g)$.
\end{definition}
We collect some facts about the Berger algebra and the space of algebraic
curvature tensors of a metric representation $(\lie g,V)$. The first two
are elementary consequences of the definitions. For proofs of the latter
three we refer the reader to~\cite{Bryant:status, Bryant:hol-survey,
Schwachhoefer:Berger}.
\newcounter{fact}
\begin{list}{{\bf{Fact}~\arabic{fact}:}}{\usecounter{fact}
  \setlength{\labelwidth}{1.5cm}\setlength{\leftmargin}{2.6cm}%
  \setlength{\labelsep}{0.5cm}\setlength{\rightmargin}{0.5cm}}
\item $(\underline{\lie g},V)$ is a metric representation and~$\underline{\underline{\lie g}}=\underline{\lie g}$.
\item The space of algebraic curvature tensors for the representation $V$
of $\lie g$ satisfies
\begin{equation*}
\mathcal K(\lie g)=S^2(\lie g)\cap\mathcal K(\lie{so}(V)).
\end{equation*}
\item The Berger algebra $\underline{\lie g}$ of a Lie algebra $\lie g$ is an ideal in
$\lie g$.
\item The Berger algebra satisfies
\begin{equation*}
\underline{\lie g} = \{r(\alpha):r\in\mathcal K(\lie g),\quad\alpha\in\Lambda^2V\}.
\end{equation*}
\item A metric representation $(\lie g,V)$ is a Riemannian holonomy
  representation if and only if~$\lie g=\underline{\lie g}$.
\end{list}
For ease of reference we also provide a list of the irreducible
Riemannian holonomy representations and their associated space of
algebraic curvature tensors here, see Table~\ref{tab:hol}. For a complex
representation $U$ the notation $\real{U}$ is used to indicate the real
representation obtained by restricting scalar multiplication to $\mathbb
R$. In the table and hereafter the symbols $L,~H,~E$ are used for the
standard complex representations of $\lie{u}(1)$, $\lie{sp}(1)$, $\lie{sp}(n)$,
respectively and $\Lambda^{1,0}$ is used for the standard representations
of both $\lie{u}(n)$ and $\lie{su}(n)$. We use the notation $V^d_\lambda$ for the
irreducible representation of dimension $d$ and highest weight $\lambda$.
Special names have been given to the spin-representation $\Delta$ of
$\lie{spin}(7)$, the space of Weyl curvature $W$ and the highest weight
module $\Sigma^{2,2}_0$ of $S^2(\Lambda^{1,0}) {\otimes}
S^2(\Lambda^{0,1})$.
\begin{table}[tp]
\begin{tabular}{@{}ccc@{}}
\toprule
\( \lie g \)&\( V \)&\( \mathcal K(\lie g) \)\\
\midrule
\( \lie{so}(n) \)&\( \Lambda^1=\mathbb R^n \)&\( W+S^2_0+\mathbb R \)\\
\( \lie{u}(n) \)&\( \real{\Lambda^{1,0}}=\mathbb C^n \)&\(
\Sigma^{2,2}_0+\Sigma^{1,1}_0+\mathbb R \)\\
\( \lie{su}(n) \)&\( \real{\Lambda^{1,0}}=\mathbb C^n \)&\( \Sigma^{2,2}_0 \)\\
\( \lie{sp}(n)\oplus\lie{sp}(1) \)&\( EH=\mathbb H^n \)&\( S^4E+\mathbb R \)\\
\( \lie{sp}(n) \)&\( \real{E}=\mathbb H^n \)&\( S^4E \)\\
\( \lie{spin}(7) \)&\( \Delta=\mathbb R^8 \)&\( V^{168}_{(0,2,0)} \)\\
\( \lie g_2 \)&\( V^7=\mathbb R^7 \)&\( V^{77}_{(0,2)} \)\\
\( \lie g \)&\( \lie p \)&\( \mathbb R \)\\
\bottomrule
\end{tabular}
\caption{The irreducible Riemannian holonomy representations and the
associated spaces of algebraic curvature tensors. In the last row
    $\lie g$ and $\lie p$ denote the isotropy algebra and
representation, respectively, of those irreducible symmetric spaces
not covered by earlier entries in the table.}
\label{tab:hol}
\end{table}
\subsection{Reducible Representations}
We start by considering the following special instance:
\begin{example}\label{ex:1}
Let $V=V_1\oplus V_2$, where $V_1$ and $V_2$ are both non-trivial.
  Consider the inclusion $\lie{so}(V_1) \oplus \lie{so}(V_2)\subset\lie{so}(V)$. We have
\begin{equation*}
S^2(\lie{so}(V_1) \oplus \lie{so}(V_2)) = S^2(\lie{so}(V_1)) \oplus
\bigl(\lie{so}(V_1){\otimes}\lie{so}(V_2) \bigr)\oplus S^2(\lie{so}(V_2)).
\end{equation*}
  The image of $S^2(\lie{so}(V))$ under $\textrm{b}_1$ is $\Lambda^4V$ which
decomposes as
\begin{equation*}
\Lambda^4V =
\Lambda^4V_1 \oplus \bigl(\Lambda^3V_1 {\otimes}
V_2\bigr) \oplus \bigl(\Lambda^2V_1 {\otimes}
\Lambda^2V_2\bigr) \oplus \bigl(V_1 {\otimes}
\Lambda^3V_2\bigr) \oplus \Lambda^4V_2.
\end{equation*}
Let $e_1,\dots,e_p$ be an orthonormal basis of $V_1$ and
$f_{1},\dots,f_{q}$ an orthonormal basis of $V_2$. Then the set
consisting of
\begin{equation*}
(e_i\wedge e_j)\vee(f_k\wedge f_l),
\end{equation*}
where $1\leqslant i<j\leqslant p$ and~$1\leqslant k<l\leqslant q$, gives
  a basis of the subspace $\lie{so}(V_1){\otimes}\lie{so}(V_2) \subset S^2(\lie{so}(V))$.
The images of these tensors under $\textrm{b}_1$ span the subspace
$\Lambda^2V_1{\otimes} \Lambda^2V_2$ of $\Lambda^4V$. Therefore
  $\textrm{b}_1\colon\lie{so}(V_1){\otimes}\lie{so}(V_2)\to\Lambda^2V_1 {\otimes}
\Lambda^2V_2$ is an isomorphism. Moreover, $\textrm{b}_1(S^2(\lie{so}(V_1))) =
\Lambda^4V_1$ and $\textrm{b}_1(S^2(\lie{so}(V_2))) = \Lambda^4V_2$, whence
\begin{align*}
\mathcal K(\lie{so}(V_1) \oplus \lie{so}(V_2)) &=
S^2(\lie{so}(V_1) \oplus \lie{so}(V_2))\cap\mathcal K(\lie{so}(V))\\ &=
\mathcal K(\lie{so}(V_1))\oplus \mathcal K(\lie{so}(V_2)).
\end{align*}
\end{example}
More generally, we wish to consider the situation where $(\lie g, V)$ is a
metric representation and $V_1$ and $V_2$ are orthogonal submodules of $V$
such that
\begin{equation*}
V=V_1\oplus V_2.
\end{equation*}
Let $\pi\colon\lie g\to\lie{so}(V)$ be the representation of $\lie g$ on $V$ and
let $\pi_i\colon\lie g\to\lie{so}(V_i),~i=1,2$ be the two sub-representations.
Then $\pi=\pi_1+\pi_2$ and, since $\pi$ is faithful, the kernels $\hat{\lie
g}_1=\ker\pi_2$ and $\hat{\lie g}_2=\ker\pi_1$ intersect trivially, so
$\hat{\lie g}_1\oplus\hat{\lie g}_2\unlhd\lie g$. On the other hand, if
$\lie g_i:=\pi_i(\lie g)$ then $\lie g\unlhd\lie g_1\oplus\lie g_2$ via the
inclusion $\pi$.
We consider two extremal cases. First, when $\hat{\lie g}_1=\lie g_1$ and
$\hat{\lie g}_2=\lie g_2$. Then $\lie g = \lie g_1 \oplus \lie g_2
\overset{\pi}{\hookrightarrow} \lie{so}(V_1) \oplus \lie{so}(V_2)$, where $\pi(\lie
g_i) \subset \lie{so}(V_i)$. The computations of Example~\ref{ex:1} show that in
this case
\begin{equation*}
\mathcal K(\lie g) = \left(S^2(\lie g_1)\cap\mathcal K(\lie{so}(V_1))\right) \oplus \left(S^2(\lie
g_2)\cap\mathcal K(\lie{so}(V_2))\right) = \mathcal K(\lie g_1)\oplus\mathcal K(\lie g_2).
\end{equation*}
In other words, we have
\begin{lemma}\label{lem:7}
Let $(\lie g_1,V_1)$ and $(\lie g_2,V_2)$ be metric representations. Then
$(\lie g_1\oplus\lie g_2, V_1\oplus V_2)$ is a metric representation and
\begin{gather}
    \underline{\lie g_1\oplus\lie g_2} = \underline{\lie g_1} \oplus
    \underline{\lie g_2},\\
\mathcal K(\lie g_1\oplus\lie g_2) = \mathcal K(\lie g_1)\oplus\mathcal K(\lie g_2).
\end{gather}\qed
\end{lemma}
The lemma shows that new metric representations may be obtained by making
direct products of Lie algebras and representations and, furthermore, that
the Berger algebra and space of algebraic curvature tensors of these new
metric representations are obtained from those of the summands by direct
product. If a metric representation is obtained in this fashion, we will
use the shorthand notation
\begin{equation}\label{eq:10}
\bigoplus(\lie g_i,V_i) := \left(\bigoplus\lie g_i, \bigoplus
V_i\right).
\end{equation}
The second extremal case is when $\hat{\lie g}_1=\{0\}=\hat{\lie g}_2$.
\begin{lemma}\label{lem:6}
If $(\lie g, V)$ is a metric representation and $V=V_1\oplus V_2$ is an
orthogonal decomposition of $V$ into submodules $V_1, V_2$ such that both
the induced representations $\pi_i\colon\lie g\to\lie{so}(V_i)$ are faithful
then
\begin{equation*}
\underline{\lie g}=\{0\}\quad\text{and}\quad\mathcal K(\lie g)=\{0\}.
\end{equation*}
\end{lemma}
\begin{proof}
  Let $(\lie g,V)$ be a metric representation and let $V=V_1\oplus V_2$ be
an orthogonal decomposition of $V$ where both the
sub-representations $\pi_i\colon\lie g \to \lie{so}(V_i)$ are faithful.
Then $\lie g$ is included diagonally into a direct sum $\lie g_1
\oplus \lie g_2$ of two copies $\lie g_i,~i=1,2$ of $\lie g$ with
  $\lie g_i \subset \lie{so}(V_i)$. Therefore $S^2(\lie g)\subset S^2(\lie
  g_1)\oplus \lie g_1{\otimes}\lie g_2\oplus S^2(\lie g_2)$, where
  $S^2(\lie g_i)\subset S^2(\lie{so}(V_i))$ and $\lie g_1{\otimes}\lie g_2
  \subset \lie{so}(V_1){\otimes}\lie{so}(V_2)$. As $\textrm{b}_1(\lie g_1{\otimes}\lie
g_2) \cong \lie g_1{\otimes}\lie g_2$ is orthogonal to
$\textrm{b}_1(S^2(\lie g_1) \oplus S^2(\lie g_2))$ any element $R$ of $\mathcal K(\lie g)$
must project to zero in $\lie{so}(V_1){\otimes} \lie{so}(V_2)$. Assume that
$\gamma^1,\dots,\gamma^d$ is an orthogonal basis of $\lie g$. Write
$\gamma^k=\gamma^k_1+\gamma^k_2$, where
$\gamma^k_i=\pi_i(\gamma^k)$. Any element $R\in\mathcal K(\lie g)$ may then be
written as
\begin{equation*}
    R=\sum_{k\leqslant l}a_{kl}\gamma^k\vee\gamma^l.
\end{equation*}
The projection of $R$ to $\lie{so}(V_1){\otimes} \lie{so}(V_2)$ is
\begin{equation*}
    \sum_{k\leqslant l} a_{kl} (\gamma^k_2\vee\gamma^l_1 +
    \gamma^k_1\vee\gamma^l_2) = \sum_{k,l}a_{kl}\gamma^k_1\vee\gamma^l_2
\end{equation*}
which is zero only if $R=0$. The Lemma follows.
\end{proof}
\begin{proposition}
Let $(\lie g, V)$ be a metric representation. Assume that $V=V_1\oplus
V_2$ is a decomposition of $V$ into orthogonal submodules and let
$\pi_i:\lie g\to\lie{so}(V_i),~i=1,2$ be the induced representations. Define
$\hat{\lie g}_1:=\ker\pi_2$ and $\hat{\lie g}_2:=\ker\pi_1$. Then
$(\hat{\lie g}_1,V_1)$ and $(\hat{\lie g}_2,V_2)$ are metric
representations such that
\begin{gather}
\underline{\lie g}=\hat{\underline{\lie g}}_1\oplus\hat{\underline{\lie g}}_2\\\intertext{and}
\mathcal K(\lie g)=\mathcal K(\hat{\lie g}_1)\oplus\mathcal K(\hat{\lie g}_2).
\end{gather}
\end{proposition}
\begin{proof}
  When $(\lie g,V)$ is a metric representation with an orthogonal
decomposition of $V$ into submodules $V_1\oplus V_2$ we define $\hat{\lie
  g}_i$, and $\lie g_i$ as before. Let $\tilde{\lie g}\unlhd\lie g$ be the
orthogonal complement of $\hat{\lie g}_1\oplus\hat{\lie g}_2$ in $\lie
g$. Then $\lie g_i\cong\hat{\lie g}_i\oplus\tilde{\lie g}_i$, where
$\tilde{\lie g}_i=\pi_i(\tilde{\lie g})$ and thus
\begin{equation*}
    \hat{\lie g}_1\oplus\hat{\lie g}_2\subset\lie g \subset \lie g_1 \oplus \lie g_2 \cong
\left(\hat{\lie g}_1 \oplus \tilde{\lie g}_1\right)
\oplus \left(\hat{\lie g}_2 \oplus \tilde{\lie g}_2\right)
    \subset\lie{so}(V_1) \oplus \lie{so}(V_2).
\end{equation*}
By Lemma~\ref{lem:7},
\begin{equation*}
\mathcal K(\hat{\lie g}_1 \oplus \hat{\lie g}_2) =
\mathcal K(\hat{\lie g}_1) \oplus \mathcal K(\hat{\lie g}_2)
\quad\text{and}\quad
\mathcal K(\lie g_1 \oplus \lie g_2) = \mathcal K(\lie g_1) \oplus
\mathcal K(\lie g_2),
\end{equation*}
whence
\begin{equation*}
    \mathcal K(\hat{\lie g}_1) \oplus \mathcal K(\hat{\lie g}_2) \subset
    \mathcal K(\lie g)\subset\mathcal K(\lie g_1)\oplus
    \mathcal K(\lie g_2)\subset S^2(\lie g_1) \oplus S^2(\lie g_2).
\end{equation*}
The final inclusion shows that any algebraic curvature tensor
$R\in\mathcal K(\lie g)$ must have trivial projection to $\tilde{\lie
  g}_1{\otimes}\tilde{\lie g}_2\subset S^2(\lie g_1\oplus\lie g_2)$.
By an argument similar to the one given in the proof of
Lemma~\ref{lem:6}, any curvature tensor in $\mathcal K(\lie g)$ must satisfy that
the component taking values in $\tilde{\lie g}$ vanishes and
therefore $\mathcal K(\lie g) = \mathcal K(\hat{\lie g}_1) \oplus \mathcal K(\hat{\lie
g}_2)$.
\end{proof}
\subsection{Berger Decomposition}
Metric representations are not generally of the form given by
equation~(\ref{eq:10}). An obvious question to ask is therefore: how may
one compute the Berger algebra and the space of algebraic curvature tensors
for an arbitrary metric representation $(\lie g, V)$? The results of the
previous section will allow us to introduce a Berger decomposition of the
metric representation and show that the Berger algebra of the metric
representation may be computed as a direct sum of Berger algebras of the
irreducible summands of the Berger decomposition. The irreducible case is
then dealt with in the next section.
\begin{definition}
Let $(\lie g,V)$ be a reducible metric representation and let
\begin{equation*}
V=\bigoplus_{i}V_i\tag{$\mathord{\mkern1mu *}$}
\end{equation*}
be an orthogonal decomposition of $V$ into irreducible submodules.
For each $i$ let
\begin{equation*}
\hat{V}_i:=\bigoplus_{j\not=i}V_j,
\end{equation*}
let $\pi_i$ and $\hat{\pi}_i$ be the projections $\pi_i \colon \lie
g \to \lie{so}(V_i)$ and $\hat{\pi}_i \colon \lie g \to \lie{so}(\hat{V}_i)$,
and let $\lie g_i=\ker\hat{\pi}_i$. Then \emph{the Berger decomposition} of
$(\lie g, V)$ with respect to the decomposition~($\mathord{\mkern1mu *}$) is
\begin{equation*}
B(\lie g, V)=\bigoplus_i(\lie g_i,V_i).
\end{equation*}
\end{definition}
The definition and the results of the previous section prove
\begin{proposition}\label{prop:3}
Let $(\lie g, V)$ be a metric representation. Assume that
\begin{equation*}
B(\lie g, V)=\bigoplus_i(\lie g_i,V_i)
\end{equation*}
is a Berger decomposition of $(\lie g, V)$. Then
\begin{gather*}
\underline{\lie g}=\bigoplus_i\underline{\lie g}_i\quad\text{and}\quad\mathcal K(\lie g)=\bigoplus_i\mathcal K(\lie g_i).
\end{gather*}\qed
\end{proposition}
\begin{example}
Let $\lie g$ be a simple Lie algebra and let $V$ be a non-trivial,
real representation of $\lie g$. Then $(\lie g, V)$ is a metric
representation. If $V_i\subset V$ is a non-trivial submodule of $V$
then $\lie g_i$ is non-trivial only if $\hat{V}_i\cong k\mathbb R$, where
$k$ is a non-negative integer. Therefore $\underline{\lie g}\not=\{0\}$ only if $V$
is either irreducible or isomorphic to $V'\oplus k\mathbb R$ for some
irreducible representation $V'$.
\end{example}
Note that the component representations of a Berger decomposition may
themselves be reducible. So to calculate the Berger algebra of an arbitrary
metric representation we might need to invoke Proposition~\ref{prop:3}
several times. However, we also have the following corollary of
Lemma~\ref{lem:6}:
\begin{lemma}\label{lem:5}
Let $(\lie g, V)$ be a metric representation. If $V\cong kV'$ for $k>1$
and some representation $V'$ of $\lie g$ then $\underline{\lie g}=\{0\}$.
\end{lemma}
\begin{proof}
If $(\lie g,V)$ is a metric representation where $V=kV'$ then any
orthogonal decomposition $V=V'\oplus(k-1)V'$ satisfies that the
induced representations $\pi'\colon\lie g\to\lie{so}(V')$ and
$\pi''\colon\lie g\to\lie{so}((k-1)V')$ are faithful. Therefore
Lemma~\ref{lem:6} applies.
\end{proof}
Using Lemma~\ref{lem:5} we may eliminate any reducible component
representations from a Berger decomposition. To see this, consider the case
of $\lie g$ represented on $V = V_1\oplus V_2$ where $V_1$ is an
irreducible representation and $V_2$ is its orthogonal complement. Write
$\lie g=\hat{\lie g}_1\oplus\hat{\lie g}_2\oplus\tilde{\lie g}$ as above.
Then $\hat{\lie g}_1\oplus\tilde{\lie g}\cong\pi_1(\lie g)$ acts
irreducibly on $V_1$. Thus, $\lie g_1$ must act on $V_1$ as a direct sum of
isomorphic representations if it does not act irreducibly.
The results we have found in the present section form an algorithm for
finding the Berger algebra of an arbitrary metric representation $(\lie g,
V)$: first decompose $V$ into irreducible submodules $V=\bigoplus V_i$ and
construct its Berger decomposition $B(\lie g, V)=\bigoplus(\lie g_i,V_i)$.
Then the Berger algebra may be computed using Proposition~\ref{prop:3},
Lemma~\ref{lem:5} and Proposition~\ref{prop:5}.
\subsection{Irreducible Representations}
The promised result for determining when condition~(c) of
Theorem~\ref{thm:1} is satisfied, is nearly at hand. In fact, from the
results of the previous section we may conclude
\begin{proposition}\label{prop:5}
Let $(\lie g,V)$ be an irreducible metric representation. Then $(\lie
g,V)$ satisfies either
\begin{enumerate}[\upshape (i)]
\item $\underline{\lie g}=\{0\}$,\label{item:6}
\item $\lie g$ is a Riemannian holonomy representation,\label{item:7}
\item or $\underline{\lie g}=\lie{sp}(n)$ and $(\lie g,V)=(\lie{sp}(n)+\lie{u}(1),\real{EL})$, where
$E$ and $L$ are the standard complex representations of $\lie{sp}(n)$ and
$\lie{u}(1)$.\label{item:8}
\end{enumerate}
\end{proposition}
\begin{proof}
Let $(\lie g,V)$ be an irreducible metric representation. Assume
that $\underline{\lie g}\not=\{0\}$. Write $\lie g=\lie g_1\oplus\lie g_2$, where
$\lie g_1=\underline{\lie g}$. Then the complexification $V{\otimes}\mathbb C$ falls
into one of the following cases, depending on the types of
representation of $\lie g$,~$\lie g_1$ and $\lie g_2$ on $V$. In
this respect we follow the conventions of Br\"ocker and tom
Dieck~\cite{Broecker-tom-Dieck:Lie}.
\begin{enumerate}
\item If $V$ is of real type, then either
\begin{enumerate}
\item $V{\otimes}\mathbb C=U_1{\otimes} U_2$, where $U_1$ and $U_2$ are
irreducible complex representations of real type, or,\label{item:1}
\item $V{\otimes}\mathbb C=U_1{\otimes} U_2$, where $U_1$ and $U_2$ are
irreducible complex representations of quaternionic
type.\label{item:2}
\end{enumerate}
\item If $V$ is of complex type, then $V{\otimes}\mathbb C=U_1{\otimes}
U_2+\overline{U_1{\otimes} U_2}$ where $U_1$ and $U_2$ are irreducible
complex representations and either $U_1$ or $U_2$ is of complex
type.\label{item:3}
\item If $V$ is of quaternionic type then $V{\otimes}\mathbb
C=2U_1{\otimes} U_2$ where $U_1$ and $U_2$ are irreducible complex
representations and either
\begin{enumerate}
\item $U_1$ is of quaternionic type and $U_2$ is of real type,
or,\label{item:4}
\item $U_1$ is of real type and $U_2$ is of quaternionic
type.\label{item:5}
\end{enumerate}
\end{enumerate}
Lemma~\ref{lem:5} ensures that the restriction of the
representation $V$ to $\lie g_1$ is irreducible. This places severe
restrictions on the dimension of $U_2$. To obtain the desired result
all that is needed is essentially book-keeping: In case~(\ref{item:1}),
the dimension of $U_2$ must be one, so $\lie g_2 \leqslant \lie{so}(1) =
\{0\}$. In case~(\ref{item:2}) if $q=\dim U_1$ then $\lie g_1=\lie{sp}(q/2)$
as this is the only holonomy representation of quaternionic type, and
$\dim U_2 = 2$ whence $\lie g_2 = \{0\}$ or $\lie{sp}(1)$. However, the
latter possibility contradicts the holonomy classification, so $\lie
g_2=\{0\}$.
In case~(\ref{item:3}), $\dim U_2$ must be one for $V$ to be irreducible
and $\lie g_2$ must then be either $\{0\}$ or $\lie{u}(1)$. Then $\lie
g_1$ and $U_1$ must be either $\lie{u}(n),~\lie{su}(n)$ or $\lie{sp}(n)$ acting on
their standard complex representations. However, $\lie{u}(n)\oplus\lie{u}(1)$
does not act faithfully on $\Lambda^{1,0}{\otimes} L$ and
$\lie{su}(n)\oplus\lie{u}(1)$ acting on $\Lambda^{1,0}{\otimes} L$ is a holonomy
representation, so the only possibilities are $\lie g_2=\{0\}$ or
$\lie g_1 = \lie{sp}(n)$ and $\lie g_2=\lie{u}(1)$ with $U_1=E$, $U_2=L$.
In case~(\ref{item:4}), $\dim U_2$ is one again and $\lie g_2 \leqslant
\lie{so}(1) = \{0\}$. In case~(\ref{item:5}), $\dim U_2\geqslant 2$ and this
implies that $V$ is reducible and thus we have a contradiction with
the initial assumption.
\end{proof}
\begin{corollary}\label{cor:2}
If $V$ is an irreducible representation of a Lie algebra $\lie g$
then either $\mathcal K(\lie g)=\{0\}$ or $(\lie g, V)$ is an irreducible holonomy
representation, or $(\lie{sp}(n)\oplus\lie{u}(1),\real{EL})$.\qed
\end{corollary}
The assumption of irreducibility in Corollary~\ref{cor:2} is not quite what
we want. If $G$ is a connected Lie group there is no problem as any
irreducible $G$-representation will be an irreducible module of its Lie
algebra $\lie g$. If $G$ is not connected we may have an irreducible $G$
representation $V$ that is reducible as a representation of $\lie g$. But
then its decomposition as a $\lie g$-module is into a direct sum of
isomorphic submodules. This is so since the identity component $G_0$ of $G$
preserves the $\lie g$-irreducible submodules. So if $V_1$ and $V_2$ are
$G_0$-irreducible subrepresentations of $V$ then there is some element of
$G \setminus G_0$ that maps $V_1$ to $V_2$ whereby they are seen to be
isomorphic as $G_0$-representations. Lemma~\ref{lem:5} then yields:
\begin{corollary}\label{cor:3}
Let $V$ be an $n$-dimensional, irreducible, real representation of a Lie
group $G$. If the Lie algebra $\lie g$ of $G$ acts reducibly on $V$ then
$\mathcal K(\lie g)=\{0\}$.\qed
\end{corollary}
The Corollaries~\ref{cor:2} and~\ref{cor:3} and the third column of
Table~\ref{tab:hol} give us:
\begin{theorem}\label{thm:6}
Let $V$ be an irreducible representation of a Lie group $G$. Then one of
the following holds
\begin{enumerate}[\upshape (a)]
\item $\mathcal K(\lie g)=\{0\}$,
\item $G$ acts on $V$ as an irreducible holonomy representation, or,
\item $G=\Lie{Sp}(n)\Lie{U}(1)$ and $V=\real{EL}$.
\end{enumerate}
In particular, the space of algebraic curvature tensors consists only of
Einstein tensors if and only if $\lie g$ is a proper subalgebra of
$\lie{so}(n)$ and $V$ is not the standard representation of $\lie{u}(n/2)$.\qed
\end{theorem}
Note for future use, that in case~(c) the Berger algebra is $\lie{sp}(n)$.
\section{Parallel Torsion and Einstein Metrics}
\label{sec:5}
In this section we return to the following set-up: Let $(M,g,V)$ be a
$G$-manifold determined by $\hat\nabla$, a metric connection on $M$. Let $\eta \in
V {\otimes} \Lambda^2V$ be its torsion tensor. Assume that the torsion tensor
is parallel with respect to $\hat\nabla$:
\begin{equation*}
\hat\nabla\eta=0.
\end{equation*}
Note that this implies that $\eta$ is invariant by the holonomy $G$ of
$\hat\nabla$, whence $\eta_X^{\lie g}.\eta=0$ where $.$ denotes the standard
action of $\lie{so}(n)$ on $V{\otimes}\lie{so}(n)$. We conclude that the
intrinsic torsion $\xi$ of $(M,g,V)$ is invariant by $G$ as well as
parallel with respect to the minimal connection. Therefore the
following definitions are equivalent:
\begin{definition}
Let $(M,g)$ be a Riemannian manifold. We say that \emph{$(M,g)$ has
parallel torsion} if it admits a metric connection $\hat\nabla$ for which
the torsion
$\eta$ satisfies $\hat\nabla\eta=0$.
\end{definition}
\begin{definition}
Let $(M,g,V)$ be a $G$-manifold. We say that \emph{$(M,g,V)$ is a
parallel $G$-manifold} if the intrinsic torsion is parallel with respect
to the minimal connection.
\end{definition}
The following result is then an easy consequence of
equation~(\ref{eq:1}) and invariance of the torsion by the holonomy
group.
\begin{theorem}\label{thm:7}
Let $(M,g,V)$ be a parallel $G$-manifold which furthermore is
$G$-irreducible. Then $(M,g)$ is Einstein if $(\mathcal K(\lie g){\otimes}
S^2_0V)^G=\{0\}$.\qed
\end{theorem}
\subsection{Ambrose-Singer Manifolds}
A particular instance of parallel $G$-manifolds are those which admit a
connection for which both the curvature and the torsion are parallel.
\begin{definition}
Let $D$ be a metric connection on a Riemannian manifold $(M,g)$ for which
the curvature $R^D$ and torsion $T^D$ satisfy
\begin{equation*}
DT^D=0,\qquad DR^D=0.
\end{equation*}
Then $D$ is called \emph{an Ambrose-Singer connection}. A triple
$(M,g,D)$ where $(M,g)$ is a Riemannian manifold and $D$ is an
Ambrose-Singer connection will be called \emph{an Ambrose-Singer
manifold}.
\end{definition}
\begin{remark}
In the literature, an Ambrose-Singer manifold is often called a
\emph{locally homogeneous manifold}. Note that Ambrose-Singer manifolds
are \emph{not} locally diffeomorphic to homogeneous spaces.
\end{remark}
To each Ambrose-Singer connection $D$ we may of course associate the
$G$-manifold $(M,g,V)$ given by its holonomy. Thereby we obtain a
parallel $G$-manifold $(M,g,V)$. An obvious question is: When do we
obtain an Ambrose-Singer manifold from a Riemannian manifold with
parallel torsion?
\begin{lemma}
Let $(M,g)$ be a Riemannian manifold and let $\hat\nabla$ be a metric connection
on $M$ with parallel torsion. If the Riemannian $G$-manifold $(M,g,V)$
given by the holonomy of $\hat\nabla$ has trivial Berger algebra then
$(M,g,\hat\nabla)$ is an Ambrose-Singer manifold.
\end{lemma}
\begin{proof}
Let $(M,g)$ be a Riemannian manifold with a metric connection $\hat\nabla$ for
which the torsion tensor $\eta$ is parallel. Let $\lie g$ be the Lie
algebra of the holonomy of $\hat\nabla$. Assume that $\underline{\lie g}=\{0\}$ or,
equivalently, that $\mathcal{K}(\lie g)=\{0\}$. This implies that the
curvature $\hat R$ is determined completely by the tensor $(\eta^2)$ through
the Bianchi relation~\eqref{eq:2}. Therefore both
\begin{equation*}
\hat\nabla\eta=0\quad\text{and}\quad\hat\nabla\hat R=0
\end{equation*}
hold.
\end{proof}
To an Ambrose-Singer manifold $(M,g,D)$ one may also associate an
\emph{infinitesimal model}. Briefly, this consists in building a Lie
bracket $[ \cdot , \cdot ]_{\lie h}$ on $\lie h:=\lie g\oplus V$ by
defining
\begin{equation}\label{eq:5}
[A+X,B+Y]_{\lie h}:=\left([A,B]_{\lie g}+\hat R_{X,Y}\right)
+\left(AY-BX-\hat T_XY\right),
\end{equation}
where $\hat T_XY=-\eta_XY+\eta_YX$ is the `usual' torsion of $\hat\nabla$, $A,B \in
\lie g$ and $X,Y \in V$. The Bianchi relations and invariance of $\hat T$
and $\hat R$ by $\lie g$ ensure that this satisfies the Jacobi identity.
Thus, we obtain a pair of Lie algebras $(\lie g,\lie h)$ with $\lie g
\leqslant \lie h$.
\begin{definition}
Let $(\lie g,\lie h)$ be a pair of Lie algebras. We say that $(\lie
g,\lie h)$ is \emph{effective} if $\lie g\leqslant\lie h$ and the
representation of $\lie g$ on $\lie h/\lie g$ is faithful.
\end{definition}
\begin{remark}
When $\lie g$ is the holonomy algebra of an Ambrose-Singer
connection and $\lie h=\lie g\oplus V$ with Lie bracket defined as
in equation~(\ref{eq:5}) above then the pair of Lie algebras $(\lie
g,\lie h)$ is effective.
\end{remark}
\begin{definition}
Let $\lie g$ and $\lie h$ be Lie algebras such that $(\lie g,\lie
h)$ is effective. Let $H$ be the connected, simply-connected Lie
group with Lie algebra $\lie h$ and $G$ the connected Lie subgroup
of $H$ with Lie algebra $\lie g$. We will say that $(\lie g,\lie
h)$ is \emph{regular} if $G$ is a closed subgroup of $H$. Similarly we will
say that \emph{an Ambrose-Singer manifold is regular} if the pair of Lie
algebras obtained from its infinitesimal model is regular.
\end{definition}
In~\cite{Tricerri:Local-homogeneous}, Tricerri proved the following
Theorem.
\begin{theorem}\label{thm:2}
An Ambrose-Singer manifold is locally isometric to a homogeneous space if
and only if it is regular.\qed
\end{theorem}
\begin{corollary}
Let $(M,g,V)$ be a $G$-manifold where $G$ is the holonomy of a metric
connection $\hat\nabla$ on $M$ with parallel torsion. Assume that the Berger
algebra of $\lie g$ is trivial. Then $(M,g,\hat\nabla)$ is an Ambrose-Singer
manifold and $(M,g)$ is locally isometric to a homogeneous space if and
only if $(M,g,\hat\nabla)$ is regular.\qed
\end{corollary}
Note that if a Lie algebra $\lie h$ has a reductive decomposition
$\lie h=\lie g\oplus V$ where $V$ is an irreducible and faithful
representation of $\lie g$ then $\overline{G}$ must be either $H$ or
$G$. By continuity $\overline{G}$ preserves the reductive
decomposition $\lie g\oplus V$. So assuming that $\overline{G}=H$
leads to the conclusion that the action of $\ad_{\lie h}$ preserves
the subspaces of the reductive splitting. In particular, $[\lie
g,V]\subset V$ and $[V,\lie g]\subset \lie g$. This implies that
$\lie g$ acts trivially on $V$ and therefore establishes a
contradiction. So we have
\begin{proposition}\label{fn:1}
Let $(M,g,V)$ be a $G$-irreducible manifold where $G$ is the holonomy of
a metric connection for which the torsion is parallel. Assume that the
Berger algebra of $\lie g$ is trivial. Then $(M,g)$ is locally isometric
to an isotropy irreducible homogeneous space $H/G$.\qed
\end{proposition}
\subsection{Classification}
Let $(M,g)$ be a Riemannian manifold, parallel with respect to some metric
connection $\hat\nabla$. Assume that the holonomy algebra $\lie g$ of $\hat\nabla$ acts
irreducibly on the tangent spaces $V$. If the torsion is assumed to be
non-trivial, this immediately places heavy restrictions on the pair $(\lie
g,V)$ since $\lie g$ must leave some tensor in $V {\otimes} \lie{so}(n)$
invariant. If we write $\lie{so}(n)=\lie g \oplus \lie g^\bot$ then we have one of two
possibilities: Either $\lie g\cong V$ and $\lie g$ is a simple Lie algebra
or $\lie g^\bot$ contains a submodule isomorphic to $V$. The following lemma is
obtained by inspection of representations.
\begin{lemma}\label{lem:1}
Let $V\cong\mathbb{R}^n$ be an irreducible representation of a Lie
algebra $\lie g\leqslant\lie{so}(n)$ and let $\lie g^\bot$ be the orthogonal
complement of $\lie g$ in $\lie{so}(n)$.
\begin{enumerate}[\upshape (i)]
\item Assume that $\left(V {\otimes} \lie g\right)^G \not= \{0\}$. Then
$\lie g$ is a simple Lie algebra, $V \cong \lie g$, and $\mathcal K(\lie g)\cong\mathbb
R$.
\item Assume that $\left(V{\otimes}\lie g^\bot\right)^G \not= \{0\} \not= \mathcal K(\lie g)$.
Then $(\lie g,V)$ is either $(\lie{su}(3),\mathbb{C}^3)$ or $(\lie
g_2,V^7)$. In both cases $\mathcal K(\lie g)$ is an irreducible representation not
isomorphic to $V$ nor to $\mathbb R$.
\end{enumerate}
In all cases the invariant tensors are three-forms.
\qed
\end{lemma}
Lemma~\ref{lem:1} allows us to make the following classification Theorem:
\begin{theorem}\label{thm:3}
Let $(M,g,V)$ be a $G$-irreducible manifold determined by a metric
connection $\hat\nabla$ on $(M,g)$. Assume that the torsion of $\hat\nabla$ is parallel
with respect to $\hat\nabla$. Then one of the following statements holds:
\begin{enumerate}[\upshape (a)]
\item $(M,g)$ is locally isometric to a non-symmetric, isotropy
irreducible homogeneous space,
\item $(M,g)$ is locally isometric to one of the irreducible symmetric
spaces $(G\times G)/G$ or $G^{\mathbb C}/G$,
\item $(M,g)$ has weak holonomy $\Lie{SU}(3)$ or $G_2$,
\item the torsion of $\hat\nabla$ vanishes and $(M,g,V)$ is the $G$-manifold
determined by the Levi-Civita connection and $G$ is the Riemannian
holonomy group of $(M,g)$.
\end{enumerate}
\end{theorem}
\begin{proof}
Assume that $(M,g,V)$ is a $G$-irreducible Riemannian manifold determined
by a metric connection such that the torsion $\eta$ is non-trivial and
parallel: $\hat\nabla\eta=0$.
If the space of algebraic curvature tensors is trivial then
Proposition~\ref{fn:1} applies: $(M,g,\hat\nabla)$ is an Ambrose-Singer manifold
and locally isometric to an isotropy irreducible space.
If $\mathcal K(\lie g)\not=\{0\}$ then $\lie g$ acts irreducibly on $V$ by
Lemma~\ref{lem:5}. The torsion $\eta$ is therefore skew-symmetric by
Lemma~\ref{lem:1}. Since $\hat\nabla\eta=0$ we may write
\begin{equation*}
R^g=\hat R+(\eta^2)
\end{equation*}
where
\begin{equation*}
(\eta^2)_{X,Y}Z=[\eta_X,\eta_Y]Z-\eta_{\eta_XY-\eta_YX}Z.
\end{equation*}
Note that since $\eta$ is skew-symmetric we have
\begin{equation*}
(\eta^2)=\eta^2+\textrm{b}_1\eta^2
\end{equation*}
where $\eta^2_{X,Y}Z := \eta_Z(\eta_XY)$. Also note that
\begin{equation*}
(\textrm{b}_1\eta^2)_{X,Y}Z = \eta_X(\eta_YZ) - \eta_{\eta_XY}Z - \eta_Y(\eta_XZ)
= (\eta_X.\eta)_YZ
\end{equation*}
where $.$ denotes the standard action of $\lie{so}(n)$ on $V {\otimes}
\lie{so}(n)$.
Assume that $\textrm{b}_1\eta^2 = 0 = \eta.\eta = \textrm{b}_1\hat R$. Then both $\hat R$ and
$\eta^2$ are algebraic curvature tensors. Furthermore, since $0 =
\textrm{b}_1\eta^2 = \eta.\eta$ the torsion tensor takes values in $V{\otimes}\lie
g'$ where $\tilde{\lie g} = \stab\eta \geqslant \lie g$. This means that
$(\eta^2)=\eta^2\in\mathcal K(\tilde{\lie g})$ and therefore $\eta^2$ spans a
trivial submodule $\mathcal K(\tilde{\lie g})$. Now Lemma~\ref{lem:1} and
Table~\ref{tab:hol} shows that $\tilde{\lie g}$ must be a simple Lie
algebra and that $V\cong \tilde{\lie g}$, and thus also that $\lie g =
\tilde{\lie g}$ and $\mathcal K(\lie g)\cong\mathbb R$. So $\hat R=\kappa\eta^2$ for some
function $\kappa\colon M\to\mathbb R$. But $(M,g)$ is Einstein by
Theorem~\ref{thm:7}, so the scalar curvature $s_g$ is constant. But
\begin{equation*}
s_g = \sum_{i,j}g(R^g_{e_i,e_j}e_j,e_i) =
(1+\kappa)\sum_{i,j}g(\eta_{e_i}e_j,\eta_{e_i}e_j) =
2(1+\kappa)\norm{\eta}^2
\end{equation*}
where $\{e_i:i=1,\dots,n\}$ is an orthonormal basis of $V$ and both $s_g$
and $\norm{\eta}^2$ are constants. Therefore $\kappa$ must be constant
too, whence $\nabla^gR^g = (1+\kappa)(\hat\nabla(\eta^2)+\eta.(\eta^2)) = 0$.
Finally, if $\eta.\eta\not=0$ then the projection $\xi$ of $\eta$ to
$V{\otimes}\tilde{\lie g}$ is non-trivial. Applying Lemma~\ref{lem:1}
shows that $\tilde{\lie g}=\lie g$, $\xi=\eta$ and that $(\lie g,V)$
is either $(\lie{su}(3),\mathbb C^3)$ or $(\lie g_2,V^7)$.
\end{proof}
\section{Invariant Intrinsic Torsion}
\label{sec:6}
Empirical evidence suggests that condition~(\ref{cond:B}) of
Theorem~\ref{thm:1} in fact implies that the intrinsic torsion must be
invariant, i.e., $W \cong k\mathbb R$. Let this serve as motivation for
considering that case in particular detail. Theorem~\ref{thm:1} with
$W\cong k\mathbb R$ becomes
\begin{proposition}
Let $(M^n,g,V)$ be a Riemannian $G$-manifold. Assume that the intrinsic
torsion takes its values in the $G$-submodule $W\cong k\mathbb R\subset
V{\otimes}\lie g^\bot$. Then $g$ is Einstein if the following conditions are
satisfied:
\begin{enumerate}[\upshape (a$'$)]
\item $(V{\otimes} S^2_0V)^G=\{0\}$\label{cond:A'}
\item $V$ is irreducible.
\item $\lie g$ is a proper subalgebra of $\lie{so}(n)$ and $\lie
g\not=\lie{u}(n/2)$.
\end{enumerate}\qed
\end{proposition}
As before, these assumptions imply that the intrinsic torsion is a
three-form.
\subsection{Invariant versus Parallel Torsion}
The assumption of invariance of the intrinsic torsion appears more general
than that of parallel torsion. It is clear that a manifold with parallel
intrinsic torsion must have invariant intrinsic torsion, since the
$G$-structure given by the holonomy of the minimal connection in this case
leaves the torsion invariant. In this section, we will prove that the
converse holds under a quite weak condition.
\begin{theorem}\label{thm:5}
Let $(M,g,V)$ be a $G$-manifold with skew-symmetric intrinsic
torsion taking values in some submodule $W$ of $V{\otimes}\lie g^\bot$. If
$(V{\otimes} W{\otimes} S^2W)^G=\{0\}$ then the intrinsic torsion is
parallel with respect to the minimal connection.
\end{theorem}
\begin{proof}
Let $\tilde\nabla$ be the minimal connection and $\xi$ the intrinsic torsion of
$M$. Write $\lie g^\bot$ for the orthogonal complement of $\lie g$ in $\lie{so}(n)$.
We write the Riemannian curvature tensor as
\begin{equation*}
R^g=\tilde R+(\tilde\nabla\xi)+(\xi^2).
\end{equation*}
where $\tilde R \in \Lambda^2V{\otimes}\lie g$, $(\tilde\nabla\xi) \in
\Lambda^2V{\otimes}\lie g^\bot$ and $(\xi^2) \in (S^2(\lie g^\bot))^G$. We have that
$(\tilde\nabla\xi)$ is the anti-symmetrisation on the first two factors of $\tilde\nabla\xi
\in V{\otimes}\Lambda^3V$. When restricted to $V{\otimes}\Lambda^3V$,
this anti-symmetrisation is an isomorphism $V{\otimes}\Lambda^3V\cong
\Lambda^4V+\Lambda^2(\Lambda^2V)$. Therefore,
\begin{equation*}
0 = \left<(\tilde\nabla\xi),R^g\right> = \left<(\tilde\nabla\xi),(\tilde\nabla\xi)+(\xi^2)\right>
= \left<(\tilde\nabla\xi),(\tilde\nabla\xi)\right>.
\end{equation*}
The last equality holds since the assumption $(V{\otimes} W{\otimes}
S^2W)^G=\{0\}$ implies $(\tilde\nabla\xi)\in V\otimes W$ and $(\xi^2)\in S^2W$ are
orthogonal.
\end{proof}
\begin{corollary}\label{cor:4}
Let $(M,g,V)$ be a $G$-manifold with invariant intrinsic torsion such
that $V^G=\{0\}$ and such that the intrinsic torsion is skew-symmetric.
Then the intrinsic torsion is parallel with respect to the minimal
connection.\qed
\end{corollary}
The conclusion, that $\tilde\nabla\xi=0$, implies that $H := \Hol(\tilde\nabla) \subset
\Lie{Stab}(\xi)$. However, as $\tilde\nabla$ is a $G$-connection we also have $H\subset
G$. Thus, if $\xi\not=0$ then the holonomy group $H$ must be a proper
subgroup of $G$.
\begin{lemma}\label{lem:8}
Let $(M,g,V)$ be a $G$-manifold with intrinsic torsion $\xi$
and minimal connection $\tilde\nabla$. Write $H$ for the holonomy group of
$\tilde\nabla$ and let $(M,g,V')$ be the $H$-manifold determined by the
holonomy of $\tilde\nabla$. Then the intrinsic torsion of $(M,g,V')$ is $\xi$.
\end{lemma}
\begin{proof}
Since $\tilde\nabla$ is a $G$-connection $H=\Hol(\tilde\nabla)\subset G$. Therefore
$\tilde\nabla$ is an $H$-connection and, moreover, $\lie g^\bot\subset\lie h^\perp$,
whence $\xi\in V'{\otimes}\lie h^{\perp}$.
\end{proof}
\begin{proposition}
Let $(M,g)$ be a Riemannian manifold. Assume there exists a
$G$-structure on $M$ with tangent representation $V$ and
skew-symmetric intrinsic torsion taking values in $W$, where $V$ and
$W$ satisfy $(V{\otimes} W{\otimes} S^2W)^G=\{0\}$. Then there
exists an $H$-structure on $M$ with invariant skew-symmetric
intrinsic torsion which is parallel with respect to the minimal
connection.
\end{proposition}
\begin{proof}
Assume that $(M,g,V)$ is a Riemannian $G$-manifold with intrinsic
torsion $\xi\in
W\subset\Lambda^3V\cap\left(V{\otimes}\lie g^\bot\right)$. If
$\left(V{\otimes}W{\otimes}S^2W\right)^G=\{0\}$ then $\tilde\nabla\xi=0$ by
Theorem~\ref{thm:5}. As we have argued above, $H=\Hol(\tilde\nabla)$ then
leaves $\xi$ invariant. The Proposition now follows from
Lemma~\ref{lem:8}.
\end{proof}
\subsection{Classification}
As corollaries of Lemma~\ref{lem:1}, Theorem~\ref{thm:3} and
Corollary~\ref{cor:4} we obtain the following classifications.
\begin{theorem}\label{thm:8}
Let $(M,g,V)$ be a $G$-irreducible Riemannian manifold. If the
intrinsic torsion of $M$ as a $G$-manifold is invariant
skew-symmetric and non-vanishing then either $(M,g)$ has weak
holonomy $\Lie{SU}(3)$ or $G_2$ or $(M,g)$ is locally isometric to a
non-symmetric isotropy irreducible homogeneous space. In particular,
$(M,g)$ is Einstein.\qed
\end{theorem}
Recall that a $G$-manifold $(M,g,V)$ with intrinsic torsion taking values
in $W\subset V\otimes\lie g^\bot$ for which the three
conditions~(\ref{cond:A}),~(\ref{cond:B}) and~(\ref{cond:C}) of
Theorem~\ref{thm:1} are satisfied is $G$-irreducible and has skew-symmetric
intrinsic torsion. For the particular case of $W\cong k\mathbb R$ we now
have:
\begin{theorem}
Let $(M,g,V)$ be a Riemannian $G$-manifold. Assume that the intrinsic
torsion takes its values in the $G$-submodule $W\cong k\mathbb R\subset
V{\otimes}\lie g^\bot$. Furthermore, assume that $V$ satisfies
\begin{enumerate}[\upshape (a$'$)]
\item $(V{\otimes} S^2_0V)^G=\{0\}$
\item $V$ is irreducible.
\item $\lie g$ is a proper subalgebra of $\lie{so}(n)$ and $\lie
g\not=\lie{u}(n/2)$.
\end{enumerate}
Then either $(M,g)$ has weak holonomy $\Lie{SU}(3)$ or~$G_2$ or $(M,g)$
is locally isometric to a non-symmetric isotropy irreducible
homogeneous space.\qed
\end{theorem}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\end{document}
|
\begin{document}
\title[Reinhardt domains with a cusp at the origin]
{Reinhardt domains with a cusp at the origin}
\author{Oscar Lemmers}
\email[Oscar Lemmers]{[email protected]}
\author{Jan Wiegerinck}
\email[Jan Wiegerinck]{[email protected]}
\keywords{Gleason problem, Reinhardt domain, $\overline{\partial}$-problem}
\subjclass{Primary : 32A07; Secondary : 46J15}
\date{December 31, 2001}
\begin {abstract}
Let $\Omega$ be a bounded pseudoconvex Reinhardt domain in ${\mathbb{C}} ^2$ with many
strictly pseudoconvex points and logarithmic image $\omega$. It was known that
the maximal ideal in $H^{\infty}(\Omega)$ consisting of all functions
vanishing at $(p_1,p_2) \in \Omega$ is generated by the coordinate functions
$z_1 - p_1$, $z_2 - p_2$ (meaning that
one can solve the Gleason problem for $H^{\infty}(\Omega)$) if
$\omega$ is bounded. We show that one can solve Gleason's problem for
$H^{\infty}(\Omega)$ as well if there are positive numbers $a$, $b$ and a
positive rational number $\frac{k}{l}$ such that $\Omega$ looks like
$\{(z_1,z_2) \in {\mathbb{C}}^2 : a |z_2|^l \leq |z_1|^k \leq b |z_2|^l \}$ for small
$z$.
\end{abstract}
\maketitle
\section{Introduction}
\noindent Let $\Omega$ be a bounded domain in ${\mathbb{C}}^n$, let $p = (p_1,\ldots,
p_n)$ a point in $\Omega$. Recall the Gleason problem, cf. \cite{glea} : is
the maximal ideal in $A(\Omega)$ (or $H^{\infty}(\Omega)$) consisting of
functions vanishing at $p$ generated by the (translated) coordinate functions
$z_1 - p_1$, $\ldots$, $z_n - p_n$ ? We say that one can solve the Gleason
problem if this is indeed the case for every $p \in \Omega$. Gleason mentioned
the difficulty of solving this problem even for such a simple domain as the
unit ball $B(0,1)$ in ${\mathbb{C}}^2$, $p=(0,0)$. This case was solved by Leibenzon
(\cite{khen}), who gave a solution to the Gleason problem for every convex
domain in ${\mathbb{C}}^n$ with $C^2$ boundary.\\
Kerzman and Nagel (\cite{kena}) used
sheaf-theoretic methods and estimates on the solutions of $\overline
{\partial}$-problems to solve the Gleason problem for $A(\Omega)$, where
$\Omega$ is a bounded strictly pseudoconvex domain in ${\mathbb{C}}^2$ with
$C^4$
boundary. Lieb (\cite{lieb}) independently solved the
Gleason problem for $A(\Omega)$ on bounded strictly pseudoconvex domains in
${\mathbb{C}}^n$ with $C^5$ boundary;
\O vrelid improved this in \cite{oevr} to $C^{2}$ boundary. See also Henkin
(\cite{khen}) and Jakobczak (\cite{jako}).\\
In ${\mathbb{C}}^2$ the Gleason problem was also solved for domains of finite
type (\cite{foev}, \cite{noel}).
Backlund and
F\"{a}llstr\"{o}m showed (\cite{bafa3}) that there exists an
$H^{\infty}$-domain of holomorphy on which the Gleason problem is not solvable.
In \cite{bafa4} Backlund and F\"{a}llstr\"{o}m used ideas similar to those of
Beatrous (\cite{beat}), to solve the Gleason problem for $A(\Omega)$ if
$\Omega$ is a bounded pseudoconvex Reinhardt domain in ${\mathbb{C}}^2$ with $C^2$
boundary that contains the origin. These ideas were expanded by the authors
(\cite{lewi}),
who solved the Gleason problem for both $A(\Omega)$ and $H^{\infty}(\Omega)$
if $\Omega$ is a bounded Reinhardt domain in ${\mathbb{C}}^2$ with $C^2$ boundary. Thus
the domain does not need to be pseudoconvex, and the condition that it contains
the origin could also be dropped. The condition of $C^2$ boundary could be
weakened quite a lot, since it was only the behavior of the domain at
the origin that was important. In this paper, we consider bounded pseudoconvex
Reinhardt domains $\Omega$ in ${\mathbb{C}}^2$ that for small $z$ look like
\[\{(z_1,z_2) : a < \left|\frac{z_1^k}{z_2^l}\right|<b\}, \; k,l \in {\mathbb{N}}^+,
a,b \in {\mathbb{R}}^+,\]
and are rounded off strictly pseudoconvexly. Thus, $\partial \Omega$ is
non-smooth near the origin. We solve the Gleason problem for
$H^{\infty}(\Omega)$ in a way like \cite{lewi}. More detailed,
we divide the
domain in two parts. On one part the problem is solved by splitting $f$ into
functions for which an explicit solution is constructed. Adding these explicit
solutions then gives a solution to the Gleason problem for $f$ on this part of
$\Omega$. On the other
part, the problem is solved using the $\overline{\partial}$-methods
of \cite{lewi}. Then we patch the two local solutions together
to a
global solution, using a new $\overline{\partial}$-result.\\
We conclude by solving the Gleason problem for $H^{\infty}(\Omega)$ on the
Hartogs triangle and related domains.
\section{Definitions}
\noindent
We let ${\mathbb{C}}^*$ stand for ${\mathbb{C}} \setminus \{0\}$.
Let \[L : ({\mathbb{C}}^*)^2 \rightarrow {\mathbb{R}}^2, \; L(z_1,z_2):=(\log |z_1|,\log|z_2|).\]
Throughout this paper $\Omega$ will be a bounded Reinhardt domain in
${\mathbb{C}}^2$.
We denote its logarithmic image $L(\Omega \cap ({\mathbb{C}}^*)^2)$ by $\omega$. The
boundary of $\Omega$
and $\omega$ will be denoted by $\partial \Omega$ and $\partial \omega$
respectively, while $S(\Omega)$ shall stand for the strictly
pseudoconvex boundary points of $\Omega$ that are $C^5$.\\
We denote the
derivative of a function $g$ with respect to the $j$'th coordinate with $D_jg$.
The interior and the closure of a set $V$ are denoted by $V^{\circ}$ and
$\overline{V}$ respectively. We denote the set \[\{(z_1,z_2) \in {\mathbb{C}}^2 :
g(z_1,z_2) = c\} \] by $[g(z_1,z_2)=c]$, and use a similar notation with e.g.
$\leq$ instead of $=$.
\vskip5mm \noindent
{\bf Definition.} We say that $\Omega$ is an
$A$-domain, if $\Omega$ is a bounded
pseudoconvex Reinhardt domain in ${\mathbb{C}}^2$ such that
\begin{itemize}
\item There exist $a$, $b$, $\epsilon \in {\mathbb{R}}^{+}$, $k$, $l \in {\mathbb{N}}^{+}$, with
\[\Omega \cap
B(0,\epsilon) = \{(z_1,z_2) \in B(0,\epsilon) : a < \left|\frac{z_1^{k}}
{z_2^l}\right| <b \}\]
\item The boundary points of $\Omega$ outside $\overline{B(0,\epsilon)}$ are
all $C^5$ and strictly pseudoconvex.
\end{itemize}
\noindent{\bf Definition.} Let $U \subseteq {\mathbb{R}}^n$ be an open set. For $0 <
\alpha < 1$ we define
\[\Lambda_{\alpha}(U) = \{f \in C(U) : \sup_{x,x+h \in U} |f(x+h) - f(x)|
/|h|^{\alpha} + ||f||_{L^{\infty}(U)} \]
\[= ||f||_{\Lambda_{\alpha}(U)} < \infty\}.\]
\section{Solving a Cauchy-Riemann equation}
\noindent
The goal of this section is to prove the following theorem.
\begin{Thm}{\label{Thm:aholder}}
Let $\Omega$ be an $A$-domain. Suppose that $f$ is a
$\overline{\partial}$-closed $(0,1)$-form with coefficients that are smooth and
bounded on $\Omega$, and that $\text {supp} f \cap \overline{B(0,
\epsilon)} = \emptyset$. Then there exists a $u \in \Lambda_{1/2}(\Omega)$
with $\overline{\partial}u = f$.
\end{Thm}
\noindent From this follows immediately that this $u$ is bounded on $\Omega$.
Note that under the assumptions of the theorem, the support of $f$ near the
boundary lies only near the strictly pseudoconvex points.
The setup of the
proof is very similar to that of the standard result on strictly
pseudoconvex domains with $C^5$ boundary. We will follow the book of Krantz
(\cite{kran}), sections 5.2 and 9.1-9.3 (10.1-10.3 in the new edition). The
proof is subdivided in a series of lemmas. Proofs are given or indicated if
there is a difference with the standard situation, otherwise we
refer to \cite{kran}. We do realize that the reader who is not that familiar
with $\overline{\partial}$-problems will not be very happy about this decision.
In our opinion the alternative, copying over 25 pages word by word, would be
even worse.\\
Both in our case and the standard case, one has to
construct holomorphic support functions $\Phi(\cdot,P)$. Estimates on it
are derived by solving a $\overline{\partial}$-problem using the
$L^2$-technique with weights of H\"{o}rmander (\cite{horm1}). In our case, we
use that the
$A$-domain $\Omega$ is contained in a slightly larger $A$-domain
$\Omega_{1/n}$. The necessary estimate on a ball $B$ around the origin is
derived by a smart choice of the weight function $\phi$. The estimate on
$\Omega \setminus B$ is derived using that $\Omega \setminus B$ is compactly
contained in $\Omega_{1/n} \setminus B$. Compare this to the strictly
pseudoconvex case, where one uses that the domain is compactly contained in a
strictly pseudoconvex domain that is strictly larger.
\vskip5mm \noindent We fix an $A$-domain $\Omega$.
Let $\epsilon$ be the smallest number such that $\partial \Omega \setminus
\overline{B(0, \epsilon)}$ contains only
strictly pseudoconvex points. We set $V:= \{w \in \partial \Omega : |w| >
\epsilon\}$; then $V$ contains only strictly pseudoconvex points.
Let $\rho : {\mathbb{C}}^2 \rightarrow {\mathbb{R}}$ be a defining function for $\Omega$ that
is $C^5$ and strictly plurisubharmonic on a neighborhood of $V$.
The function $L : {\mathbb{C}}^2 \times {\mathbb{C}}^2 \setminus
\overline{B(0,\epsilon)} \rightarrow {\mathbb{C}}$ given by
\[L_P(z)= L(z,P):= \rho(P) + \sum_{j=1}^{2} \frac{\partial \rho}{\partial z_j}
(P)(z_j - P_j) \]\[+ \frac{1}{2} \sum_{j,k=1}^{2} \frac{\partial ^2 \rho(P)}
{\partial z_j \partial z_k}(P)(z_j - P_j)(z_k - P_k)\]
is known as the
Levi polynomial at $P$. It has the following properties :
\begin{enumerate}
\item For all $P \in {\mathbb{C}}^2 \setminus \overline{B(0,\epsilon)}$, the function $z
\mapsto L(z,P)$ is holomorphic (it is even a polynomial).
\item For all $P \in V$, there is a neighborhood $U_P$ such that if $z \in
\overline{\Omega} \cap \{w \in U_P : L_P(w) = 0\}$ then $z=P$.
\end{enumerate}
\noindent
The goal is to construct for every $P \in V$ a holomorphic support function
$\Phi(\cdot,P)$. This is a smooth function on $\Omega \times V$ that
is holomorphic in the first variable, such that $\Phi(z,P)=0 \Leftrightarrow
z=P$. Thus, this function should have the first property of the
Levi polynomial at $P$. The difference is that one does not have to restrict
in (2) to a small neighborhood of $P \in V$. The construction of these
functions $\Phi(\cdot, P)$ will be done via some lemmas.
\vskip5mm \noindent
Choose $\gamma$, $\delta > 0$ such that
\[\sum_{j,k=1}^{n} \frac{\partial ^2 \rho}{\partial z_j \partial
\overline{z}_k}(P)v_j \overline{v}_k \geq \gamma |v|^2 \quad \forall
P \in \{z \in {\mathbb{C}}^n \setminus \overline{B(0,\epsilon)} : |\rho(z)| < \delta\},
v \in {\mathbb{C}}^n.\]
\begin{Lem}{\label{Lem:lambda}}
There is a $\lambda > 0$ such that if $P \in V$ and $|z-P| < \lambda$,
then $2 \Re L_P(z) \leq \rho(z) - \frac{\gamma|z-P|^2}{2}$.
\end{Lem}
\noindent For every $n \in {\mathbb{N}}$, we shall now define $A$-domains
$\Omega_{1/n}$ that are close to $\Omega$. That is :
\[\Omega_{1/n} \cap B(0,\epsilon) = \{(z_1, z_2) \in B(0,\epsilon) :
(1 - 1/n)a < \left|\frac {z_1^{k}}{z_2^{l}}\right| < (1 + 1/n)b\},\] and
$\Omega_{1/n}$ is rounded off strictly pseudoconvexily, having a $C^5$
defining
function $\rho_{1/n}$ on a neighborhood $U$ of $\Omega_{1/n} \setminus
B(0,\epsilon)$ such that
\begin{itemize}
\item $\Omega \subset \Omega_{1/n}$, $\partial \Omega \cap \partial
\Omega_{1/n} = \{0\}$
\item $\Omega_{1/(n+1)} \subset \Omega_{1/n} \quad \forall n \in {\mathbb{N}}$,
$\partial \Omega_{1/(n+1)} \cap \partial \Omega_{1/n} = \{0\}$
\item $\lim_{n \rightarrow \infty} ||\rho_{1/n} - \rho||_{C^5(U)} = 0.$
\end{itemize}
We also construct $A$-domains $\Omega_{-1/n}$ that are close to $\Omega$. That
is :
\[\Omega_{-1/n} \cap B(0,\epsilon) = \{(z_1, z_2) \in B(0,\epsilon) :
(1 + 1/n)a < \left|\frac {z_1^{k}}{z_2^l}\right| < (1 - 1/n)b\},\] and
$\Omega_{-1/n}$ is rounded off strictly pseudoconvexily, having a $C^5$
defining function $\rho_{-1/n}$ on a neighborhood $U$ of $\Omega \setminus
B(0,\epsilon)$ such that
\begin{itemize}
\item $\Omega_{-1/n} \subset \Omega$, $\partial \Omega \cap \partial
\Omega_{-1/n} = \{0\}$
\item $\Omega_{-1/n} \subset \Omega_{-1/(n+1)} \quad \forall n \in {\mathbb{N}}$,
$\partial \Omega_{-1/(n+1)} \cap \partial \Omega_{-1/n} = \{0\}.$
\item $\lim_{n \rightarrow \infty} ||\rho_{-1/n} - \rho||_{C^5(U)} = 0.$
\end{itemize}
This is possible, cf. the setup in \cite{lewi} : we only need to
consider convex domains in ${\mathbb{R}}^2$ instead of pseudoconvex Reinhardt domains
in ${\mathbb{C}}^2$.\\
We choose $n \in {\mathbb{N}}$ such that $||\rho_{1/n} - \rho||_{C^5(U)} \leq
\frac{\gamma \lambda^2}{20}$ (where $\lambda$ is the constant of lemma
\ref{Lem:lambda}). We may assume that $||\rho_{1/n} - \rho||_{C^5(U)} <
\lambda < \delta <1$.
\begin{Lem}
If $P \in V$, $z \in \Omega_{1/n}$, $\lambda/3 \leq |z-P| \leq 2\lambda/3$,
then $\Re L_P(z) < 0$.
\end{Lem}
\noindent
Let $\eta : {\mathbb{R}} \rightarrow [0,1]$ be a $C^{\infty}$ function that satisfies
$\eta(x)=1$ for $x \leq \lambda/3$, $\eta(x)=0$ for $x \geq 2\lambda/3$.
\begin{Lem}
Let $P \in V$. The $(0,1)$-form
\[f_P(z) = \left\{ \begin{array}{ll} -\overline{\partial}_z(\eta(|z-P|)) \cdot
\log L_P(z) & \mbox{\!\!,\quad if $|z-P| < \lambda, z \in \Omega_{1/n}$}\\
0 & \mbox{\!\!,\quad if $|z-P| \geq \lambda, z \in \Omega_{1/n}$}\\
\end{array}\right.\]
is well defined (if we take the principal branch for the logarithm) and has
$C^{\infty}$ coefficients for $z \in \Omega_{1/n}$. Furthermore,
$\overline{\partial}_z f_P(z) = 0$ on $\Omega_{1/n}$.
\end{Lem}
\begin{Lem}{\label{Lem:dbaradom}}
Let $f$ be a $\overline
{\partial}$-closed $(0,1)$-form on $\Omega_{1/n}$ with $C^1$ coefficients
that are bounded.
Suppose that $\overline{B(0, \epsilon)} \cap \text{supp} f =
\emptyset$. Then there exist a $C_{\epsilon}$ (that does not
depend on $f$) and a function $u$ with $\overline{\partial}u = f$
such that \[||u||_{L^{\infty}(\Omega_{1/2n})} \leq C_{\epsilon}||f||_
{L^{\infty}(\Omega_{1/n})}.\]
\end{Lem}
\begin{proof}
If $f$ is identically zero, we are done. So assume that
$||f||_{L^{\infty}(\Omega_{1/n})} > 0$.
We choose a weight $\phi$ that blows up near the boundary of $\Omega_{1/n}$.
Then
we add several times $\log |z|$ such that $e^{- \phi(z)}$ will behave like
$|z|^{-k}$ (this $k$ will be chosen later). We let $u$ be the solution of the
$\overline{\partial}$-equation on $\Omega_{1/n}$ for the weight $\phi$, as
constructed by H\"{o}rmander (\cite{horm1}). Then
\[\int_{\Omega_{1/n}} \frac{|u(z)|^2 e^{- \phi(z)}}{(1 + |z|^2)^2} d\lambda
\leq \int_{\Omega_{1/n}} |f(z)|^2 e^{- \phi(z)} d\lambda < \infty.\]
The first inequality is the estimate of H\"{o}rmander, the second one holds
because $f$ has bounded coefficients. We start by showing that the
assumption
that there is a sequence $\{z_n\}^{\infty}_{n=1}$ in $\Omega_{1/(2n)}$ that
converges to
$0$ such that $|u(z_n)| \geq ||f||_{L^{\infty}(\Omega_{1/n})}$ leads to a
contradiction. This yields an estimate for $|u|$ near the origin.\\
There are constants $R$, $\beta > 0$ such that
\[z \in \Omega_{1/(2n)} \cap B(0,\epsilon) \Rightarrow B(z,R|z|^{\beta})
\subset \Omega_{1/n}.\]
Thus for large $n$, one has that $B(z_n, R|z_n|^{\beta})$ is contained
completely in $B(0,\epsilon) \cap \Omega_{1/n}$.
We choose $k > 4 \beta$.
We assumed that $f$ has no support on $\overline{B(0,\epsilon)} \cap
\Omega_{1/n}$, thus $u$ is holomorphic there. We now
apply the mean value inequality on $u$.
\[\infty > \int_{\Omega_{1/n}} |f(z)|^2 e^{- \phi(z)} d\lambda \geq
\int_{\Omega_{1/n}}
\frac{|u(z)|^2 e^{- \phi(z)}}{(1 + |z|^2)^2} d\lambda \]\[
> \int_{B(z_n, R|z_n|^
{\beta})} \frac{|u(z)|^2 e^{- \phi(z)}}{(1 + |z|^2)^2} d\lambda > C
\frac{|u(z_n)|^2 R^4 |z_n|^{4 \beta}}{|z_n|^k} > C' |z_n|^{4 \beta - k}
\rightarrow \infty\]
if $n \rightarrow \infty$. Thus there is a $\delta$ with $0 < \delta <
\epsilon$ such that $|u(z)| \leq
||f||_{L^{\infty}(\Omega_{1/n})}$ for $z \in \Omega_{1/(2n)} \cap
B(0,\delta)$.\\
Now we shall make the appropriate estimate on $\Omega_{1/(2n)} \setminus
B(0,\delta)$.
Remember the H\"{o}rmander construction (\cite{kran}, chapter 4), with
$\phi$, $\phi_1$, $\phi_2$ and
\[T = \overline{\partial}_{0,0} : L^2_{(0,0)}(\Omega_{1/n}, \phi_1) \rightarrow
L^2_{(0,1)}(\Omega_{1/n}, \phi_2).\]
Then
\[\sup _{\Omega_{1/(2n)} \setminus B(0, \delta)} |u| \leq C(||u||_{L^2(
\Omega_{3/(4n)}
\setminus B(0, \delta))} + ||\overline{\partial} u||_{L^{\infty}_{(0,1)}
(\Omega_{3/(4n)} \setminus B(0, \delta))})\]
\[\leq C' (||u||_{L^2(\Omega_{1/n} \setminus B(0, \delta), \phi_1)} +
||f||_{L^{\infty}_
{(0,1)} (\Omega_{3/(4n)} \setminus B(0, \delta))})\]
\[\leq C'' (||f||_{L^2_{(0,1)}(\Omega_{1/n} \setminus B(0, \delta), \phi_2)} +
||f||_{L^{\infty}_{(0,1)} (\Omega_{3/(4n)} \setminus B(0, \delta))})\]
\[\leq C''' ||f||_{L^{\infty}_{(0,1)}(\Omega_{1/n} \setminus B(0, \delta))}\]
since $e^{- \phi_2(z)}$ tends to zero as $z$ tends to a boundary point of
$\Omega_{1/n}$ that is non-zero.
\end{proof}
\noindent
For every $P \in V$, we let $u_P$ be a solution of $\overline{\partial}u_P =
f_P$ that satisfies the estimate above. We now define
\[\Phi(z,P) = \left\{ \begin{array}{ll} L_P(z) \cdot \exp(u_P(z)) & \mbox{\!\!,
\quad if $|z-P| < \lambda/3$} \\
\exp(u_P(z) + \eta(|z-P|)\log L_P(z)) & \mbox{\!\!, \quad if $\lambda/3 \leq
|z-P| < \lambda$} \\
\exp(u_P(z)) & \mbox{\!\!, \quad if $|z-P| \geq \lambda$}\\ \end{array}
\right.\]
We proceed to show that these functions $\Phi(\cdot,P)$ are holomorphic support
functions.
\begin{Lem}{\label{Lem:holsup}}
For every $P \in V$,
the function $\Phi(\cdot,P)$ is holomorphic on $\Omega_{1/n}$. For fixed $z \in
\Omega_{1/(2n)}$, $\Phi(z,\cdot)$ is continuous in $P$. There is a
$C > 0$, independent of $P$, such that for all $z \in \Omega_{1/(2n)}$ we have
\[\text{if} \quad |z-P| < \lambda/3, \quad \text{then} \quad |\Phi(z,P)|
\geq C|L_P(z)|,\]
\[\text{if} \quad |z-P| \geq \lambda/3, \quad \text{then} \quad |\Phi(z,P)|
\geq C.\]
\end{Lem}
\begin{proof}
The function $f_P$ is bounded on $\Omega_{1/(2n)}$ uniformly in $P$, hence
$u_P$ is bounded on $\Omega_{1/(2n)}$
uniformly in $P$. Thus there is a $C > 0$ such that $|\exp
u_P(z)| \geq C$. Working this out yields the appropriate estimates.
\end{proof}
\begin{Lem}{\label{Lem:holsupbdd}}
For every $P \in V$ there exist functions $\Phi_1(z,P)$, $\Phi_2(z,P)$ that
are holomorphic in $z \in \Omega_{1/n}$ and a constant $C$ that does not
depend on $P$, such that
\[\Phi(z,P) = \Phi_1(z,P) (z_1 - P_1) + \Phi_2(z,P) (z_2 - P_2) \quad \forall
z \in \Omega_{1/n},\]
\[|\Phi_j(z,P)| \leq C \quad \forall z \in \Omega_{1/(2n)}, P \in V, j = 1,2.\]
\end{Lem}
\begin{proof}
We will follow the approach of Backlund and F\"{a}llstr\"{o}m in \cite{bafa4}.
A line with positive rational slope $\frac{k}{l}$ in ${\mathbb{R}}^2$ passing through
$L(p)$ can be seen as the logarithmic image of the zero set of the polynomial
$z_1^k p_2^l - z_2^l p_1^k$, while a line with negative rational slope
$\frac{-k}{l}$ in ${\mathbb{R}}^2$ passing through $L(p)$ can be seen as the logarithmic
image of the zero set of the polynomial $z_1^k z_2^l - p_1^k p_2^l$.\\
Fix $P \in V$.
We choose polynomials $g$ and $h$ such that $L(Z_g)$ and $L(Z_h)$ are lines
in ${\mathbb{R}}^2$ that intersect
the boundary of $\Omega$ only in $V$, and $[g=0] \cap [h=0] \cap
\Omega_{1/n} = \{P\}$.
Now choose a ball $U_0$ around $P$ that lies compactly in $\Omega_{1/n}$, and
choose open sets $U_1$, $U_2$ such that
\begin{itemize}
\item $\overline{\Omega_{1/n}} \subset \cup_i U_i$
\item For a certain positive number $\mu$ one has that $|g| > \mu$ on $U_1$,
$|h| > \mu$ on $U_2$.
\item $\overline{U_1} \cap \overline{U_2} \cap \overline{B(0,\epsilon)} =
\emptyset$.
\end{itemize}
\noindent Now choose functions $\phi_k \in C^{\infty}_0(U_k) \quad (k=0,1,2)$
such that $0 \leq \phi_k \leq 1$ and $\sum_{k=0}^2 \phi_k = 1$ on $\overline
{\Omega_{1/n}}$.
Recall that $\Phi(\cdot,P)$ vanishes at $z=P$. Because $\Phi(\cdot,P)$ is
holomorphic on $\Omega_{1/n}$, and $U_0 \subset \subset \Omega_{1/n}$, the
lemma of Oka-Hefer implies that there exist functions $\Phi_1^0(\cdot,P)$,
$\Phi_2^0(\cdot,P) \in H^{\infty}(U_0)$ such that
\[\Phi(z,P) = \Phi_1^0(z,P)(z_1 - P_1) + \Phi_2^0(z,P)(z_2 - P_2) \quad \quad
\forall z \in U_0.\]
We set \[\tilde{\Phi}_1^1(z,P):= \frac{\Phi(z,P)}{g(z)}, \tilde{\Phi}_2^1(z,P)
:=0,\] \[\tilde{\Phi}_1^2(z,P):=0, \tilde{\Phi}_2^2(z,P):=\frac{\Phi(z,P)}
{h(z)}.\]
Then $\tilde{\Phi}_j^i \in H^{\infty}(U_i \cap \Omega_{1/n})$ and
\[\Phi(z,P) = \tilde{\Phi}_1^i(z,P)g(z) + \tilde{\Phi}_2^i(z,P)h(z) \quad
\forall z \in U_i, i \in \{1,2\} \quad \quad \quad(*).\]
Since $g$ is an analytic polynomial, vanishing at $P$, there are polynomials
$g_{1}$, $g_{2} \in H({\mathbb{C}}^{2})$ such that
$g = g_{1} (z_1 - P_1) + g_{2} (z_2 - P_2)$ on ${\mathbb{C}}^{2}$. A similar
formula holds for $h$. Substituting this in $(*)$, we obtain the existence of
functions $\Phi^{i}_{j} \in H^{\infty}(U_i \cap \Omega_{1/n})$, $i = 1$, $2$,
such that
\[ \Phi(z,P) = \Phi_{1}^{i}(z,P) (z_1 - P_1) + \Phi_{2}^{i}(z,P) (z_2 - P_2)
\; \text { on } \; \overline{U_i} \cap \Omega_{1/n}, \; \; \; i = 1, 2.\]
Therefore
\[j_1 := \sum_{k=0}^{2} \phi_k \Phi_1 ^k \; \text { and } \;
j_2 := \sum_{k=0}^{2} \phi_k \Phi_2 ^k \]
\noindent give a smooth solution of our problem. We want to find $u$ such that
\[\Phi_1 = j_1 + u(z_2 - P_2) \; \text { and } \; \Phi_2 = j_2 - u(z_1 - P_1)
\quad \quad \quad (**)\] are in $H(\Omega_{1/n}) \cap L^{\infty}
(\Omega_{1/(2n)})$.
Define a form $\lambda$ as follows :
\[ \lambda := \frac {- \overline{\partial}j_1}{z_2 - P_2} =
\frac {\overline{\partial}j_2}{z_1 - P_1} \]
\noindent This form $\lambda$ is a bounded $\bar{\partial}$-closed
$(0,1)$-form on $\Omega_{1/n}$.
The support of $\lambda$ is contained in $\overline{U_i} \cap \overline{U_j}$,
$i \not = j$. These sets all lie outside $\overline{B(0,\epsilon)}$.
Lemma \ref{Lem:dbaradom} gives the existence of a
function $u \in L^{\infty}(\Omega_{1/(2n)})$
such that $\bar{\partial}u = \lambda$.
With this $u$, $\Phi_1$, $\Phi_2$ as defined at $(**)$,
\[\Phi = \Phi_1 (z_1 - P_1) + \Phi_2 (z_2 - P_2)\] on $\Omega_{1/n}$,
and $\Phi_1(\cdot,P)$, $\Phi_2(\cdot,P)$ both belong to
$H(\Omega_{1/n}) \cap L^{\infty}(\Omega_{1/(2n)})$.\\
For fixed $z \in \Omega_{1/(2n)}$, the function $\Phi(z,\cdot)$ depends
continuously on $P$. Studying the
construction above carefully, we see that we can choose $\Phi_1(z,\cdot)$ and
$\Phi_2(z,\cdot)$ continuously in $P$ as well. Thus, because supp $\lambda \cap
\partial \Omega$ is compact,
there exists a uniform bound on $||\Phi_i(z,P)||_{\Omega_{1/(2n)}}$.
\end{proof}
\begin{Thm}
Let $\Omega$ be an $A$-domain. Let $f$ be a $\overline{\partial}$-closed
$(0,1)$-form on an $A$-domain that contains $\overline{\Omega} \setminus \{0\}$
with $C^1$ coefficients.
Suppose that $\text{supp} f \cap \overline{B(0,\epsilon)} = \emptyset$.
Then there is a function $u$ such that $\overline{\partial} u =f$, and
\[||u||_{L^{\infty}(\Omega)} \leq C||f||_{L^{\infty}(\Omega)}.\]
\end{Thm}
\begin{proof}
Let
$H_{\Omega}(f)(z)$ be the Khenkin solution to the $\overline{\partial}$
equation; then $\overline{\partial} H_{\Omega}(f) = f$.
To prove the necessary estimates, we start by
writing $f=f_1 d \overline{\zeta}_1 + f_2 d \overline{\zeta}_2$.
Then the Khenkin solution can be rewritten to
\[H_{\Omega}(f)(z)= \frac{1}{4 \pi^2} \{\int_{\Omega} \frac{f_1(\zeta)\cdot
(\overline{\zeta}_1 - \overline{z}_1) + f_2(\zeta) \cdot (\overline{\zeta}_2 -
\overline{z}_2)}{|\zeta - z|^4} \times d\overline{\zeta}_1 \wedge d\overline
{\zeta}_2 \wedge d \zeta_1 \wedge d \zeta_2 \]
\[- \int_{\partial \Omega} \frac{\Phi_1(z,\zeta)(\overline{\zeta}_2 -
\overline{z}_2) - \Phi_2(z,\zeta)(\overline{\zeta}_1 - \overline{z}_1)}
{\Phi(z, \zeta)|\zeta - z|^2} \times (f_1(\zeta)d\overline{\zeta}_1 + f_2(
\zeta)d \overline{\zeta}_2) \wedge d \zeta_1 \wedge d \zeta_2 \}\]
\[= \int_{\Omega} f_1(\zeta)K_1(z, \zeta) dV(\zeta) +
\int_{\Omega} f_2(\zeta)K_2(z, \zeta) dV(\zeta) \]
\[+ \int_{\partial \Omega} f_1(\zeta)L_1(z, \zeta) dV(\zeta) +
\int_{\partial \Omega} f_2(\zeta)L_2(z, \zeta) dV(\zeta)\]
where the identity defines the kernels. Now let $T$ be so large that $\Omega
\subseteq B(z,T)$ for every $z \in \Omega$. Then
\[\int_{\Omega} |K_j(z, \zeta)|dV(\zeta) \leq \int_{B(z,T)} |z - \zeta|^{-3}
dV(\zeta) = C \int_{0}^{T} r^{-3}r^3 dr = C' \quad j=1,2.\]
Because $f$ has no support on $\overline{B(0,\epsilon)}$, one has that
\[\int_{\partial \Omega} f_j(\zeta)L_j(z, \zeta) dV(\zeta) =
\int_{V} f_j(\zeta)L_j(z, \zeta) dV(\zeta) \quad j=1,2 .\]
Using lemmas \ref{Lem:holsup} and \ref{Lem:holsupbdd}, one can prove that
\[\int_{V} |L_j(z, \zeta)| dV(\zeta) \leq D_j \quad j=1,2,\]
where the bounds are independent of $z \in \Omega$.
This implies that
\[||H_{\Omega}(f)||_{L^{\infty}(\Omega)} \leq (2 C' + D_1 + D_2)
||f||_{L^{\infty}(\Omega)}.\]
\noindent Keeping in mind that $|\Phi_1(z,\zeta)|$ and $|\Phi_2(z,\zeta)|$
are bounded on $\Omega_{1/(2n)}$ uniformly in $\zeta$, one can simply follow
\cite{kran}.
\end{proof}
\noindent Repeating exactly the arguments used there yields:
\begin{Thm}{\label{Thm:holder}}
Let $\Omega$ be an $A$-domain. Let $f$ be a $\overline{\partial}$-closed
$(0,1)$-form on an $A$-domain that contains $\overline{\Omega} \setminus \{0\}$
with $C^1$ coefficients.
Suppose that $\text {supp} f \cap \overline{B(0,\epsilon)} =
\emptyset$. Then $H_{\Omega}(f)$ is well defined, continuous on $\overline
{\Omega}$ and
\[||H_{\Omega}(f)||_{\Lambda_{1/2}(\Omega)} \leq C||f||_{L^{\infty}(\Omega)}.\]
\end{Thm}
\begin{Thm}
Let $\Omega$ be an $A$-domain. Then there is an $N \in
{\mathbb{N}}$ such that if $n \geq N$, then
theorem \ref{Thm:holder} holds on $\Omega_{-1/n}$ with $C_{\Omega_{-1/n}}
\leq 2 C_{\Omega}$.
\end{Thm}
\noindent
Now we give the proof of theorem \ref{Thm:aholder}. \begin{proof}
Let $\Omega$ be an $A$-domain. For $n \in {\mathbb{N}}$ large,
the stability result will apply on $\Omega_{-1/n}$. Now let $f$ be a $\overline
{\partial}$-closed $(0,1)$-form defined on $\Omega$ (not necessarily on a
neighborhood of $\overline{\Omega}$) with bounded $C^1$ coefficients. For
each sufficiently small $-1/n < 0$, the form $f$ satisfies the hypotheses
of theorem \ref{Thm:holder} on $\Omega_{-1/n}$. Therefore $H_{\Omega_
{-1/n}}(f)$ is well defined and satisfies $\overline{\partial}H_{\Omega_
{-1/n}}(f) = f$ on $\Omega_{-1/n}$. Moreover,
\[||H_{\Omega_{-1/n}}(f)||_{\Lambda_{1/2}(\Omega)} \leq C_{\Omega_
{-1/n}}||f||_{L^{\infty}(\Omega)} \leq 2 C_{\Omega}||f||_{L^{\infty}(\Omega)}
.\]
Thus, given a compact subset $K$ of $\Omega$, the functions
$\{H_{\Omega_{-1/n}}(f)\}$ form an equicontinuous family on $K$ if $n$ is
large. Of course, this family is also equi-bounded. By the Arzel\`{a}-Ascoli
theorem and diagonalization, we see that there is a
subsequence $H_{\Omega_{-1/j}}(f)$, $j=1$, $2$, $\ldots$, such that $H_{\Omega_
{-1/j}}(f)$ converges uniformly on compacta to a $u \in \Lambda_{1/2}
(\Omega)$ with $\overline{\partial}u = f$ on $\Omega$.
\end{proof}
\noindent {\bf Remark.} Note that theorem \ref{Thm:aholder} also holds for e.g.
a Reinhardt domain $\Omega$ that for small $z$ looks like
\[\{(z_1,z_2) : 0 < |z_1^k| < |z_2^l|\},\]
and is rounded off strictly pseudoconvexily.
\section{Auxiliary results}
\begin{Lem}{\label{Lem:solfrac}}
Let $\Omega$ be a domain in ${\mathbb{C}}^2$, let $(p_1,p_2) \in \Omega$, let $k$, $l
\in {\mathbb{N}}^*$.
Suppose that $\frac{z_1^k}{z_2^l} \in H^{\infty}(\Omega)$.
Let \[R_1(z_1,z_2) := \frac{1}{p_2^l} \frac{z_1^k - p_1^k}{z_1 - p_1},\]
\[R_2(z_1,z_2) := \frac{1}{p_2^l} \frac{z_1^k}{z_2^l} \frac{p_2^l - z_2^l}
{z_2-p_2} .\]
Then \[\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l} = R_1(z_1,z_2) (z_1-p_1) +
R_2(z_1,z_2) (z_2-p_2),\] and $R_1$, $R_2 \in H^{\infty}(\Omega)$.
\end{Lem}
\begin{proof}
This can be checked by hand.
\end{proof}
\begin{Lem}{\label{Lem:solpol}}
Let $P$ be a polynomial in $z_1$ and $z_2$ that vanishes at $(p_1,p_2) \in
{\mathbb{C}}^2$. There exist polynomials $P_1$, $P_2$ such that \[P(z_1,z_2) = P_1(z_1,
z_2)(z_1-p_1) + P_2(z_1,z_2)(z_2-p_2).\]
\end{Lem}
\begin{proof}
For $(p_1,p_2)=(0,0)$, this follows immediately. For other points apply the
appropriate coordinate transform.
\end{proof}
\begin{Lem}{\label{Lem:driehoek}}
Suppose there are points $t$, $u$, $v \in \partial \omega$ having
neighborhoods $T$, $U$, $V \subset \partial \omega$
consisting only of strictly convex points of $\partial \omega$ respectively,
such that $L(p) \in Co(tuv)$. Then one can solve the Gleason problem for
$H^{\infty}(\Omega)$ at $p$.
\end{Lem}
\begin{proof}
We choose, just as in lemma \ref{Lem:holsupbdd}, analytic polynomials $g$,
$h$, open
sets $U_0$, $U_1$, $U_2$ and a constant $\mu > 0$ such that:
\begin{itemize}
\item $[g(z) = 0] \cap [h(z) = 0] \cap \overline{\Omega} = \{p\}$
\item $U_0$ is strictly pseudoconvex, and $p \in U_0 \subset \subset
\Omega$
\item $|g| > \mu$ on $U_1$, $|h| > \mu$ on $U_2$
\item $\overline{\Omega} \subset \cup _i U_i$
\item $U_i \cap U_j \cap B(0,\epsilon) = \emptyset$ for $i \neq j$.
\end{itemize}
Now formulate the corresponding $\overline{\partial}$-problem, again as in
lemma \ref{Lem:holsupbdd}. This yields a
bounded $(0,1)$-form that has only support outside $B(0, \epsilon)$.
Applying theorem \ref{Thm:aholder} yields a bounded solution to the
$\overline{\partial}$-problem, and this can be used to solve the Gleason
problem in the standard way.
\end{proof}
\section{Dividing $\Omega$ in two pieces}{\label{section:pieces}}
\noindent Suppose that $\Omega$ is an $A$-domain, and that $p \in \Omega$.
Then the line with slope $\frac{k}{l}$ through $L(p)$ intersects $\partial
\omega$ in only one point $A$. This point is strictly convex. Thus there is a
line $N$ in ${\mathbb{R}}^2$ with rational slope $\not=\frac{k}{l}$ that
intersects $\partial \omega$ only at strictly convex points such that $A$ and
the part of $\omega$ in the third quadrant lie on different sides of $N$.
Say $N$ is given by the equation
$y=\frac{-m}{n} x + r$, where $m$, $n \in {\mathbb{N}}$. Then $N$ is the logarithmic
image of $[z_1^m z_2^n = e^{rn}]$. There is a $\delta > 0$ such that
\[\{(x,y) \in \partial \omega, \tilde{r} \in [r - \delta, r] : y =
\frac{-m}{n}x + \tilde{r} \} \subset S(\omega).\]
Let
\[\omega_1 := \{(x,y) \in \omega : y \geq \frac{-m}{n}x + r - \delta\},\]
\[\omega_2 := \{(x,y) \in \omega : y \leq \frac{-m}{n}x + r\},\]
and $\Omega_1$, $\Omega_2$ be $(\overline{L^{-1}(\omega_1)})^{\circ}$,
$(\overline{L^{-1}(\omega_2)})^{\circ}$ respectively.
If $p$ lies in $\Omega_1$, everything is easy : apply lemma
\ref{Lem:driehoek} to solve the Gleason problem for $H^{\infty}(\Omega)$ at
$p$.
\\In the rest of the article we shall assume that $p$ does not lie in
$\Omega_1$. We will use that there is a
$\nu > 0$ such that $|z_1^m z_2^n - p_1^m p_2^n| > \nu$ for $(z_1,z_2) \in
\Omega_1$ to obtain a local solution on $\Omega_1$. The next section
consists of the construction of a local solution on $\Omega_2$. Afterwards,
the two local solutions will be patched together using the standard arguments.
\section{Constructing a local solution}{\label{section:constrloc}}
\noindent We fix $p= (p_1,p_2) \in \Omega_2$ and $f \in H^{\infty}(\Omega)$
that vanishes at $p$.
The main idea of the following construction is to project
$(z_1, z_2)$ on the zero set of $\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}$,
because
\[\frac{f(z_1, z_2) - f((\frac{p_1^k z_2^l}{p_2^l})^{1/k}, z_2)}{\frac{z_1^k}
{z_2^l} - \frac{p_1^k}{p_2^l}} (\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}) +
\frac{f((\frac{p_1^k z_2^l}{p_2^l})^{1/k}, z_2)}{z_2-p_2} (z_2-p_2)\]
comes close to being a solution for the Gleason problem. However, as there
appear roots in the argument of the function, we lose in general the
holomorphy. We decompose $f$ in functions where one can take the
appropriate root. Then we solve the Gleason problem for those functions, add
all these solutions and end up with a solution of the Gleason problem for $f$.
\vskip5mm \noindent By
$\zeta$ we denote a primitive $(kn +lm)$-th root of unity.
\begin{Lem}{\label{Lem:symmetry}}
Suppose $f$ is a bounded holomorphic function on $\Omega_2$. Then for every
$0 \leq i$, $j \leq kn + lm - 1$ there exist functions $f_{i,j} \in
H(\Omega_2)$ such that :
\begin{itemize}
\item $z_1^i z_2^j f_{i,j}$ is bounded for $0 \leq i,j \leq kn +lm -1$
\item $f_{i,j}(z_1,z_2) = f_{i,j}(\zeta z_1,z_2) = f_{i,j}(z_1, \zeta z_2)$
for all $(z_1,z_2) \in \Omega_2$, $0 \leq i,j \leq kn + lm -1$
\item $f(z_1,z_2) = \sum_{i,j=0}^{kn + lm -1} z_1^i z_2^j f_{i,j}(z_1,z_2)$
for all $(z_1,z_2) \in \Omega_2$.
\end{itemize}
\end{Lem}
\begin{proof}
Let
\[f_{i,j}(z_1,z_2) := \frac{1}{(kn + lm)^2 z_1^i z_2^j} \sum_{s,t=1}^
{kn + lm} \zeta^{-is -jt} f(\zeta^{s}z_1,\zeta^{t}z_2).\]
The domain $\Omega$ does not contain points with a zero coordinate, hence
$f_{i,j}$ is well defined. Since $f$ is bounded, we see immediately that
$z_1^i z_2^j f_{i,j}(z_1,z_2)$ is bounded as well.
\[ (kn + lm)^2 f_{i,j}(\zeta z_1, z_2) = \frac{1}{(\zeta z_1)^i z_2^j}
\sum_{s,t=1}^{kn +lm} \zeta^{-is-jt} f(\zeta^{s+1}z_1, \zeta^{t}z_2) = \]
\[\frac{1}{(\zeta z_1)^i z_2^j} \sum_{t=1}^{kn + lm} \zeta^{-jt}
\sum_{s=2}^{kn +lm +1} \zeta^{-i(s-1)} f(\zeta^{s}z_1, \zeta^{t}z_2) = \] \[
\frac{\zeta^{i}}{(\zeta z_1)^i z_2^j} \sum_{t=1}^{kn + lm}
\zeta^{-jt}\left( \zeta^{-i(kn + lm +1)} f(\zeta^{kn + lm +1}z_1,\zeta^{t} z_2)
+ \sum_{s=2}^{kn + lm} \zeta^{-is} f(\zeta^{s}z_1, \zeta^{t}z_2) \right) =\]
\[=\frac{1}{ z_1^i z_2^j} \sum_{s,t=1}^{kn + lm} \zeta^{-is-jt}
f(\zeta^{s}z_1, \zeta^{t}z_2) = (kn +lm)^2 f_{i,j}(z_1,z_2).\]
The equality $f_{i,j}(z_1, \zeta z_2) = f_{i,j}(z_1,z_2)$ can be proven
similarly.
Since \[\sum_{i,j=0}^{kn + lm -1} \zeta^{-is -jt} = \sum_{i=0}^{kn + lm -1}
\zeta^{-is} \sum_{j=0}^{kn + lm -1} \zeta^{-jt} = \left\{ \begin{array}{ll}
0 & \mbox {\quad $s,t \neq kn + lm$} \\ (kn +lm)^2 & \mbox{\quad $s,t=kn +lm$}
\\ \end{array} \right. \]
we have that \[\sum_{i,j=0}^{kn +lm -1} z_1^i z_2^j f_{i,j}(z_1,z_2) = \frac{1}
{(kn +lm)^2} \sum_{i,j=0}^{kn +lm -1}
\sum_{s,t=1}^{kn + lm} \zeta^{-is -jt} f(\zeta^{s}z_1, \zeta^{t}z_2) = \]
\[=\frac{1}{(kn +lm)^2}
\sum_{s,t=1}^{kn +lm} f(\zeta^{s}z_1, \zeta^{t}z_2) \sum_{i,j=0}^{kn +lm -1}
\zeta^{-is -jt} = f(z_1,z_2).\]
\end{proof}
\noindent
{\bf Remark.} There is a polynomial $P$ such that \[P(\zeta^s p_1, \zeta^t p_2)
= f(\zeta^s p_1, \zeta^t p_2) \quad \forall 1 \leq s,t \leq kn + lm.\]
From lemma \ref{Lem:solpol} it follows that one can solve the Gleason problem
for the function
$f$ if and only if one can solve the Gleason problem for $f - P$.
The corresponding functions $(f - P)_{i,j}$ all vanish
at $p$. Hence we may assume from now on that $f_{i,j}$ vanishes at
$p$.
\begin{Lem}
The multi-valued map $\pi$ given below maps a point $(z_1,z_2) \in \Omega_2$
to the set $[\frac{z_1^k}{z_2^l}=\frac{p_1^k}{p_2^l}] \cap \Omega$.
The function $f_{i,j} \circ \pi$ is a holomorphic single-valued map on
$\Omega_2$, and it can be viewed as a function of $z_1^m z_2^n$.
\[\pi(z_1,z_2):=\]
\[\left(((z_1^m z_2^n)^{1/(kn+lm)})^l \left(\left(\frac{p_1^k}
{p_2^l}\right)^{1/(kn+lm)}\right)^n,
((z_1^m z_2^n)^{1/(kn+lm)})^k \left(\left(\frac{p_1^k}{p_2^l}\right)^
{1/(kn+lm)}\right)^{-m}\right),\]
where in both of the coordinates the same branch of the root is taken.
\end{Lem}
\begin{proof}
This follows from an easy computation. Since $f_{i,j}$ has a $kn+lm$-symmetry
in the two variables, it is well defined and holomorphic.
\end{proof}
\begin{Lem}{\label{Lem:deelbdd}}
For every $0 \leq i,j \leq kn+lm -1$ there exist functions $f_{i,j}^1$,
$f_{i,j}^2 \in H^{\infty}(\Omega_2)$ such that \[z_1^i z_2^j f_{i,j}(z_1,z_2)
= f_{i,j}^1(z_1,z_2) \left(\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}\right) +
f_{i,j}^2(z_1,z_2) (z_1^m z_2^n - p_1^m p_2^n).\]
\end{Lem}
\begin{proof}
We start by constructing good holomorphic candidates for $f_{i,j}^1$ and
$f_{i,j}^2$. Then we show that these functions are indeed bounded.\\
A meromorphic solution of the problem is \[z_1^i z_2^j f_{i,j}(z_1,z_2) =
z_1^i z_2^j
\frac{f_{i,j}(z_1,z_2)} {\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}}
\left(\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}\right)+ 0 \cdot
(z_1^m z_2^n - p_1^m p_2^n).\] We search for a function $h$ such that
\[f_{i,j}^1(z_1,z_2) = z_1^i z_2^j \frac{f_{i,j}(z_1,z_2)}
{\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}} + h(z_1,z_2) (z_1^m z_2^n - p_1^m
p_2^n) \quad \quad \quad \quad \text {(*)}\] and
\[f_{i,j}^2(z_1,z_2) = - h(z_1,z_2) \left(\frac{z_1^k}{z_2^l} -
\frac{p_1^k}{p_2^l}\right) \] are holomorphic. Then
\[h(z_1,z_2) = \frac{-f_{i,j}^2(z_1,z_2)}{\frac{z_1^k}{z_2^l} -
\frac{p_1^k}{p_2^l}},\] \[f_{i,j}^1 (z_1,z_2) = \frac{z_1^i z_2^j f_{i,j}
(z_1,z_2) - f_{i,j}^2(z_1,z_2) (z_1^m z_2^n -p_1^m p_2^n)}
{\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}}.\] We want $f_{i,j}^1$ to be
holomorphic.
Then it is necessary and sufficient that $f_{i,j}^2 (z_1,z_2) = \frac{z_1^i
z_2^j f_{i,j}(z_1,z_2)}{z_1^m z_2^n - p_1^m p_2^n}$ for points on the zero
set of
$\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}$. Therefore we define $f_{i,j}^2$
as
\[f_{i,j}^2 (z_1,z_2) := \frac{z_1^i z_2^j f_{i,j} (\pi(z_1,z_2))}{z_1^m z_2^n
- p_1^m p_2^n},\] and $f_{i,j}^1$ according to (*) as
\[f_{i,j}^1 (z_1,z_2) := \frac{z_1^i z_2^j \left(f_{i,j}(z_1,z_2) - f_{i,j}
(\pi(z_1,z_2))\right)}{\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}}.\]
These are holomorphic functions, and we have that \[z_1^i z_2^j f_{i,j}(z_1,
z_2) = f_{i,j}^1(z_1,z_2) (\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}) +
f_{i,j}^2(z_1,z_2) (z_1^m z_2^n - p_1^m p_2^n).\]
We proceed to show that the functions $f_{i,j}^1$ and $f_{i,j}^2$ are bounded
on $\Omega_2$.
We start with the function $f_{i,j}^2$.
We define a function $F$, similar to $f_{i,j}^2$, and show that it is bounded
on $\Omega_2$.
\[F(z_1,z_2):= (kn + lm)^2 f_{i,j}^2(z_1,z_2) = \]\[\frac{(\frac{p_1^k z_1^k}
{p_2^l z_2^l})^{\frac{in-jm}{kn+lm}}
\sum_{s,t =1}^{kn + lm} \zeta^{-is -jt} f \left(\zeta^s \left( (z_1^m z_2^n)^l
(\frac{p_1^k}{p_2^l})^n \right)^{1/(kn+lm)}, \zeta^t \left((z_1^m z_2^n)^k
(\frac{p_1^k}{p_2^l})^{-m}\right)^{1/(kn+lm)}\right)} {z_1^m z_2^n - p_1^m
p_2^n}.\] Then $(\frac{p_1^k z_1^k}{p_2^l z_2^l})^{jm - in} F^{kn + lm}$ is
equal to
\[\left(\frac {\sum_{s,t =1}^{kn + lm} \zeta^{-is -jt} f(\zeta^s \left(
(z_1^m z_2^n)^l (\frac{p_1^k}{p_2^l})^n \right)^{1/(kn+lm)}, \zeta^t
\left(( z_1^m z_2^n)^k (\frac{p_1^k} {p_2^l})^{-m}\right)^{1/(kn+lm)})}
{z_1^m z_2^n - p_1^m p_2^n}\right)^{kn +lm}.\]
We substitute $x=z_1^m z_2^n$ in the last line, and it becomes
\[\left(\frac {\sum_{s,t =1}^{kn + lm} \zeta^{-is -jt} f(\zeta^s \left(x^l
(\frac{p_1^k}{p_2^l})^n \right)^{1/(kn+lm)},\zeta^t \left( x^k (\frac{p_1^k}
{p_2^l})^{-m}\right)^{1/(kn+lm)})}{x - p_1^m p_2^n}\right)^{kn +lm}.\]
The numerator is bounded, and we have a removable singularity at $x=p_1^m
p_2^n$. Hence the
function is bounded. Since $(\frac{z_1^k}{z_2^l})^{in - jm}$ is bounded, $F$
is bounded as well. The same goes for $f_{i,j}^2$.\\
Now we turn our attention to the function $f_{i,j}^1$. Remember that $\omega_2$
was given by $\{(x,y) : y \leq \frac{-m}{n} x + r\}$. The line given by $y =
\frac{-m}{n} x + r$ corresponds to a curve in ${\mathbb{C}}^2$ given by $z_1^m z_2^n =
nr$. For $|K| \leq nr$, let $\Omega_2^K := \Omega_2 \cap [z_1^m z_2^n =K]$. We
will estimate $f_{i,j}^1$ on the sets $\Omega_2^K$. Since we have that
$z_1^i z_2^j f_{i,j}(z_1,z_2)$ is bounded (by construction) and that $z_1^i
z_2^j f_{i,j} (\pi (z_1,z_2))$ is bounded (as shown while proving that
$f_{i,j}^2$ is bounded), for every $\mu > 0$ there exists a constant $C$ such
that
\[|f_{i,j}^1(z_1,z_2)| \leq C \quad \text { on } \Omega_2 \setminus \{(z_1,z_2)
\in \Omega_2 : \left|\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l} \right| <
\mu\}.\] The construction of $\Omega_2$
implies the existence of a $\mu >0$ such that for every $(z_1,z_2) \in
\Omega_2^K$ (with $|K| \leq nr$), $\Theta \in [0, 2 \pi]$, there is a point
$(s,t) \in \Omega_2^K$ with $\frac{s^k}{t^l} - \frac{p_1^k}{p_2^l} = \mu
e^{i \Theta}$.
Since $\Omega_2^K$ can locally be seen as an open set in ${\mathbb{C}}$ (after the
appropriate biholomorphic mapping), applying the maximum principle yields that
\[|f_{i,j}^1(z_1,z_2)| \leq \max_{s,t} |f_{i,j}^1(s,t)| \leq C.\]
So $f_{i,j}^1$ is bounded as well.
\end{proof}
\begin{Prop}{\label{Prop:locsol}}
Let $f$ be a bounded holomorphic function on $\Omega_2$ that vanishes at
$(p_1,p_2)$. There exist functions $\tilde{f_1}$, $\tilde{f_2} \in H^{\infty}
(\Omega_2)$ such that \[f(z_1,z_2) = \tilde{f_1} (z_1,z_2) (z_1-p_1) +
\tilde{f_2} (z_1,z_2) (z_2-p_2).\]
\end{Prop}
\begin{proof}
Since $z_1^m z_2^n - p_1^m p_2^n$ is a polynomial that vanishes at $p$, there
are by lemma \ref{Lem:solpol} polynomials $P_1$ and $P_2$ such that
\[z_1^m z_2^n - p_1^m p_2^n = P_1(z_1,z_2) (z_1-p_1) + P_2(z_1,z_2) (z_2-p_2)
\quad \quad \forall z_1,z_2 \in {\mathbb{C}}^2.\]
Use lemma \ref{Lem:solfrac}
to obtain a similar result for $\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}$.
We substitute this and the solutions obtained for $z_1^i z_2^j f_{i,j}(z_1,z_2)
\; (0 \leq i,j \leq kn +lm -1$; note that we may assume that $f_{i,j}(p)=0$,
as remarked after lemma \ref{Lem:symmetry}). This yields that
\[f(z_1,z_2) = \sum_{i,j=0}^{kn +lm -1} z_1^i z_2^jf_{i,j}(z_1,z_2) = \]
\[\sum_{i,j=0}^{kn +lm
-1} \left( f_{i,j}^1(z_1,z_2) (\frac{z_1^k}{z_2^l} - \frac{p_1^k}{p_2^l}) +
f_{i,j}^2(z_1,z_2) (z_1^m z_2^n - p_1^m p_2^n) \right) = \]
\[\left(\sum_{i,j=0}^{kn +lm -1} f_{i,j}^1(z_1,z_2)R_1(z_1,z_2) +
f_{i,j}^2 P_1(z_1,z_2) \right)(z_1-p_1) + \]
\[\left(\sum_{i,j=0}^{kn +lm -1} f_{i,j}^1(z_1,z_2)R_2(z_1,z_2) +
f_{i,j}^2 P_2(z_1,z_2) \right) (z_2-p_2) = \]
\[\tilde{f_1} (z_1,z_2) (z_1-p_1) + \tilde{f_2} (z_1,z_2) (z_2-p_2) .\]
The functions $\tilde{f_1}$ and $\tilde{f_2}$ are in $H^{\infty}(\Omega_2)$.
\end{proof}
\section{Main result}
\begin{Thm}{\label{Thm:adomsol}}
Let $\Omega$ be an $A$-domain. Then for every $f \in H^{\infty}(\Omega)$ that
vanishes at $p=(p_1, p_2) \in \Omega$
there exist functions $f_1$, $f_2 \in H^{\infty}(\Omega)$ such that
\[f(z_1,z_2) = f_1(z_1,z_2) (z_1-p_1) + f_2(z_1,z_2) (z_2-p_2) \quad \forall z
\in \Omega.\]
Thus one can solve the Gleason problem for $H^{\infty}(\Omega)$.
\end{Thm}
\begin{proof}
Let $\Omega_1$, $\Omega_2$ be as in section \ref{section:pieces}. As noted
there, one can find such $f_1$, $f_2$ if $p \in \Omega_1$. So suppose $p \in
\Omega_2$. We make the local solutions on $\Omega_1$ and $\Omega_2$, using
proposition \ref{Prop:locsol}. The $\overline{\partial}$-problem corresponding to
the patching of the two local solutions yields a bounded $(0,1)$-form that
has support outside $B(0, \epsilon)$. Theorem \ref{Thm:aholder} yields a
bounded solution to this particular $\overline{\partial}$-problem. Now
proceed in the standard way (e.g. lemma \ref{Lem:holsupbdd}) to obtain the
appropriate $f_1$ and $f_2 \in H^{\infty}(\Omega)$.
\end{proof}
\section{The Hartogs triangle and related domains}{\label{S:Hartogsfriends}}
\noindent
For $k$, $l \in {\mathbb{N}}^+$ let $\Omega_{k,l}$ be the domain
defined by \[\Omega_{k,l} := \{(z_1, z_2) \in {\mathbb{C}}^2 :
|z_1|^k < |z_2|^l < 1\}.\] The
Hartogs triangle is exactly $\Omega_{1,1}$.
The situation becomes slightly more complicated compared to the previous
sections, since $\Omega_{k,l}$ contains
points of the form $(0,a)$. Thus the functions $f_{i,j}$ as constructed in
lemma \ref{Lem:symmetry} may no longer be holomorphic. We will show that one
can still solve the Gleason problem for $H^{\infty}(\Omega_{k,l})$.\\
If $p_1 \neq 0$, we return to the construction in section
\ref{section:constrloc}; the domain is now cut off with $|z_2|=1$. We still
project a point of $z_1^m z_2^n = c$ onto the zero set of $\frac{z_1^k}{z_2^l}
- \frac{p_1^k}{p_2^l}$, but now $m=0$, $n=1$, thus $z_1^m z_2^n$ is simply
$z_2$. Now repeat the proof in section \ref{section:constrloc} to see that
there exist $f_1$, $f_2 \in H^{\infty}(\Omega_{k,l})$
with $f(z) = f_1(z)(z_1 - p_1) + f_2(z)(z_2 - p_2)$.
There are only two things to point out :
\begin{itemize}
\item The functions $f_{i,j}$ may no longer be holomorphic (in their
definitions we
divide by $z_1^i$), but $z_1^i z_2^j f_{i,j}$ is still bounded and holomorphic.
\item The expression $\left(\frac{p_1^kz_1^k}{p_2^l z_2^l}\right)^
{\frac{in -jm}{kn + lm}}$ becomes $\left(\frac{p_1^kz_1^k}{p_2^l z_2^l}\right)^
{i/k}$. Thus we only need that $\frac{z_1^k}{z_2^l}$ is bounded, and not that
$\frac{z_2^l}{z_1^k}$ is bounded.
\end{itemize}
Now we consider the case that $p_1 = 0$. It is tempting to repeat the previous
argument, but this is impossible. Namely, in the remark after lemma
\ref{Lem:symmetry}, we assume that $f_{i,j}$ vanishes at $p$. Unfortunately,
$f_{i,j}$ is not defined at $p$. There is another construction however.
\begin{Lem}
Let $f \in H^{\infty}(\Omega_{k,l})$ such that $f$
vanishes at $(0,p_2)$. Let
\[f_1(z_1,z_2) := \frac{z_2^l}{p_2^l} \frac{f(z_1,z_2) - f(0,z_2)}{z_1},\]
\[f_2(z_1,z_2) := \frac{p_2^l - z_2^l}{p_2^l(z_2-p_2)} (f(0,z_2) - f(z_1,z_2))
+ \frac{f(0,z_2)}{z_2-p_2}.\]
Then $f_1$, $f_2 \in H^{\infty}(\Omega_{k,l})$ and
\[f(z_1,z_2) = f_1(z_1,z_2)z_1 + f_2(z_1,z_2) (z_2-p_2) \quad \forall \;
(z_1,z_2) \in \Omega_{k,l}.\]
\end{Lem}
\begin{proof}
We see immediately that $f_1$ and $f_2$ are holomorphic, that $f_2$ is bounded
and that the last equality holds. We rewrite $f_1$ :
\[f_1 (z_1,z_2) = \frac{z_1^{k - 1}}{p_2^l} \frac{z_2^l}{z_1^{k}}
(f(z_1,z_2) - f(0,z_2)).\]
For every $c \in {\mathbb{C}}$ with $|c| \leq 1$
the set $\Omega_{k,l} \cap [z_2=c]$ (a disc) contains a circle with radius
$|\frac{c}{2}|^{l/k}$. On this circle, we have that
$|\frac{z_2^l} {z_1^{k}}| = 2^l$. Applying the maximum principle
yields that
\[|f_1(z_1,c)| \leq \frac{2^{l+1} ||f||_{\infty}}{p_2^l} \quad \forall \;
|z_1| \leq |\frac{c}{2}|^{l/k}.\]
It follows that $f_1$ is bounded on $\Omega_{k,l}$.
\end{proof}
\noindent Thus we have the following theorem :
\begin{Thm}{\label{Thm:hartogs}}
For $k$, $l \in {\mathbb{N}}^+$ let $\Omega_{k,l}$ be the domain defined by
\[\Omega_{k,l} := \{(z_1, z_2) \in {\mathbb{C}}^2 : |z_1|^k < |z_2|^l < 1\}.\]
One can solve the Gleason problem for $H^{\infty}(\Omega_{k,l})$.
\end{Thm}
\section{If the domain meets one of the coordinate axes}
\noindent In this section, we study domains that are connected both to the
$A$-domains and the domains $\Omega_{k,l}$. Namely, let $\Omega \subset {\mathbb{C}}^2$
be a bounded pseudoconvex
Reinhardt domain, such that for $c$ close to $- \infty$, $\partial \omega \cap
[y<c]$ consists of 2 arcs, one of them being a half line with rational slope.
We assume that $0
\notin \Omega$, and that $\Omega$ meets the $z_2$-axis. (Because of symmetry,
everything will also hold if $\Omega$ instead meets only the $z_1$-axis.)\\
Let $K_1$, $K_2$ be constants such that $\partial \omega$ is a half line for
$[y < K_1]$, and $\omega$ is rounded off strictly convexly above $[y < K_1]$,
such that $\{(x,y) \in \partial \omega : y > K_1\}$ has $y=K_2$ as horizontal
asymptote. We fix $p = (p_1, p_2) \in \Omega$, and an $f \in H^{\infty}
(\Omega)$ that vanishes at $p$. We will now solve the Gleason problem for $f$
at $p$. There is a strictly convex point $A=(a_1,a_2) \in \partial
\omega$ with $\log |p_2| < a_2$. This point has a neighborhood in $\partial
\omega$ consisting only of strictly convex points. Take a point $B=(b_1,b_2)$
in this neighborhood with $\log |p_2| < b_2 < a_2$. Then $\frac{f(z_1,z_2)}
{z_2 - p_2}$ is bounded on $\Omega \cap [|z_2| > \exp(b_2)]$, and on this set we
have that $f(z_1,z_2) = \frac{f(z_1,z_2)}{z_2 - p_2} (z_2-p_2)$. The boundary
of $\omega \cap [y < a_2]$ is a straight line for $y$ small. Thus one can
solve
the Gleason problem for $H^{\infty}(\Omega \cap [|z_2|<\exp(a_2)])$, just as
in section \ref{S:Hartogsfriends}.
One can patch the two local solutions together to a global solution using the
standard techniques, since $\partial \Omega \cap [|z_2| > \exp(b_2)] \cap
[|z_2|<\exp(a_2)] \subset S(\Omega)$.
\noindent
The case where a part of $\partial \omega$ is described by $[y=c]$ can be dealt
with in a similar way. This yields the following theorem :
\begin{Thm}
Let $\Omega \subset {\mathbb{C}}^2$ be a bounded pseudoconvex Reinhardt domain, that
meets exactly one of the axes. Suppose that one part of $\partial \omega$ is
a half line, and that the other boundary points of $\partial \omega$ are
strictly convex and $C^5$. Then one can solve the Gleason problem for
$H^{\infty}(\Omega)$.
\end{Thm}
\section{Final remarks}
\noindent The results in this article all rely on theorem \ref{Thm:aholder}.
As noted before, one can
prove this theorem for Reinhardt domains that for small $z$ look like
\[\{(z_1,z_2) : a < \left|\frac{z_1^k}{z_2^l}\right| < b\},\]
and are rounded off pseudoconvexily. Thus one can still solve the Gleason
problem if there are ``enough'' strictly pseudoconvex points in the sense of
\cite{lewi}. The condition that the strictly pseudoconvex points
have to be $C^5$ can, as usual, be relaxed to $C^2$, but this would even need
more machinery.
\vskip5mm
\noindent Now let $\Omega$ be a bounded pseudoconvex Reinhardt domain in
${\mathbb{C}}^2$ that has a strictly pseudoconvex $C^5$ boundary outside a ball around
the origin. If for $c$ close to $- \infty$, $\partial \omega \cap [y<c]$
consists of 2 arcs that have parallel asymptotes with rational slope, theorem
\ref{Thm:adomsol} holds for $\Omega$ as well. This is because we are either in
the situation described in the previous remark, or every point in $\omega$
lies in a triangle of strictly convex points of $\omega$, and one can apply
lemma \ref{Lem:driehoek}.
\vskip5mm \noindent
We do not yet know how to solve the Gleason problem for $H^{\infty}(\Omega)$
if $\Omega$ is a Reinhardt domain that for small $z$ looks like
\[\{(z_1,z_2) : a < \left|\frac{z_1^{\alpha}}{z_2}\right| < b\},\]
where $\alpha \notin {\mathbb{Q}}$, or (with $r \not= l$)
\[\{(z_1,z_2) : a |z_2^l| < |z_1^k| < b |z_2^r|\}.\]
The first problem is hard because $z_1^{\alpha}$ is not a holomorphic function;
the second problem is hard because the function $|z_1^k z_2^{-l}|^{\frac{in
-jm}{kn +lm}}$ (that appeared in the proof of lemma \ref{Lem:deelbdd}) is
no longer bounded.
\vskip5mm \noindent
Oscar Lemmers
\vskip10mm \noindent
Jan Wiegerinck\\
Department of mathematics\\
University of Amsterdam\\
Plantage Muidergracht 24\\
1018 TV Amsterdam\\
The Netherlands
\end{document}
|
\begin{document}
\hyphenation{act-ually angle local-iz-ation local-ized micro-wave momen-tum numer-ical numer-ically para-meter para-meters}
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{proposition}[theorem]{Proposition}
\theoremstyle{definition}
\newtheorem{definition}{Definition}
\theoremstyle{remark}
\newtheorem*{remark}{Remark}
\newtheorem{example}{Example}
\title{Quantum Localization for Two Coupled Kicked Rotors}
\author{Borzumehr Toloui}
\email{[email protected]}
\affiliation{Institute for Quantum Information Science, University of Calgary, Calgary, Alberta, Canada T2N 1N4.\\
Physics Department, Simon Fraser University, Burnaby, British Columbia, Canada V5A 1S6.}
\author{Leslie E. Ballentine}
\email{[email protected]}
\affiliation{Physics Department, Simon Fraser University, Burnaby, British Columbia, Canada V5A 1S6.}
\date{\today}
\pacs{05.45.Mt, 05.45.Pq, 72.15.Rn, 05.45.Ra}
\begin{abstract}
We study a system of two coupled kicked rotors, both classically and quantum mechanically,
for a wide range of coupling parameters. This was motivated by two published reports, one
of which reported quantum localization, while the other reported diffusion.
The classical systems are chaotic, and exhibit normal diffusive behavior.
In the quantum systems, we found different regimes, depending on the strength of the coupling.
For weak coupling, we found quantum localization similar to that exhibited by single kicked rotors.
For strong coupling, we found a quasi-diffusive growth of the width of the momentum distribution,
in which the apparent diffusion coefficient decreased as time increased.
The behavior in this regime can be described by the scaling theory of weak localization for
two-dimensional disordered lattices.
The weak and strong coupling regimes are separated by a regime of complex intermediate behavior.
Thus we explain the apparent contradiction in the literature as being due to qualitatively
different regimes of behavior, which we call strong and weak quantum localization.
\end{abstract}
\maketitle
\section{Introduction}
Kicked rotor models~\cite{kr-gen1,kr-gen2,kr-gen3} have played a prominent role in the study of classically chaotic systems and their corresponding quantum behavior.
Although quite simple, these models exhibit most of the distinctive features
of classical and quantum chaos.
The particular phenomenon that is the subject of this paper is {\it quantum localization},
which is a localization in angular momentum space that is in sharp contrast to the
diffusive motion typical of classical chaos.
The understanding of quantum localization is considerably facilitated by the existence
of a mapping from the rotor model to a model of a particle moving in a disordered lattice~\cite{Fishman},
which exhibits the phenomenon of Anderson localization~\cite{Anderson}.
Single rotor systems have been thoroughly studied and are well understood~\cite{Fishman}.
However, the situation is less clear for systems of two coupled kicked rotors.
Indeed, there are examples in the literature whose results appear to
be contradictory.
Doron and Fishman~\cite{Doron} have studied one such model, and
they found quantum localization of the Anderson type, scaling exponentially with the
coupling strength.
On the other hand, Adachi {\it et~ al.}~\cite{Ikeda1} have studied a model with
different coupling potentials, and have obtained results that exhibit diffusive growth in the
width of the state.
They concluded that the coupling between the two rotors can restore the mixing \cite{Ikeda2}
that would otherwise be suppressed by quantum localization.
There are several possible explanations for this apparent contradiction.
The two papers are based on models that differ in the form of the kicking and coupling potentials.
Although it is logically possible for the two models to behave qualitatively differently,
this would be very surprising, since the forms of the potentials are similar enough that
we should expect them to belong to the same generic class.
The ranges of interaction parameter strength used in the two papers are quite different.
Indeed, the lattice emerging from the choice of parameters in~\cite{Ikeda1} turns out to
be periodic, which would give rise to extended Bloch states. So we need to study the
two models in equivalent ranges of parameters.
The two papers also used different criteria for identifying localization.
Doron and Fishman~\cite{Doron} look for an exponential fall-off in the angular momentum
distribution, while Adachi {\it et~ al.}~\cite{Ikeda1} look for a saturation in time of
the width (standard deviation) of the distribution.
So we need to apply both criteria to both models to determine whether they agree.
A further complication arises because two-dimensional disordered lattices contain
the marginal case of weakly localized states, which creates more interesting possibilities.
In this paper we investigate these questions.
To do so, we consider a more general Hamiltonian that includes the models of~\cite{Doron} and~\cite{Ikeda1} as special cases.
We study the system numerically for a suitable range of parameters,
and examine the shape and fall-off of the angular momentum distribution,
together with its standard deviation as a measure of the state width.
In Section II we discuss the Hamiltonian, the initial state, and the method of numerical
calculation. In Section III the classical behavior of the system is studied, and is shown
to lie within the chaotic region of phase space for the chosen range of parameters.
Section IV presents the numerical results for the quantum system.
In Section V we examine whether the results are consistent with the scaling properties of
weak localization.
Section VI discusses the conclusions.
\section{Model}
The general Hamiltonian is chosen to be
\begin{align}
&H=T(p_{1}, p_{2}) + V(\theta_{1}, \theta_{2}) \sum_{n=-\infty}^{+\infty} \delta(t-n)& \nonumber \\
&T(p_{1}, p_{2})=\frac{1}{2} \;\alpha_{1} \;p_{1}^{2}+\frac{1}{2} \;\alpha_{2} \;p_{2}^{2}&\nonumber
\end{align}
\begin{align}
V\left(\theta_{1}, \theta_{2}\right)=[\lambda_{1} \cos \theta_{1} +\lambda_{2} \cos \theta_{2}+ \lambda_{3} \cos \theta_{1}\;\cos \theta_{2} \nonumber \\ + \lambda_{4} \cos ( \theta_{1}-\theta_{2} )]
\end{align}
The dimensionless parameters are related to the physical quantities as follows:
\begin{equation}
\label{alphas}
\alpha_{1} = \frac{\hbar \tau}{I_{1}}, \:\:\:\alpha_{2} = \frac{\hbar \tau}{I_{2}}
\end{equation}
Here $\tau$ is the time span between kicks, and $I_{1}$, $I_{2}$ are the moments of inertia of
the first and second rotors, respectively.
The parameters $\lambda_{1}$ and $\lambda_{2}$ are the single-rotor kicking strengths,
while $\lambda_{3}$ and $\lambda_{4}$ are the couplings between the two rotors.
The choice $\lambda_{1} = \lambda_{2}=\lambda_{4}=0.0$ and $\alpha_{1}=1,
\alpha_{2}=\sqrt{2},\, \hbar=1$ will yield the Hamiltonian studied in~\cite{Doron}.
The main systems analyzed in~\cite{Ikeda1} correspond to
$\lambda_{3}=0.0$, $\alpha_{1}=\alpha_{2}=\frac{41}{512}\times 2\pi$, and $\alpha_{1}=\alpha_{2}=\frac{41}{1024}\times 2\pi$.
Since these values of $\alpha$ are commensurate with $\pi$, they yield periodic lattices,
giving rise to extended Bloch-like states and excluding the possibility of Anderson localization.
Therefore, we have used a different value in our numerical studies.
The quantum system is obtained by replacing the momentum variables with operators
$\hat{p}_{1}= -\imath \partial /\partial \theta_{1}$ and $\hat{p}_{2}= -\imath \partial /\partial \theta_{2}$.
The time evolution for one period of the kicking Hamiltonian is given by the Floquet operator
\begin{eqnarray}
\hat{U}= e^{-\imath V(\theta_{1}, \theta_{2})} \; e^{-\imath T(\hat{p}_{1}, \hat{p}_{2})}
\label{U}
\end{eqnarray}
The numerical calculation is performed in two stages. Starting in the momentum representation,
the propagator $e^{-\imath T(\hat{p}_{1}, \hat{p}_{2})}$, which is diagonal in this basis,
is applied for the duration between two kicks.
Then the kicking propagator $e^{-\imath V(\theta_{1}, \theta_{2})}$ is applied in the
angle representation. The transformation between the angle and angular momentum representations
is achieved by Fast Fourier Transform (FFT) and its inverse.
The same process is then repeated for the next period, and so on.
The initial state is chosen to have the form
\begin{align}
\Psi(\theta_{1}, \theta_{2}) = \psi(\theta_{1}) \; \psi(\theta_{2}) \nonumber \\
\psi(\theta) = \sum_{m} \; a_{m} \; e^{\imath m (\theta-\theta_{0})} \nonumber \\
a_{m} = e^{- [\frac{m-m_{0}}{2 \Delta m}]^{2}}
\end{align}
This is a product of Gaussian wave packets, centered at momentum $m_{0}$
and angle $\theta_{0}$, with momentum width $ \Delta m = 1.25786$.
Hence we have $\hbar \Delta m \, \Delta \theta = \hbar/2$.
A summary of the parameter values are given in Table \ref{Tablez}.
\begin{table}
\caption{Parameters of the initial state}
\label{Tablez}
\begin{center}
\begin{tabular}{|c|c|c|c|}
\hline
Rotor no. & $m_{0}$ & $\Delta m$ & $\theta_{0}$\\
\hline
\hline
1 & 0 & 1.25786 & 0\\
\hline
2 & 0 & 1.25786 & 0\\
\hline
\end{tabular}
\end{center}
\end{table}
The momentum values form a discrete grid.
The angle values are also set on a grid of the same size, since the Fast Fourier Transforms are
instances of the discrete Fourier transform algorithm.
The grid size is chosen to be $2^{11}=2048$ grid points, ranging from $m= -1023$ to $1024$ of
dimensionless angular momentum values. The numerical calculation is performed for 30,000 kicking steps.
The coupling parameters $\lambda_{3}$ and $\lambda_{4}$ range from $0.0$ to $3.0$
in steps of $0.5$, for each value of $\lambda_{1}$ and $\lambda_{2}$ in the set
$\left\{0.0, 0.25, 0.5\right\}$.
The simulations are done for the values $\alpha_{1}=\alpha_{2}=1.0$, which are not commensurate
with $\pi$, thus ensuring that the corresponding Anderson lattice is non-periodic.
\section{Classical Diffusion}
In order to analyze the quantum dynamics, it is necessary to determine whether the
classical counterpart lies in the chaotic regions of phase space.
Only when it is verified that the corresponding classical evolution is chaotic, can the
localization in the quantum system be positively attributed to quantum effects.
It is also interesting to compare the behavior of the quantum system to its classical
counterpart.
The classical mapping of each point $(\theta^{[n]}_{1}, \theta^{[n]}_{2}, p^{[n]}_{1}, p^{[n]}_{2})$ of the phase space from its values at kick $n$ to kick $n+1$ is:
\begin{widetext}
\begin{align}
\begin{cases}
p^{[n+1]}_{1} = p^{[n]}_{1} + \lambda_{1} \, \sin \theta^{[n]}_{1} + \lambda_{3} \, \sin \theta^{[n]}_{1} \cos \theta^{[n]}_{2} +\,\lambda_{4} \, \sin ( \theta^{[n]}_{1}-\theta^{[n]}_{2}) \\
p^{[n+1]}_{2} =p^{[n]}_{2} + \lambda_{2} \, \sin \theta^{[n]}_{2} +\lambda_{3} \, \cos \theta^{[n]}_{1} \sin \theta^{[n]}_{2} -\,\lambda_{4} \, \sin ( \theta^{[n]}_{1}-\theta^{[n]}_{2}) \\
\theta^{[n+1]}_{1} =\theta^{[n]}_{1}+p^{[n+1]}_{1} \\
\theta^{[n+1]}_{2} =\theta^{[n]}_{2}+p^{[n+1]}_{2}
\end{cases}
\label{clasmap}
\end{align}
\end{widetext}
For the classical calculations, an ensemble of 1,000,000 initial states was created, with the same
Gaussian distribution in phase space as the initial quantum state.
Each member of the ensemble was evolved separately using the mapping (\ref{clasmap}).
The results of the calculation show similar chaotic behavior for all of our choices of parameters,
with the exception of $\lambda_1 = \lambda_2 = \lambda_3 = 0$ and $\lambda_4 > 0$, for which
the total angular momentum is a constant of motion.
The variance of the angular-momentum distribution for each rotor exhibited
diffusive growth with time,
\begin{align}
\label{cl-dif}
\begin{cases}
\left<p_{1}^{2}\right>-\left<p_{1}\right>^{2}= D_{1}\; t\\
\left< p_{2}^{2}\right>-\left<p_{2}\right>^{2}= D_{2} \; t
\end{cases}
\end{align}
where the average is over the classical ensemble.
This is illustrated in Fig.\ref{fig:1}.
From this we conclude that the chosen parameters do lie within the classically chaotic region
of parameter space.
\begin{figure}
\caption{(Color online) Width of the angular momentum distribution for a classical rotor [defined similar to (\ref{width}
\label{fig:1}
\end{figure}
For our chosen initial states, we have $\left<p\right> = 0$ in all cases, so
we can replace the variance with $\left<p^{2}\right>$.
The dependence of the diffusion coefficients, $D_i$, on the kicking and coupling parameters
can be analyzed in the lowest order (so-called {\it quasi-linear}) approximation
by assuming a uniform probability
distribution for the angle after a sufficiently large number of kicking steps $n$,
and integrating $(p^{[n+1]}-p^{[n]})^{2}$ over this distribution for each of the
two rotors~\cite{LL}:
\begin{align}
\begin{cases}
D^{0}_{1} =\frac{1}{(2\pi)^2} \int_{0}^{2\pi}\!\!\int_{0}^{2\pi}{ \left(p^{[n+1]}_{1}-p^{[n]}_{1}\right)^{2} \; d\theta^{[n]}_{1} d\theta^{[n]}_{2}} \\ \\
D^{0}_{2} =\frac{1}{(2\pi)^2} \int_{0}^{2\pi}\!\!\int_{0}^{2\pi} \left(p^{[n+1]}_{2}-p^{[n]}_{2}\right)^{2} \; d\theta^{[n]}_{1} d\theta^{[n]}_{2} \\
\end{cases}
\end{align}
This yields
\begin{align}
\begin{cases}
D^{0}_{1} = \frac{(\lambda_{3}+\lambda_{4})^2}{4} + \frac{\lambda_{4}^{2}}{4} +\frac{\lambda_{1}^{2}}{2}\\ \\
D^{0}_{2} = \frac{(\lambda_{3}+\lambda_{4})^2}{4} + \frac{\lambda_{4}^{2}}{4} +\frac{\lambda_{2}^{2}}{2}
\end{cases}
\end{align}
A comparison between this approximation and the numerically computed diffusion constant is shown
in Fig.\ref{fig:2}.
\begin{figure}
\caption{Ratio of the classical diffusion coef. to the quasi-linear approx.,
$D_{1}
\label{fig:2}
\end{figure}
The computed diffusion coefficients oscillate above and below the approximate value.
This is similar to a known result for the standard map, for which
one has $D^{0} = \frac{K^{2}}{4}$, where $K$ is the kicking strength in the standard map. This result was also verified by a more systematic method similar to that used in~\cite{LL}.
\section{Results for the Quantum Rotors}
The evolution of the quantum system yields a momentum-space wave function, $\Psi(p_{1},p_{2})$,
as a function of time, for each set of the Hamiltonian parameters.
From it, we obtain the two-rotor momentum probability distribution, $|\Psi(p_{1},p_{2})|^2$,
and calculate the widths (standard deviation) of the single-rotor momentum distributions,
\begin{equation}
\ S = \sqrt{\left < \left(\hat{p}-\left<\hat{p}\right>\right)^{2} \right>}
\label{width}
\end{equation}
as a function of time (measured in kick numbers).
These are plotted as a function of the square root of time, rather than time,
in Figures \ref{fig:3}, \ref{fig:4}, and \ref{fig:5} because this choice of variable
is effective in showing both short and long time behavior on the same scale.
This plot would yield a straight line for normal diffusion.
\begin{figure}
\caption{(Color online) Localized regime. \\
{\bf a}
\label{fig:3}
\end{figure}
\begin{figure}
\caption{(Color online) Intermediate regime. {\bf a}
\label{fig:4}
\end{figure}
\begin{figure}
\caption{(Color online) Quasi-diffusive regime.\ {\bf a}
\label{fig:5}
\end{figure}
We observed three kinds of behavior of the momentum width as a function of time.
\begin{description}
\item[Localized:] The width of the momentum distribution saturates in time,
as illustrated in Fig.\ref{fig:3}.
\item[Quasi-diffusive:] The width of the momentum distribution increases with time,
but the growth rate diminishes, as illustrated in Fig.\ref{fig:5}.
\item[Intermediate:] The width of the momentum distribution may exhibit a complex
oscillatory behavior, as illustrated in Fig.\ref{fig:4}.
\end{description}
Tables \ref{tableX1} and \ref{tableX2} show the occurrence of the three behaviors
as a function of the coupling strengths $\lambda_3$ and $\lambda_4$, for fixed values of the
kicking strengths $\lambda_1$ and $\lambda_2$.
For vanishing kicking strengths, $\lambda_{1}=\lambda_{2}=0$, all states with non-zero coupling strengths appeared to be localized.
\begin{table}[h]
\caption{Behavior of the angular momentum distribution width for fixed kicking parameters $\lambda_{1}=\lambda_{2}=0.50$. {\bf D}: Quasi-diffusive regime. {\bf I}: Intermediate regime. {\bf L}: Localized regime. }
\label{tableX1}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &0.0 &0.5 &1.0 &1.5 &2.0 &2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&&&\\
0.0 &L &L &L &L &L &L &L\\
\hline
0.5 &L &L & L&L &L &I &I\\
\hline
1.0 &L &L &L &L &I &I &I\\
\hline
1.5 &L &L &L &I &I &I &D\\
\hline
2.0 &L &L &I &I &I &D &D\\
\hline
2.5 &L &L &I &I &D &D &D\\
\hline
3.0 &L &I &I &D &D &D &D\\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[h]
\caption{Behavior of the angular momentum distribution width for fixed kicking parameters $\lambda_{1}=\lambda_{2}=0.25$. {\bf D}: Quasi-diffusive regime. {\bf I}: Intermediate regime. {\bf L}: Localized regime. }
\label{tableX2}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &0.0 &0.5 &1.0 &1.5 &2.0 &2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&&&\\
0.0 &L &L &L &L &L &L &L\\
\hline
0.5 &L &L &L &L &L &L &L\\
\hline
1.0 &L &L &L &L &L &L &L\\
\hline
1.5 &L &L &L &L &L &L &L\\
\hline
2.0 &L &L &L &L &I &I &I\\
\hline
2.5 &L &L &L &I &I &I &D\\
\hline
3.0 &L &L &L &I &I &D &D\\
\hline
\end{tabular}
\end{center}
\end{table}
The quasi-diffusive behavior occurs for the largest values of the couplings
between the rotors.
Figure \ref{fig:6} shows that at the earliest times, the quantum rotor behaves diffusively,
as does the classical rotor, but at later times the quantum diffusion rate decreases.
It is not clear whether at very long times the quantum rotor will continue to behave diffusively
but with a reduced diffusion coefficient, or whether it will eventually saturate at some large
localization scale. We shall return to this question in the next section.
\begin{figure}
\caption{(Color online) Width of angular momentum distribution at early times. \ {\bf a}
\label{fig:6}
\end{figure}
The two localization criteria -- exponential fall-off of the angular-momentum distribution,
and saturation of the momentum-distribution widths -- are consistent with each other.
For states that are classified as ``Localized" in the tables, we have verified that after
sufficient time has elapsed, the momentum distribution falls off exponentially with a rate
that is independent of time.
For the ``Quasi-diffusive" states, the momentum distribution continues to spread.
This is illustrated in Figures \ref{fig:7} and \ref{fig:8}, respectively, where we plot the
angular momentum distribution for $p_1$ along the section $p_2=0$.
\begin{figure}
\caption{(Color online) Localized regime: angular momentum distribution for rotor \#1 along the section $p_2=0$.
$\lambda_{1}
\label{fig:7}
\end{figure}
\begin{figure}
\caption{(Color online) Quasi-diffusive regime: angular momentum distribution for rotor \#1 along the section $p_2=0$.
$\lambda_{1}
\label{fig:8}
\end{figure}
\section{Scaling Theory and the Quasi-diffusive Regime}
In the Quasi-diffusive regime, the computations do not clearly determine
whether the motion will remain diffusive at very long times, or whether it will become
localized at some scale that is greater than the grid size of the computation.
In this section we apply the scaling theory of Anderson localization (see~\cite{Lee} for a review)
to answer this question.
In its usual context, the phenomenon of Anderson localization refers to a charged particle
moving in a disordered medium.
Scaling theory considers the behavior of the conductance $g$ (not the conductivity)
as a function of the system size $L$.
The mean-free-path $l$ may be defined as the distance beyond which the phase of the wave
function is essentially randomized by the scattering.
No phase coherence exists between regions separated by more than $l$, and scaling
behavior is possible only for $L$ greater than $l$.
The conductance $g_0 = g(l)$ at this cutoff length acts as a measure of the disorder
in the system.
It is useful to distinguish the regimes of strong and weak disorder.
Strong disorder gives rise to exponentially localized states, and to a non-ohmic conductance
that scales as $g(L) \propto \exp(-L/\zeta)$, where $\zeta$ is a localization length that
is generally greater than $l$, but may approach $l$ in extreme cases.
In $d=1$ dimension all states exhibit this {\it strong localization}, while in $d=3$ dimensions
this strong localization occurs only for sufficiently strong disorder.
The case of $d=2$ dimensions may exhibit strong localization, but it can also exhibit
a
{\it marginal localization} for weak disorder due to the coupling of the two rotors, for which $g(L)$ differs from ohmic conduction by a
weakly varying correction proportional to $\log(L)$.
Weak disorder usually leads to ohmic conductance, however $d=2$ is an exceptional case.
Abrahams \emph{et al.}~\cite{Abrahams} have argued that the
logarithmic derivative of the conductance $g$ with respect to
sample size $L$ is a function $\beta(g)$ of the conductance alone,
\begin{align}
\frac{d \log g}{d \log L}=\beta(g)
\end{align}
The form of $\beta(g)$ for ohmic conduction in $d$ dimensions is
\begin{align}
\beta(g) = (d-2)
\end{align}
For strong localization, it takes the form
\begin{align}
\beta(g) = \log \left(\frac{g}{g_{0}}\right)
\end{align}
There exists a critical value for $g_{0}$ at which the large scale behavior changes from
conducting to localized.
For weak disorder, it is possible to use perturbation theory to calculate the corrections
to ohmic conduction, which yields
\begin{align}
\label{beta-gen}
\beta(g) = (d-2) - \frac{a}{g}
\end{align}
where $a$ is some constant for the system.
It is clear that $d=2$ is a special case.
We can now increase the size of the system $L$ ($L>l$).
From (\ref{beta-gen}) for $d=2$, we have
\begin{align}
\frac{d \log g}{d \log L} = -\frac{a}{g}
\label{betaeq}
\end{align}
Integrating this with respect to $L$ from the lower bound $l$ yields
\begin{align}
g(L) = g_{0} - a\, \log \left [ \frac{L}{l}\right]
\label{glg0}
\end{align}
The localization length, $\zeta$, is defined as the length scale $L$ for which
$g(L)=0$, that is
\begin{align}
\zeta = l \, \exp \left[ \frac{g_{0}}{a}\right]
\label{zeta1}
\end{align}
In transport theory for weak scattering~\cite{Lee}, we have
\begin{align}
g_{0}\propto l
\label{gproptol}
\end{align}
so we can rewrite (\ref{zeta1}) as
\begin{align}
\zeta=l \, e^{b \, l}
\label{zeta2}
\end{align}
where $b$ is a constant for the system.
Recall that the kicked rotor system can be mapped onto an Anderson-type lattice~\cite{Fishman}.
If the phase of the rotor's wave function is randomized between two kicks,
then the average displacement in momentum after the first kick
can be regarded as the counterpart of the mean-free-path in the Anderson lattice.
This should be true for the large values of the rotor coupling parameters,
for which we obtained quasi-diffusive behavior.
The average displacement in momentum after the first kick is
\begin{align}
l^{2} = \sum_{r_{1}, r_{2} = - \infty}^{\infty} (r_{1}^{2}+ r_{2}^{2}) \left| U_{{\it 0},\b{r}} \right|^{2}
\label{defl}
\end{align}
where
\begin{eqnarray}
U_{0,\b{r}}:=\left<0,0\right| \hat{U} \left|r_{1},r_{2} \right>, \;\;\;\;\b{r} = (r_{1},r_{2})
\end{eqnarray}
and $\hat{U}$ is the Floquet operator defined in (\ref{U}).
The matrix elements are in the angular-momentum basis.
If, in the quasi-diffusive regime, the state is actually localized on some
large scale, then we may expect the scaling theory of Anderson localization to apply.
The initial state for our numerical calculations is a Gaussian, effectively localized in a
finite portion of the infinite angular-momentum lattice.
The support of the wave function grows with time.
At any finite time, the effect of lattice sites far outside the wave function's support will be
negligible, and it would not make any difference to the dynamics if the wave function
were instead located on a finite lattice of a size not less than the width of the state.
So it is reasonable to regard the state width $S$ (or some multiple of it) as being the
analog of the sample size $L$ in the Anderson lattice.
It is also reasonable to assume that the diffusion in angular-momentum space of the rotor will
contain the same kind of information as does the diffusion of charged particles on the
Anderson lattice, and that they will scale similarly with $S$ or $L$, respectively.
According to the Einstein relation~\cite{eins}, the diffusion coefficient is proportional to
the mobility, and hence to the conductivity.
But in $d=2$ dimensions, the conductance $g$ scales the same
way with size as does the conductivity.
Thus the diffusion coefficient $D$ of the rotor should scale the same as does the conductance
$g$ of the lattice.
The measure of the angular-momentum state width of the rotor at time $t$ is taken to be
$\ S = \sqrt{\left < \left(\hat{p}-\left<\hat{p}\right>\right)^{2} \right>}$.
By analogy with the classical equation (\ref{cl-dif}), we define the
diffusion coefficient $D$ of the kicked rotor to be the slope of the line
relating $S^2$ to $t$.
If the relationship is not a straight line, as is the case for Figs.~\ref{fig:5} and \ref{fig:6}
(which, however, show $S$ vs.\ $\sqrt{t}$),
then we can define a time-dependent diffusion coefficient as the local slope of the curve
of $S^2(t)$ vs $t$.
In the spirit of the scaling hypothesis, we assume that the time-dependent diffusion
coefficient $D$ at any time $t$ is a function only of $S$ at that time,
that is to say, $D = D(S(t))$.
In analogy with (\ref{betaeq}), we write
\begin{equation}
\frac{d \log D}{d \log S}=-\frac{a}{D}
\end{equation}
with $a$ being some unknown constant for the system.
In the kicked-rotor model, all quantities change only at the discrete time steps $t_{n}$.
The state width at time $t_{n}$ is $S_{n}$.
From the definition of $D_{n}=D(t_{n})$ as the slope of the curve of $S^2$ vs $t$,
we have
\begin{equation}
\label{s2dif1}
S_{n+1}^{2}-S_{n}^{2} = D_{n} \Delta T
\end{equation}
where $\Delta T = t_{n+1}-t_{n}$.
According to the scaling assumption, we should have a relation of the form
\begin{equation}
D_{n} = c \,l - a\log \left[ \frac{S_{n}}{l}\right]
\end{equation}
by analogy with (\ref{glg0}), with two as yet undetermined parameters $c$ and $a$.
Hence we obtain
\begin{align}
S_{n+1}^{2}-S_{n} ^{2}= \Delta T \left(c\,l-a \log \left[ \frac{S_{n}}{l}\right]\right)
\label{s2dif2}
\end{align}
As in the original scaling argument that introduced the localization length $\zeta$ in the
Anderson lattice, we can define the saturation length $\Lambda$
as the value of $S_{n}$ that sets the right hand side of the above equation to zero.
This yields a result analogous to (\ref{zeta2}),
\begin{equation}
\label{lscale}
\Lambda=l \, e^{bl}
\end{equation}
with $b=c/a$.
Since the corresponding parameter $b$ was a constant for the Anderson lattice,
we expect that here $b$ will be approximately constant in the Quasi-diffusive regime
of the rotor if the condition of
marginal localization for weak disorder is valid.
\begin{table}
\caption{Numerical values of $l$, $b$ and $\Lambda$ [See (\ref{defl}) and (\ref{lscale})] for each run with $\lambda_{1}=\lambda_{2}= 0.50 $}
\label{tableY3}
\begin{center}
\begin{tabular}{|c|}
\hline
$l$\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &1.0 &1.5 &2.0 &2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&\\
1.0 & & & & &3.57\\
\hline
1.5& & & &3.37 &3.86\\
\hline
2.0 & & &3.20 &3.67 &4.15\\
\hline
2.5 & &3.06 & 3.52&3.98&4.46\\
\hline
3.0 & 2.96 & 3.39&3.84 &4.30 & 4.77\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|}
\hline
$b$\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &1.0 &1.5 &2.0 &2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&\\
1.0 & & & & &0.87\\
\hline
1.5& & & &0.86&1.01\\
\hline
2.0 & & &0.84 &1.02 &1.19\\
\hline
2.5 & &0.97 & 1.00&1.11&1.11\\
\hline
3.0 & 0.82 & 0.93&1.14 &1.22& 1.16\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|}
\hline
$\Lambda$\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &1.0&1.5&2.0&2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&\\
1.0&&&&&80\\
\hline
1.5 &&&&61&190\\
\hline
2.0 &&&47&155&579\\
\hline
2.5 &&60&115&330&630\\
\hline
3.0 &34&79&306&816& 1207\\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}
\caption{Numerical values of $l$, $b$ and $\Lambda$ for each run with $\lambda_{1}=\lambda_{2}= 0.25$}
\label{tableY31}
\begin{center}
\begin{tabular}{|c|}
\hline
$l$\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &1.0&1.5&2.0&2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&\\
1.0&&&&&3.54\\
\hline
1.5 &&&&3.34&3.83\\
\hline
2.0 &&&3.17& 3.64&4.13\\
\hline
2.5 &&3.03&3.49&3.96&4.43\\
\hline
3.0 &2.92&3.36&3.81&4.27& 4.75\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|}
\hline
$b$\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &1.0 & 1.5 &2.0 &2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&\\
1.0 &&&&&0.60\\
\hline
1.5 &&&&0.65&0.71\\
\hline
2.0 &&&0.57&0.67& 0.80\\
\hline
2.5 &&0.57&0.69&0.79& 0.86\\
\hline
3.0 & 0.58 &0.67&0.77 &0.83&0.79\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|}
\hline
$\Lambda$\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$\lambda_{4}$ &1.0&1.5&2.0&2.5 &3.0 \\
\hline
$\lambda_{3}$&&&&&\\
1.0&&&&&30\\
\hline
1.5 &&&&29&58\\
\hline
2.0 &&&19&42&112\\
\hline
2.5 &&17&39&90&201\\
\hline
3.0 &16&32&72&149& 202\\
\hline
\end{tabular}
\end{center}
\end{table}
We now wish to test the scaling theory by comparison with the results of numerical simulation.
In the previous section, we computed $S^2$ as a function of $t$,
for a range of the kicking and coupling parameters. These results are now regarded
as data.
It should be noted that, although the diffusion coefficient $D = d(S^2)/dt$ plays a
fundamental role in developing the scaling theory, it is not necessary to compute that derivative
numerically. Only the data $S^2$ vs $t$ is needed for the computation.
To carry out the scaling-theory calculation, we first compute
$l$ from (\ref{defl}). The matrix elements in (\ref{defl}) can be obtained by running
the numerical simulation for one kicking step from an initial state that is localized
at $p_{1}=p_{2}=0$. These values of $l$ are contained in Tables \ref{tableY3} and \ref{tableY31}.
Next we use the recurrence relation (\ref{s2dif2}) to obtain the scaling theory values
for $S_n^2$, which we denote as $\tilde{S}_n^2$, using $\tilde{S}_0^2=l^2$ as the starting value.
The parameters $c$ and $a$ cannot be determined directly, so we must do the recursion calculation
for several values of $c$ and $a$, and determine the best values of these two parameters
by minimizing the sum of squared differences between the scaling-theory values $\tilde{S}_n^2$
and the previously computed data for $S^2(t_n)$.
The choice of $\Delta T$ in (\ref{s2dif1}) and (\ref{s2dif2}) must be made so that $\tilde S^2(t)$
varies nearly linearly with $t$ during the interval $\Delta T$.
Taking $\Delta T=1$ kicking step has the disadvantage of yielding a curve whose slope might
fluctuate considerably from step to step. It is better to choose a larger value of $\Delta T$
so as to smooth out such short-time fluctuations, but of course $\Delta T$ must not be so
large as to cover a range over which the curve is appreciably nonlinear.
We found the choice of $\Delta T=300$ kicking steps to be a good compromise, but the results
are insensitive (in the first decimal place) to the precise value that was chosen.
The least-squares fitted values of $a$ and $c$ were used to calculate the parameter $b=c/a$.
The values for $b$ and the corresponding saturation length $\Lambda$, from (\ref{lscale}),
are listed in Tables \ref{tableY3} and \ref{tableY31}.
The values of $b$ seem to become nearly constant as we move to the lower right of the tables,
away from the transition zone between the Localized and Quasi-diffusive regions shown in
Tables \ref{tableX1} and \ref{tableX2}.
This is what we should expect if the scaling hypothesis is valid, and our
Quasi-diffusive regime is actually a regime of
marginal localization for weak disorder.
In contrast to the nearly constant parameter $b$, the localization length $\Lambda$
varies over orders of magnitude in these ranges of kicking and coupling strengths.
Our results are consistent with
marginal localization for weak disorder on the scale of $\Lambda$.
\section{Conclusions}
This paper was motivated by an apparent contradiction between the conclusions of two
published papers~\cite{Doron,Ikeda1} on very similar models of coupled kicked rotors.
One group found quantum localization, while the other reported diffusive behavior.
It was unclear whether the differing conclusions were due to the different criteria for
localization used by the two groups, or whether they reflected fundamental differences
between the two models, or some other reason.
By studying a more general model, of which those two are special cases, we were able to
resolve the apparent contradiction. The two models do, indeed, belong to the same generic
class and have similar behavior. When the two criteria for localization are applied to the
same model in the same range of parameters, they always agree.
The differences between the results reported
by the two groups were primarily due to their use of very different interaction strengths and kicking intervals.
Beyond resolving this discrepancy in the literature, our study of two
coupled kicked rotors with two different coupling interactions has revealed some
interesting systematic behavior as the kicking and coupling strength parameters are varied.
The classical model exhibits diffusive motion.
An analytic formula for the dependence of the classical diffusion coefficient on the kicking
and coupling strength parameters was derived, by means of a random-phase approximation.
The numerically computed diffusion coefficient oscillates about this approximate formula,
much as occurs for uncoupled rotors.
As the couplings between the rotors become large, the behavior of the rotors is determined
mainly by the sum of the two coupling parameters, $\lambda_3 + \lambda_4$.
This generalization holds, at least qualitatively, for the quantum rotors too.
The quantum mechanical model shows classical diffusion at short times, but at long times
the diffusion is limited by a kind of quantum localization.
For zero coupling strength, the model reduces to two independent kicked rotors,
each of which exhibits the well-established quantum localization that is the analog of
Anderson localization in a one-dimensional random lattice.
This single rotor localization behavior
persists for a moderate range of couplings between the two rotors.
As the coupling strength increases further, the system passes through a complex transition zone,
before entering a quasi-diffusive regime for strong coupling. In this quasi-diffusive regime,
the mean squared width of the momentum distribution increases with time, but the slope of the
curve (the local diffusion coefficient) gradually declines.
Within practical limits of grid size and simulation time, it was not possible to determine
directly whether the motion is bounded on a much larger scale or grows without bound.
We found that the
marginal localization for weak disorder due to the coupling of the two rotors fits well to this quasi-diffusive motion,
and thereby we conclude that the state is very probably localized on a larger scale
than our practical computational grid.
We were also able to estimate this large localization scale, which varies smoothly with
the coupling strength parameters.
In the intermediate region between strong and weak localization, the state-width
oscillates in a complicated way, but its envelope always lies beneath that found in
the regime of weak localization.
Thus we have shown that a system of two coupled kicked rotors exhibits a complex set of
behaviors, including a transition from strong to weak localization.
This would be expected from the formal mapping of the two-rotor system onto a two-dimensional
Anderson lattice. However, this correspondence is not trivial, since the theory of
Anderson localization assumes a {\it random} lattice potential, whereas the effective potential
generated by the kicked-rotor system is not random, but merely quasi-periodic with
incommensurate frequencies. Thus the existence of strong and weak localization in the rotor
system is not guaranteed by the theory of localization in random lattices.
However, we disagree with one conclusion of Adachi {\it et al}~\cite{Ikeda1},
who claimed that, for sufficiently large coupling strength, the coupled
quantum rotors exhibit normal diffusion.
Their results were obtained for a special set of parameters,
for which the dimensionless kicking interval (\ref{alphas}) is exactly
commensurate with $\pi$. Such special cases give rise to periodic lattices,
for which the quantum states are not localized, regardless of the kicking and
coupling strengths. These highly special cases do not correspond to
typical physical situations.
It is more realistic to study the generic non-commensurate case.
We believe that, in the generic case, the two coupled quantum rotors exhibit
weak localization on a long time scale.
Finally, we mention a possible connection between this model and experiment.
Experimental realizations of systems like the one considered in the present paper are nowadays intensely explored. The experimental observation of 2-dimensional optical localization has been reported~\cite{Nat,PRL}, where
the localized results were shown to match the predictions of scaling theory.
\begin{acknowledgments}
BT acknowledges support from Alberta's Informatics Circle of Research Excellence (iCORE), Canada's Natural Sciences and Engineering Research Council (NSERC), and the Canadian Centre of Excellence for Mathematics of Information Technology and Complex Systems (MITACS QIP and MITACS US ARO).
\end{acknowledgments}
\end{document}
|
\begin{document}
\title{Quantum voting and violation of Arrow's Impossibility Theorem}
\author{Ning~Bao\footnote{E-mail: [email protected]}}
\affiliation{Institute for Quantum Information and Matter, California Institute of Technology, Pasadena, CA 91125, USA}
\affiliation{Walter Burke Institute for Theoretical Physics, California Institute of Technology, Pasadena, CA 91125}
\author{Nicole~Yunger~Halpern\footnote{E-mail: [email protected]}}
\affiliation{Institute for Quantum Information and Matter, California Institute of Technology, Pasadena, CA 91125, USA}
\nopagebreak
\date{\today}
\pacs{03.67.Ac}
\begin{abstract}
We propose a quantum voting system, in the spirit of quantum games
such as the quantum Prisoner's Dilemma.
Our scheme enables a constitution
to violate a quantum analog of Arrow's Impossibility Theorem.
Arrow's Theorem is a claim proved deductively in economics:
Every (classical) constitution
endowed with three innocuous-seeming properties is a dictatorship.
We construct quantum analogs of
constitutions, of the properties, and of Arrow's Theorem.
A quantum version of majority rule, we show,
violates this Quantum Arrow Conjecture.
Our voting system allows for tactical-voting strategies
reliant on entanglement, interference, and superpositions.
This contribution to quantum game theory helps elucidate how
quantum phenomena can be harnessed for strategic advantage.
\end{abstract}
\maketitle{}
Today's voting systems are classical.
Societies hold elections to determine
which of several candidates will win an office.
Each voter ranks the candidates, forming a \emph{preference}.
Voters' preferences are combined deterministically
according to some rule set, or constitution.
What if citizens could entangle, superpose, and interfere preferences?
We formulate a quantum voting system,
in the tradition of quantum games,
that highlights the power of quantum resources.
Quantum game theory has flourished
over the past several years~\cite{FlitneyA02,Brunner_13_Connection}.
In a classical game, players can perform
only local operations and classical communications.
Each player can prepare and measure only systems in his/her lab.
Players can communicate only via classical channels
(e.g., by telephone), if at all.
Examples include the Prisoner's Dilemma.
Suppose that the police arrest two suspected criminals.
The suspects are isolated in separate cells.
If neither suspect confesses,
each will receive a lenient sentence
(e.g., one year in jail).
If both suspects confess, both will receive moderate sentences
(e.g., two years).
If just one suspect confesses, s/he will receive no sentence.
The other suspect will suffer a heavy penalty (e.g., three years).
Unable to communicate with the other prisoner,
each suspect can optimize his/her future by confessing.
Both suspects would benefit more
if they could agree to remain silent.
The Prisoner's Dilemma consists of
the tension between
(i) the optimal strategy attainable
and (ii) the optimal strategy that the prisoners could attain
if they could communicate.
Quantizing the game diminishes the tension~\cite{EisertWL99}.
Eisert \emph{et al.} associate each prisoner with a Hilbert space.
They translate each prisoner's options
(to cooperate with the police and to defect)
into basis elements ($\ket{C}$ and $\ket{D}$).
The game becomes a quantum circuit.
Measuring the prisoners' joint state
determines their penalties.
This quantization alters the landscape of
possible outcomes and strategies.
Similar insights result from quantizing
the penny-flipping game~\cite{Meyer99}, the Monty Hall problem~\cite{Flitney_02_Monty,Dariano_02_Monty},
and Conway's Game of Life~\cite{Bleh_12_QGoL,Arrighi_10_QGoL}.
A game elucidates
the canonical demonstration of entanglement's power:
Clauser, Horne, Shimony, and Holt (CHSH) reformulated Bell's Theorem
in terms of a protocol cast as
``the CHSH game''~\cite{CHSH_69_Proposed,Wilde_12_Quantum}.
Elections have been cast in game-theoretic terms~\cite{Ordeshook_03_Game}.
Elections therefore merit generalization with quantum theory.
Upshots of quantization, we show,
include a violation of a quantum analog of Arrow's Impossibility Theorem,
as well as quantum voting strategies.
Arrow's Theorem is a result derived, in economics,
from deductive logic and definitions~\cite{Arrow50}.
According to the theorem,
every constitution that has
three innocuous-seeming properties
(transitivity, unanimity, and independence of irrelevant alternatives,
defined below)
is a dictatorship.
Arrow's Theorem is surprisingly deep
and has fundamentally impacted game theory and voting theory
(e.g.,~\cite{Arrow_02_Handbook}).
Yet Arrow's Theorem derives from classical logic.
A quantum version, we find, is false.
Classical constitutions disobey Arrow's Theorem
under certain conditions.
For example, Black supplements Arrow's three postulates
with extra assumptions~\cite{Black69}.
The extra assumptions, he argues, are properties
as reasonable as Arrow's
for a constitution to have.
No constitution, he shows, can satisfy
Arrow's postulates and the extras
while being a nondictatorship.
Some sets of votes, however,
prevent constitutions that satisfy Black's extra assumptions
from satisfying all of Arrow's postulates.
Probabilistic mixtures of votes, too, evade Arrow's Theorem.
Suppose that a voter can pledge
40\% of his/her support to candidate Alice,
40\% to Bob, and 20\% to Charlie.
Such voters can form a constitution
not subject to Arrow's Theorem.
Yet such constitutions do not satisfy all of Arrow's postulates~\cite{Riker_82_Liberalism,Kalai_02_Fourier,Mossel09}.
We cleave to Arrow's postulates
and avoid restricting voters' preferences.
Rather, we recast Arrow's scheme in quantum terms.
Like Black's extra postulates and like probabilistic votes,
alternative classical voting schemes evade Arrow's Theorem.
Engaging in \emph{range voting}, a voter assigns each candidate
a number of points
independently of the other candidates~\cite{Harsanyi_55_Cardinal,Harsanyi_77_Rational}.
Whichever candidate receives the most points wins.
Voters behave identically when using \emph{majority judgment}~\cite{Balinski_10_Majority}.
The majority-judgment winner has the highest median number of points.
Under \emph{approval voting}, each voter assigns each candidate
a thumbs-up or a thumbs-down~\cite{Ottewell_87_Arithmetic}.
Range voting, majority judgment, and approval voting
contrast with \emph{ordinal voting}.
Ordinal-voting citizens rank candidates.
Arrow's Theorem governs just ordinal voting.
Yet a generalization of Arrow's Theorem, the Gibbard-Satterthwaite (GS) Theorem~\cite{Benoit_00_Gibbard,Reny_01_Arrow's},
governs majority judgment and approval voting.
(Range voting is not a scheme of the class
governed by the GS Theorem,
just as range voting is not a scheme of the class
governed by Arrow's Theorem.)
Like the GS Theorem, our Quantum Arrow Conjecture
generalizes Arrow's Theorem.
Yet classicality does not constrain our generalization
as it constrains the GS Theorem.
Like range voting, majority judgment, and approval voting,
our voting scheme is not precisely ordinal.
Yet ordinal rankings form the basis for our quantum votes' Hilbert spaces,
as discussed below.
In addition to disproving a Quantum Arrow Conjecture,
we present four quantum strategic-voting tactics.
How one should vote is not always clear,
even to opinionated citizens.
You might favor a candidate unlikely to win, for example.
Voting for a more likely candidate
whose policies you could tolerate
can optimize the election's outcome.
Strategic voting is
the submission of a preference
other than one's opinion,
in a competition amongst three or more candidates~\cite{Barbera_01_Intro}.
Quantizing voting unlocks new voting strategies.
We exhibit three tactics reliant on entanglement
and one reliant on interference and superpositions.
Earlier work on quantum voting has focused on
privacy, security, and cryptography~\cite{VaccaroASC07,HilleryZBB06,JiangHNXZ12}.
These references answer questions such as
``How can voters and election officials hinder cheaters?''
In contrast, we draw inspiration from game theory.
The paper is organized as follows.
First, we introduce our quantum voting system.
We define quantum analogs of properties of classical constitutions.
Four of these properties appear in Arrow's Theorem,
which we review and quantize.
We disprove the conjecture by a counterexample.
The counterexample relies on the quantization of
a fifth property available to classical constitutions: majority rule.
Finally, we present three strategic-voting strategies
based on entanglement
and one strategy based on interference.
\section{Quantum voting system}
\label{section:InitialDefns}
A voting system involves a \emph{society}
that consists of \emph{voters}.
\emph{Candidates} $a, b, \ldots, m$ vie for office.
Each voter ranks the candidates, forming a \emph{preference}.
A preference is a transitive ordered list.
Each candidate is ranked above, ranked below, or tied with
each other candidate: $a > b$, $a < b$, or $a = b$.
A list is transitive if $a \geq b$ and $b \geq c$,
together, imply $a \geq c$.
The voters' preferences form a \emph{profile}.
The profile serves as input to a \emph{constitution}
during an \emph{election}.
We focus on elections that feature at least three candidates.
The constitution combines the voters' preferences,
forming \emph{society's preference}.
Society's preference implies which candidate wins.
We quantize this classical election scheme.
Our strategy resembles that of Eisert \emph{et al.}~\cite{EisertWL99}.
Their quantum game consists of
a general quantum process:
a preparation procedure, an evolution, and a measurement~\cite{NielsenC10}.
So does our quantum voting scheme.
We introduce a Hilbert-space formalism
for quantum preferences.
Elections are formulated as quantum circuits~\cite{NielsenC10}.
We define quantum constitutions
and five properties that constitutions can have.
\subsection{Hilbert-space formalism for quantum voters}
Let $\mathcal{S}$ denote a society of voters.
The voters are indexed by $i = 1, 2, \ldots N$.
We associate with voter $i$
the $i^\th$ copy of
a Hilbert space $\mathcal{H}$.
The space of density operators
(unit-trace linear positive-semidefinite operators)
defined on $\mathcal{H}$
is denoted by $\mathcal{D}( \mathcal{H} )$.
Society is associated with a \emph{joint quantum state}
$\sigma_{\rm soc} \in
\mathcal{D} \left( \mathcal{H}^{ \otimes N } \right)$.
The joint state encodes all the information
in the voters' preferences.
This information may include correlations,
such as entanglement, between votes.
Consider tracing out every subsystem
except the $i^\th$.
The result is voter $i$'s \emph{quantum preference},
$\rho_i := {\rm Tr}_{\neq i} ( \sigma_{\rm soc} )$.
We sometimes denote a pure quantum preference by $\ket{ \rho_i }$.
The set of all voters' quantum preferences
forms society's \emph{quantum profile},
$\mathcal{P} := \{ \rho_1, \ldots, \rho_N \}$.
Processing $\mathcal{P}$ must lead to
the identification of a winner.
More generally, the quantum society must generate
a transitive ordered list of the candidates.
We call such a list a \emph{classical preference}.
Each classical preference corresponds to
a state in $\mathcal{H}$.
For example, $c > a = b > d$ corresponds to
$ \ket{ c{>}a{=}b{>}d }$.
We denote by $\ket{\gamma}$
the $\gamma^{\rm th}$ classical-preference state
and by $\chi_i^\gamma := \ketbra{\gamma}{\gamma}$
the associated density operator.
The set $\{ \ket{ \gamma } \}$ forms
the \emph{preference basis} $ \mathcal{B} $
for $\mathcal{H}$.
Consider any pair $(a, b)$ of candidates.
$\mathcal{H}$ decomposes into subspaces
associated with the possible relationships
between $a$ and $b$.
By $\mathcal{G}^{a > b}$,
we denote the subspace spanned by
the $ \mathcal{B} $ elements
that encode $a > b$
(e.g., $\ket{ a{>}b{=}c}$, $\ket{ c{>}a{>}b}$, etc.).
The subspaces $\mathcal{G}^{b > a}$ and
$\mathcal{G}^{a = b}$ are defined analogously.
For example, $\ket{a{>}b{>}c}$ occupies
the intersection of three subspaces:
$\ket{a{>}b{>}c} \in \mathcal{G}^{a > b} \cap
\mathcal{G}^{a > c} \cap
\mathcal{G}^{b > c}$.
The $a > b$, $b > a$, and $a = b$ subspaces are disjoint.
For example, $\mathcal{G}^{a > b}
\cap \mathcal{G}^{b > a} = \emptyset$.
$\Pi^{a > b}$ denotes
the projector onto the subspace
$\mathcal{G}^{a > b}$.
The projector $\Pi^{a = b}$
is defined analogously.
Consider measuring projectively
a quantum preference $\rho_i$
with respect to $ \mathcal{B} $.
The measurement yields a classical preference.
If $\rho_i$ is a nontrivial linear combination or mixture
of $ \mathcal{B} $ elements,
the measurement is probabilistic.
A voter's ability to superpose classical preferences resembles
a prisoner's ability to superpose classical tactics
in the quantum Prisoner's Dilemma~\cite{EisertWL99}.
During a \emph{quantum election},
society's joint state is transformed into
\emph{society's quantum preference}:
$\sigma_{\rm soc} \mapsto \rho_{\rm soc} \in \mathcal{D} ( \mathcal{H} )$.
This $\rho_{\rm soc}$ is measured with respect to $ \mathcal{B} $,
generating society's classical preference.
The quantum election can be formulated
as a quantum circuit~\cite{NielsenC10}.
A quantum constitution, which we now introduce,
implements the transformation.
\subsection{Quantum constitutions}
\label{section:Constitutions}
A \emph{classical constitution} $ \mathcal{C} $ is a map from
the profile of the voters' classical preferences
to society's classical preference.
We define quantum constitutions analogously.
Having completed the definition of quantum elections,
we define their classical limit.
The classical constitutions that obey Arrow's Theorem
have four properties.
We review and quantize these properties.
\subsubsection{Definition of ``quantum constitution''}
Quantum constitutions have the form of
general quantum evolutions,
as does the Quantum Prisoner's Dilemma~\cite{EisertWL99}.
A general quantum evolution is
a convex-linear completely positive trace-preserving
(CPTP) map~\cite{NielsenC10}.
A map $\mathcal{E}$ is convex-linear if,
given a probabilistic combination $\sum_i p_i \rho_i$
of states $\rho_i$,
$\mathcal{E}$ transforms the component states independently:
$\mathcal{E} \left( \sum_i p_i \rho_i \right)
= \sum_i p_i \mathcal{E} ( \rho_i ) \, ,$
wherein $p_i \geq 0 \; \forall i$ and
$\sum_i p_i = 1$~\cite{NielsenC10}.
Every CPTP map is equivalent to
the tensoring on of an ancilla,
a unitary transformation of the system-and-ancilla composite,
and the tracing out of a subsystem~\cite{NielsenC10}.
Each quantum constitution accepts, as input,
society's joint state, $\sigma_{\rm soc}$,
and an ancilla.
The ancilla is initialized to a fiducial state $\ketbra{0}{0}$.
When outputted by the constitution,
the ancilla holds society's quantum preference, $\rho_{\rm soc}$.
\begin{definition}[Quantum constitution]
\label{definition:Constitution}
A \emph{quantum constitution} is a convex-linear CPTP map
\begin{align*}
\mathcal{E} :
\mathcal{D} \left( \mathcal{H}^{ \otimes (N + 1) } \right)
\to \mathcal{D}( \mathcal{H} )
\end{align*}
that transforms society's joint state and an ancilla
into society's quantum preference:
\begin{equation}
\mathcal{E}(\sigma_{\rm soc} \otimes \ketbra{0}{0} ) = \rho_{\rm soc} \, .
\end{equation}
\end{definition}
Having defined constitutions, we can define
the classical limit.
\begin{definition} \label{definition:ClassLim}
The \emph{classical limit} of a quantum election is
the satisfaction of the following conditions:
\begin{enumerate}
\item Every quantum voter preference $\rho_i$ is
an element of the preference basis $\mathcal{B}$.
\item The quantum constitution $\mathcal{E}$ consists of
classical probabilistic logic gates.
\end{enumerate}
\end{definition} \noindent
In the classical limit, $\mathcal{E}$ can output only
elements of $\mathcal{B}$ and probabilistic combinations thereof.
Classical and quantum constitutions can have various properties.
Four properties appear in Arrow's Theorem.
We review these classical properties, then quantize them.
\subsubsection{The four constitutional properties in Arrow's Theorem
and quantum analogs}
\label{section:Properties}
Arrow's Theorem features four properties
available to classical constitutions:
transitivity, respecting of unanimity,
respecting of independence of irrelevant alternatives,
and being a dictatorship.
We review these properties and define quantum analogs.
Two principles guide the quantization strategy.
First, each quantum definition should preserve
the corresponding classical definition's spirit.
Second, each quantum definition should make sense
in the context of entanglement and superpositions---should
be able to characterize a quantum circuit.
A classical constitution $ \mathcal{C} $ is \emph{transitive} if
every classical preference in its range is transitive.
Suppose that society prefers candidate $a$ to $b$
and prefers $b$ to $c$.
$ \mathcal{C} $ outputs a societal preference
in which $a$ ranks above $c$:
$a \geq b$ and $b \geq c$, together, imply $a \geq c$.
\begin{definition}[Quantum transitivity]
\label{definition:Transitivity}
A quantum constitution $\mathcal{E}$ respects \emph{quantum transitivity} if every possible output $\rho_{\rm soc}$, upon being measured in
the preference basis $ \mathcal{B} $,
collapses to a state $\ket{a \ldots m}$ associated with
a transitive classical preference $(a \ldots m)$.
\end{definition}
\noindent Every $\mathcal{E}$ obeys quantum transitivity by definition:
Given any input, $\mathcal{E}$ outputs a $\rho_{\rm soc}$ that is
a linear combination or a mixture of preference-basis elements.
A $ \mathcal{B} $ measurement of $\rho_{\rm soc}$ yields
a $ \mathcal{B} $ element.
Every $ \mathcal{B} $ element corresponds to
a transitive classical preference.
Classical \emph{unanimity} is defined as follows.
Let $ \mathcal{C} $ denote a classical constitution that respects unanimity.
Suppose that every voter ranks
a candidate $a$ strictly above
a candidate $b$: $a > b$.
The constitution outputs a societal preference
in which $a$ ranks strictly above $b$: $a > b$.
\begin{definition}[Quantum unanimity]
\label{definition:Unanimity}
A quantum constitution $ \mathcal{E} $ respects \emph{quantum unanimity} if
it has the following two subproperties:
\begin{enumerate}
\item \label{item:Have}
Suppose that every voter's quantum preference
has support on the $a > b$ subspace:
${\rm Tr} \left( \Pi^{a > b} \: \rho_i \right) > 0 \; \,
\forall i = 1, 2, \ldots, N$.
$ \mathcal{E} $ outputs a societal quantum preference $\rho_{\rm soc}$
that has support on that subspace:
${\rm Tr} \left( \Pi^{a > b} \: \rho_{\rm soc} \right) > 0$.
\item \label{item:Only}
Suppose that every voter's quantum preference
has support only on the $a > b$ subspace.
$ \mathcal{E} $ outputs a societal quantum preference $\rho_{\rm soc}$
that has support only on that subspace:
\begin{align}
& \supp (\rho_i) \subseteq \Pi^{a > b}
\quad \forall i = 1, 2, \ldots, N
\quad \Rightarrow \quad
\nonumber \\ &
\supp ( \rho_{\rm soc} ) \subseteq \Pi^{a > b} \, ,
\end{align}
wherein $\supp (\rho)$ denotes
the support of the quantum state $\rho$.
\end{enumerate}
\end{definition}
Subproperty~\ref{item:Only} might appear extraneous,
seeming to lack a classical counterpart.
But classical unanimity satisfies
the classical analog of~\ref{item:Only} implicitly,
as the following argument shows.
\begin{enumerate}[label=(\Alph*)]
\item
Suppose that every voter's preference
satisfies the classical analog of
having support only on $\Pi^{a > b}$:
Every voter prefers $a > b$ strictly.
\item \label{item:Diff1}
Society prefers $a > b$ strictly,
by the definition of classical unanimity.
\item \label{item:Diff2}
Hence society ranks $a$ and $b$
neither as $a = b$ nor as $b > a$.
\item
Hence society's preference
satisfies the classical analog of
having support only on $ \Pi^{a > b}$.
\end{enumerate}
Definition~\ref{definition:Unanimity} must contain
subproperty~\ref{item:Only} explicitly because
the quantum analog of step~\ref{item:Diff1}
does not imply
the quantum analog of step~\ref{item:Diff2}.
Even if $\rho_{\rm soc}$ has support on $\Pi^{a > b}$,
$\rho_{\rm soc}$ can have support on $\Pi^{a = b} \, :$
$\rho_{\rm soc}$ can be a linear combination of
elements of $B_{\rm soc}$
or can be a mixture.
The generality of quantum states
necessitates the articulation of subproperty~\ref{item:Only}.
Classical \emph{independence of irrelevant alternatives} (IIA)
is defined as follows.
In every classical preference,
the candidates $a$ and $b$ have some \emph{relative ranking}.
Either $a > b$, $b > a$, or $a = b$.
Suppose that society's relative ranking of $a$ and $b$
depends only on
every voter's relative ranking of $a$ and $b$.
Whether society prefers $a$ to $b$
(or prefers $b$ to $a$, etc.)
depends only on
whether each voter prefers $a$ to $b$
(or prefers $b$ to $a$, etc.).
How any voter ranks candidate $c$
fails to influence society's relative ranking of $a$ and $b$.
\begin{definition}[Quantum independence of irrelevant alternatives]
A quantum constitution respects
\emph{quantum independence of irrelevant alternatives} (QIIA) if
whether $\rho_{\rm soc}$ has support on
$\mathcal{G}^{a > b}$, on
$\mathcal{G}^{b > a}$, and/or on
$\mathcal{G}^{a = b}$ depends only on
whether each $\rho_i$ has support on
$\mathcal{G}^{a > b}$, on $\mathcal{G}^{b > a}$, and/or on
$\mathcal{G}^{a = b}$.
\end{definition}
A classical dictatorship has a dominant voter.
Suppose that society prefers $a$ strictly to $b$
if and only if
some voter $i$ prefers $a$ strictly to $b$,
for all pairs $(a, b)$ of candidates:
\begin{align}
& \exists i \: : \:
a > b \, , \; \text{according to $i$,}
\quad \Leftrightarrow \quad
\nonumber \\ & \qquad \;
a > b \, , \; \text{according to society,} \quad
\forall a, b \, .
\end{align}
The classical constitution $ \mathcal{C} $ that outputs society's preference
is a classical \emph{dictatorship}.
\begin{definition}[Quantum dictatorship]
\label{definition:Dictator}
A quantum constitution is a \emph{quantum dictatorship} if
there exists a voter $i$ who has
the following two characteristics:
\begin{enumerate}
\item
Society's quantum preference has support on the $a > b$ subspace
if and only if voter $i$'s preference has:
\begin{align}
{\rm Tr} \left( \Pi^{a > b} \rho_i \right) > 0
\quad \Leftrightarrow \quad
{\rm Tr} \left( \Pi^{a > b} \rho_{\rm soc} \right) > 0.
\end{align}
\item \label{item:Only2}
Society's quantum preference has support
only on the $a > b$ subspace
if and only if voter $i$'s preference has:
\begin{align}
\supp ( \rho_i ) \subseteq \Pi^{a > b}
\quad \Leftrightarrow \quad
\supp ( \rho_{\rm soc} ) \subseteq \Pi^{a > b} \, .
\end{align}
\end{enumerate}
\end{definition}
\noindent Subproperty~\ref{item:Only2} plays a role analogous to
subproperty~\ref{item:Only} in the definition of ``quantum unanimity.''
We have constructed quantum analogs of
the four properties in Arrow's Theorem.
A quantum version of Arrow's Theorem, we show,
is violated by
a quantum version of majority rule.
\subsection{Majority rule}
\label{section:MajorityRule}
Majority rule is a fifth property that constitutions can have.
We review classical majority rule,
then introduce a quantum analog.
\emph{Cyclic} voting preferences
prevent classical majority rule
from satisfying Arrow's assumptions.
Quantum majority rule is more robust.
\subsubsection{Classical majority rule}
Let $\mathcal{P}_{\rm cl}$ denote a classical society's voter profile.
Let $ \mathcal{C} $ denote a classical constitution
that respects majority rule.
$ \mathcal{C} $ reflects the wishes shared by most voters.
Suppose that over half the voters agree
on the relative ranking of candidates $a$ and $b$.
$ \mathcal{C} $ outputs a classical societal preference
that has the same relative ranking of $a$ and $b$.
A subtlety arises if $\mathcal{P}_{\rm cl}$ involves a cycle.
Let $T = \{a, b, \ldots, k\}$ denote
a set of candidates.
Suppose that $a$ and $b$ participate, in $\mathcal{P}_{\rm cl}$,
in pairwise preferences that violate transitivity.
Suppose that every pair of candidates in $T$ does.
$T$ forms a classical \emph{cycle}.
For example, let
$\mathcal{P}_{\rm cl} = \{ (a > b > c), (c > a > b), (b > c > a) \}.$
A na\"{i}ve application of majority rule implies
$a > b$ and $b > c$.
Transitivity implies $a > c$.
But a na\"{i}ve application of majority rule
implies also $c > a$.
This $c > a$, combined with the previously derived $a > c$,
violates transitivity.
The constitution may be defined
as outputting $a = b = c$ or
as outputting an error message.\footnote{
One profile can contain multiple cycles. For example,
$\{ (a > b > c), (b > a > c), (a > c > b) \}$ contains a cycle over $(a, b)$ (because voters 1 and 3 rank $a > b$, whereas voter 2 ranks $b > a$) and a cycle over $(b, c)$ (because voters 1 and 2 rank $b > c$, whereas voter 3 ranks $c > b$).}
Cycles prevent classical majority rule from satisfying
IIA and transitivity simultaneously.
Classical majority rule fails to satisfy Arrow's assumptions.
Hence classical majority rule
cannot contradict Arrow's Theorem.
A quantum analog of majority rule can.
\subsubsection{Quantum majority rule}
First, we introduce quantum cycles.
We then define the Quantum Majority-Rule (QMR) constitution $ \mathcal{E} QMR$.
This constitution, we show, respects
quantum transitivity, quantum unanimity, and QIIA.
These properties will enable $ \mathcal{E} QMR$ to violate
a quantum analog of Arrow's Theorem.
\textbf{Quantum cycles:}
Let $\chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu$ be
a product of preference-basis elements.
Suppose that at least two
$\chi_i^\gamma$'s are pure states labeled by
classical preferences that form a classical cycle.
The product will be said to contain a \emph{quantum cycle}.
\textbf{Operation of the Quantum Majority-Rule constitution:}
$ \mathcal{E} QMR$ performs the following sequence of steps.
First, $ \mathcal{E} QMR$ decoheres each quantum preference $\rho_i$
with respect to the preference basis:
\begin{align}
\rho_i \mapsto \sum_{ \gamma }
\ketbra{\gamma}{\gamma} \rho_i \ketbra{\gamma}{\gamma}
= \sum_\gamma p_i^\gamma \chi_i^\gamma
=: \rho'_i \, ,
\end{align}
wherein $\sum_\gamma p_i^\gamma = 1 \, .$
Society's quantum profile evolves as
\begin{align}
\label{eq:QMR_Help0a}
\sigma_{\rm soc} & \mapsto
\rho'_1 \otimes \ldots \otimes \rho'_N \\
& = \label{eq:QMR_Help1}
\sum_{\alpha, \ldots, \mu}
\left( p_1^\alpha \ldots p_N^\mu \right)
( \chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu ) \, .
\end{align}
Recall that $\chi_1^\alpha$ denotes
the element, labeled by the classical preference $\alpha$,
of the preference basis $ \mathcal{B} $ for
voter 1's Hilbert space $\mathcal{H}$.
$ \mathcal{E} QMR$, being a quantum constitution, obeys convex linearity.
To specify how $ \mathcal{E} QMR$ transforms the right-hand side
of Eq.~\eqref{eq:QMR_Help1},
we must specify just how $ \mathcal{E} QMR$ transforms
each factor $\chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu$.
For each factor, $ \mathcal{E} QMR$ constructs a directed graph, or digraph.
One vertex is formed for each candidate.
The edges are governed by
$\chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu$.
If more classical preferences $\gamma$ correspond to $a > b$
than to $b > a$,
an edge points from $a$ to $b$.
If exactly as many $\gamma$'s correspond to $a > b$
as to $b > a$,
an edge points from $a$ to $b$ and from $b$ to $a$.
$ \mathcal{E} QMR$ inputs the digraph into \emph{Tarjan's algorithm}~\cite{Tarjan72}.
Tarjan's algorithm finds a digraph's strongly connected components.
A \emph{strongly connected component} (SCC) is a subgraph.
Every vertex in the subgraph can be accessed from
every other vertex via edges.
Every vertex appears in exactly one SCC.
Every SCC in the QMR graph represents
a cycle or a set of interlinked cycles.
For example, let $\chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu
= \ketbra{b{>}a{>}c{>}d}{ b{>}a{>}c{>}d} \otimes
\ketbra{a{>}c{>}b{>}d }{ a{>}c{>}b{>}d }$.
Candidates $a$ and $b$ participate in a cycle,
as do $b$ and $c$.
The $a$, $b$, and $c$ vertices form an SCC.
The $d$ vertex forms another SCC.
The digraph appears in Fig.~\ref{fig:SCC}.
\begin{figure}
\caption{\textbf{Example digraph formed by
the Quantum Majority Rule (QMR) constitution
$ \mathcal{E} QMR$.}}
\label{fig:SCC}
\end{figure}
Tarjan's algorithm returns a list of the SCCs.
The later an SCC appears in the list,
the more popular the SCC's candidates, roughly speaking.
More precisely, let $i$ and $j$ label SCCs
such that $i < j$.
Every vertex in the $j^{\rm th}$ SCC is preferred to
every vertex in the $i^{\rm th}$.
For example, Tarjan's algorithm maps Fig.~\ref{fig:SCC} to
\mbox{$( \{ d \}, \{a, b, c\} )$.}
Consider the strict classical preferences in which
every candidate in the $j^{\rm th}$ SCC ranks above
every candidate in the $i^{\rm th}$ SCC,
for all $j > i$.
$ \mathcal{E} QMR$ forms a maximally mixed state $\rho_{\rm soc}'$
over the corresponding preference-basis elements.
In our example,
\begin{align}
\label{eq:QMR_Help0}
\rho_{\rm soc}' & = \frac{1}{ 6 }
( \ketbra{ abcd }{ abcd } + \ketbra{ acbd }{ acbd } +
\ketbra{ bacd }{ bacd }
\nonumber \\ & \quad
+ \ketbra{ bcad }{ bcad }
+ \ketbra{ cabd }{ cabd } + \ketbra{ cbad }{ cbad } ) \, .
\end{align}
$ \mathcal{E} QMR$ then ``gives the minority a shot.''
For any candidate pair $(a, b)$,
suppose that at least one $\chi_i^\gamma$ corresponds
to $a > b$.
The constitution spreads an amount $\delta \in (0, 1)$ of weight
across the $a > b$ subspace:\footnote{
``Giving the minority a shot'' resembles the action of
the United States electoral college.
Whichever candidate receives the most popular votes
usually wins the presidential election.
But a candidate who receives a minority
can win the presidency
if favored by enough of the electoral college.}
\begin{align}
\label{eq:Minority}
\rho'_{\rm soc} \mapsto \rho''_{\rm soc}
= ( 1 - \delta ) \rho'_{\rm soc}
+ \delta \, \Pi^{a > b } \, .
\end{align}
This $\delta$ serves as a parameter inputted to the constitution.
We omit $\delta$ from the notation $ \mathcal{E} QMR$
for conciseness.
Next, $ \mathcal{E} QMR$ enforces unanimity.
Suppose that every $\chi_i^\gamma$ corresponds to $a > b$,
for any candidate pair $(a, b)$:
$\supp ( \chi_i^\gamma ) \subseteq \Pi^{a > b }
\; \; \forall i = 1, 2, \ldots, N$.
The constitution projects $\rho''_{\rm soc}$ onto
the $a > b$ subspace:
\begin{align}
\label{eq:Project_QMR}
\rho''_{\rm soc} \mapsto \rho'''_{\rm soc}
= \Pi^{a > b } \: \rho''_{\rm soc} \: \Pi^{a > b} \, .
\end{align}
We have seen how $ \mathcal{E} QMR$ calculates the $\rho'''_{\rm soc}$
associated with each term in Eq.~\eqref{eq:QMR_Help1}.
Each $ ( \chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu )$
in Eq.~\eqref{eq:QMR_Help1} is replaced with
the corresponding $\rho'''_{\rm soc}$.
This replacement yields $\rho_{\rm soc}$.
The $\rho_{\rm soc}$ is measured with respect to $ \mathcal{B} $.
The measurement yields
society's classical preference.
\textbf{Three properties of QMR:}
$ \mathcal{E} QMR$ has three of the properties
introduced in Sec.~\ref{section:Properties}.
These properties will enable $ \mathcal{E} QMR$ to violate
a quantum analog of Arrow's Theorem.
\begin{lemma} \label{lemma:MajProperties}
The Quantum Majority-Rule constitution $ \mathcal{E} QMR$ respects
quantum transitivity, quantum unanimity, and
quantum independence of irrelevant alternatives.
\end{lemma}
\begin{proof}
Every quantum constitution respects quantum transitivity,
as explained below Definition~\ref{definition:Transitivity}.
$ \mathcal{E} QMR$ is a quantum constitution.
Therefore, $ \mathcal{E} QMR$ respects quantum transitivity.
Quantum unanimity involves two subproperties
(see Definition~\ref{definition:Unanimity}).
$ \mathcal{E} QMR$ respects subproperty~\ref{item:Have}
due to Tarjan's algorithm and Eq.~\eqref{eq:QMR_Help0}.
Suppose that every voter's quantum profile
has support on the $a > b$ subspace.
Most quantum profiles have support on that subspace.
Hence $a$ appears in the $b$ SCC
or in an SCC ``preferred to'' the $b$ SCC.
Hence $\rho'_{\rm soc}$ contains preference-basis elements
associated with $a > b$.
Equation~\eqref{eq:Project_QMR} ensures that
$ \mathcal{E} QMR$ respects subproperty~\ref{item:Only} of quantum unanimity.
Suppose that every voter's quantum preference has support
only on the $a > b$ subspace.
Every $\chi_i^\gamma$ has support
only on the $a > b$ subspace.
$ \mathcal{E} QMR$ projects $\rho''_{\rm soc}$ onto $\mathcal{G}^{a > b }$,
not onto $\mathcal{G}^{b > a}$ or onto $\mathcal{G}^{a = b}$.
Therefore, $\rho'''_{\rm soc}$ has support only on $\mathcal{G}^{a > b }$.
According to QIIA, whether $\rho_{\rm soc}$ has support on
$\mathcal{G}^{a > c}$, on $\mathcal{G}^{c > a}$,
and/or on $\mathcal{G}^{a = c}$
depends only on
whether each voter's quantum preference, $\rho_i$,
has support on these subspaces---not
on whether any $\rho_i$ has support on,
e.g., $\mathcal{G}^{a > b}$.
To check that $ \mathcal{E} QMR$ respects QIIA,
we must analyze three cases:
\begin{enumerate}
\item $a$ does not participate in a cycle with $c$.
\begin{enumerate}
\item \label{item:Case1a}
$a$ participates in a cycle with
at least one candidate
that participates in a cycle with $c$.
For example, $a$ may participate in a cycle with $b$,
while $b$ participates in a cycle with $c$.
\item \label{item:Case1b}
$a$ participates in no cycle with any candidate
that participates in a cycle with $c$.
\end{enumerate}
\item \label{item:Case2}
$a$ participates in a cycle with $c$.
\end{enumerate}
Case~\ref{item:Case1a} requires the most thought.
Considering the example illustrated in Fig.~\ref{fig:SCC} suffices.
$a$ does not participate in a cycle with $c$.
Yet $a$ participates in a cycle with $b$,
which participates in a cycle with $c$.
Therefore, $a$ appears in the same SCC as $c$.
According to Eq.~\eqref{eq:QMR_Help0},
$\rho'_{\rm soc}$ has support on $\mathcal{G}^{c > a}$.
Yet every quantum voter preference $\rho_i$
has support only on $\mathcal{G}^{a > c}$.
How voters rank $b$ seems to influence
how society ranks $a$ relative to $c$.
$ \mathcal{E} QMR$ seems to violate QIIA.
Equation~\eqref{eq:Project_QMR} rectifies
this seeming violation.
$\rho''_{\rm soc}$ is projected onto
the $a > c$ subspace,
because every $\chi_i^\gamma$ corresponds to $a > c$.
But suppose that not every $\chi_i^\gamma$ corresponded to $a > c$.
Suppose that only a majority of $\chi_i^\gamma$'s did.
$\rho''_{\rm soc}$ would not be projected onto $\mathcal{G}^{a > c}$.
How voters ranked $b$ would again seem to influence
how society ranked $a$ relative to $c$.
$ \mathcal{E} QMR$ would again seem to violate QIIA.
$ \mathcal{E} QMR$ would not because of Eq.~\eqref{eq:Minority}.
Some $\chi_i^\gamma$'s have support on
the $c > a$ subspace.
The ``give the minority a shot'' step therefore
gives $\rho''_{\rm soc}$ support
on $\mathcal{G}^{c > a}$.
Society's quantum preference would have
support on $\mathcal{G}^{c > a}$
regardless of whether
$a$ participated in a cycle with
a $b$ that participated in a cycle with $c$.
How voters rank $b$ therefore does not affect
how society ranks $a$ relative to $c$.
In case~\ref{item:Case1b}, $a$ does not participate in
a cycle with any $b$ that participates in
a cycle with $c$.
Therefore, $a$ appears in an SCC
that ``is preferred'' to the $c$ SCC.
$\rho'_{\rm soc}$ therefore has support on
just the $a > c$ subspace,
regardless of any $b$'s.
In case~\ref{item:Case2}, $a$ participates in a cycle with $c$.
$\rho'_{\rm soc}$ has support on
the $a > c$ and $c > a$ subspaces,
regardless of any $b$'s.
\end{proof}
\noindent Because QMR satisfies the quantum analogs
of three properties in Arrow's Theorem,
QMR can violate
a quantum analog of Arrow's Theorem.
\section{Arrow's Impossibility Theorem}
\label{section:ArrowTheorem}
Transitivity, unanimity, and IIA
have innocent-sounding definitions.
They seem unlikely to buttress authoritarianism.
Yet possessing these properties, Arrow shows,
renders a classical constitution a dictatorship~\cite{Arrow50}.
\begin{theorem}[Arrow's Impossibility Theorem]
Consider any (classical) constitution
used, with ranked voter preferences,
to select from amongst at least three candidates.
If the constitution respects
transitivity, unanimity, and independence of irrelevant alternatives,
the constitution is a dictatorship.
\end{theorem}
\noindent
Multiple proofs exist~\cite{Arrow50,Barbera_80_Pivotal,Geanakoplos05}.
Some involve a \emph{pivotal voter} $v$~\cite{Barbera_80_Pivotal,Geanakoplos05}.
If $v$ changes his/her mind
while all other preferences remain constant,
society's preference changes.
One proves first that the postulates imply
the existence of a voter slightly weaker than $v$.
This voter, one then shows, is pivotal and is a dictator.
No other dictator, one concludes, can exist.
We quantize Arrow's Theorem in the following conjecture.
\begin{conjecture}[Quantum Arrow Conjecture]
\label{conjecture:Arrow}
Every quantum constitution that respects quantum transitivity, quantum unanimity,
and quantum independence of irrelevant alternatives
is a quantum dictatorship.
\end{conjecture}
\begin{theorem} \label{theorem:Arrow}
The Quantum Arrow Conjecture is false.
\end{theorem}
\begin{proof}
We disprove the conjecture by counterexample.
The QMR constitution is combined with
a societal joint state $\sigma_{\rm soc}$
that encodes a cycle.
This combination, we show, lacks a dictator.
We have shown that QMR satisfies
quantum transitivity, quantum unanimity, and QIIA.
Satisfying the conjecture's assumptions
but not its conclusion,
QMR and cyclic voting disprove the conjecture.
For simplicity, we focus on strict pairwise preferences.
We consider, e.g., $a > b$, ignoring $a = b$.
This focus frees us to drop binary-relation symbols:
\mbox{$\ket{a b c} := \ket{a{>}b{>}c}$}.
\begin{figure}
\caption{\textbf{Digraph representation of the quantum votes
used to disprove the Quantum Arrow Conjecture.}}
\label{fig:Disproof}
\end{figure}
Suppose that society's joint state is
a product that involves a cycle:
\begin{align} \label{eq:VoterPrefs}
\sigma_{\rm soc} = \ketbra{abc}{abc} \otimes \ketbra{ cab }{ cab }
\otimes \ketbra{ bca }{ bca } \, .
\end{align}
Decoherence relative to the preference basis
preserves the state.
$ \mathcal{E} QMR$ constructs the digraph in Fig.~\ref{fig:Disproof}.
One edge points from $a$ to $b$
(because two voters prefer $a > b$, whereas one prefers $b > a$),
one edge points from $b$ to $c$,
and one edge points from $c$ to $a$.
The digraph consists of one SCC.
$ \mathcal{E} QMR$ therefore constructs the linear combination
\begin{align}
\label{eq:Proof1}
\rho'_{\rm soc} & \propto
\ketbra{abc}{abc} + \ketbra{ cab }{ cab } + \ketbra{ bca }{ bca}
\nonumber \\ & \qquad
+ \ketbra{ cba }{ cba } + \ketbra{ bac }{ bac } + \ketbra{ acb }{ acb } \, .
\end{align}
The ``give the minority a shot'' step preserves the state:
$\rho''_{\rm soc} = \rho'_{\rm soc} \, .$
The voters do not unanimously prefer
any candidate to any other:
For every voter $i$, there exists a voter $j$ such that
$\supp ( \rho_i )$ and $\supp ( \rho_j )$ occupy
subspaces labeled by distinct classical preferences.
$ \mathcal{E} QMR$ therefore does not project $\rho''_{\rm soc}$
onto any subspace: $\rho'''_{\rm soc} = \rho''_{\rm soc}$.
Equation~\eqref{eq:QMR_Help1} consists of only one term,
so $\rho_{\rm soc} = \rho'''_{\rm soc}$.
Society's quantum preference appears in Eq.~\eqref{eq:Proof1}.
$\rho_{\rm soc}$ has support on multiple subspaces,
e.g., $\mathcal{G}^{a > b}$ and $\mathcal{G}^{b > a}$.
No quantum voter preference $\rho_i$ has.
No voter is a quantum dictator,
by Definition~\ref{definition:Dictator}.
Yet $\mathcal{E}$ respects quantum transitivity, quantum unanimity, and QIIA,
by Lemma~\ref{lemma:MajProperties}.
$ \mathcal{E} QMR$ satisfies the assumptions, but violates the conclusion,
of the Quantum Arrow Conjecture.
The conjecture is therefore false.
\end{proof}
One can understand as follows why
our scheme violates the Quantum Arrow Conjecture.
The successes of quantum game theory motivate
the generalization of voting to accommodate
entangled and superposed preferences.
To introduce entanglement and superpositions,
one must formulate an election
as a general quantum process---a
preparation procedure, an evolution, and a measurement.
Classical constitutional properties must be quantized faithfully.
The quantum translations enable
the Quantum Majority-Rule constitution to respect
quantum transitivity and QIIA simultaneously.
Classical majority-rule constitutions cannot respect
transitivity and IIA simultaneously,
due to cyclic votes.
But QMR satisfies all the assumptions
in the Quantum Arrow Conjecture.
QMR, with a cyclic voter profile,
violates the conjecture.
Disproofs simpler than ours exist.
Ours offers interpretational advantages, however.
For instance, let $\mathcal{K}$ denote a quantum constitution
that outputs a superposition over all inputs.
$\mathcal{K}$ violates the conjecture.
But imposing $\mathcal{K}$ on society---choosing
society's classical preference totally randomly---makes
little economic sense.
Also, our disproof elucidates how quantization
invalidates Arrow's idea.
Classical majority rule fails to satisfy
Arrow's postulates, due to cycles.
Quantum Majority Rule is more resilient.
Quantization elevates
a classically inadequate disproof attempt
to a quantum disproof.
\section{Quantum voting tactics}
\label{section:Strategic}
Imagine that Alice, Bob, and Charlie vie for
the presidency of the American Physical Society.
Suppose that Alice and Bob have
greater chances of winning
than Charlie has.
Suppose that Charlie agrees more with Alice than with Bob.
Charlie's supporters might vote for Alice.
They would be trying to elect a president
whom they neither prefer most nor mind most.
Charlie's supporters would be practicing strategic voting.
\emph{Strategic voting} is the submission of
a preference other than one's opinion,
to secure an unobjectionable outcome,
in an election amongst three or more candidates~\cite{Barbera_01_Intro}.
We introduce \emph{quantum strategic voting}.
Voters leverage entanglement, superpositions, and interference.
We present three tactics reliant on entanglement
and one tactic reliant on interference and superpositions.
Other quantum tactics may exist and merit exploration.
To highlight the basic physics,
we focus on strict preferences,
as in the proof of Theorem~\ref{theorem:Arrow}.
For example, we consider $a > b$
to the exclusion of $a = b$.
We also focus on pure joint quantum states $\sigma_{\rm soc}$.
The strict-preferences assumption lets us compactify notation.
The classical preference
$a > b > \ldots > k > \ell > m$
has the even permutations
$m > a > b > \ldots > k > \ell$, \;
$\ell > m > a > b > \ldots > k$, etc.
These preferences are labeled $\alpha, \ldots, \mu \, .$
Each preference $\gamma$ corresponds to one anticycle.
We denote the anticycle with a bar: $\bar{\gamma}$.
For example, the cycle
$\alpha := a > b > \ldots > m$
corresponds to the anticycle
$\bar{\alpha} := m > \ldots > b > a$.
Every pure quantum preference has the form
\begin{equation} \label{eq:PureVote}
\sum_{\gamma}
( c_\gamma \ket{ \gamma }
+ c_{ \bar{\gamma} } \ket{ \bar{ \gamma } } ),
\quad {\rm wherein} \quad
\sum_\gamma \left( | c_\gamma |^2
+ | c_{ \bar{\gamma} } |^2 \right) = 1.
\end{equation}
Society's joint quantum state has the form
\begin{align}
\ket{ \sigma_{\rm soc} } =
(c_{\alpha_1} \ldots c_{\alpha_N} ) \ket{ \alpha \ldots \alpha }
+ \ldots +
(c_{{ \bar{\mu} }_1} \ldots c_{{ \bar{\mu} }_N} ) \ket{ \bar{\mu} \ldots \bar{\mu} }.
\end{align}
\subsection{Three entanglement-dependent voting tactics}
Let us simplify our quantum analog of majority rule,
now that QIIA, etc. need not concern us.
We introduce the variation \emph{QMR2}, labeled $ \mathcal{E} QMRTwo$.
QMR2 is defined as follows.
$ \mathcal{E} QMRTwo$ processes $\sigma_{\rm soc}$
as in Eqs.~\eqref{eq:QMR_Help0a} and~\eqref{eq:QMR_Help1}.
Society's joint quantum state $\sigma_{\rm soc}$ is decohered
with respect to the product of the voters' $ \mathcal{B} $'s.
$ \mathcal{E} QMRTwo$ processes each term
in Eq.~\eqref{eq:QMR_Help1} as follows.
The $j^{\rm th}$ term has the form
\begin{align}
\left( p_1^\alpha \ldots p_N^\mu \right)_j
( \chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu )_j
\equiv p_j
( \chi_1^\alpha \otimes \ldots \otimes \chi_N^\mu )_j \, .
\end{align}
The term is labeled by
a list $L_j = ( \alpha, \ldots, \mu )_j$ of classical preferences.
If most of the preferences are identical---if
most equal $\gamma$, say---the
$j^{\rm th}$ term in Eq.~\eqref{eq:QMR_Help1}
is associated with $( p_j , \gamma )$.
If no majority favors any $\gamma$,
$ \mathcal{E} QMRTwo$ chooses uniformly randomly from amongst
the classical preferences that appear with
the highest frequency in $L_j$.
$ \mathcal{E} QMRTwo$ has assembled a list $( p_j, \gamma_j )$.
Society's classical preference
is selected from amongst the $\gamma_j$'s
according to the probability distribution $\{ p_j \}$.
Entanglement can help one voter obstruct another.
Imagine that the Supreme Court justices vote via QMR2.
Suppose that Justice Alice wants to diminish Justice Bob's influence.
However Bob votes, Alice should vote oppositely.
Alice should entangle her quantum preference with Bob's.
(Given how opinionated Supreme Court justices are, Bob might not mind broadcasting his quantum preference.)
If Bob votes as in Eq.~\eqref{eq:PureVote}, Alice should form
\begin{align} \label{eq:SupremeCourt}
\sum_{\gamma}
( c_\gamma \ket{ \gamma \, \bar{\gamma} }
+ c_{ \bar{\gamma} } \ket{ \bar{ \gamma } \, \gamma } ) \, .
\end{align}
Insofar as $\gamma$ represents Bob's preference,
Alice votes oppositely, with $\bar{\gamma}$.
Even if Bob changes his mind seconds before everyone votes,
Alice need not scramble to alter her vote.
Entanglement also facilitates party-line voting, if society uses QMR2.
Suppose that Alice leads the Scientists' Party,
to which Bob and Charlie belong.
However Alice votes, Bob and Charlie wish to vote identically.
The voters should form the entangled state
\begin{align} \label{eq:GHZ}
\sum_{\gamma}
( c_\gamma \ket{ \gamma \, \gamma \, \gamma }
+ c_{ \bar{\gamma} }
\ket{ \bar{ \gamma } \, \bar{\gamma} \, \bar{\gamma} } ) \, ,
\end{align}
whose weights Alice chooses.
This state generalizes the GHZ state:
If the weights equal each other and only two candidates run,
\eqref{eq:GHZ} reduces to
$\frac{1}{ \sqrt{2} } ( \ket{ \alpha \alpha \alpha } +
\ket{ \bar{\alpha} \bar{\alpha} \bar{\alpha} } )$.
Finally, entangling voters' quantum preferences can
pare down society's possible classical preferences.
Suppose that Alice, Bob, and Charlie separately favor $\alpha$
twice as much as they prefer $\beta$.
Each voter plans to submit
$\sqrt{ \frac{2}{3} } \, \ket{ \alpha } + \sqrt{ \frac{1}{3} } \, \ket{ \beta }$.
Society's joint state would be
\begin{align} \label{eq:Entangle1}
\ket{ \sigma_{\rm soc} }
& = \left( \frac{2}{3} \right)^{ 3/2 } \ket{ \alpha \alpha \alpha }
+ \left( \frac{1}{3} \right)^{ 3/2 } \ket{ \beta \beta \beta }
\\ \nonumber & \qquad
+ \frac{2}{3^{3/2} } ( \ket{ \beta \alpha \alpha }
+ \ket{ \alpha \beta \alpha }
+ \ket{ \alpha \alpha \beta } )
\\ \nonumber & \qquad
+ \frac{ \sqrt{2} }{ 3^{3/2} } ( \ket{ \alpha \beta \beta }
+ \ket{ \beta \alpha \beta } + \ket{ \beta \beta \alpha } ).
\end{align}
If the constitution is QMR2, society might adopt $\alpha$ or $\beta$
as its classical preference.
Suppose that Alice, Bob, and Charlie misunderstand entanglement.
Eve can take advantage of their ignorance
to eliminate $\beta$ from society's possible classical preferences.
Suppose that Eve convinces the three citizens to submit
the W state
\begin{align} \label{eq:Entangle2}
\ket{ \sigma'_{\rm soc} } =
\frac{1}{ \sqrt{3} } ( \ket{ \beta \alpha \alpha }
+ \ket{ \alpha \beta \alpha } + \ket{ \alpha \alpha \beta } ).
\end{align}
This entangled analog of $\ket{ \sigma_{\rm soc} }$, Eve might claim,
represents the voters' opinion:
$\ket{ \sigma'_{\rm soc} }$ contains
twice as many $\alpha$'s as $\beta$'s.
But QMR2 cannot map $\ket{ \sigma'_{\rm soc} }$ to $\beta$.
Entangled states lead to
different possible election outcomes
than product states.
\subsection{Quantum strategic voting via interference}
Like entanglement, interference and relative phases
facilitate quantum strategic voting.
Consider a society $\mathcal{S}$
whose voters submit pure quantum preferences.
Let $\mathcal{S}$ use a variation \emph{QMR3},
denoted by $ \mathcal{E} QMRTh$, on QMR.
To illustrate QMR3 and the role of interference,
we consider the voter profile
\begin{align}
\mathcal{P} = \left\{ \ket{abc}, \frac{1}{\sqrt{2}} (\ket{ bac } + \ket{ acb }),
\frac{1}{\sqrt{2}} (\ket{ bac } + \ket{cba} ) \right\} \, .
\end{align}
Society's joint state has the form
\begin{align}
\ket{ \sigma_{\rm soc} } & =
\frac{1}{2} ( \ket{abc} \ket{bac} \ket{bac}
+ \ket{abc} \ket{bac} \ket{cba}
\nonumber \\ & \quad
+ \ket{abc} \ket{acb} \ket{bac}
+ \ket{abc} \ket{acb} \ket{cba} ) \, .
\end{align}
Similarly to $ \mathcal{E} QMR$,
$ \mathcal{E} QMRTh$ forms a digraph from
each $\ket{ \sigma_{\rm soc} }$ term.
Each digraph is inputted into Tarjan's algorithm,
which returns a list of the SCCs.
Just as $ \mathcal{E} QMR$ maps each list to a mixed state $\rho'_{\rm soc}$,
$ \mathcal{E} QMRTh$ maps the $i^{\rm th}$ list
to a superposition $\ket{ \rho_{\rm soc}^ {(i)} } \, $.
Society's quantum preference becomes
\begin{align}
\ket{ \rho_{\rm soc} }
& \propto \sum_{i = 1}^4 \ket{ \rho_{\rm soc}^ {(i)} } \, .
\end{align}
In our example,
\begin{align}
\ket{ \rho_{\rm soc} }
& = \frac{1}{ \sqrt{6} }
( \ket{bac} + \ket{bac} + \ket{abc} + \ket{acb} ) \\
& = \sqrt{ \frac{2}{3} } \; \ket{bac}
+ \frac{1}{ \sqrt{6} } ( \ket{abc} + \ket{acb} ) \, .
\end{align}
$\ket{ \rho_{\rm soc} }$ may vanish:
The QMR3 quantum circuit
may fail to output any quantum state.
If $\ket{ \rho_{\rm soc} } = 0$,
society can hold a revote.
(Because QMR3 is defined on just pure states
and does not preserve all inputs' norms,
QMR3 does not satisfy Definition~\ref{definition:Constitution}.
QMR3 can be regarded as belonging to
an extension of quantum constitutions.)
Suppose that voter 3 wishes to eliminate $bac$ from
society's possible classical preferences.
Eliminating $\ket{ bac }$ from
voter 3's quantum preference, $\ket{ \rho_3 }$, will not suffice.
Voter 3 should introduce a relative phase of $-1$
into $\ket{ \rho_3 }$.
(Alternatively, voter 3 could submit a superposition of
$\ket{ abc }$ and $\ket{acb}$.)
Society's quantum profile becomes
\begin{align}
\mathcal{P}' = \left\{ \ket{abc}, \frac{1}{\sqrt{2}} (\ket{ bac } + \ket{ acb }),
\frac{1}{\sqrt{2}} (- \ket{ bac } + \ket{cba} ) \right\}.
\end{align}
Tarjan's algorithm leads to
$ \ket{ \rho_{\rm soc} }
\propto \frac{1}{2} ( - \ket{bac} + \ket{bac} - \ket{abc} + \ket{acb} ) \, .$
Hence $\ket{ \rho_{\rm soc} } =
\frac{1}{\sqrt{2}} ( \ket{abc} - \ket{acb})$.
Keeping the undesired $\ket{bac}$ in voter 3's quantum preference
contradicts our intuitions.
Yet interfering the new $\ket{ \rho_3 }$ with the other votes
eliminates $bac$ from society's possible classical preferences.
\section{Conclusions}
We have quantized elections, in the tradition of quantum game theory.
The quantization obviates a quantum analog of Arrow's Theorem about the impossibility of a nondictatorship's having three simple properties.
Entanglement, superpositions, and interference expand voters' arsenals of manipulation strategies.
Whether other quantum strategies, unavailable to classical voters, exist
merits investigation.
So does whether monogamy of entanglement~\cite{CoffmanKW00} limits one voter's influence on others' quantum preferences.
If creating entanglement is difficult (as in many labs),
the resource theory of multipartite entanglement~\cite{Horodeckis09}
might illuminate how voters can optimize their influence.
Additionally, other voting schemes could be quantized.
Examples include proportional representation
(in which the percentage of voters who favor Party $a$ dictates
the number of government seats won by Party $a$)
and cardinal voting (in which voters grade,
rather than rank, candidates).
Finally, counterstrategies may be formulated.
Consider our first entanglement-dependent voting example:
Justice Bob of the Supreme Court prepares his vote.
Justice Alice blocks Bob's effort using entanglement.
How should Justice Bob parry? Can entanglement assist him?
This problem mirrors quantum-cryptographic problems:
A sender wishes to communicate with a receiver securely.
An eavesdropper attacks.
The eavesdropper may access quantum or only classical resources,
depending on the problem.
How can the sender and receiver parry?
We have illustrated how our ``eavesdropper,'' Justice Alice,
might wield entanglement.
How Justice Bob should counter merits thought.
These opportunities can help illuminate
how quantum theory changes the landscape
of possible outcomes and strategies in games.
\end{document}
|
\begin{document}
\title{Catalyzed entanglement concentration of qubit pairs}
\author{Siddhartha Santra}
\affiliation{US Army Research Laboratory, Adelphi, Maryland 20783, USA}
\author{Vladimir S. Malinovsky}
\affiliation{US Army Research Laboratory, Adelphi, Maryland 20783, USA}
\begin{abstract}
We analytically obtain the maximum probability of converting a finite number of copies of an arbitrary two-qubit pure state to a single copy of a maximally entangled two-qubit pure state via entanglement assisted local operations and classical communications using a two-qubit catalyst state. We show that the optimal catalyst for this transformation is always more entangled than the initial state but any two-qubit state can act as a (non-optimal) catalyst. Interestingly, the entanglement of the optimal two-qubit catalyst state is shown to decrease with that of the initial state. Entanglement assisted strategies for obtaining multiple Bell states are discussed.
\end{abstract}
\maketitle
Entanglement concentration (EC) \cite{entconc1} is the process of obtaining maximally entangled pure states given some initial number of copies, $N$, of partially entangled pure states using local quantum operations and classical communications (LQCC) \cite{entmonotones1}. Concentrated entanglement is an important resource for applications \cite{santratelescope,wilde_2017, nielsen_chuang_2010} and EC protocols are of fundamental interest in quantum information theory \cite{wilde_2017, nielsen_chuang_2010}. Various LQCC EC protocols, which work for different numbers of initial states and with varying efficiencies, are known \cite{entconc1,lo-popescu,bose1}. Although LQCC is a natural operational paradigm, where observers Alice and Bob each possess and operate only on part of a quantum system while coordinating their actions through classical communications, more efficient EC protocols can be obtained using entanglement-assisted local quantum operations and classical communications (ELQCC) \cite{catalysis1, catalysis2}. In this process, an ancillary entangled pure state, called the catalyst state, shared by Alice and Bob is utilized as part of an overall LQCC process to enhance its efficiency and the catalyst state is recovered intact at the end. Here, we analytically obtain the maximum probability of success for an EC protocol transforming $N$-copies of a two-qubit pure state to a single copy of a maximally-entangled two-qubit pure state, or Bell state, when provided with entanglement assistance in the form of a two-qubit pure state catalyst.
In the case of a large number of copies, $N\to \infty$, of a two-qubit pure state, $\ket{\alpha}=\sqrt{\alpha}\ket{00}+\sqrt{1-\alpha}\ket{11}$, a fundamental result \cite{entconc1} is that the number, $M$, of Bell states $\ket{\phi}=(\ket{00}+\ket{11})/\sqrt{2}$, obtainable using LQCC achieves the value, $M=S_{VN}(\alpha)N$, with $S_{VN}(\alpha)=-\alpha\log_2(\alpha)-(1-\alpha)\log_2(1-\alpha)$---the von Neumann entropy of the reduced initial state. The result is interpreted to mean that a fraction, $f=M/N$, of the initial states are deterministically transformed to Bell states. A single Bell state, in the limit of an asymptotic number of copies of $\ket{\alpha}$ with $\alpha\neq1$, can always be obtained with certainty. In the other limit, for $N=1$, a Bell state can be obtained only probabilistically via LQCC with the maximum probability being, $P=2(1-\alpha)<1$, \cite{lo-popescu,bose1} since without loss of generality $\alpha\geq0.5$. However, ELQCC does not increase the success probability of a transformation from a single copy of a two-qubit state to a Bell state. Therefore, in both these limits, i.e. $N\to1$ and $N\to\infty$, entanglement assistance does not help, that is, it cannot increase the number of Bell states, $M$, obtained asymptotically nor can the success probability, $P$, be increased for a single copy of $\ket{\alpha}$. However, in the intermediate regime of $N$, ELQCC can increase the expectation value of entanglement obtained in the form of maximally entangled states (of any dimension) in an EC procedure \cite{catalysis1}.
We show that for finite $N\geq 2$ entanglement assistance increases the success probability of the transformation, $\ket{\alpha}^{\otimes N}\to\ket{\phi}$. We analytically find that while all pure and entangled two-qubit states can act as catalysts for this transformation, i.e. increase its success probability, the optimal catalyst must be more entangled than the initial state $\ket{\alpha}$. Remarkably, we find that the entanglement of the optimal catalyst decreases with that of the initial state. Further, we find that the use of an ELQCC procedure for EC is most beneficial for a smaller number of copies, $N$, of the initial state. To close, we comment on ELQCC strategies to obtain multiple copies of Bell states. Obtaining catalysts for entanglement transformations is in general a difficult problem analytically while numerical searches do not provide much insight into the general properties of catalyst states.
Entanglement assistance via the presence of a catalyst state, $\ket{C}$, can enable an otherwise impossible LQCC entanglement transformation \cite{catalysis1}, i.e.,
\begin{align}
\ket{\psi}&\underset{LQCC}{\not\to}\ket{\phi}\nonumber\\
\ket{\psi}\ket{C}&\underset{LQCC}{\to} \ket{\phi}\ket{C}.
\end{align}
This result is based on Nielsen's theorem \cite{Nielsen1} which provides a criterion for allowed LQCC transformations from one pure quantum state to another. The criterion states that the transformation from an initial state $\ket{I}$ to a final state $\ket{F}$ is possible with certainty, i.e. $P(I\to F)=1$, iff the sets of the squares of the non-increasingly ordered Schmidt coefficients (OSC), $\bar{\lambda}^I=(\lambda^I_1\geq\lambda^I_2\geq...\geq\lambda^I_d)$ and $\bar{\lambda}^F=(\lambda^F_1\geq\lambda^F_2\geq...\geq\lambda^F_d)$ with respect to the bipartition that defines the local quantum systems, obey the majorization relation,
\begin{align}
\bar{\lambda}^I\preceq \bar{\lambda}^F,
\label{majrel}
\end{align}
which is shorthand to denote that $\sum_{j=1}^k \lambda^I_j\leq \sum_{j=1}^k \lambda^F_j~\forall 1\leq k \leq d$. In case of incommensurate states, i.e. where the OSCs of the initial and final states do not obey Eq.~(\ref{majrel}), Vidal \cite{vidal1} showed that the transformation from $\ket{I}\to\ket{F}$ is possible only probabilistically with the maximum probability given by,
\begin{align}
P(I\to F)=\underset{1\leq l \leq d}{\text{min}}\frac{E_l(\ket{I})}{E_l(\ket{F})},
\label{LQCCprob}
\end{align}
where $E_l(\ket{I}):=1-\sum_{j=0}^{l-1}\lambda^I_j$ and $\lambda_0=0$. For a pair of incommensurate states, Ref. \cite{catalysis1} further showed that entanglement catalysis can increase the efficiency, $P_C(I\to F)>P(I\to F)$, of probabilistic transformations. It is this approach we take to obtain catalysts that can maximize the LQCC entanglement concentration success probability of a finite number of two-qubit pure states.
For the problem of entanglement concentration of multiple copies of 2-qubit pure states, $\ket{\alpha}=\sqrt{\alpha}\ket{00}+\sqrt{1-\alpha}\ket{11}$, we have the initial and final states of the form,
\begin{align}
\ket{\psi}&=\ket{\alpha}^{\otimes N}\nonumber\\
\ket{\phi}&=(\ket{00}+\ket{11})/\sqrt{2},
\label{stateform}
\end{align}
which will be provided entanglement assistance via the catalyst state $\ket{C}$. We will first consider a fixed number of copies, $N$, in the above. Nielsen's theorem applied to the state pair of the form in Eq. (\ref{stateform}) implies the following,\\
\noindent \textbf{Proposition}: If the states $\ket{\psi}\to\ket{\phi}$ are incommensurate, no catalyst can make the transformation deterministic.
\begin{proof} First, note that incompatibility arises iff $\lambda^I_1>\lambda^F_1$ since $\lambda^F_1+\lambda^F_2=1$ and $\sum _{j=1}^{k} \lambda^I_j\leq 1 \forall 1\leq k\leq d$. Thus, the OSCs of the product states $\ket{\psi}\ket{C}$ and $\ket{\phi}\ket{C}$ remain incompatible since their largest Schmidt coefficients follow, $\lambda^I_1 c_1 > \lambda^F_1 c_1$, whereas $\sum_{j=1}^k \lambda^I_j\leq \sum_{j=1}^k \lambda^F_j~\forall ~2\leq k \leq d$. Here, $c_1$ is the square of the largest Schmidt coefficient of $\ket{C}$.
\end{proof}
For a fixed $N\geq1$, we focus on transformations $\ket{\psi}^{\otimes N}\to\ket{\phi}$ that are not possible with certainty using LQCC. The OSCs of the two states form probability vectors of length $2^N$ and are given by,
\begin{align}
\bar{\lambda}^\psi&=\{\alpha^N\geq\alpha^{N-1}(1-\alpha)\geq\alpha^{N-2}(1-\alpha)^{2}\geq...\geq(1-\alpha)^N\}\nonumber\\
\bar{\lambda}^\phi&=\{0.5\geq0.5\geq0\geq...\geq0\}
\end{align}
where the Schmidt coefficients $\alpha^{N-p}(1-\alpha)^p$ of $\ket{\psi}$ have multiplicities of ${N \choose p}$ and $0.5\leq\alpha\leq1$.
The optimal success probability for such a transformation as given by Eq.~(\ref{LQCCprob}) is,
\begin{align}
P(\psi\to\phi)=\text{min}[1,2(1-\alpha^N)].
\end{align}
For LQCC transformations that are probabilistic the minimum in the R.H.S. above is less than unity. Therefore, we have that $2(1-\alpha^N)<1\implies \alpha>(1/2)^{1/N}$.
For such states we would like to find a catalyst, $\ket{C}=\sqrt{c}\ket{00}+\sqrt{1-c}\ket{11}$, i.e. a pure state on a qubit pair that provides the largest boost to the success probability, $P_C(I\to F)$, of the transformation,
\begin{align}
\ket{I}=\ket{\psi}\ket{C}\underset{LQCC}{\to}\ket{F}=\ket{\phi}\ket{C}.
\end{align}
To obtain $P_C(I\to F)$, first we need to evaluate the terms in the R.H.S of Eq.~(\ref{LQCCprob}). This requires the OSCs of the initial and final states. The OSCs of the final state $\ket{F}$ are,
\begin{align}
\bar{\lambda}^F=\{0.5c,0.5c,0.5(1-c),0.5(1-c),0,...,0\},
\end{align}
with $0.5\leq c\leq 1$ where the zeros following the non-zero entries make the length of $\bar{\lambda}^F$ match the dimension of the initial state $\text{dim}(\ket{I})=2^N\times 2$. Now, we note that the minimization problem in Eq.~(\ref{LQCCprob}) is restricted to the first four values of $l$ since, $E_l(\ket{F})=0\forall l\geq 5$, and thus the ratios, $r_l(\alpha,c):=E_l(\ket{I})/E_l(\ket{F})=\infty$, for $l\geq 5$ do not contribute to the complexity of the minimization in our case. Therefore, only the first four monotones, $E_l$, of the initial and final states are required. These can be obtained if the first 3 entries of the OSCs of the initial and final states are known. For the final state (in the entire domain $c\in(0.5,1)$) we have that,
\begin{align}
E_1(\ket{F})&=1,\nonumber\\
E_2(\ket{F})&=1-c/2,\nonumber\\
E_3(\ket{F})&=1-c,\nonumber\\
E_4(\ket{F})&=(1-c)/2.
\label{Ef}
\end{align}
For the initial state $\ket{I}$, the OSCs can have the following two orderings (of relevance are the first three entries of each) based on the value of $c$ relative to $\alpha$,
\begin{small}
\begin{align}
\bar{\lambda}^{I_1}&=\{c\alpha^N>(1-c)\alpha^{N}> c\alpha^{N-1}(1-\alpha)>...>(1- c)(1-\alpha)^N\}
\end{align}
\end{small}
which holds for $0.5<c\leq \alpha$ whereas for $\alpha<c\leq 1$,
\begin{small}
\begin{align}
\bar{\lambda}^{I_2}&=\{c\alpha^N> c\alpha^{N-1}(1-\alpha)> (1-c)\alpha^{N}>...> (1-c)(1-\alpha)^N\}
\end{align}
\end{small}
where the first three entries of $\bar{\lambda}^{I_1}$ have multiplicities $1,1,N$, while the multiplicities for the ordered entries of $\bar{\lambda}^{I_2}$ are $1,N,1$ respectively. Thus, for the two parts of the domain for $c$ the monotones $E_l(\ket{I})$ of the initial state evaluate to,
\begin{align}
E_1(\ket{I})&=1,~c\in(0.5,1)\nonumber\\
E_2(\ket{I})&=1-c\alpha^N,~c\in(0.5,1)\nonumber\\
E_3(\ket{I})&=\begin{cases}1-\alpha^N,~~0.5<c\leq\alpha\\1-c\alpha^{N-1},~\alpha<c<1\end{cases}\nonumber\\
E_4(\ket{I})&=\begin{cases}1-\alpha^N-c\alpha^{N-1}(1-\alpha),~0.5<c\leq\alpha\\1-c\alpha^{N-1}-c\alpha^{N-1}(1-\alpha),~\alpha<c<1\end{cases}
\label{Ei}
\end{align}
From Eqs.~(\ref{Ef}) and (\ref{Ei}) we have the four ratios of the entanglement monotones as functions of $\alpha, c$ and $N$,
\begin{align}
r_1(\alpha,c,N)&=1,~c\in(0.5,1)\nonumber\\
r_2(\alpha,c,N)&=\frac{1-c\alpha^N}{1-c/2},~c\in(0.5,1)\nonumber\\
r_3(\alpha,c,N)&=\begin{cases}\frac{1-\alpha^N}{1-c},~~0.5<c\leq\alpha\\\frac{1-c\alpha^{N-1}}{1-c},~\alpha<c<1\end{cases}\nonumber\\
r_4(\alpha,c,N)&=\begin{cases}\frac{2(1-\alpha^N-c\alpha^{N-1}(1-\alpha))}{1-c},~0.5<c\leq\alpha\\\frac{2(1-c\alpha^{N-1}-c\alpha^{N-1}(1-\alpha))}{1-c},~\alpha<c<1\end{cases}
\label{R}
\end{align}
\emph{Evaluation of the minimum among the ratios of entanglement monotones:}
First, note that for $N=1$ the minimum of the ratios in the above set of equations is given by, $r_4(\alpha,c,N)=2(1-\alpha)$, which is equal to the LQCC probability without a catalyst for all values of $0.5<c<1$. Thus, a catalyst cannot help increase the success probability of a LQCC transformation of a single copy of $\ket{\alpha}$ to $\ket{\phi}$. This is consistent with the fact that catalysis is impossible when the initial and final states are both two-qubit states \cite{catalysis1}.
For $N\geq 2$, the minimum of the ratios $r_l(\alpha,c,N)$ for $l=2,3,4$ determines the probability of a successful catalyzed conversion from $\ket{\psi}^{\otimes N}\to \ket{\phi}$ (since $r_1(\alpha,c,N)=1$). For this we use the derivatives and continuity properties of $r_2,r_3,r_4$ to determine the minimum among the three. It turns out that for all values of, $\alpha>(1/2)^{(1/N)}$, the function $r_2(\alpha,c,N)$ decreases with $c$ with its maximum value $r_2^{\text{max}}=(4/3)(1-\alpha^N/2)$ as $c$ approaches $0.5$. On the other hand, the function $r_3(\alpha,c,N)$ increases with $c$ in both parts of its domain. It is continuous across the domain boundary $c=\alpha$ and has a minimum value of $r_3^{\text{min}}=2(1-\alpha^N)$ as $c$ approaches $0.5$. The minimum value of $r_2(\alpha,c,N)$ is given by $r_2^{\text{min}}=2(1-\alpha^N)$ as $c$ approaches $1$ whereas the value of $r_3(\alpha,c,N)$ diverges as $c\to1$. Therefore, for fixed $\alpha,N$ the curves for $r_2(\alpha,c,N)$ and $r_3(\alpha,c,N)$ as a function of $c$ intersect in the domain $c\in(0.5,1)$. Further, note that $r_2^{\text{max}}\geq r_3^{\text{min}}$ for $\alpha\geq(1/2)^{(1/N)}$. Finally, the minimum of the ratios is never given by the value of the function $r_4(\alpha,c,N)$ in any part of the domain $c\in(0.5,1)$ as shown in the following.
For $c\leq\alpha$, one can show that $r_4(\alpha,c,N)\geq r_3(\alpha,c,N)$ for all $N\geq2$, so that $r_4(\alpha,c,N)$ is not the least of the ratios as follows,
\begin{align}
r_4(\alpha,c,N)&=\frac{2(1-\alpha^N-c\alpha^{N-1}(1-\alpha))}{1-c}\nonumber\\
&=\frac{1-\alpha^N}{1-c}+\frac{1-\alpha^N-2c\alpha^{N-1}(1-\alpha)}{1-c}\nonumber\\
&=r_3(\alpha,c,N)+\frac{p(\alpha,c)}{1-c}
\end{align}
Now we note that the function, $p(\alpha,c,N)=1-\alpha^N-2c\alpha^{N-1}(1-\alpha)$, is a decreasing function of $c$ since $\alpha,(1-\alpha)\geq0$. So w.r.t. $c$ the function takes its minimum value at $c=\alpha$ given by, $p_{\text{min},c}(\alpha)=1+\alpha^N(2\alpha-3)$. This minimum value decreases with $\alpha$ since the sign of the derivative $dp_{\text{min},c}(\alpha)/d\alpha<0$ for $\alpha<(3/2)\frac{N}{N+1}$ which always holds for $N\geq 2$. The minimum value with respect to both arguments is at $c=\alpha$ and $\alpha=1$ and is given by $p_{\text{min},c,\alpha}=0$.
For $\alpha<c<1$, one can show that $r_4(\alpha,c,N)\geq r_3(\alpha,c,N)$ for $N\geq 3$ whereas $r_4(\alpha,c,N)\geq r_2(\alpha,c,N)$ for $N=2$, so that also in this region $r_4(\alpha,c,N)$ is not the least of the ratios as follows. From Eq. (\ref{R}) we have,
\begin{align}
r_4(\alpha,c,N)&=\frac{1-c\alpha^{N-1}}{1-c}+\frac{1-c\alpha^{N-1}-2c\alpha^{N-1}(1-\alpha)}{1-c}\nonumber\\
&=r_3(\alpha,c,N)+\frac{q(\alpha,c)}{1-c},
\end{align}
where the function, $q(\alpha,c,N)=1-c\alpha^{N-1}-2c\alpha^{N-1}(1-\alpha)=1-c\alpha^{N-1}(3-2\alpha)$, is a decreasing function of $c$. Therefore, the minimum of $q(\alpha,c,N)$ w.r.t. $c$ is at $c=1$ and is given by $q_{\text{min},c}(\alpha)=1+\alpha^{N-1}(2\alpha-3)$. This minimum value decreases with $\alpha$ if the derivative $dq_{\text{min},c}(\alpha)/d\alpha<0$ which requires $\alpha\leq (3/2)(N-1)/N$ that always holds for $N\geq3$. The minimum of $q_{\text{min},c}(\alpha)$ is therefore at $\alpha=1$ given by $q_{\text{min},c,\alpha}=0$ for $N\geq3$. For $N=2$, we have that, $(3/2)(N-1)/N=3/4$, so $q_{\text{min},c}(\alpha)<0$ for $(3/4)<\alpha<1$. However, for this range of $\alpha$ and $N=2$, we can show $r_4(\alpha,c,N=2)\geq r_2(\alpha,c,N=2)$ by evaluating their difference,
\begin{align}
&r_4(\alpha,c,N=2)-r_2(\alpha,c,N=2)\nonumber\\
&~~~~~~~~~~~~=\frac{2[1+(\frac{2c^2-4c}{\alpha}+(3c-2c^2))\alpha^2]}{(1-c)(2-c)}\nonumber\\
&~~~~~~~~~~~~=\frac{s(\alpha,c)}{(1-c)(2-c)},
\end{align}
where, $s(\alpha,c)=2[1+(\frac{2c^2-4c}{\alpha}+(3c-2c^2))\alpha^2]$. Note that the term $(2c^2-4c)$ decreases with increasing $c \forall c<1$ while the term $(3c-2c^2)$ decreases with increasing $c$ for $(3/4)<c<1$. Therefore, the minimum value of $s(\alpha,c)$ in this range is at $c=1$ given by $s_{\text{min},c}(\alpha)=2(1-\alpha)^2$ which is always greater than or equal to zero.
~~~~~~~~~~~~~~$\square$
\begin{figure}
\centering
\includegraphics[width=\columnwidth]{pic1.pdf}
\caption{Ratio of entanglement monotones as a function of the catalyst-state Schmidt coefficient, $c$, with fixed $\alpha=0.85$ and $N=2$. Shown in Blue is $r_2(0.85,c,2)$ which monotonically decreases while $r_3(0.85,c,2)$, in Green, monotonically increases with $c$. $r_4(0.85,c,2)$ shown in Red is never the minimum of the three monotones. The value of $c$ at the intersection point of the Blue and Green curves gives the optimal catalyst (vertical dashed line). The horizontal dashed line shows the probability for the LQCC transformation $\ket{\alpha=0.85}^{\otimes 2}\to\ket{\phi}$.}
\label{fig:optimalc1}
\end{figure}
These facts together imply that the maximum probability of a LQCC conversion, $\ket{I}\to\ket{F}$, is obtained where the curves for $r_2(\alpha,c,N)$ and $r_3(\alpha,c,N)$ w.r.t. $c$ intersect for a fixed $\alpha$ and $N$, see figure~(\ref{fig:optimalc1}). The intersection point, $c^{\text{opt}}(\alpha,N)$, is obtained from the solution of one of the quadratic equations, $r_2(\alpha,c,N)=r_3^{c\leq\alpha}(\alpha,c,N)$, or, $r_2(\alpha,c,N)=r_3^{c>\alpha}(\alpha,c,N)$, as given by Eq.~(\ref{R}). We find that the latter has solutions, $c=0$ or $c>1$, which are unacceptable for a physically meaningful catalyst state, whereas the former equation provides an acceptable solution,
\begin{align}
c^{\text{opt}}(\alpha,N)=\frac{1+3\alpha^N-\{(1+3\alpha^N)^2-16\alpha^{2N}\}^{1/2}}{4\alpha^N}.
\label{catalystsolution}
\end{align}
The Schmidt coefficient, $c^{\text{opt}}(\alpha,N)$, identifies a two-qubit catalyst pure state, $\ket{C^{\text{opt}}(\alpha,N)}=\sqrt{c^{\text{opt}}(\alpha,N)}\ket{00}+\sqrt{1-c^{\text{opt}}(\alpha,N)}\ket{11}$, that provides the maximum success probability in an ELQCC procedure to obtain a maximally entangled two-qubit state from $N$-copies of partially entangled pure states. This probability is given by the value of $r_2(\alpha,c^{\text{opt}},N)$ or $r_3(\alpha,c^{\text{opt}},N)$,
\begin{align}
P^{\text{max}}_{C}(I\to F)= \frac{1-\alpha^N}{1-c^{\text{opt}}(\alpha,N)}
\label{probvalue}
\end{align}
Further, since $c^{\text{opt}}(\alpha,N)<\alpha$ the optimal catalyst state is always more entangled than $\ket{\alpha}$. However, even those states, $\ket{C}=\sqrt{c}\ket{00}+\sqrt{1-c}\ket{11}$, with $c\neq c^{\text{opt}}(\alpha,N)$ can act as (non-optimal) catalysts. This is because for such states $\ket{C}$ in the region $c<c^{\text{opt}}(\alpha,N)$ the minimum of the ratios, $r_3(\alpha,c,N)$, is still greater than the LQCC transformation probability of $2(1-\alpha^N)$ as can be seen by evaluating $r_3(\alpha,c,N)$ for $c<\alpha$, see the Green curve in figure~(\ref{fig:optimalc1}). Whereas for those states in the region $c>c^{\text{opt}}(\alpha,N)$ the minimum of the ratios, $r_2(\alpha,c,N)$, is again greater than the LQCC transformation probability of $2(1-\alpha^N)$, see the Blue curve in the same figure.
We remark that the transformation $\ket{I}\to\ket{F}$ can be achieved via LOCC operations jointly on the $N$-copies of the initial state and one-copy of the catalyst state in a two step procedure \cite{Nielsen1,vidal1,lo-popescu} that we briefly outline. In the first step a temporary state $\ket{\Gamma}$ that majorises the initial state is obtained with certainty, i.e., $\ket{I}\prec\ket{\Gamma}$, via a sequence of LOCC operations on corresponding two-dimensional subspaces of Alice's and Bob's systems (of Hilbert space dimension $2^{N+1}$ each). That is, a single LOCC operation involves two-levels $\ket{i}_A,\ket{j}_A$ on Alice's systems and the corresponding two levels $\ket{i}_B,\ket{j}_B$ of Bob's systems with $i,j\in[1,2^{N+1}]$. Note that the operations on states, $\{\ket{i}_{A,B}\}_i$, involve the collective manipulation of $N$-qubits of the shared initial state and $1$-qubit of the shared catalyst state. The number of such $(\alpha,c)$-dependent two-level operations is upper bounded by $(2^{N+1}-1)$. In the second step, Bob performs a two-outcome generalized measurement on his portion of the shared state $\ket{\Gamma}$. For one of the outcomes, which occurs with probability given by Eq.~(\ref{probvalue}), the post-measurement state obtained is $\ket{F}$ therefore in this case the catalyst state is recovered along with a Bell state whereas the other outcome signals the failure of the catalytic process and the post-measurement state may be discarded.
Now, we note from Eqs.~(\ref{R}) and (\ref{catalystsolution}) the following properties,
\begin{enumerate}
\item An optimal two-qubit catalyst state always exists for $N\geq2$-copies of every state $\ket{\alpha}$ with $\alpha\in((1/2)^{1/N},1)$.
\item The optimal catalyst state is always more entangled than $\ket{\alpha}$ since $c^{\text{opt}}(\alpha,N)< \alpha$.
\item \emph{Any} pure and entangled two-qubit state can act as a catalyst, that is, it provides a positive boost to the success probability of the $\ket{\psi}^{\otimes N}\to\ket{\phi}, N\geq2$ transformation in an entanglement assisted procedure.
\item \emph{Optimal} self-catalysis is not possible, that is, $c^{\text{opt}}(\alpha,N)\neq\alpha$ for any $N$ and $\alpha<1$. However, an additional copy of the state $\ket{\alpha}$ can act as a non-optimal catalyst.
\item The optimal catalyst state $\ket{C^{\text{opt}}(\alpha,N)}$ becomes less entangled as the state $\ket{\alpha}$ becomes less entangled ($\alpha\to1$) since the derivative, $dc^{\text{opt}}(\alpha,N)/d\alpha>0$, in the region $\alpha\in(0.5,1)\forall N\geq2$.
\item Catalysis with the optimal state is more beneficial if the initial state is less entangled, that is, the ratio of LQCC success probability with optimal catalysis to that without, $\frac{P_C^{\text{max}}(I\to F)}{P(\psi\to\phi)}$ increases as $\alpha\to 1$, see figure~(\ref{fig:ratio1}).
\item Catalysis with the optimal two-qubit catalyst state is more effective for a smaller number of copies, $N$, of the initial state, see figure (\ref{fig:ratio1}).
\end{enumerate}
\begin{figure}
\centering
\includegraphics[width=\columnwidth]{fig-ratio1.pdf}
\caption{Ratio of the success probability for the transformation $\ket{\psi}^{\otimes N} \to \ket{\phi}$ using optimal catalysis to that without catalysis. The curves from left to right are for different numbers of copies $N=2,4,8,16,32$.}
\label{fig:ratio1}
\end{figure}
As a consequence of remark 3, we note that for a set of two-qubit pure states, $\mathcal{S}=\{\ket{\alpha_i}\}_i$ (none of which is a maximally entangled state), any two-qubit pure state can act as a common catalyst for all transformations,
\begin{align}
\ket{\alpha_i}^{\otimes N_i}\to\ket{\phi},~N_i\geq2.
\end{align}
For obtaining multiple copies of Bell states, higher-dimensional catalysts are more efficient\footnote{Numerically one finds higher-dimensional catalysts more efficient even for obtaining a single Bell state. For example, $\ket{C}=\sqrt{.5}\ket{00}+\sqrt{.35}\ket{11}+\sqrt{.15}\ket{22}$ is more efficient for the conversion $(\sqrt{.8}\ket{00}+\sqrt{.2}\ket{11})^{\otimes2}\to\ket{\phi}$.}. For example, the initial state $\ket{\alpha}^{\otimes N}$ (with even $N$) can be transformed to $\ket{\phi}^{\otimes m}$ with a catalyst of the form $\ket{C^{\text{opt}}(\alpha,2)}^{\otimes N/2}$ in a pairwise ELQCC procedure where the number of obtained Bell states, $m=0,1,2,\dots,N/2$, is binomially distributed. The probability of obtaining $m$ Bell states is given by $p_m=\binom{N/2}{m}p^m(1-p)^{N/2-m}$ with $p=P^{\text{max}}_{C}(I\to F)$ as in Eq.~(\ref{probvalue}), where $\ket{I}=\ket{\alpha}^{\otimes 2}\ket{C^{\text{opt}}(\alpha,2)}$ and $\ket{F}=\ket{\phi}\ket{C^{\text{opt}}(\alpha,2)}$. The expected entanglement, $\langle E\rangle=\sum_m p_m\, m=(N/2)P^{\text{max}}_{C}(I\to F)$, in this entanglement concentration procedure, which we will call strategy-1, is linear in the number of copies $N$ of the initial state $\ket{\alpha}$.
To obtain a target number, $m_*$, of Bell states, however, a different method, strategy-2, may be more beneficial. In such a strategy, the initial $N$ copies of $\ket{\alpha}$ may be grouped into $m_*$ sets, each of cardinality $N_j$, such that $\sum_{j=1}^{m_*}N_j=N$. The probability of obtaining $m_*$ Bell states will then be the maximum of the product of probabilities maximized over the sizes of the sets, $p_{m_*}=\max_{\{N_j\}_j}\prod_{j=1}^{m_*}P_j$, where $P_j$ is the probability of the transformation $\ket{\alpha}^{\otimes N_j}\to\ket{\phi}$. For sets with $N_j\geq 2$ one can use an ELQCC transformation procedure, so that for such sets $P_j=P^{\text{max}}_{C}(I\to F)$ with $\ket{I}=\ket{\alpha}^{\otimes N_j}\ket{C^{\text{opt}}(\alpha,N_j)}$ and $\ket{F}=\ket{\phi}\ket{C^{\text{opt}}(\alpha,N_j)}$. The different cardinalities, $N_j$, of the sets allow one to maximize the catalysis success probability using the appropriate catalyst $\ket{C^{\text{opt}}(\alpha,N_j)}$ for each set.
The choice of the advantageous strategy depends on the number of copies available $N$, the value of $\alpha$, and the number of copies of the Bell state $m_*$ desired as the output of the catalyzed entanglement concentration procedure. To compare strategies-1 and 2 as described above, consider as an example the case when $N=6$ and $\alpha=0.99$. If $m_*=2$ copies of Bell states are desired as output, then strategy-1 yields a probability of $0.034$ whereas strategy-2, utilizing 2 sets of 3 copies of $\ket{\alpha}$ each, yields a probability of $0.065$. On the other hand, if only a single copy of a Bell state is the desired output, i.e., $m_*=1$, then strategy-1 yields a probability of $0.391$ whereas strategy-2, utilizing 1 set of 6 copies of $\ket{\alpha}$, yields a probability of $0.362$.
It will be interesting to apply the results of catalytic entanglement concentration to increase the efficiency of entanglement distribution protocols in quantum repeaters \cite{munro_repeater}. The latter distribute entanglement over long distances by purifying and connecting entanglement generated over smaller length segments. While the entanglement generated over the segments is typically in the form of mixed states, some models of channel noise, e.g. \cite{kwiat_filtration}, can lead to non-maximally entangled shared pure states between the repeater stations. In such cases, if ELQCC is utilized to extract states with high fidelity to a Bell state in each repeater segment more efficiently than LOCC based repeater protocols, then the overall distribution rate of the repeater can benefit significantly. This would require adaptive operations at the repeater nodes since the transformation $\ket{I}\to\ket{F}$ is achieved via $\alpha$-dependent local unitaries and measurements by Alice and Bob. Copies of the initial states may be generated and stored on matter qubits that have an efficient light-matter interface while storing the catalyst state in long-lived quantum memories \cite{Simon2010} at the repeater nodes during the ELQCC process. This may allow the reuse of the catalyst state multiple times as allowed by the transformation success probability. Quantum repeater architectures based on the combination of qubits with excellent communication properties and those with long lifetimes, e.g. \cite{Santra_20192}, can thus be good candidates to exploit catalytic entanglement concentration.
In summary, we analytically obtained a two-qubit catalyst pure state that maximizes the success probability of an entanglement assisted LQCC procedure to convert a given number of copies of a partially entangled pure state to a single copy of a maximally entangled two-qubit state. The supplied entanglement assistance is minimal since the catalyst is an entangled state of Schmidt rank equal to 2. Although a higher rank catalyst cannot make the transformation deterministic, the maximum transformation success probability with a catalyst of any rank is an open question. In contrast with numerical searches for catalyst states, the analytical derivation of the optimal catalyst state reveals multiple properties of the catalytic process and raises interesting questions about possible applications.
{\it Acknowledgements:-} We thank one anonymous referee for many useful comments and suggestions.
\end{document}
|
\begin{document}
\begin{center}
{\Large\textbf{Stable Matching Games}}\\
{\normalsize\textit{Felipe Garrido-Lucero$^1$, Rida Laraki$^{1,2}$}}\\
\quad\\
{\small\textit{1. LAMSADE (CNRS, UMR 7243), University of Paris Dauphine-PSL}}\\
{\small\textit{2. University of Liverpool, Computer Science Department}}
\end{center}
\begin{abstract}
Gale and Shapley introduced a matching problem between two sets of agents where each agent on one side has an exogenous preference ordering over the agents on the other side. They defined a matching as stable if no unmatched pair can both improve their utility by forming a new pair. They proved, algorithmically, the existence of a stable matching. Shapley and Shubik, Demange and Gale, and many others extended the model by allowing monetary transfers. We offer a further extension by assuming that matched couples obtain their payoff endogenously as the outcome of a strategic game they have to play in a usual non-cooperative sense (without commitment) or in a semi-cooperative way (with commitment, as the outcome of a bilateral binding contract in which each player is responsible for his/her part of the contract). Depending on whether the players can commit or not, we define in each case a solution concept that combines Gale-Shapley pairwise stability with a (generalized) Nash equilibrium stability. In each case, we give the necessary and sufficient conditions for the set of stable allocations to be non-empty, we study its geometry (full/semi-lattice), and provide an algorithm that converges to its maximal element. Finally, we prove that our second model---with commitment---encompasses and refines most of the literature (matching with monetary transfers as well as matching with contracts).
\end{abstract}
\noindent{\footnotesize\textbf{Keywords.} Stable Matching $\cdot$ Generalized Nash Equilibrium $\cdot$ Zero-sum Games $\cdot$ Potential Games $\cdot$ Infinitely repeated Games $\cdot$ Matching with Contract $\cdot$ Matching with Transfer}
\section{Introduction}\label{sec:introduction}
The Gale and Shapley \cite{gale1962college} two-sided market matching problem consists in finding a ``stable'' pairing between two different sets of agents $M$ and $W$ given that each agent on one side has an exogenous preference ordering over the agents on the other side.
The marriage problem focuses on a coupling $\mu$ that associates each agent on one side with at most one agent on the other side. The coupling $\mu$ is stable if no uncoupled pair of agents $(m,w)\in M\times W$, both prefer to be paired together rather than with their partners in $\mu$. Gale and Shapley \cite{gale1962college} used a ``propose-dispose'', also called ``deferred-acceptance'', algorithm to prove the existence of a stable matching for every instance. Knuth \cite{knuth1976marriages} proved a lattice structure over the set of stable matchings (mentioned in Gale and Shapley \cite{gale1962college}). Gale and Sotomayor \cite{gale1985some} showed that the algorithm in which men are proposing outputs the best stable matching for men.
Shapley and Shubik \cite{shapley1971assignment} extended the model by allowing monetary transfers. Demange and Gale \cite{demange1985strategy} considered more general utility functions for money (non-quasi-linear), allowed monetary transfers on both sides (from buyer to seller and vice-versa) and proved that the set of stable allocations has a lattice structure (non-emptiness of this set has been proved in \cite{crawford1981job,quinzii1984core}). Hatfield and Milgrom \cite{hatfield2005matching}, extended the Demange-Gale model to a one-to-many setting by allowing couples to sign a ``binding contract''. Under monotonicity assumptions allowing the use of Tarski's fixed point theorem, they proved that the set of stable allocations is a non-empty lattice.\footnote{A pioneering article in the extension of Gale-Shapley's model to a many-to-many setting was made by Blair \cite{blair1988lattice}, studying the case in which workers and firms must form partnerships (workers can work in several firms at the same time). It studied the lattice structure of the set of stable allocations with salaries, extending \cite{kelso1982job} and \cite{knuth1976marriages}.} Chiappori and Reny \cite{chiappori2016matching} studied a model where men and women must form couples and, simultaneously, determine a sharing rule for splitting their total income.
In real life bilateral markets, to be attractive, an agent can take actions that cannot be modeled by monetary transfers. When a firm hires a worker, it can combine the monetary transfer with employee perks: medical insurance, gym, extra time-off, flexible schedule, childcare assistance. The worker can promise to be flexible, work hard, learn new technologies, and be respectful of the company code of conduct. When a university hires a professor, it can reduce or increase its teaching duties, require a minimum number of top publications, ask for some responsibilities in the department, etc. The professor can promise or not to publish in top journals, be an excellent teacher, apply to/win grants, accept some responsibilities, organize a seminar, and supervise Ph.D. students. All those actions are individual decisions that can be put explicitly or implicitly in a contract but each agent is responsible regarding her own part of the contract. Each agent will do what is needed to be accepted by the other party and will refuse to engage if he/she judges the partner's proposition is insufficient.
To model this, we extend the above matching models by supposing that individual members of a couple $(i,j) \in M \times W$, obtain their payoffs as the output of a strategic game $G_{i,j} = (X_i , Y_j , U_{i,j},$ $V_{i,j})$, that they have to play, where $X_i$ is $i$'s action/strategy set, $Y_j$ is $j$'s action/strategy set, and $U_{i,j}, V_{i,j} : X_i \times Y_j \rightarrow \mathbb{R}$ are the utility functions of $i$ and $j$, respectively. Hence, if $i$ and $j$ are married, $i$ chooses to play $x_i$ and $j$ chooses to play $y_j$, $i$'s and $j$'s final utilities are $U_{i,j}(x_i,y_j)$ and $V_{i,j}(x_i,y_j)$, respectively. An outcome/allocation of the matching game, called a {\it matching profile}, is a triple $(\mu,x,y)$ with $\mu$ a matching between $M$ and $W$, $x=(x_i)_{i\in M} \in \prod_{i \in M} X_i$ a strategy profile for all agents in $M$, and $y=(y_j)_{j\in W}\in \prod_{j \in W} Y_j$ a strategy profile for all agents in $W$. For example, a matching problem with linear transfers can be represented by a family of constant-sum games where the set of strategies are $X_i=Y_j=\mathbb{R}^+$, and the payoff functions are $U_{i,j}(x_i,y_j)=-x_i+y_j+a_{i,j}$ and $V_{i,j}(x_i,y_j)=x_i-y_j+b_{i,j}$, with $a_{i,j}$ and $b_{i,j}$ representing the utility of being with the partner when there is no transfer.
Suppose that some centralized or decentralized process leads to a matching profile where agents (men/women or workers/firms) are matched in pairs and each matched player is intended to play some action. We want to formulate the necessary conditions for that matching profile to be sustainable. We will consider two static stability notions which depend on the players' level of commitment before they play their game. This is in line with the literature on matching which does not describe the precise process but rather defines a static notion of stability and proves, using for example a ``propose-dispose'' algorithm, the existence of such a stable allocation. Each algorithm can be interpreted as a process that converges to some particular stable matching. For example, in the Gale-Shapley model, their propose-dispose process converges to the best stable matching for the proposer side.
The first studied case is when a matched couple $(i,j)$ \textbf{cannot commit, i.e. they cannot sign a binding contract before they play their game}. In this case, for the players not to deviate from the intended actions, the latter must constitute a Nash equilibrium of $G_{i,j}$. Thus, a matching profile $(\mu,x,y)$ will be called \textit{Nash stable} if (a) all matched couples play a Nash equilibrium of their game (i.e. the matching profile is internally Nash stable (Definition \ref{def:internally_Nash_stable_matching_profile})) and (b) no pair of agents $(i',j')$, that are not already a couple, can jointly deviate to some Nash strategy profile $(x'_{i'},y'_{j'})$ in their game $G_{i',j'}$ that Pareto improves their payoffs (i.e. the matching profile is externally Nash stable (Definition \ref{def:externally_Nash_stable_matching_profile})). This last condition is a natural extension of the Gale-Shapley pairwise stability condition. Using a propose-dispose algorithm, it is proved that whenever all games $G_{i,j}$ admit a non-empty compact set of Nash equilibria, a Nash stable matching profile exists. In addition, a semi-lattice structure holds: the maximum between two Nash stable matching profiles with respect to men's preferences (resp. women's preferences) is Nash stable. Of course, when all games $G_{i,j}$ have a unique Nash equilibrium payoff (as in strictly competitive games), the model becomes a classical Gale-Shapley problem and so, the usual lattice structure is recovered.
It is important to remark that games with linear transfers, like the ones described above, are constant-sum games where the unique Nash equilibrium is $(x_i^*,y_j^*)=(0,0)$, as the null transfer is a strictly dominant strategy. For positive transfers to occur, players must be able to commit. This is implicitly assumed in the literature of matching with transfers.
The second studied case corresponds to the one in which \textbf{players can commit (e.g. by signing binding contracts or because the game is infinitely repeated and so any deviation from the agreed stationary strategy profile at some stage is immediately punished the next stage by a break of the relation)}. A matching profile $(\mu,x,y)$ is called \textit{externally stable} (Definition \ref{def:externally_stable_matching_profile}) if no pair of agents $(i,j) \notin \mu$ can jointly deviate to some strategy profile $(x'_{i},y'_{j})$ in their game $G_{i,j}$ that Pareto improves their payoffs. A similar propose-dispose algorithm to the one without commitment allows us to prove that, if all the strategic games $G_{i,j}$ have compact strategy spaces and continuous payoff functions, the matching game admits an externally stable matching profile. As above, a semi-lattice structure holds as well: the maximum between two externally stable matching profiles with respect to men's preferences (resp. women's preferences) is externally stable. Even more, when all games $G_{i,j}$ are constant-sum games (or more generally, are strictly competitive games) a lattice structure holds. This extends Shapley-Shubik's and Demange-Gale's models as they are particular instances where the games $G_{i,j}$ are strictly competitive.
As the players must be choosing their actions optimally, a \textbf{constrained Nash equilibrium} condition must naturally hold. An externally stable matching profile $(\mu,x,y)$ is \textit{internally stable} (Definition \ref{def:internally_stable_matching_profile}) if any profitable deviation of a player in its game decreases the partner's payoff below his/her market outside option. Said differently, fixing $y_j$, $x_i$ maximizes $i$'s payoff under the constraint of participation by $j$, and vice-versa. Putting all together, our solution concept combines a cooperative notion (Gale-Shapley pairwise stability) with a non-cooperative notion (a generalized Nash equilibrium). A similar solution concept is used in network formation games: fixing the network, each player's action must maximize its payoff, and for each link in the network, both players must agree to form that link (see Jackson and Wolinsky \cite{jackson1996strategic}, Bich and Morhaim \cite{bich2017existence}). Our model can be seen as a particular network game model where only bi-party graphs are possible and a link is formed if a man and a woman agree to match.
We define a class of strategic games (called feasible games (Definition \ref{def:feasible_game})) which admit constrained Nash equilibria (Definition \ref{def:constrained_Nash_eq}) for any pair of outside options and prove that: (a) when all games $G_{i,j}$ are feasible, a new algorithm, if it converges, reaches an externally and internally stable matching profile and (b) this new algorithm converges when all games are constant-sum, strictly competitive, potential or infinitely repeated, as well as a combination of those games. As strictly competitive games are feasible, Shapley-Shubik's and Demange-Gale's results are recovered and refined.
There are different ways of associating a strategic game to a Shapley-Shubik's \cite{shapley1971assignment}, a Demange-Gale's \cite{demange1985strategy} or a Hatfield-Milgrom's \cite{hatfield2005matching} model. Our external stability notion always coincides with their pairwise stability, but each strategic game modeling induces, thanks to internal stability, a different selection among the pairwise stable outcomes. For example, suppose that in Shapley-Shubik's model, there is only one seller and one buyer, the seller has cost $c$ for the house and the buyer has a value $v$ for it. If $v<c$, there is no possibility of trading. Otherwise, the surplus $v-c$ is positive and any price $p$ between $c$ and $v$ corresponds to an externally stable matching profile. If the strategic interaction between the buyer and the seller is an ultimatum bargaining game \cite{abreu2000bargaining} where the seller is the first proposer (resp. the buyer is the first proposer), the externally and internally stable selected outcome is $p=v$ (resp. $p=c$). On the other hand, if the strategic interaction is modeled by the Rubinstein alternating offers bargaining game \cite{osborne2019bargaining} with equally patient players, the externally and internally stable outcome is the Nash bargaining solution $p=(v+c)/2$. Therefore, different bargaining games between buyers and sellers induce, due to internal stability, different sharing of the surplus. This is not modeled in the standard literature with transfers and is, probably, the main contribution of our paper.
The paper is structured as follows: Section \ref{sec:model_and_definition_of_a_matching_profile} introduces the model of matching games and its allocations. Section \ref{sec:external_and_internal_Nash_stability} continues with the stability model without commitment while Sections \ref{sec:external_and_internal_stability} and \ref{sec:feasible_games_and_internal_stability} study the stability model with commitment. In both models the existence and computation of stable allocations is presented, as well as a study of the (full/semi) lattice structure of the set of solutions. Feasible games and constrained Nash equilibria, probably the main technical and conceptual contribution of this article, are presented in Section \ref{sec:feasible_games_and_internal_stability}. Section \ref{sec:electricity_application} explains a possible application of our model to the electricity market. Section \ref{sec:conclusions} concludes the article by discussing some further extensions. Finally, the appendix contains the missing proofs and many additional results.
\section{Model and definition of a matching profile}\label{sec:model_and_definition_of_a_matching_profile}
Throughout the paper, we consider two finite sets of agents $M$ and $W$ that we refer to as men and women. But, as in Gale-Shapley, we expect our model to be useful in many applications (doctors/hospitals, buyers/sellers, firms/workers, etc). The cardinalities of $M$ and $W$ are denoted $|M|$ and $|W|$ respectively, and typical elements are denoted $i\in M$ and $j\in W$.
\begin{definition}\label{def:matching}
A \textbf{matching} $\mu$ is a mapping between $M$ and $W$ where each agent on one side is matched to at most one agent on the other side. If $i\in M$ and $j \in W$ are matched in $\mu$, we will denote indistinctly $j = \mu_i$ or $i = \mu_j$.
\end{definition}
When a couple $(i,j) \in M \times W$ forms, they get their payoffs as the output of a strategic game $G_{i,j} := (X_i,Y_j, U_{i,j},$ $V_{i,j})$, where $X_i, Y_j$ are the strategy sets of man $i$ and woman $j$, respectively, and $U_{i,j}, V_{i,j} : X_i \times Y_j \to \mathbb{R}$ are their payoff functions. Denote by $X:= \prod_{i \in M} X_i$ and $Y:= \prod_{i \in W} Y_i$ the spaces of strategy profiles. Further assumptions (such as compactness and continuity) over the strategy sets and payoff functions will be specified later.
\begin{definition}\label{def:men_and_women_matching_profiles}
A \textbf{men action profile} (resp. \textbf{women action profile}) is a vector $x = (x_1,...,x_{|M|})$ $\in X$ (resp. $y=(y_1,...,y_{|W|})\in Y$). A \textbf{matching profile} is a triple $\pi=(\mu,x,y)$ in which $\mu$ is a matching, $x$ is a men action profile and $y$ is a women action profile.
\end{definition}
Given a matching profile $\pi=(\mu,x,y)$, the \textbf{players utilities} are defined by $u_i(\pi):= U_{i,\mu_i}(x_i,y_{\mu_i}),$ $\forall i \in M$, $v_j(\pi):= V_{\mu_j,j}(x_{\mu_j},y_j)$, $\forall j \in W$; that is, the payoff obtained in the game played with the partner. As $M$ and $W$ may have different sizes, some agents may remain single. It is natural to suppose that each agent has a utility of being single, and that this utility is also his/her \textbf{individually rational payoff} (IRP): he/she accepts a partner only if the payoff of their game is at least his/her IRP. Formally, each man $i \in M$ (resp. $j \in W$) will be attributed a value $\underline{u}_i \in \mathbb{R}$ (resp. $\underline{v}_j \in \mathbb{R}$), which constitutes the utility of being single. As usual, we extend the agent sets $M$ and $W$ by adding to each of them the so-called \textbf{empty players} $i_0,j_0$ who, in the algorithms, follow the following rules: (1) empty players can be matched with as many agents as needed, but never between them and (2) any player matched with an empty player receives his/her IRP as payoff. We denote $M_0 = M \cup \{i_0\}$ and $W_0 = W \cup \{j_0\}$.
\begin{definition}
A tuple $\Gamma = (M_0,W_0, (G_{i,j})_{i,j \in M \times W}, \underline{u}, \underline{v})$ will be called a \textbf{matching game}.
\end{definition}
To illustrate our model, we consider the three following leading examples.
\begin{example}\label{ex:one_couple_example}
Consider a matching game with only one man $i$ and one woman $j$, both having strictly positive IRPs $\underline{u}_i = \underline{v}_j = \delta > 0 $. Suppose that if they agree to match, they play a constant-sum game $G_{i,j} = (\mathbb{R}_+,\mathbb{R}_+,U_{i,j},V_{i,j})$ with $U_{i,j}(x_i,y_j) = 10\delta - x_i + y_j$, $V_{i,j}(x_i,y_j) = x_i - y_j$, for $x_i,y_j \geq 0$. The utility function of the man represents his basic utility of $10\delta$ from being married with $j$, minus the money $x_i$ he pays, plus the money $y_j$ he receives. The utility of the woman is the money transfer $x_i$ she gets minus the money transfer $y_j$ she pays (her basic utility is zero). Hence, without transfers, $i$ is 10 times happier if he is matched with $j$ compared to being single, while $j$ is worse off. Pareto-optimal outcomes are: $i$ offers a money transfer $x_i \in [\delta, 9\delta]$ to $j$ and she accepts being married with him.
\end{example}
\begin{example}\label{ex:prisoners_dilemma_example}
Consider a matching game with only one man $i$ and one woman $j$, both having strictly positive IRPs $\underline{u}_i = \underline{v}_j = \delta > 0 $. Suppose that if they agree to match, they play the following prisoners' dilemma $G_{i,j}$,
\begin{center}
\begin{tabular}{c|c|c}
& C & B \\
\hline
\ C \ & $2\delta, 2\delta$ & $0,3\delta$ \\
\hline
B & $3\delta,0$ & $-\delta, -\delta$
\end{tabular}
\end{center}
The Nash equilibrium of $G_{i,j}$ cannot be a stable outcome as agents' IRPs are higher than the payoff they receive when both betray. On the other hand, if agents agree to match and to cooperate, both end up being better off than remaining single.
\end{example}
\begin{example}\label{ex:coordination_game_example}
Consider a matching game with only one man $i$ and one woman $j$, both having strictly positive IRPs $\underline{u}_i = \underline{v}_j = \delta > 0 $. Suppose that if they agree to match, they play a coordination game:
\begin{center}
\begin{tabular}{c|c|c}
& A & B \\
\hline
\ A \ & $4\delta, \delta/2$ & $0,0$ \\
\hline
B & $0,0$ & $\delta/2, 4\delta$
\end{tabular}
\end{center}
None of the pure Nash equilibria of $G_{i,j}$ can be stable, as one of the players is worse off than being single.
\end{example}
In the sequel, we will obtain various predictions for the stable allocations of each example, depending on the solution concept used to solve it. As in Gale-Shapley, we are interested in matching profiles that satisfy a certain stability condition. In our model, the meaningful notion depends on the players' ability to commit. Hence, we consider two solution concepts, one without binding contracts and a second with binding contracts. In the first solution concept, players cannot be forced to respect the agreed actions, in the second, players can enforce the agreement by signing a binding contract.
\section{Model without commitment: Nash stability}\label{sec:external_and_internal_Nash_stability}
Suppose $i$ and $j$ agree to match and intend to play respectively the actions $x_i$ and $y_j$. If no specific reason forces them to respect that agreement (no binding contracts, no possibility of future punishment in repeated interaction) then, for $(x_i,y_j)$ to be stable, it must constitute a Nash equilibrium of $G_{i,j}$.
\begin{definition}\label{def:internally_Nash_stable_matching_profile}
A matching profile $\pi = (\mu,x,y)$ is \textbf{internally Nash stable} if for any matched couple $(i,j) \in \mu$, $(x_i,y_j)$ is a Nash equilibrium of $G_{i,j}$, that is $(x_i,y_j)\in \text{N.E}(G_{i,j})$.
\end{definition}
Since each player can remain single or be coupled with a better partner, a pairwise stability condition \`{a} la Gale-Shapley must also be satisfied for a matching profile to be stable.
\begin{definition}\label{def:externally_Nash_stable_matching_profile}
A matching profile $\pi = (\mu,x,y)$ is \textbf{externally Nash stable} if,
\begin{itemize}[leftmargin = 0.6cm]\setlength\itemsep{0em}
\item[(a)]For any $i \in M$ and $j \in W$, $u_i(\pi) \geq \underline{u}_i$ and $v_j(\pi) \geq \underline{v}_j$,
\item[(b)]There is no $(i,j) \in M \times W$, not matched between them by $\mu$, and no Nash equilibrium $(x'_i,y'_j) \in N.E(G_{i,j})$ such that $U_{i,j}(x'_i,y'_j) > u_i(\pi)$ and $V_{i,j}(x'_i,y'_j) > v_j(\pi)$.
\end{itemize}
\end{definition}
Condition (a) above says that no matched agent wants to break his/her couple and remain single. Condition (b) says that $\pi$ does not admit Nash-blocking pairs, i.e. there is no pair $(i,j) \in M \times W$, not matched by $\mu$, that can be paired and play a Nash equilibrium in their game that strictly improves their payoffs in $\pi$. A matching profile that is externally and internally Nash stable will be called \textbf{Nash stable}.
\subsection{Existence and computation of Nash stable allocations}
The next theorem proves that, under the usual Nash equilibrium existence conditions, a Nash stable matching exists and can be computed algorithmically.
\begin{theorem}\label{teo:Existence_Nash_Stable}
If for any couple $(i,j)$ the set of Nash equilibria of the game $G_{i,j}$ is non-empty and compact,\footnote{Observe that if in all games $G_{i,j}$, the strategy sets are convex and compact and the utility functions are own-quasi-concave and continuous, or are discontinuous but better-reply-secure \cite{reny1999existence}, then the set of Nash equilibria is non-empty and compact.} and the payoff functions are continuous, then the set of Nash stable matching profiles is also non-empty and compact.
\end{theorem}
Instead of proving directly Theorem \ref{teo:Existence_Nash_Stable}, we prove a more general result. Suppose that each potential couple $(i,j)$ is restricted to choose their contracts from $C_{i,j} \subseteq X_i \times Y_j$. A matching profile will be externally stable with respect to the sets $C_{i,j}$ if no pair of agents $(m,w)$ can match together, play a contract in their set $C_{m,w}$, and increase strictly their payoff with respect to the ones in the matching profile.
\begin{theorem}\label{teo:existence_ext_stable_alloc_general_case}
If all sets $(C_{i,j} : (i,j) \in M \times W)$ are non-empty and compact, and the payoff functions are continuous, then there exists an externally stable matching profile with respect to the sets $C_{i,j}$.
\end{theorem}
Theorem \ref{teo:Existence_Nash_Stable} is a corollary of Theorem \ref{teo:existence_ext_stable_alloc_general_case} when considering $C_{i,j} = N.E(G_{i,j}), \forall (i,j)$. Indeed, external Nash stability is guaranteed by the theorem, while internal Nash stability holds as all couples are restricted to play only Nash equilibria. Theorem \ref{teo:existence_ext_stable_alloc_general_case} is proved in two steps:
\begin{itemize}[leftmargin = 0.5cm]\setlength\itemsep{0em}
\item[1.] An algorithm is designed for computing an $\varepsilon$-approximation of an externally stable allocation.
\item[2.] As all the sets $C_{i,j}$ are compact and the payoff functions are continuous, accumulation points as $\varepsilon \to 0$ exist and any accumulation point is an externally stable matching profile.
\end{itemize}
The pseudo-code of the algorithm used in step 1 (Algorithm \ref{Algo:Propose_dispose_algo_general_case}) is analogous to the first of the two algorithms proposed by Demange, Gale, and Sotomayor \cite{demange1986multi}. Our propose-dispose algorithm takes one of the sides (the men for the rest of the article) and asks its agents to propose, one by one, a contract from the set $C$ to the agents at the other side. A proposed agent, if already matched, decides between keeping the current partner or replacing it by the proposer. Therefore, each iteration of Algorithm \ref{Algo:Propose_dispose_algo_general_case} has two phases: a \textbf{proposal} and a \textbf{competition}. For a numerical example, check Appendix \ref{sec:appendix_numerical_example}.
\begin{algorithm}[th]
\textbf{Input}: $\Gamma = (M_0,W_0, (G_{i,j} : (i,j) \in M \times W), \underline{u}, \underline{v})$ a matching game, $\varepsilon \bi 0$
Set $M' \leftarrow M$ as the set of single men, and $v_j(\pi) \leftarrow \underline{v}_j, \forall j \in W$
\While{$M' \neq \emptyset$}{
Let $i \in M'$. Compute his \textbf{optimal proposal}
$$(j,x,y) \in \argmax\{U_{i,j}(x,y) : V_{i,j}(x,y) \geq v_j(\pi)+ \varepsilon, j \in W_0, (x,y) \in C_{i,j} \}$$
\If{$j$ is single}{$i$ is automatically accepted. Come back to step \textbf{3}}
\Else{$i$ and $\mu_j$ \textbf{compete} for $j$ as in a second-price auction. The winner passes to be the new partner of $j$ and the loser is included in $M'$. Come back to step \textbf{3}}}
\caption{$\varepsilon$-Externally stable matching profile computation}
\label{Algo:Propose_dispose_algo_general_case}
\end{algorithm}
\begin{itemize}[leftmargin = 0.3cm]\setlength\itemsep{0em}
\item[$\bullet$] \textbf{Proposal phase.} Let $i \in M'$ be a proposer man. Given the current matching profile $\pi$ (initially empty), which generates a women's payoff vector $v(\pi) = (v_j(\pi))_{j \in W}$, $i$ computes his optimal proposal as,
\begin{align}\label{eq:problem_P_i}
(j,x,y) \in \argmax\left\{ U_{i,j}(x,y) : V_{i,j}(x,y) \geq v_j + \varepsilon, j \in W_0, (x,y) \in C_{i,j}\right\}
\end{align}
Problem (\ref{eq:problem_P_i}) is always feasible as $i$ can always propose to $j_0$. If $j$ is single, $i$ is automatically accepted and the algorithm picks a new proposer in $M'$.
\item[$\bullet$] \textbf{Competition phase.} If the proposed woman $j$ is matched, namely with a man $i'$, a competition between $i$ and $i'$ starts. In the stable marriage problem, the competition is the simple comparison between the places that $i$ and $i'$ occupy in $j$'s ranking. In our case, as agents have strategies, a competition is analogous to a second-price auction. Let $\beta_i, \beta_{i'}$ be the lowest payoff that $i$ and $i'$ are willing to accept for being with $j$ (their reservation prices). $i$'s bid $\lambda_i$ (and analogously the one of $i'$) is computed by,
\begin{align}\label{eq:problem_P_max}
\lambda_i := \max\left\{ V_{i,j}(x,y) : U_{i,j}(x,y) \geq \beta_i, (x,y) \in C_{i,j} \right\}
\end{align}
Then, the winner is the one with the highest bid. For ending the competition, the winner, namely $i$, decreases his bid until matching the one of the loser. Formally, $i$ solves,
\begin{align}\label{eq:problem_P_new}
\max\left\{ U_{i,j}(x,y): V_{i,j}(x,y) \geq \lambda_{i'}, (x,y) \in C_{i,j}\right\}
\end{align}
The loser is included in $M'$ and a new proposer is chosen.
\end{itemize}
\begin{remark}
A defeated man $i$ cannot propose right away to the same woman. This is crucial for the convergence of the propose-dispose algorithm.
\end{remark}
\begin{remark}
The output of the algorithm corresponds to an $\varepsilon$-approximation. This is in line with the matching literature with transfer \cite{demange1986multi,hatfield2005matching,kelso1982job}. The problem of computing a $0$-stable allocation remains open, in our case as well as in the literature with transfers.
\end{remark}
We focus on proving that Algorithm \ref{Algo:Propose_dispose_algo_general_case} ends in finite time and that its output corresponds to an $\varepsilon$-externally stable matching profile (Definition \ref{def:epsilon_externally_stable_matching_profile}). Appendix \ref{sec:appendix_stability} contains some required technical results.
\begin{theorem}\label{teo:propose_dispose_algo_ends_in_finite_time}
Algorithm \ref{Algo:Propose_dispose_algo_general_case} ends in finite time.
\end{theorem}
\begin{proof}
Since the strategy sets are compact and the payoff functions are continuous, they are bounded. Proposition \ref{prop:women_payoff_are_increasing} implies that the algorithm ends in a finite number of iterations.
\end{proof}
\begin{definition}\label{def:epsilon_externally_stable_matching_profile}
Let $\pi = (\mu,x,y)$ be a matching profile. A pair $(i,j) \notin \mu$ is an $\varepsilon$\textbf{-blocking pair} if there exists a strategy profile $(x'_i,y'_j) \in C_{i,j}$ such that $U_{i,j}(x'_i,y'_j) > u_i(\pi) + \varepsilon$ and $ V_{i,j}(x'_i,y'_j) > v_j(\pi) + \varepsilon$. A matching profile is $\varepsilon$\textbf{-externally stable} if it does not have any $\varepsilon$-blocking pair.
\end{definition}
\begin{theorem}\label{teo:propose_dispose_algo_is_correct}
The matching profile $\pi$, output of Algorithm \ref{Algo:Propose_dispose_algo_general_case}, is $\varepsilon$-externally stable.
\end{theorem}
\begin{proof}
Let $(i,j)$ be an $\varepsilon$-blocking pair of the matching profile $\pi := (\mu,x,y)$. Let $T$ be the last iteration at which $i$ proposed. In particular, $i$ proposed to $\mu_i$ and not $j$, and for any posterior proposal to $\mu_i$, $i$ won the competition. Since $\mu_i$ won all the posterior competitions, in particular $u_i(\pi) \geq \max\{U_{i,j'}(\bar{x},\bar{y}) : V_{i,j'}(\bar{x},\bar{y}) \geq v_{j'}(\pi) + \varepsilon , j' \in W_0, (\bar{x},\bar{y}) \in C_{i,j'} \}$ (Proposition \ref{prop:man_proposer_guarantees_improvement_of_epsilon} and Proposition \ref{prop:winner_man_plays_feasible_payoff}). Since $(i,j)$ is an $\varepsilon$-blocking pair, there exists $(\bar{x},\bar{y}) \in C_{i,j}$ such that $U_{i,j}(\bar{x},\bar{y}) \bi u_i(\pi)+ \varepsilon$ and $V_{i,j}(\bar{x},\bar{y}) \bi v_j(\pi) + \varepsilon$. Then $u_i(\pi) \sm \max\{U_{i,j'}(\bar{x},\bar{y}) : V_{i,j'}(\bar{x},\bar{y}) \geq v_{j'}(\pi) + \varepsilon , j' \in W_0, (\bar{x},\bar{y}) \in C_{i,j'} \}$, and we obtain a contradiction.
\end{proof}
From the existence of $\varepsilon$-externally stable matching profiles, we are finally able to prove Theorem \ref{teo:existence_ext_stable_alloc_general_case}, passing through the compactness of the contract sets, continuity of payoff functions, and the finiteness of players.
\begin{proof}{\textbf{Theorem \ref{teo:existence_ext_stable_alloc_general_case}}.}
Consider $\varepsilon \bi 0$. Let $\pi_{\varepsilon} := (\mu_{\varepsilon}, x_{\varepsilon}, y_{\varepsilon})$ be the output of Algorithm \ref{Algo:Propose_dispose_algo_general_case}. By Theorem \ref{teo:propose_dispose_algo_is_correct}, $\pi_{\varepsilon}$ is an $\varepsilon$-externally stable matching profile. Consider a sequence of these profiles $(\pi_{\varepsilon})_{\varepsilon}$ with $\varepsilon$ going to $0$, and a subsequence $(\pi_{\varepsilon_k})_k$ such that $(x_{\varepsilon_k}, y_{\varepsilon_k})_k$ converges to a fixed contract $(x,y)$, which exists as the sets $(C_{i,j} : (i,j) \in M \times W)$ are compact.
Since there is a finite number of possible matchings, consider a subsubsequence $(\pi_{\varepsilon_{k_l}})_l$ such that $\mu_{\varepsilon_{k_l}} = \mu, \forall l \in \mathbb{N}$, with $\mu$ a fixed matching. Since $(x_{\varepsilon_{k_l}}, y_{\varepsilon_{k_l}}) \to (x,y)$ when $l \to \infty$, the sequence $\pi_{\varepsilon_{k_l}}$ converges to $ \pi := (\mu,x,y)$, with $\mu$ a complete matching and $(x,y)$ a strategy profile. Moreover, as $\varepsilon_{k_l}$ goes to $0$, as for each $l$ the matching profile $\pi_{\varepsilon_{k_l}}$ is $\varepsilon_{k_l}$-externally stable, as the payoff functions are continuous, and as the definition of external stability only includes inequalities, $\pi$ is externally stable.
\end{proof}
\subsection{Lattice structure}
In Gale-Shapley's model, whenever each player's preferences are strict, the set of pairwise stable matching is a \textbf{lattice}. In our model, a semi-lattice structure is satisfied whenever \textbf{Nash equilibrium payoffs are different across games}, i.e. if $w$ and $w'$ are two Nash equilibrium payoffs of any player with some two different partners, then $w \neq w'$. This is satisfied when all games $G_{i,j}$ are generic bimatrix games.
\begin{theorem}\label{teo:lattice_structure_Nash_stability}
Let $\Gamma$ be a matching game where all Nash equilibrium payoffs are different across games. Then, the set of Nash stable allocations $\Pi$ is a semi-lattice. Moreover, if all the games have a unique Nash equilibrium payoff, then $\Pi$ is a full lattice.
\end{theorem}
\begin{proof}
Let $\pi = (\mu,x,y), \pi' = (\mu',x',y') \in \Pi$ be two Nash stable matching profiles. Consider the new triple $\pi^{\vee} := (\mu^{\vee},x^{\vee},y^{\vee})$ defined by
$$\mu^{\vee}_i = \argmax\left\{U_{i,\mu_i}(x_i,y_{\mu_i})\ ;\ U_{i,\mu'_i}(x'_i,y'_{\mu'_i})\right\}, \forall i \in M,$$
with the corresponding Nash equilibrium $(x_i^{\vee}, y_{\mu^{\vee}_i}^{\vee})$. As Nash equilibrium payoffs are different, partner and Nash equilibrium are uniquely defined when considering the maximum, so $\mu^{\vee}$ is a proposer matching profile. Even more, it is internally Nash stable as all couples play a Nash equilibrium of their game. Regarding external Nash stability, suppose that an agent $k \in M \cup W$ gets less than his/her IRP in $\pi^{\vee}$. Then, in the respective matching $\pi$ or $\pi'$, $k$ also gets less than his/her IRP, which is a contradiction. Let $(i,j) \notin \mu^{\vee}$ be a Nash-blocking pair of $\pi^{\vee}$, so there exists a Nash equilibrium $(\bar{x},\bar{y}) \in N.E(G_{i,j})$ such that $U_{i,j}(\bar{x},\bar{y}) \bi u_i(\pi^{\vee})$ and $V_{i,j}(\bar{x},\bar{y}) \bi v_j(\pi^{\vee})$. From the first inequality, it holds that $U_{i,j}(\bar{x},\bar{y}) \bi u_i(\pi)$, therefore $V_{i,j}(\bar{x},\bar{y}) \leq v_j(\pi)$, as $\pi$ is externally Nash stable. Remark that $v_j(\pi^{\vee})$ is either equal to $v_j(\pi)$ or $v_j(\pi')$. Since $V_{i,j}(\bar{x},\bar{y}) \bi v_j(\pi^{\vee})$, it must be that $v_j(\pi^{\vee}) = v_j(\pi')$. Then, as $\pi'$ is externally Nash stable, $U_{i,j}(\bar{x},\bar{y})\leq u_i(\pi')$, but this contradicts that $U_{i,j}(\bar{x},\bar{y}) \bi \max\{u_i(\pi), u_i(\pi')\}$.
Finally, suppose that all games have a unique Nash equilibrium. In this case, finding a Nash stable matching profile is equivalent to finding a stable marriage problem using the Nash equilibrium payoffs to determine the preferences of the agents, hence a full lattice holds.
\end{proof}
Observe that in constant-sum games, and more generally in strictly competitive games, whenever a Nash equilibrium exists, the equilibrium payoff is unique.
As in Gale-Shapley's model, we can show that the algorithm in which men propose and women dispose converges to the best Nash stable matching for men.
\begin{theorem}\label{teo:propose_dispose_algo_outputs_eps_highest_element_in_lattice}
If all Nash equilibrium payoffs are different across games and $\varepsilon$ is small enough, the output of Algorithm \ref{Algo:Propose_dispose_algo_general_case}, when $C_{i,j} = N.E(G_{i,j}), \forall (i,j) \in M \times W$, is the highest element (with respect to the proposer side) of the semi-lattice.
\end{theorem}
\begin{proof}
It follows from Theorem \ref{teo:the_output_of_algo_1_is_proposer_optimal} as the identification property holds when Nash equilibrium payoffs are all different.
\end{proof}
Exchanging the roles of men and women when computing the maximum between two Nash stable matching profiles, also outputs a Nash stable allocation. Even more, when all games have a unique Nash equilibrium, the minimum between two elements in $\Pi$ can be computed and the result, $(\mu^{\wedge},x^{\wedge},y^{\wedge})$ for men and $(\nu^{\wedge},x^{\wedge},y^{\wedge})$ for women, also belongs to $\Pi$. In addition, as in Gale-Shapley's model, the duality property $(\mu^{\vee},x^{\vee},y^{\vee}) = (\nu^{\wedge},x^{\wedge},y^{\wedge})$ and $(\mu^{\wedge},x^{\wedge},y^{\wedge}) = (\nu^{\vee},x^{\vee},y^{\vee})$ holds. Let us end this section by examining the prediction of Nash stability in our examples.
\setcounter{example}{0}
\begin{example}
Let $\pi$ be a matching profile in Example \ref{ex:one_couple_example}.
The only Nash equilibrium of the constant-sum game $G_{i,j}$ is $x_i = y_j = 0$, as any transfer $x_i>0$ (resp. $y_j>0$) is a strictly dominated strategy for $i$ (resp. for $j$). Thus, if $\pi$ is internally Nash stable and players $i$ and $j$ are matched, their payoffs are $u_i(\pi) = 10\delta$ and $v_j(\pi) = 0$, and then $j$ prefers to be single and get $\delta>0$. Therefore, $\pi$ is (externally and internally) Nash stable if and only if the players remain single.
\end{example}
\begin{example}
The only Nash equilibrium of the prisoners' dilemma is to play $(B,B)$. Then, as both players are better off being single than playing the equilibrium, the only Nash stable allocation is the one in which players do not match.
\end{example}
\begin{example}
The coordination game $G_{i,j}$ has three Nash equilibria: two pure ones $(A,A)$, $(B,B)$, and one mixed $((8/9,1/9),(1/9,8/9))$, with $(\delta/9, \delta/9)$ as payoff profile. As for each of the three Nash equilibria at least one agent is worse off than being single, once again, the only Nash stable matching profile is the one in which players remain single.
\end{example}
The predictions found in the three examples are not Pareto-optimal. Indeed, for Example \ref{ex:one_couple_example}, $i$ can propose $x_i\geq \delta$ to $j$ which she accepts.
For Example \ref{ex:prisoners_dilemma_example}, players can match and cooperate. In Example \ref{ex:coordination_game_example}, agents can match and play each strategy with equal probability. These three predictions are possible only if both agents believe that the other one will honor his/her promise, which is implicit in the matching with transfer and matching with contract literature. The next section studies the model where players can commit (for example by signing binding contracts).
\section{Model with commitment: external stability}\label{sec:external_and_internal_stability}
\subsection{External stability and its existence}
Suppose that partners within a couple can commit to playing a specific action profile before playing their game. This allows them to enlarge their set of feasible contracts well beyond their set of Nash equilibria. This leads naturally to the following stability notion.
\begin{definition}\label{def:externally_stable_matching_profile}
A matching profile $\pi=(\mu,x,y)$ is \textbf{externally stable} if,
\begin{itemize}[leftmargin = 0.6cm]\setlength\itemsep{0em}
\item[(a)]For any $i \in M$ and $j \in W$, $u_i(\pi) \geq \underline{u}_i$ and $v_j(\pi) \geq \underline{v}_j$,
\item[(b)]There is no $(i,j) \in M \times W$, not matched by $\mu$, and no $(x'_i,y'_j) \in X_i \times Y_j$ such that $U_{i,j}(x'_i,y'_j) > u_i(\pi)$ and $V_{i,j}(x'_i,y'_j) > v_j(\pi)$.
\end{itemize}
\end{definition}
Compared to the external Nash stability defined in Section \ref{sec:external_and_internal_Nash_stability}, the new condition $(b)$ allows the players to choose any feasible strategy profile without being restricted to a Nash equilibrium (or to a non-dominated strategy). Let us see the impact of such a change in our leading example.
\setcounter{example}{0}
\begin{example}
Recall that in our Example \ref{ex:one_couple_example} being single was the unique Nash stable matching profile. That outcome is not externally stable (in the sense of Definition \ref{def:externally_stable_matching_profile}) because being married and playing, for example, $x_i = 4\delta, y_j = 2\delta$, increases strictly the payoff of both agents. In fact, the set of externally stable matching profiles corresponds to Pareto-optimal allocations in which $i$ and $j$ match and it holds $\delta \leq x_i - y_j \leq 9\delta$. This is exactly the prediction of Shapley-Shubik's and Demange-Gale's models.
\end{example}
\begin{example}
Similarly for Example \ref{ex:prisoners_dilemma_example}, being single is not externally stable as matching and cooperating improves strictly the payoff of both agents. For simplicity of the computations, assume that both agents cooperate with probability $x$ and betray with probability $(1-x)$. Then, the set of (symmetric) externally stable allocations corresponds to all the matching profiles in which players are matched and $x \geq 1/2$.
\end{example}
\begin{example}
As for the previous two examples, being single is externally unstable. Once again for simplifying the computations, assume that both agents play their most preferred option with probability $x$. Then, the set of (symmetric) externally stable allocations corresponds to all the matching profiles in which players are matched and $x \in [1/3, 2/3]$.
\end{example}
As for Nash stable allocations, Algorithm \ref{Algo:Propose_dispose_algo_general_case} can be used for computing $\varepsilon$-approximations, when considering $C_{i,j} = X_i \times Y_j$, for any $(i,j) \in M \times W$. For a numerical example, check Appendix \ref{sec:appendix_numerical_example}.
\begin{theorem}\label{teo:externally_stable_allocation_existence}
For any matching game with compact strategy sets and continuous payoff function, there always exists an externally stable matching profile.
\end{theorem}
\begin{proof}
Corollary of Theorem \ref{teo:existence_ext_stable_alloc_general_case} when considering $C_{i,j} = X_i \times Y_j$ for any $(i,j) \in M \times W$.
\end{proof}
\subsection{On the semi-lattice structure}\label{sec:lattice_structure}
As for Nash stable allocations, a partial lattice structure is also satisfied by the set of externally stable allocations. However, to be able to define a proper order between matching profiles, an identification property is needed.
\begin{definition}\label{def:property_**}
Let $\pi = (\mu,x,y)$ and $\pi' = (\mu',x',y')$ be two externally stable matching profiles. We say that $\pi$ and $\pi'$ satisfy the \textbf{identification property} if whenever a man/woman has the same utility in both matching, he/she has the same woman/man in both. That is, $\forall i\in M, \text{ if } U_{i,\mu_i}(x_i,y_{\mu_i}) = U_{i,\mu'_i}(x'_i,y'_{\mu'_i}), \text{ then }\mu_i=\mu'_i, \text{ and analogously for women}$.
\end{definition}
The identification property is satisfied by Gale-Shapley's discrete model with strict preferences. It is also satisfied ``generically'' speaking, in the sense that if we discretize the strategy spaces and perturb the payoff functions, then Definition \ref{def:property_**} holds for any pair of matching profiles and so, for any pair of externally stable matching profiles. In particular, it is satisfied by the model without binding contracts when all games have different Nash equilibrium payoffs.
The identification property is also satisfied by Demange-Gale's \cite{demange1985strategy} model with transfers and is crucial in their lattice structure proof. Indeed, given two matching profiles $\pi^1, \pi^2$, consider the sets of agents that prefer matching $1$, matching $2$, or are indifferent, respectively by,
\begin{align}
\begin{split}\label{eq:agents_that_prefer_one_or_other_matching}
&P^1 := \{i \in M : u_i(\pi^1) \bi u_i(\pi^2)\}, \ \ Q^1 := \{j \in W : v_j(\pi^1) \bi v_j(\pi^2)\} \\
&P^2 := \{i \in M : u_i(\pi^2) \bi u_i(\pi^1)\}, \ \ Q^2 := \{j \in W : v_j(\pi^2) \bi v_j(\pi^1)\} \\
&P^0 := \{i \in M : u_i(\pi^1) = u_i(\pi^2)\}, \ \ Q^0 := \{j \in W : v_j(\pi^1) = v_j(\pi^2)\}
\end{split}
\end{align}
Demange and Gale proved that agents in $P^1$ are assigned with agents in $Q^2$, and those in $P^2$ with the ones in $Q^1$. This works in their model because every time an agent increases its payoff, its partner's payoff decreases (see Sections \ref{sec:zero_sum_and_strictly_competitive_games} and \ref{sec:shapley_shubik_gale_demange_models} below).
Consider the new triple $\pi^{\vee} := (\mu^{\vee},x^{\vee},y^{\vee})$ defined by $$\mu^{\vee}_i = \argmax\left\{U_{i,\mu_i}(x_i,y_{\mu_i})\ ;\ U_{i,\mu'_i}(x'_i,y'_{\mu'_i})\right\}, \forall i \in M,$$ with the corresponding strategy profile $(x_i^{\vee}, y_{\mu^{\vee}_i}^{\vee})$ (which is uniquely defined when a man is not married to the same woman, and can be chosen arbitrarily otherwise).
\begin{theorem}\label{teo:the_male_max_of_stable_matchings_is_a_stable_matching}
Under the identification property, $(\mu^{\vee},x^{\vee},y^{\vee})$ is a matching profile and is externally stable.
\end{theorem}
\begin{proof}
The proof has two steps. We first prove that $\mu^{\vee}$ is a matching and then, that the triple $(\mu^{\vee},x^{\vee},y^{\vee})$ is externally stable. For the first part, let $i,k \in M$ be two men such that $\mu^{\vee}_i = \mu^{\vee}_{k} = j$, with $j \neq j_0$. Suppose $j = \mu_i = \mu'_k$. It follows,
$$ j = \argmax\left\{U_{i,j}(x_i,y_{j})\ ;\ U_{i,\mu'_i}(x'_i,y'_{\mu'_i})\right\} = \argmax\left\{U_{k,\mu_k}(x_k,y_{\mu_k})\ ;\ U_{k,j}(x'_k,y'_{j})\right\} $$
Then $j \neq \mu_k$, and so $U_{k,\mu_k}(x_k,y_{\mu_k}) < U_{k,j}(x'_k,y'_{j})$ by the identification property. Since $\pi'$ is externally stable, it must hold that $V_{k,j}(x'_k,y'_j) > V_{i,j}(x_i,y_j)$, otherwise $(i,j)$ would block $\pi'$ using $(x_i,y_j)$ (observe that $V_{k,j}(x'_k,y'_j) \neq V_{i,j}(x_i,y_j)$ by the identification property). Consequently, as $\pi$ is externally stable, it must hold that $U_{k,j}(x'_k, y'_j)$ $\sm U_{k, \mu_k}(x_k, y_{\mu_k})$, otherwise $(k,j)$ blocks $\pi$ using $(x'_k,y'_j)$. This contradicts that $j = \argmax\{U_{k,\mu_k}$ $(x_k,y_{\mu_k});U_{k,j}(x'_k,y'_{j})\}$. Thus $\mu^{\vee}$ is a matching.
Regarding external stability, suppose $(i,j)$ is a blocking pair for $(\mu^{\vee},x^{\vee},y^{\vee})$, using the strategy profile $(s,t)$. In particular it holds $U_{i,j}(s,t) \bi \max\{U_{i,\mu_i}(x_i,y_{\mu_i}); U_{i,\mu'_i}(x'_i,y'_{\mu'_i})\}$. Let us call this last inequality $(MI)$. Suppose $j = j_0$. If $\mu_i \neq j_0$ (resp. if $\mu'_i \neq j_0$) then $\mu$ (resp. $\mu'$) is not externally stable because $i$ prefers being single to his partner in $\mu$ (resp. in $\mu'$): a contradiction with the external stability of $\mu$ (resp. $\mu'$). Otherwise, $\mu_i=\mu'_i=j$, which is also a contradiction with $(MI)$. Hence $j \neq j_0$. Let $i'$ be the partner of $j$ in $\pi^{\vee}$. Then, $(i',j)$ are together in one of the matchings $\mu$ or $\mu'$, and $(i,j)$ can block it using the strategy profile $(s,t)$: a contradiction. Thus $(\mu^{\vee},x^{\vee},y^{\vee})$ is externally stable.
\end{proof}
If $\mu^{\wedge}$ denotes the min operation matching in which each man is coupled with his worst woman between $\pi$ and $\pi'$, the output is not necessarily an externally stable matching. In that sense, we only have a semi-lattice structure. The semi-lattice structure remains valid if we exchange the roles of men and women: if $\nu^{\vee}$ gives to each woman her best man between $\mu$ and $\mu'$, the output is an externally stable matching profile. Finally, note that the classical equality between the max for men/min for women operations in the Gale-Shapley model does not hold in our model ($\mu^{\wedge} \neq \nu^{\vee}$). Quite the opposite can be true. For example if all $(G_{i,j})_{i\in M,j\in W}$ are common-interest games, the max for men coincides with the max for women ($\nu^{\vee}=\mu^{\vee}$). The next theorem proves that Algorithm \ref{Algo:Propose_dispose_algo_general_case}, when $C_{i,j} = X_i \times Y_j, \forall (i,j) \in M \times W$, converges to the ``best'' $\varepsilon$-externally stable matching for men if $\varepsilon$ is small enough.
\begin{theorem}\label{teo:the_output_of_algo_1_is_proposer_optimal}
If the identification property holds and $\varepsilon$ is small enough, then the output of Algorithm \ref{Algo:Propose_dispose_algo_general_case}, when $C_{i,j} = X_i\times Y_j, \forall (i,j) \in M \times W$, is men $\varepsilon$-optimal.
\end{theorem}
\begin{proof}
From Lemma \ref{lemma:men_can_always_propose_to_their_best_stable_women}, along all iterations of Algorithm \ref{Algo:Propose_dispose_algo_general_case}, given a man $i \in M$, his best stable woman $w_i$ is always feasible for him. Therefore, $i$'s final payoff is bounded from below by the one he can get with $w_i$. Since this is also the maximum payoff that men can get in an $\varepsilon$-externally stable matching profile, we conclude that all men obtain their highest possible payoff under $\varepsilon$-external stability.
\end{proof}
Let us end this section by observing that arguments similar to \cite{dubins1981machiavelli} allow us to show that, under the identification property and for small enough values of $\varepsilon$, Algorithm \ref{Algo:Propose_dispose_algo_general_case} is strategy-proof on men's side: if men revealed their utility functions to a designer who runs the algorithm, men's $\varepsilon$-dominant strategy is to submit their true utility functions, that is, by lying they will not increase their payoff by more than $\varepsilon$. Going to the limit implies the existence of a strategy-proof externally-stable matching profile for men (and so also for women).
\subsection{Constant-sum and strictly competitive games}\label{sec:zero_sum_and_strictly_competitive_games}
Suppose in this subsection that all the family of games $G_{i,j}, \forall i \in M, j \in W$, are zero-sum games, that is, for any couple $(i,j)$ it holds that $U_{i,j}(\cdot,\cdot) = - V_{i,j}(\cdot,\cdot) = g_{i,j}(\cdot,\cdot)$. This implies that the identification property holds and moreover, we recover a full lattice structure over the set of externally stable matching profiles $\Pi$. Indeed, let $\pi, \pi' \in \Pi$ and their minimum $\pi^{\wedge} := (\mu^{\wedge}, x^{\wedge}, y^{\wedge}) $ defined by,
\begin{align*}
\mu^{\wedge}_i &:= \argmin\{U_{i,\mu_i}(x_i,y_{\mu_i}) ; U_{i,\mu'_i}(x'_i,y'_{\mu'_i})\}, \forall i \in M \\
&:= \argmin\{-V_{i,\mu_i}(x_i,y_{\mu_i}) ; -V_{i,\mu'_i}(x'_i,y'_{\mu'_i})\}, \forall i \in M \\
&:= \argmax\{V_{i,\mu_i}(x_i,y_{\mu_i}) ; V_{i,\mu'_i}(x'_i,y'_{\mu'_i})\} = \nu_i^{\vee}, \forall i \in M
\end{align*}
Notice that the computations made for $\mu^{\wedge}$ still hold if the payoff function $g_{i,j}$ is composed with a strictly monotone function $U_{i,j} = \phi_{i,j} (g_{i,j})$ for player $i$ and $V_{i,j} = \psi_{i,j}(g_{i,j})$ for player $j$ (see Appendix \ref{sec:appendix_proofs_strictly_competitive_games} for the details). This allows us to extend the lattice structure to strictly competitive games \`{a} la Aumann in which, whenever $i$'s payoff increases (resp. decreases), $j$'s payoff decreases (resp. increases) and vice-versa.\footnote{Aumann \cite{aumann1961almost}, who introduced monotone games, also defined the class $\mathcal{S}$ in which payoff functions are obtained via monotone transformations of a zero-sum game. Although he conjectured that $\mathcal{S}$ must cover all strictly competitive games, the proof is only known for finite games \cite{adler2009note}.\label{footnote:Aumann_class}}
\subsection{Shapley-Shubik, Demange-Gale and Hatfield-Milgrom models}\label{sec:shapley_shubik_gale_demange_models}
Shapley-Shubik's model \cite{shapley1971assignment} consists of a housing market with buyers and sellers, where each seller has a house to sell and each buyer is interested in buying a house. A solution to this problem is a pair $(\mu,p)$, with $\mu$ a matching between sellers and buyers, and $p$ a vector of positive monetary transfers from buyers to sellers. Each seller $i \in S$, has a cost of her house $c_i$, and each buyer $j \in B$, has a valuation $h_{i,j}$ for $i$'s house. If seller $i$ sells her house to $j$ at price $p_i \geq 0$, their payoffs are $U_{i,j}(p_i) = p_i - c_i$ and $V_{i,j}(p_i) = h_{i,j} - p_i$, respectively. Demange and Gale \cite{demange1985strategy} generalized the problem as follows: If two agents $i,j$ are paired, their payoffs are given by some strictly increasing and continuous payoff functions $\phi_{i,j}(t)$ for $i$, and $\psi_{i,j}(-t)$ for $j$, with $t \in \mathbb{R}$ being the net transfer from $i$ to $j$ ($t\geq 0$ means that $j$ pays $t$ to $i$ and $t \leq 0$ means that $i$ pays $-t$ to $j$). Demange and Gale also assumed each player is given an IRP.
Demange-Gale's model can be mapped into a matching game in which all couples play strictly competitive games in the class $\mathcal{S}^{\ref{footnote:Aumann_class}}$ as follows. Let $G_{i,j} = (X_i,Y_j,U_{i,j},V_{i,j})$, with $X_i = Y_j = \mathbb{R}_+$, $U_{i,j}(x_i,y_j) = \phi_{i,j}(y_j-x_i)$ and $V_{i,j}(x_i,y_j) = \psi_{i,j}(x_i-y_j)$, for any $(i,j) \in M \times W$. Notice that payoffs are obtained as a monotone transformation of the zero-sum game in which player $i$ gets $g(x_i,y_j)=y_j-x_i$ and player $j$ gets $-g(x_i,y_j)=x_i-y_j$. Consequently, the results of the previous section (see also Appendix \ref{sec:appendix_proofs_strictly_competitive_games}) apply to this problem to recover the existence of externally stable matching and their lattice structure.\footnote{Remark that, even if the strategy sets in the Demange-Gale matching game are not compact, as transfers are naturally bounded by players' valuation or by the individually rational payoffs, the problem can easily be compactified.}
Finally, Hatfield and Milgrom's model \cite{hatfield2005matching} consists of two groups of players that should match and sign a contract $x$ from a set $X$. Appendix \ref{sec:matching_with_contracts} shows that their model can be mapped into a matching game model and vice-versa.
\begin{remark}
Observe that the unique Nash equilibrium of the underlying strictly competitive games is $x^*=y^*=0$: no monetary transfers in the Demange-Gale's problem (or zero prices in the Shapley-Shubik housing market). Hence, the internal Nash stability prediction is incompatible with the matching models with transfer prediction. As will be seen in the next section, this is not the case with our new notion of internal stability.
\end{remark}
\section{Model with commitment: external-internal stability}\label{sec:feasible_games_and_internal_stability}
\subsection{Internal stability}
Now that we have defined and characterized the set of externally stable matching profiles, we can introduce the main novelty of our paper.
\begin{definition}\label{def:internally_stable_matching_profile}
\textbf{An externally stable} matching profile $\pi=(\mu,x,y)$ is \textbf{internally stable} if for any couple $(i,j)$ matched by $\mu$ and any $(s_i,t_j)\in X_i \times Y_j$, it holds,
\begin{itemize}[leftmargin = 0.7cm]\setlength\itemsep{0em}
\item[(a)] If $U_{i,j}(s_i,y_j) > u_i(\pi)$ then, $(\mu,((x_{i'})_{i' \neq i},s_i),y)$ is not externally stable,
\item[(b)] If $V_{i,j}(x_i,t_j) > v_j(\pi)$ then, $(\mu,x,((y_{j'})_{j' \neq j},t_j))$ is not externally stable.
\end{itemize}
When a matching profile is externally and internally stable, it will be called \textbf{stable}.
\end{definition}
Condition (a) says that if a matched man $i$ has a profitable deviation in his game, he cannot switch to it otherwise his partner would not accept to sign the contract. Indeed, she would prefer her outside option or another partner instead. The second condition is the dual property for the woman. This can be written as a generalized Nash equilibrium condition subject to a participation constraint: players must choose contracts that maximize their payoff under the constraint that the partners still agree to sign the contract (Section \ref{sec:constrained_nash_equilibrium_et_QVI}).
An interesting family of strategic games in which the output of our previous algorithm is not only externally stable but also internally stable is the class of \textbf{common interest games}, in which $U_{i,j} = V_{i,j}$ for any couple $(i,j)\in M\times W$, as when a man maximizes his payoff, he also maximizes that of his partner. In general, however, the constructed externally stable matching profile will not be internally stable. We will see in the next sections how, under some assumptions on the family of strategic games, one can, from any externally stable matching, construct an (externally and internally) stable one. Let us see the impact on internal stability in some examples.
\setcounter{example}{0}
\begin{example}
Come back again to our first leading example. External stability implies that players match and $\delta\leq x_i -y_j \leq 9 \delta$. If $x_i -y_j > \delta$, decreasing slightly $x_i$ increases $i$'s payoff and does not violate the participation constraint of $j$. Thus, internal stability implies that $x_i -y_j=\delta$. If $y_j>0$, decreasing slightly $y_j$ increases $j$'s payoff without violating the participation constraint of $i$. Thus, a matching profile is externally and internally stable if and only if $i$ and $j$ agree to match, $x_i=\delta$ and $y_j = 0$. This ``intuitive'' solution refines the continuum set of predictions of Shapley-Shubik and Demange-Gale.
\end{example}
\begin{example}
Consider again the prisoners' dilemma matching game example. The only symmetric externally and internally stable allocation is the one in which agents are matched and cooperate with probability $1/2$. Indeed, both receive exactly $\delta$ as a payoff and any profitable deviation decreases the partner's payoff below his/her IRP, breaking the couple.
\end{example}
\begin{example}
Consider again the cooperation matching game example. We find two symmetric externally and internally stable allocations, both of them with the players matched. In the first one, each player plays the most preferred option with probability $2/3$, while in the second one, each player plays the least preferred option with probability $2/3$. In both cases, both agents receive exactly $\delta$ as a payoff and any profitable deviation decreases the partner's payoff below his/her IRP, breaking the couple.
\end{example}
\begin{example} Consider a market with $n$ buyers who can commit to a monetary transfer and one seller of an indivisible good. Buyer $i=1,...,n$ has a value $v_i$ for the good, and the seller the reservation price $c$. If the seller accepts to contract with $i$ at price $p_i$, $i$'s utility is $v_i-p_i$ and the seller's utility is $p_i-c$. Suppose $v_1 > v_2\geq ...\geq v_n$. If $c>v_1$, the unique stable allocation is when all the players remain single (for any price and any buyer, either the seller refuses to sell or the buyer refuses to buy). If $v_1\geq c$, external stability implies that the good is sold to buyer 1 for a price $p_1 \in [\max(c,v_2), v_1]$. In this continuum, the unique internally stable allocation is when $p_1=\max(c,v_2)$, which corresponds to the outcome of the second price auction. Algorithm \ref{Algo:Propose_dispose_algo_general_case} is, in this case, an ascending price auction that outputs the second price auction allocation (which is strategy-proof on the buyers' side).
\end{example}
As another example, Section \ref{sec:symmetric_matching_games} discusses the particular case of \textbf{symmetric matching games}, in which all couples play the same game, i.e when strategy sets and payoff functions are couple independent.
\subsection{Symmetric matching games}\label{sec:symmetric_matching_games}
Along this section and to keep things simple, suppose that $M$ and $W$ have the same size and that all agents have very low individually rational payoffs, so they prefer to be matched rather than being single. The main conclusion is: in a large class of situations, stable matchings exist and are such that all couples get the same Pareto-optimal payoff.
Formally, suppose that $U_{i,j} = U$, $V_{i,j} = V$, $X_i = S$, $Y_j = T$, for any couple $(i,j) \in M \times W$, meaning that the game played in every couple is independent of the identities of the players. Denote by $G$ the two-player game with action sets $S$ and $T$ and payoff functions $U$ and $V$. Suppose in addition that $S$ and $T$ are \textbf{convex} sets and that $U$ and $V$ are \textbf{continuous} and \textbf{own-payoff q-concave} (Definition \ref{def:q_concave_function}).
\begin{definition}\label{def:q_concave_function}
A function $f$ is \textbf{q-concave} if for any $(s,s',u)$ such that $f(s)\geq u$ and $f(s')>u$, it holds that $f(s'')>u$ for any $s'' \in ]s,s'[$. \end{definition}
Observe that if $s \mapsto U(s,t)$ is concave, linear, or strictly quasi-concave then, it is q-concave. Thus, we cover the class of finite games in mixed strategies \cite{laraki2019mathematical}. Also, q-concavity implies quasi-concavity.
\begin{definition}\label{def:pareto_transfer_property}
A game $G$ satisfies the \textbf{Pareto transfer property} if for any pair of Pareto-optimal payoffs $(u_1, v_1)$, $(u_2,v_2)$ such that $u_1 \sm u_2$ and $v_2 \sm v_1$, there exists a Pareto-optimal strategy profile $(s',t')$ satisfying, $u_1 \sm U(s',t') \sm u_2 \text{ and } v_2 \sm V(s',t') \sm v_1$.
\end{definition}
Expressed differently, a game satisfying the Pareto transfer property has a Pareto frontier that allows to continuously transfer utility from one player to the other, which is the case in many games. The following theorem characterizes the stable matching profile for symmetric matching games.
\begin{theorem}\label{teo:Pareto_optimal_for_fixed_game_case}
Let $(s,t)$ be Pareto-optimal in $G$. Let $\mu$ be any matching and suppose that any matched couple $(i,j)$ in $\mu$ plays $(x_i,y_j)=(s,t)$. Then, $(\mu,s,t)$ is (externally and internally) stable. Conversely, if $\pi = (\mu,x,y)$ is (externally and internally) stable, then any matched couple, except at most one, plays Pareto-optimally in its game. If moreover, the game satisfies the Pareto transfer property, then all couples playing Pareto-optimally share the same payoff.
\end{theorem}
\begin{proof}
Let $(s,t)$ be Pareto-optimal in $G$ and suppose that any matched couple $(i,j)$ in $\mu$ plays $(x_i,y_j)=(s,t)$. It is not possible to find a blocking pair since any deviation from $(s,t)$ reduces the payoff of one of the agents of the blocking pair. Therefore, the matching profile is externally stable. Regarding internal stability, consider an arbitrary matched couple $(i,j)$ and suppose that player $i$ has a profitable deviation $s'$ in $G$. In particular, $j$'s payoff strictly decreases when $i$ deviates. Because of continuity, there is $\alpha \in (0,1)$ such that $V(s',t) < V(\alpha s+ (1-\alpha)s',t) < V(s,t)$. Because of q-concavity, $U(\alpha s+ (1-\alpha)s',t) > U(s,t)$. Consequently, any $i'$ not married with $j$ increases his payoff as well as $j$'s payoff by forming a couple and playing $(\alpha s+ (1-\alpha)s', t)$. Thus, the deviation of player $i$ creates the blocking pair $(i',j)$, breaking external stability, meaning that the matching profile is internally stable. Conversely, consider two matched couples $(i_1,j_1)$ and $(i_2,j_2)$ with payoffs $(u_1,v_1)$ and $(u_2,v_2)$. Suppose that $u_1 \leq u_2$. If $(u_2,v_2)$ is Pareto-dominated, $i_1$ can replace $i_2$ by proposing to $j_2$ a Pareto-optimal improvement, so the pair $(i_1,j_2)$ externally blocks the matching profile. A contradiction, thus $(u_2,v_2)$ is Pareto-optimal. As a conclusion, all couples, except perhaps one, are playing Pareto-optimally in their game. Suppose now the game satisfies the Pareto transfer property and that couples $1$ and $2$ are playing Pareto-optimally but their payoffs differ. Without loss of generality, suppose that $u_1 = U(x_1,y_1) < U(x_2,y_2) = u_2$. Then, $v_1 = V(x_1,y_1) > V(x_2,y_2) = v_2$. By the Pareto transfer property, there exists a Pareto-optimal strategy profile $(s',t')$ such that $u_2 > U(s',t') > u_1$ and $v_1 > V(s',t') > v_2$. Thus, $(i_1,j_2)$ is a blocking pair of $\pi$, a contradiction. Consequently, all couples share the same payoff.
\end{proof}
Theorem \ref{teo:Pareto_optimal_for_fixed_game_case} shows that external-internal stability has the feature to induce cooperation (e.g. Pareto optimality) but also some uniformity in behaviors (couples share the same payoffs), in the case of symmetric matching games.
\subsection{Constrained Nash Equilibrium}\label{sec:constrained_nash_equilibrium_et_QVI}
This section defines the new concept of constrained Nash equilibrium and expresses it as a generalized Nash equilibrium under a participation constraint. For this, let us endow agents with outside options $( (u^0_i,v^0_j) \in \mathbb{R}^2 : (i,j) \in M \times W)$, representing some minimum payoff that each agent would like to have.
\begin{definition}\label{def:feasible_contract}
Let $(i,j) \in M \times W$ be a couple and $u_i^0$, $v_j^0$ be their outside options. A contract $(x,y) \in X_i \times Y_j$ is $(u_i^0,v_j^0)$-\textbf{feasible} for $(i,j)$ if it holds $U_{i,j}(x,y) \geq u_i^0$ and $V_{i,j}(x,y) \geq v_j^0$.
\end{definition}
\begin{definition}\label{def:constrained_Nash_eq}
Consider a couple $(i,j) \in M \times W$. A $(u_i^0,v_j^0)$-feasible contract $(x',y')$ is a $(u_i^0,v_j^0)$-\textbf{constrained Nash equilibrium} (CNE) if it satisfies:
\begin{align}
\begin{split}\label{eq:constrained_Nash_eq}
U_{i,j}(x',y') &= \max\{U_{i,j}(x,y') : V_{i,j}(x,y')\geq v_j^0, x \in X_i\}\\
V_{i,j}(x',y') &= \max\{V_{i,j}(x',y) : U_{i,j}(x',y)\geq u_i^0, y \in Y_j\}
\end{split}
\end{align}
The set of $(u_i^0,v_j^0)$-constrained Nash equilibria is denoted $CNE(u_i^0,v_j^0)$.
\end{definition}
Constrained Nash equilibria capture internal stability when players' outside options are the lowest payoff they are willing to accept for being matched with their partners. Indeed, if a strategy profile satisfies $(\ref{eq:constrained_Nash_eq})$, both players within the couple are best replying to the partner, subject to giving her/him an acceptable payoff. In other words, any profitable deviation from a constrained Nash equilibrium decreases the partner's payoff below his/her reservation price and then, the partner breaks the couple (a blocking pair is created).
Equation $(\ref{eq:constrained_Nash_eq})$ can be written as a quasi-variational inequality (QVI) \cite{facchinei2007finite,harker1991generalized,noor1988quasi} with point-to-set mappings that may fail to be lower semi-continuous. Due to this, the existence of constrained Nash equilibria cannot be always guaranteed. A more detailed study of CNE seen as QVI solutions can be found in Appendix \ref{sec:appendix_QVI_CNE}.
\subsection{Feasible games}
When outside options represent the highest payoff that a player can credibly get with another partner (or the lowest payoff they are willing to obtain with the current partner), the notion of constrained Nash equilibrium coincides with internal stability (Definition \ref{def:internally_stable_matching_profile}). This naturally leads to the following class of games.
\begin{definition}\label{def:feasible_game}
A two-person game $G_{i,j}$ is \textbf{feasible} if for any pair of outside options $(u^0_i,v^0_j) \in \mathbb{R}^2$, which admits at least one $(u^0_i,v^0_j)$-feasible contract, there exists a $(u^0_i,v^0_j)$-CNE.
\end{definition}
Feasibility is a necessary condition for the existence of an externally and internally stable matching profile. Imagine we have one single man $i$ and one single woman $j$, who can play a two-player game $G_{i,j}$ or remain single and get their individually rational payoffs $(\underline{u}_i,\underline{v}_j)$. If there is at least one $(\underline{u}_i,\underline{v}_j)$-feasible contract, then being single is externally unstable. But if there is no $(\underline{u}_i,\underline{v}_j)$-constrained Nash equilibrium in $G_{i,j}$, then there is no externally-internally stable matching. Consequently, \textbf{we suppose for the rest of the section that all two-player games $(G_{i,j} : (i,j) \in M \times W$) are feasible}.
\setcounter{example}{0}
\begin{example}
In the leading example, any $(x_i,y_j)$ satisfying $x_i - y_j \in (\delta,9\delta)$ is $(\underline{u}_i=\delta, \underline{v}_j=\delta)$-feasible. The $(\underline{u}_i, \underline{v}_j)$-CNE of this game is our intuitive solution: $x_i=\delta$ and $y_j = 0$.
\end{example}
Although necessary, not all games satisfy the feasibility condition. For example\footnote{We want to thank Eilon Solan for having suggested this example.}, consider the following matrix game $G$.
\begin{table}[H]
\centering
\begin{tabular}{c|c|c|c|}
& L & M & R \\
\hline
\ T \ & 2,1 & -10,-10 & 3,0 \\
\hline
M & 3,0 & 2,1 & -10,-10 \\
\hline
B & -10,-10 & 3,0 & 2,1 \\
\hline
\end{tabular}
\label{tab:my_label}
\end{table}
Game $G$ has only one Nash equilibrium, which is completely mixed, with payoffs $-5/3$ and $-3$ for players $1$ and $2$ respectively. Considering null outside options, $G$ does not have any constrained Nash equilibrium, even though there are feasible contracts. In pure strategies it is clear, for mixed strategies see Appendix \ref{sec:appendix_non_feasible_game}. The next theorem shows the richness of this class.
\begin{theorem}\label{teo:Class_of_feasible_games}
The class of feasible games includes constant-sum games with a value, strictly competitive games with an equilibrium, potential games, and infinitely repeated games.
\end{theorem}
The proof that a game is feasible relies on the characteristic of the game. In other words, the proof is game-dependent. Therefore, we prove Theorem \ref{teo:Class_of_feasible_games} in several subsections.
\subsubsection{Potential games are feasible.}
A two-person game $G_{i,j} = (X_i, Y_j, U_{i,j}, V_{i,j})$ is a potential game if there exists a potential function $\phi : X_i \times Y_j \to \mathbb{R}$ such that, $\forall x,x' \in X_i, y,y' \in Y_j$ it satisfies,
\[\phi(x',y') - \phi(x,y') = U_{i,j}(x',y') - U_{i,j}(x,y') \text{ and } \phi(x',y') - \phi(x',y) = V_{i,j}(x',y') - V_{i,j}(x',y)\]
\begin{proof}{\textbf{Theorem \ref{teo:Class_of_feasible_games} Potential games}.} Let $G_{i,j}$ be a potential game with potential function $\phi$. Let $(u_i^0,v_j^0)$ be outside options and $Z^0$ be the set of all $(u_i^0,v_j^0)$-feasible contracts. Suppose $Z^0 \neq \emptyset$ and consider $(x',y') \in \argmax \{\phi(x,y) : (x,y) \in Z^0 \}$. Remark that $(x',y')$
always exists as $Z^0$ is a non-empty compact set and $\phi$ is continuous. It holds that $(x',y')$ is $(u_i^0,v_j^0)$-feasible. Consider $x \in X_i$ such that $U_{i,j}(x,y') \bi U_{i,j}(x',y')$ and $V_{i,j}(x,y') \geq v_j^0$. In particular, $U_{i,j}(x,y') \bi U_{i,j}(x',y') \geq u_i^0$, so $(x,y') \in Z^0$. Moreover, $\phi(x,y') \bi \phi(x',y')$ as $U_{i,j}(x,y') \bi U_{i,j}(x',y')$. This contradicts that $(x',y')$ belongs to the argmax. Analogously, if player $j$ has a profitable deviation, player $i$ gets less than his outside option. Thus, $(x',y')$ is a $(u_i^0,v_j^0)$-CNE.
\end{proof}
\subsubsection{Zero-sum games with a value are feasible.}\label{sec:zero_sum_games_are_feasible} A two-person game $G = (X_i,Y_j, U_{i,j},V_{i,j})$ is a zero-sum game if players' payoff functions satisfy $U_{i,j}(\cdot, \cdot) = - V_{i,j}(\cdot, \cdot) =: g(\cdot, \cdot)$. Therefore, given $(u_i^0,v_j^0)$ player's outside options, with $u_i^0 \sm v_j^0$, a contract $(x,y) \in X_i \times Y_j$ is feasible if and only if it satisfies $u_i^0 \leq g(x,y) \leq v_j^0$. Similarly, a feasible contract $(x',y')$ is a $(u_i^0,v_j^0)$-CNE if for any $(x,y) \in X_i \times Y_j$, it holds that, if $g(x,y') \bi g(x',y')$ then, $g(x,y') \bi v_j^0$ and, if $g(x',y) \sm g(x',y')$ then, $ g(x',y) \sm u_i^0$.
\begin{proof}{\textbf{Theorem \ref{teo:Class_of_feasible_games} Zero-sum games with a value}.} Let $G_{i,j} = (X_i,Y_j,g)$ be a zero-sum game, with $X_i,Y_j$ compact convex subsets of topological vector spaces and $g$ separately continuous. Suppose the game $G$ has a value $w$ and by continuity of $g$ and compactness of $X_i$ and $Y_j$, players have optimal strategies $(x^*,y^*)$. Let $(x',y')$ be a feasible contract ($u_i^0 \leq g(x',y') \leq v_j^0$). The analysis is split in three cases.
\textbf{Case 1.} $u_i^0 \leq w \leq v_j^0$. The optimal contract $(x^*,y^*)$ is feasible. Since $(x^*,y^*)$ is a Nash equilibrium, it is a $(u_i^0,v_j^0)$-constrained Nash equilibrium.
\textbf{Case 2.} $w \sm u_i^0 \sm v_j^0$. Consider the set $A(u_i^0) := \{x \in X_i: \exists y \in Y_j, g(x,y) \geq u_i^0\}$. Since $(x',y')$ is a feasible contract, $A(u_i^0)$ is non-empty. Consider the optimization problem
\begin{align*}\label{eq:problem_P_zero_sum_game}\tag{P}
\sup\left[ \inf \{ g(x,y) : g(x,y) \geq u_i^0, y \in Y_j\} : x \in A(u_i^0)\right]
\end{align*}
For a given $x_0 \in A(u_i^0)$, the set $\{y \in Y_j: g(x_0,y) \geq u_i^0\}$ is bounded and so, there exists an infimum $y(x_0)$. Thus, as the set $A(u_i^0)$ is also bounded, there exists a supremum $x_0$. Let $(x_0,y(x_0))$ be the pair supremum-infimum solution of (\ref{eq:problem_P_zero_sum_game}). It holds $g(x_0,y(x_0)) \geq u_i^0$ by construction. Suppose that $g(x_0,y(x_0)) \bi u_i^0$. Since $w \sm u_i^0$, it holds $w \sm u_i^0 \sm g(x_0,y(x_0))$. Considering the optimal contract $(x^*,y^*)$, it holds $g(x_0,y^*) \leq g(x^*,y^*) = w \sm u_i^0 \sm g(x_0,y(x_0))$. By continuity of the function $g(x_0,\cdot)$, there exists $\lambda \in (0,1)$ such that $g(x_0, y_{\lambda} ) = u_i^0$, with $y_{\lambda} = \lambda y^* + (1-\lambda)y(x_0) \in Y_j$. This contradicts the fact that $(x_0, y(x_0))$ is the solution to (\ref{eq:problem_P_zero_sum_game}). Thus, $g(x_0,y(x_0)) = u_i^0$. If this contract is a constrained Nash equilibrium, the study of the second case is done. If not, consider $y_{t}\in Y_j$ as the convex combination between $y(x_0)$ and $y^*$ with $t$ computed by,
\begin{align}\label{eq:computation_of_t}
t := \sup\{\tau \in [0,1] : y_{\tau} := (1-{\tau})y(x_0) + {\tau} y^* \text{ and } \exists x_{\tau} \in X_i, g(x_{\tau},y_{\tau}) = u_i^0 \}
\end{align}
$t$ exists as for ${\tau} = 0$, there exists $x_0$ such that $g(x_0,y(x_0)) = u_i^0$. In addition, $y_{t} \neq y^*$, since the contract $(x^*,y^*)$ is a saddle point, $g(x^*, y^*) = w \sm u_i^0$ and any deviation of player $i$ decreases the payoff. Notice that any profitable deviation of player $j$ decreases the payoff below $u_i^0$, as $g(x_{t},y_{t}) = u_i^0$. Suppose there exists $\hat{x} \in X_i$ such that $u_i^0 = g(x_{t}, y_{t}) \sm g(\hat{x},y_{t}) \sm v_j^0$. As a summary, it holds: $g(x^*,y^*) = w \sm u_i^0 = g(x_{t}, y_{t}) \sm g(\hat{x}, y_{t})$ with $y_{t} \in (y(x_0), y^*)$. Once again, by the continuity of $g$ and the convexity of $X_i \times Y_j$, there exists an element $z \in (y_{t},y^*)$ and some $x_z \in X_i$, such that $g(x_z,z) = u_i^0$, contradicting the definition of $t$ in (\ref{eq:computation_of_t}). Thus, $(x_{t},y_{t})$ is a constrained Nash equilibrium.
\textbf{Case 3.} $u_i^0 \sm v_j^0 \sm w$. Analogous to case 2.
\textbf{Conclusion.} If $G$ has a value $w$, then it is feasible, and the constrained Nash equilibria $(x,y)$ satisfies $g(x,y) = median(u_i^0,v_j^0,w)$.
\end{proof}
\subsubsection{Strictly competitive games with an equilibrium are feasible.} Consider a strictly competitive game $G_{i,j} = (X_i,Y_j,U_{i,j},V_{i,j})$, with $X_i,Y_j$ compact sets and $U_{i,j},V_{i,j}$ continuous payoff functions. Let $\varphi, \phi$ be increasing functions\footnote{For the model of matching with transfers it is enough to consider increasing transformations, so we limit the study to this kind of monotone transformations. Nevertheless, the cases in which both transformations are decreasing functions or one is increasing and the other decreasing, are analogues.} such that the game $G'_{i,j} = (X_i,Y_j,\varphi \circ U_{i,j}, \phi \circ V_{i,j})$ is a zero-sum game. Nash equilibria of $G$ and $G'$ coincide, and Nash equilibrium payoffs are the image through the increasing functions from one game to another. In particular, if $w_{i,j}$ is the value of $G'_{i,j}$, then $(\varphi^{-1}(w_{i,j}), \phi^{-1}(w_{i,j}))$ is a Nash equilibrium payoff of $G_{i,j}$. Let $(u_0,v_0)$ be outside options of players $(i,j)$ in $G_{i,j}$, and let $(x^*,y^*)$ be a $(u_0,v_0)$-constrained Nash equilibrium. Consider the corresponding outside options in $G'_{i,j}$ given by $u_0' := \varphi(u_0)$, $v_0' := -\phi(v_0)$. Indeed, $u_0'$ and $v_0'$ are outside options for $(i,j)$ in their zero-sum game since, for any $(x,y) \in X_i \times Y_j$ such that $u_0 \leq U_{i,j}(x,y)$ and $v_0 \leq V_{i,j}(x,y)$, it holds $u'_0 \leq \varphi(u_0) \leq \varphi(U_{i,j}(x,y)) = - \phi(V_{i,j}(x,y)) \leq v_0'$. Also, $(x^*,y^*)$ is $(u_0',v_0')$-feasible in game $G'_{i,j}$, and it is direct that $(x^*,y^*)$ is a $(u_0',v_0')$-constrained Nash equilibrium, as increasing functions preserve inequalities. From the proof of Theorem \ref{teo:Class_of_feasible_games} for zero-sum games, it holds $\varphi(U_{i,j}(x^*,y^*)) = -\phi(V_{i,j}(x^*,y^*)) = median(u_0',v_0', w_{i,j})$. 
Thus, $U_{i,j}(x^*,y^*) = median(u_0,\varphi^{-1}(v_0'), \varphi^{-1}(w_{i,j}))$, $V_{i,j}(x^*,y^*) = median(\phi^{-1}(-u_0'),v_0, \phi^{-1}(-w_{i,j}))$. We conclude the following theorem.
\begin{theorem}\label{teo:strictly_competitive_games_are_feasible}
Let $G_{i,j} = (X_i,Y_j,U_{i,j},V_{i,j})$ be a strictly competitive game and $\varphi, \phi$ increasing functions such that the game $G'_{i,j}= (X_i,Y_j,\varphi \circ U_{i,j}, \phi \circ V_{i,j})$ is a zero-sum game with value $w_{i,j}$. Then, given $u_0, v_0$ outside options in $G_{i,j}$, which admits a feasible contract, there always exists a $(u_0,v_0)$-CNE $(x^*,y^*)$ of $G_{i,j}$. In addition, it holds $U_{i,j}(x^*,y^*) = median\{u_0,\varphi^{-1}(-\phi(v_0))$, $\varphi^{-1}(w_{i,j})\}$, $V_{i,j}(x^*,y^*) = median\{\phi^{-1}(-\varphi(u_0)),v_0, \phi^{-1}(-w_{i,j})\}$.
\end{theorem}
\subsubsection{Infinitely repeated games are feasible.}\label{sec:infinitely_repeated_games_are_feasible}
The existence of a non-feasible finite static game (Appendix \ref{sec:appendix_non_feasible_game}) motivates us to study repeated games. This may also be a more realistic setting: when we marry or are assigned to a job or a university, it is usually for a long period of time. Consider a two-person finite game in mixed strategies, $G = (X_i,Y_j,U_{i,j},V_{i,j})$, called the \textbf{stage game}, which is played in discrete time $k \in \{1,...,K,...\}$ after observing the past history of plays $h_k = ((x_1,y_1),...,(x_{k-1},y_{k-1}))$. Given $K \in \mathbb{N}$, consider the $K$-stages game $G_{i,j}^K$ defined by the payoff functions $U_{i,j}(K,\sigma_i, \sigma_j) := \frac{1}{K} \mathbb{E}_{\sigma}\left[\sum_{k = 1}^K U_{i,j}(x_k,y_k)\right]$, $V_{i,j}(K,\sigma_i, \sigma_j) := \frac{1}{K} \mathbb{E}_{\sigma}\left[\sum_{k = 1}^K V_{i,j}(x_k,y_k)\right]$, where $\sigma_i : \bigcup_{k=1}^{\infty}(X_i\times Y_j)^{k-1} \to X_i$ and $\sigma_j : \bigcup_{k=1}^{\infty}(X_i\times Y_j)^{k-1} \to Y_j$ are the players' behavioral strategies. We define the \textbf{uniform game} $G_{\infty}$ as the game obtained by taking $K \to \infty$ in $G_{i,j}^K$.
\begin{definition}\label{def:punishment_levels}
Consider the set of \textbf{feasible payoffs} $co(u,v) := Conv\{(U_{i,j}(x,y),V_{i,j}(x,y)) \in \mathbb{R}^2 : (x,y) \in X_i \times Y_j\}$, in which $Conv$ stands for the convex envelope. Define the \textbf{punishment level} of players $i$ and $j$ respectively by, $\alpha := \min_{y \in Y_j} \max_{x \in X_i} U_{i,j}(x,y)$, $\beta := \min_{x \in X_i} \max_{y \in Y_j} V_{i,j}(x,y)$. Define the set of \textbf{uniform equilibrium payoffs} as $E = \{(\bar{u}, \bar{v}) \in co(u,v) : \bar{u} \geq \alpha, \bar{v} \geq \beta\}$. Finally, consider $u_0,v_0 \in \mathbb{R}$ outside options for player $i$ and player $j$ respectively. The set of \textbf{acceptable payoffs} is defined as $E_{u_0,v_0} := \{(\bar{u}, \bar{v}) \in co(u,v) : \bar{u} \geq u_0 \text{ and } \bar{v} \geq v_0\}$.
\end{definition}
\begin{definition}\label{def:constrained_uniform_equilibrium}
A strategy profile $\sigma = (\sigma_i, \sigma_j)$ is called a \textbf{constrained uniform equilibrium} of $G_{\infty}$ if:
\begin{itemize}[leftmargin = 0.5cm]\setlength\itemsep{0em}
\item[-] $\forall \varepsilon \bi 0$, $\sigma$ is a $(u_0,v_0)$-$\varepsilon$-constrained equilibrium of any long enough finitely repeated game, that is: $\exists K_0, \forall K \geq K_0, \forall (\tau_i,\tau_j):$
\begin{itemize}[leftmargin = 0.5cm]\setlength\itemsep{0em}
\item[(a)] If $\gamma_K^i(\tau_i, \sigma_j) \bi \gamma_K^i(\sigma) + \varepsilon$ then $\gamma_K^j(\tau_i, \sigma_j) < v_0$,
\item[(b)] If $\gamma_K^j(\sigma_i, \tau_j) \bi \gamma_K^j(\sigma) + \varepsilon$ then $\gamma_K^i(\sigma_i, \tau_j) < u_0$, and
\end{itemize}
\item[-] $[(\gamma_K^i(\sigma),\gamma_K^j(\sigma))]_K$ has a limit $\gamma(\sigma)$ in $\mathbb{R}^2$ as $K$ goes to infinity, with $\gamma^i(\sigma) \geq u_0$, $\gamma^j(\sigma) \geq v_0$.
\end{itemize}
The set of constrained uniform equilibrium payoffs is denoted as $E^{ce}_{u_0,v_0}$.
\end{definition}
\begin{definition}\label{def:uniform_game_feasible}
$G_{\infty}$ is \textbf{feasible} if whenever $E_{u_0,v_0}$ is non-empty, $E^{ce}_{u_0,v_0}$ is non-empty as well.
\end{definition}
By the Folk theorem \cite{aumann1994long}, the following proposition holds.
\begin{proposition}\label{prop:uniform_equilibria_payoff_are_constrained_uniform_payoff}
Any payoff in $E \cap E_{u_0,v_0}$ can be achieved by a constrained uniform equilibrium.
\end{proposition}
\begin{proof}{\textbf{Theorem \ref{teo:Class_of_feasible_games} Infinitely repeated games}.} Suppose $E_{u_0,v_0}$ is non-empty. We aim to show that $E^{ce}_{u_0,v_0}$ is non-empty as well. Recall the punishment levels $\alpha$ and $\beta$ for players $i$ and $j$ (Definition \ref{def:punishment_levels}). The analysis is split in four cases.
\textbf{Case 1.} $v_0 \geq \beta$ and $u_0 \geq \alpha$. It holds that $E_{u_0,v_0} \subseteq E$. Then, by Proposition \ref{prop:uniform_equilibria_payoff_are_constrained_uniform_payoff}, $E^{ce}_{u_0,v_0} = E_{u_0,v_0}$. Since $E_{u_0,v_0}$ is non-empty, $E^{ce}_{u_0,v_0}$ is non-empty as well.
\textbf{Case 2.} $v_0 \sm \beta$ and $u_0 \sm \alpha$. It holds that $E \subset E_{u_0,v_0}$. Thus, $E^{ce}_{u_0,v_0}$ contains $E$ (by Proposition \ref{prop:uniform_equilibria_payoff_are_constrained_uniform_payoff}) and so, it is non-empty.
\textbf{Case 3.} $v_0 \sm \beta$ and $u_0 \geq \alpha$. If $F:=E_{u_0,v_0} \cap E$ is non-empty, by Proposition \ref{prop:uniform_equilibria_payoff_are_constrained_uniform_payoff}, all elements on $F$ belong to $E^{ce}_{u_0,v_0}$. Otherwise, consider $(u',v')$ defined by $v' := \max\{v : \exists u \text{ s.t. } (u,v) \in E_{u_0,v_0}\}$, $u' \in \{u : (u,v') \in E_{u_0,v_0}\}$. As $E_{u_0,v_0}$ is a non-empty closed set, $(u',v')$ indeed exists and it belongs to $E_{u_0,v_0}$. Consider the strategy profile $\sigma'$ in which the players follow a pure plan which yields to the payoff $(u',v')$. If player $i$ deviates, player $j$ punishes him at the level $\alpha$, and if player $j$ deviates, player $i$ ignores the deviation and continues to follow the pure plan. Player $i$ cannot gain more than $\varepsilon$ by deviating. Indeed, if he does, player $j$ punishes him by reducing his payoff to $\alpha$. Since $(u',v')\in E_{u_0,v_0}$, it holds that $u' \geq u_0 \geq \alpha$ and so this deviation is not profitable. For player $j$, suppose there exists $K \in \mathbb{N}$ and $\varepsilon \bi 0$ such that she can obtain a payoff $v'' \bi v' + \varepsilon$ by deviating at stage $K$. Let $u''$ be the average payoff of player $i$ obtained at stage $K$ after the deviation of player $j$. Since $(u'',v'')$ is an average payoff of the $K$-stages game, it is feasible. It cannot hold that $u'' \geq u_0$, since it would contradict the definition of $v'$, as the payoff $(u'',v'')$ would be acceptable. Thus, $u'' \sm u_0$. As a conclusion, $\sigma'$ is a constrained equilibrium and then, $(u',v') \in E^{ce}_{u_0,v_0}$.
\textbf{Case 4.} $v_0 \geq \beta$ and $u_0 \sm \alpha$. Analogously to case 3.
\end{proof}
\subsection{From external to internal stability}
As mentioned before, constrained Nash equilibria capture internal stability when considering the appropriated outside options (the players' reservation prices). Given $\pi = (\mu,x,y)$ a matching profile and $(i,j) \in \mu$ an arbitrary matched couple, their reservation prices (outside options) can be computed by,
\begin{align}
\begin{split}\label{eq:outside_options}
u_i^0 &:= \max \{U_{i,b}(x,y) : b \in W_0 \setminus \{j\}, V_{i,b}(x,y) \bi v_{b}(\pi), (x,y) \in X_i \times Y_b\},\\
v_j^0 &:= \max \{V_{a,j}(x,y) : a \in M_0 \setminus \{i\}, U_{a,j}(x,y) \bi u_{a}(\pi), (x,y) \in X_a \times Y_j\},
\end{split}
\end{align}
that is, the best payoff that $i$ and $j$ can get outside of their couple by matching with a person who may accept them.
If $\pi$ is an externally stable matching profile, Theorem \ref{teo:the_outside_options_of_a_blocking_pair_are_higher_than_their_payoff} implies that the outside options of all couples are never greater than their current payoffs. Therefore, if $(i,j) \in \mu$, $(x_i,y_j)$ is always $(u_i^0, v_j^0)$-feasible and, because of feasibility, there always exists a $(u_i^0, v_j^0)$-constrained Nash equilibrium $(\hat{x}_i, \hat{y}_j)$. With this in mind, a new algorithm is designed, which will output an externally and internally stable allocation for any externally stable allocation used as an input. Intuitively, it will replace one by one the strategy profiles of the couples by a CNE, using at each iteration Equations $(\ref{eq:outside_options})$ for computing the outside options.
If at any iteration a couple replaces $(x_i,y_j)$ by a $(u_i^0$, $v_j^0)$-feasible Nash equilibrium, they will keep playing it during all posterior iterations. If couples cannot replace their strategy profile with a Nash equilibrium, the choice of a constrained Nash equilibrium is made by an \textbf{oracle}. Algorithm \ref{Algo:strategy_profiles_modification} corresponds to the pseudo-code of the dynamic just explained. For a numerical example, check Appendix \ref{sec:appendix_numerical_example}.
\begin{algorithm}[ht]
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\Input{$\pi = (\mu,x,y)$ externally stable matching profile}
\SetInd{0.2cm}{0.2cm}
\Repeat{convergence}{
\For{$(i,j) \in \mu : i \neq i_0$ and $j \neq j_0$}{
Compute the outside options (\ref{eq:outside_options}). If $(x_i,y_j) \notin CNE(u_i^0$, $v_j^0)$, replace it by $(\hat{x}_i, \hat{y}_j) \in CNE(u_i^0$, $v_j^0)$}
}
\caption{Strategy profiles modification}
\label{Algo:strategy_profiles_modification}
\end{algorithm}
The convergence of Algorithm \ref{Algo:strategy_profiles_modification} does not directly hold as the outside options change at each iteration. Indeed, replacing a strategy profile with a constrained Nash equilibrium may decrease the payoff of an agent. Therefore, his/her outside option may also change and the constrained Nash equilibrium may not be an equilibrium anymore. Nevertheless, if after changing all strategy profiles of $\pi$, the outside options remain invariant, the current matching profile is indeed internally stable and the algorithm stops.
\begin{theorem}\label{teo:strategy_profile_modification_is_correct}
If Algorithm \ref{Algo:strategy_profiles_modification} converges, its output is an externally stable and internally stable matching profile. Moreover, for a judicious selection of the oracle for each feasible game, Algorithm \ref{Algo:strategy_profiles_modification} is guaranteed to converge
for potential games, zero-sum games with a value, strictly competitive games with an equilibrium, and infinitely repeated games.
\end{theorem}
The proof of the convergence of Algorithm \ref{Algo:strategy_profiles_modification} is game-dependent as the choice of the oracle is different for each class of games. First, we state the proof of the correctness of Algorithm \ref{Algo:strategy_profiles_modification}. Then, we state the proof that Algorithm \ref{Algo:strategy_profiles_modification} converges for each class of feasible games.
\begin{proof}{\textbf{Algorithm \ref{Algo:strategy_profiles_modification} is correct.}} Let $\pi = (\mu,x,y)$ be the input of Algorithm \ref{Algo:strategy_profiles_modification}. Note that by construction, if the algorithm ends, the output is internally stable. Concerning external stability, we aim to prove that if $\pi_t$, the matching profile before iteration $t$, is externally stable, then $\pi_{t+1}$ is externally stable as well. Let $(i,\mu_i)$ be the couple that changes its strategy profile at iteration $t$. Let $(x_i,y_{\mu_i})$ be their strategy profile at iteration $t$ and $(\hat{x}_i, \hat{y}_{\mu_i})$ at time $t+1$. Suppose there exists $(a,b) \notin \mu$ a blocking pair of $\pi_{t+1}$, so there exists a strategy profile $(s,r) \in X_a \times Y_b$ such that $U_{a,b}(s,r) \bi U_{a, \mu_a}(\pi_{t+1})$ and $V_{a,b}(s,r) \bi V_{b, \mu_b}(\pi_{t+1})$. If $a \neq i$ (and analogously if $b \neq \mu_i$) then $U_{a, \mu_a}(\pi_{t+1}) = U_{a, \mu_a}(\pi_{t})$. Thus, it cannot hold that $a \neq i$ and $b \neq \mu_i$, otherwise the pair $(a,b)$ would also be a blocking pair of $\pi_t$. Without loss of generality, suppose that $a = i$. In particular, $b \neq \mu_i$ because $a$ and $b$ are not a couple. It holds $V_{i,b}(s,r) \bi V_{b, \mu_b} (\pi_{t+1}) = V_{b, \mu_b} (\pi_{t})$. Then, if $u_i^0$ is $i$'s outside option at iteration $t$ (computed by (\ref{eq:outside_options})), it holds $u_i^0 \geq U_{i,b}(s,r) = U_{a,b}(s,r) \bi U_{a, \mu_a}(\pi_{t+1}) = U_{i, \mu_i}(\hat{x}_i, \hat{y}_{\mu_i})$. This contradicts the fact that $(\hat{x}_i, \hat{y}_{\mu_i})$ is $(u_i^0,v_{\mu_i}^0)$-feasible.
\end{proof}
\begin{remark}
The previous proof also holds when considering $\varepsilon$-externally stable allocations, which can be computed by Algorithm \ref{Algo:Propose_dispose_algo_general_case}. Since considering the general case with $\varepsilon$-blocking pairs does not contribute to the proof but only makes it more technical, we have preferred to treat the case $\varepsilon = 0$. However, remark that Theorem \ref{teo:the_outside_options_of_a_blocking_pair_are_higher_than_their_payoff} covers the general case with $\varepsilon \geq 0$. As is proven next, Algorithm \ref{Algo:strategy_profiles_modification} converges for several possible games. Thus, we obtain the existence of externally-internally stable matching profiles.
\end{remark}
\begin{proof}{\textbf{There exists an oracle for potential games such that Algorithm \ref{Algo:strategy_profiles_modification} converges}.}
Consider a couple $(i,j)$ and $(\hat{x}_i^t,\hat{y}_j^t)_t$ their sequence of constrained Nash equilibria along the iterations. Since $(\hat{x}_i^{t-1},\hat{y}_j^{t-1})$ is always feasible for the following iteration (Theorem \ref{teo:the_outside_options_of_a_blocking_pair_are_higher_than_their_payoff}), the sequence $\phi_{i,j}(\hat{x}_i^t,\hat{y}_j^t)_t$ is non-decreasing over $t$. Then, as the potential functions are continuous and the strategy sets are compact, the sequences $(\phi_{i,j}(\hat{x}_i^t,\hat{y}_j^t))_t$ are convergent for any couple $(i,j)$. Thus, Algorithm \ref{Algo:strategy_profiles_modification} converges.
\end{proof}
\begin{proof}{\textbf{For any oracle, Algorithm \ref{Algo:strategy_profiles_modification} converges for zero-sum games with a value}.} At the beginning of Algorithm \ref{Algo:strategy_profiles_modification}, all couples $(i,j)$ belong to one (not necessarily the same) of the following cases: $u_i^0 \leq w_{i,j} \leq v_j^0$, $w_{i,j} \leq u_i^0 \leq v_j^0$ or $u_i^0 \leq v_j^0 \leq w_{i,j}$. In the first case, the couple plays a Nash equilibrium and never changes it afterwards. In the second case, as $u_i^0$ is non-increasing for $i$ (Lemma \ref{lemma:outside_options_are_monotone_in_zero_sum_games}) and bounded from below by $w_{i,j}$, his sequence of outside options converges. Analogously, the sequence of outside options for $j$ converges in the third case. Therefore, Algorithm \ref{Algo:strategy_profiles_modification} converges.
\end{proof}
\begin{proof}{\textbf{For any oracle, Algorithm \ref{Algo:strategy_profiles_modification} converges for strictly competitive games in $\mathcal{S}$ with an equilibrium.}} Corollary of the convergence of Algorithm \ref{Algo:strategy_profiles_modification} for zero-sum games with a value.
\end{proof}
Since Shapley-Shubik's and Demange-Gale's models can be mapped into a matching game in which all strategic games $G_{i,j}$ are included in the class $\mathcal{S}$ (see Section \ref{sec:shapley_shubik_gale_demange_models}), we conclude that our results apply directly to their works, proving the existence of matchings that are not only externally stable but also internally stable. The refinement induced by internal stability crucially depends on the choice of the strategic games $G_{i,j}$ as explained in the introduction (see Section \ref{sec:introduction}). For example, if we model the game between a buyer and a seller as an ultimatum game where the buyer is the first proposer, she will get all the surplus, and vice-versa if the first proposer is the seller. However, if the game is an alternating-offer bargaining game, the surplus is shared equally.
\begin{proof}{\textbf{There exists an oracle for infinitely repeated games such that Algorithm \ref{Algo:strategy_profiles_modification} converges.}}
Let $\pi$ be an externally stable matching profile, and $(i,j) \in \mu$ be the couple that modifies their strategy profile at iteration $t$. Let $(u_0^t,v_0^t)$ be their outside options at iteration $t$, and consider $F_t := E \cap E_{u_0^t, v_0^t}$. If $F_t$ is non-empty, there exists a $(u_0^t,v_0^t)$-feasible uniform equilibrium for $(i,j)$, so they keep playing this strategy profile forever. If $F_t = \emptyset$, without loss of generality, assume that $u_0^t \geq \alpha$ and $v_0^t \sm \beta$. Consider the oracle used in the proof of Theorem \ref{teo:Class_of_feasible_games} for infinitely repeated games. Let $(u^t,v^t)$ be the $(u_0^t, v_0^t)$-constrained Nash equilibrium chosen at iteration $t$ by the oracle, so $v^t := \max \{v : \exists u \text{ s.t. } (u,v) \in E_{u_0^t, v_0^t}\}$. If $v^t \geq \beta$, $(u^t, v^t) \in E$ and then, $F_t$ is non-empty, a contradiction. Thus, $v^t \sm \beta$. Let $r \bi t$ be the next iteration in which $(i,j)$ modifies their strategy profile. Let $(u_0^r, v_0^r)$ be their new outside options and $F_r = E \cap E_{u_0^r, v_0^r}$. If $F_r$ is non-empty, they play a uniform equilibrium. Otherwise, since $v_0^r \leq v^t \sm \beta$ (Theorem \ref{teo:the_outside_options_of_a_blocking_pair_are_higher_than_their_payoff}), in particular it holds that $u_0^r \geq \alpha$ and $v_0^r \sm \beta$. Let $(u^r, v^r)$ be the new constrained Nash equilibrium found by the oracle. Since external stability implies that $(u^t, v^t) \in E_{u_0^r, v_0^r}$ (Theorem \ref{teo:the_outside_options_of_a_blocking_pair_are_higher_than_their_payoff}), $v^r \geq v^t$. In addition, as $F_r$ is empty, $v^r \sm \beta$. Thus, consider an infinite sequence of iterations in which couple $(i,j)$ changes of strategy profiles. In particular, at each iteration $t$, $F_t = \emptyset$ and therefore, $j$ obtains a non-decreasing sequence of payoffs $(v^t)_t$, bounded from above by $\beta$. 
Then, the sequence converges to a fixed payoff, and therefore, Algorithm \ref{Algo:strategy_profiles_modification} converges.
\end{proof}
\section{A potential application of the matching game model}\label{sec:electricity_application}
Several studies show the willingness to pay of consumers for green energy\footnote{See for example \cite{borchers2007does}, \cite{mozumder2011consumers}, \cite{roe2001us} for the US, \cite{kontogianni2013renewables} for Greece, \cite{nomura2004willingness} for Japan, and \cite{yoo2009willingness} for Korea}.
This induced the creation of several electronic platforms to trade green energy around the world. For example, in the UK platform Piclo\footnote{\url{https://www.openutility.com/piclo/}}, every 30 minutes, an algorithm generates a matching between producers (i.e. generators) and consumers based on meter data, generator pricing policy, and consumers' preferences. Generators control and see who buys electricity from them, and consumers select and prioritize from which generators they want to buy electricity. Similar platforms have been developed in France, Germany, US, the Netherlands, etc.\footnote{Germany: \url{https://sonnenbatterie.de/en/sonnenCommunity} Netherlands: \url{https://vandebron.nl/}, etc.}
The matching game framework can be used to model this problem, define a solution, and design an algorithm that implements it. It is well documented that the consumer's utility depends on who produces the energy (she may prefer a local, close to her, producer), the type of green energy used (sunlight, wind, water, etc), the guarantee that her demand will be satisfied and the price or the type of contract offered. For example, a producer contract --or the platform itself-- may promise to a consumer that at least 40\% of the energy received by the consumer is green or is produced by some local producer she wants to support and that whatever happens, her demand is satisfied. Each producer can decide to invest in one or several technologies, on a capacity of production, and a pricing policy (or a menu of contracts from which the consumer can choose). We need an algorithm that matches consumers and producers such that (1) consumers' total demands for a given producer are less than its production, (2) each consumer is allocated the best possible price, contract, or technology for her from that producer, (3) no producer is better off by offering another contract, price or investing in a cheaper technology while not losing consumers, and (4) no unmatched producer-consumer can increase their utility by contracting. Conditions (2) and (3) are our internal stability notions, (4) is our external stability condition.
This is not exactly the model we described above as we may have several consumers matched with one producer, and potentially also, several producers matched with one consumer. Also, the real allocation problem platforms are faced with is dynamic because electricity production and consumption are uncertain, and so the platforms must dynamically change and update the allocation. Such extensions of our model (one-to-many, many-to-many, and a dynamic allocation) are delegated to a future study.
\section{Extensions and conclusion}\label{sec:conclusions}
This article proposes a new model that mixes Nash incentives with Gale-Shapley pairwise stability. We proved that many existing models and results are particular cases that can be derived from our external stability notion (Section \ref{sec:shapley_shubik_gale_demange_models}). Our originality is the internal stability notion (Section \ref{sec:feasible_games_and_internal_stability}). We introduced the new class of feasible games, which is a necessary condition for the existence of externally and internally stable matchings, we proved that this class is rich enough to include zero-sum, strictly competitive, potential, and infinitely repeated games and that our algorithm returns an externally and internally stable matching profile for any of those sub-classes. It remains open whether the existence of externally and internally stable matching profiles holds for any feasible compact continuous game.
Although the computation of constrained Nash equilibria (CNE) is out of the scope of this article and it represents one of the main future research lines,
it is important to remark that it corresponds to a hard problem. Unlike Nash equilibria computation, known to be in the class of PPAD-Complete problems \cite{papadimitriou2007complexity}, computation of CNE cannot belong to it. Indeed, the existence of CNE is not guaranteed even for finite games. Although for certain games our algorithms are efficient \cite{GL2021SODA}, in the general setting, it belongs to the class of NP-Complete problems, even for finite games. That it belongs to the class of NP problems for bi-matrix games is not difficult, as checking if a given strategy profile $(x,y)$ is a $(u_0,v_0)$-CNE for some fixed outside options $(u_0,v_0)$, can be done by solving a linear programming problem. That computing a CNE is complete in NP is plausible because the problem of computing a Nash equilibrium that gives both players at least some fixed payoffs $(u_0,v_0)$ is NP-complete \cite{gilboa1989Nash}.
More precisely, in a companion paper \cite{GL2021SODA}, we study deeply the complexity of the optimization problems of our algorithms and prove their polynomiality in several classes: zero-sum matrix games, strictly competitive bimatrix games, and infinitely repeated bimatrix games. Also, we propose an $\varepsilon$ modification of our second algorithm which is shown to converge in at most $O(\frac{1}{\varepsilon})$ number of steps implying that our two algorithms are EPTAS \cite{EPTAS}.
There are other interesting research directions that one may want to explore. It would be of interest to have an algorithm that computes a $0$-externally stable matching. Observe that all known algorithms of matching with transfers in the literature (e.g. Kelso and Crawford \cite{kelso1982job} and Gale et al. \cite{demange1986multi}) compute $\varepsilon$-stable matchings but never an exact stable matching since they discretize the price space. This last question continues to be an open problem even in the classical matching with transfer literature.
\appendix
\section{Matching with contracts seen as a matching game.}\label{sec:matching_with_contracts}
Hatfield and Milgrom \cite{hatfield2005matching} defined a one-to-many assignation problem between doctors and hospitals in which, besides being paired, agents define a bilateral contract, from a finite set of contracts $X$. In this section, we prove that a one-to-one matching with contracts setting can be mapped into a matching game, and vice-versa.
For the sake of simplicity, we use the terminology of stable marriage to describe the model of Hatfield and Milgrom. Consider two finite sets of agents $M$ and $W$, men and women, and a finite set of contracts $X$. Contracts are bilateral, i.e. each $x \in X$ is related to one man $x_M \in M$ and one woman $x_W \in W$, and they are the only ones that can trade it. Agents have preferences over the set of contracts, so they will pick the contract they prefer the most. Besides, each agent has the option of being unmatched, so a contract $x$ is never accepted by an agent $k \in M \cup W$, if $x \sm_k \emptyset$, with $\emptyset$ the so-called \textit{empty contract}.
Consider a matching with contracts model $(M,W,X)$ as above. For every potential couple $(i,j) \in M \times W$, consider the strategic game $G = (X,X,U_{i,j},V_{i,j})$, in which both players can choose any contract from $X$, and their payoff functions are given by, $U_{i,j}(x,y) = V_{i,j}(x,y) = \alpha_{i,j} \bi 0$, if $x = y$ and $x_M = i, x_W = j$, and $U_{i,j}(x,y) = V_{i,j}(x,y) = \beta_{i,j} \sm 0$, otherwise. Constants $\alpha$ are chosen such that, for any agent $k$, if $h \bi_k h'$ then $\alpha_{k,h} \bi \alpha_{k,h'}$. It is not difficult to see that external stability in the constructed matching game is equivalent to the stable allocations defined by Hatfield and Milgrom. Conversely, given a matching game in which any potential couple $(i,j) \in M \times W$ is endowed with a finite strategic game $G_{i,j} = (X_i, Y_j, U_{i,j}, V_{i,j})$, consider the set of contracts $Z := \bigcup_{(i,j) \in M \times W} Z_{i,j}$, where $Z_{i,j} := X_i \times Y_j$ and associate the agent's IRPs to the empty contract $\emptyset$. Then, the matching game is mapped into a matching with contracts setting, where the preference orders are deduced from players' payoff functions in the matching game. It is easy to check that the notions of stable allocation and external stability coincide. Consequently, our algorithm for external stability can be used to prove the existence of a stable allocation in Hatfield-Milgrom's model (or vice-versa).
Our notion of internal stability is however not present in Hatfield-Milgrom's model. It induces a refinement among their solutions which depends on how we model the strategic game between the players. For example, each bargaining model between a buyer and a seller will induce a different sharing of the surplus.
\section{Model without commitment: Nash stability}\label{sec:appendix_Nash_stability}
\begin{proposition}\label{prop:women_payoff_are_increasing}
The payoff of the proposed woman increases\footnote{Due to the monotonicity of women's payoffs, once a man matches with $j_0$, he exits the market and remains single forever.} strictly by at least $\varepsilon$.
\end{proposition}
\begin{proof}
It holds by construction.
\end{proof}
\begin{proposition}\label{prop:man_proposer_guarantees_improvement_of_epsilon}
Let $i$ be a man proposing to a woman $j$. Let $\lambda_i := val(\ref{eq:problem_P_max})$ be the highest payoff that $i$ is willing to offer to $j$. Let $v_j$ be the current payoff of $j$. Then, it holds that $\lambda_i \geq v_j + \varepsilon$.
\end{proposition}
\begin{proof}
Since $i$ proposed to $j$, there exists a contract $(x,y)$ such that the triple $(x,y,j)$ is solution to (\ref{eq:problem_P_i}). Therefore, the triple $(x,y, v_j + \varepsilon)$ is a feasible solution to Problem (\ref{eq:problem_P_max}), thus, $\lambda_i \geq v_j + \varepsilon$.
\end{proof}
\begin{proposition}\label{prop:winner_man_plays_feasible_payoff}
The winner of a competition always plays a final feasible contract (for him) and his partner's final payoff is never greater than the one he is willing to give her.
\end{proposition}
\begin{proof}
Let $i$ (proposer) and $i'$ (current partner) be two men competing for $j$. Let $(x^*,y^*)$ be $i$'s optimal proposal. Without loss of generality, suppose $i$ wins. Let $(\lambda_i, \hat{x}, \hat{y})$ be the solution of Problem (\ref{eq:problem_P_max}) for player $i$. Then, the pair $(\hat{x}, \hat{y})$ is a feasible solution of $i$'s Problem (\ref{eq:problem_P_new}), as $V_{i,j}(\hat{x}, \hat{y}) = \lambda_i \bi \lambda_{i'}$. Consider any contract $(x',y')$ such that $V_{i,j}(x',y') \bi V_{i,j}(\hat{x}, \hat{y})$. If $(x',y')$ satisfies $U_{i,j}(x',y') \geq \beta_{i}$, we obtain a contradiction as $(\lambda_i, \hat{x}, \hat{y})$ is solution of Problem (\ref{eq:problem_P_max}) for player $i$. Therefore, the solution $(x,y)$ of (\ref{eq:problem_P_new}) satisfies $V_{i,j}(x,y) \leq V_{i,j}(\hat{x}, \hat{y}) = \lambda_i$.
\end{proof}
\section{Model with commitment: external stability}\label{sec:appendix_stability}
\subsection{On the lattice structure.}\label{sec:appendix_proofs_lattice_structure}
The set of externally stable matching profiles may be an infinite semi-lattice as there may be infinitely many stable payoffs. The identification property handles this issue, although only for small enough values of $\varepsilon$. Indeed, consider any $\varepsilon \in (0,\varepsilon^*)$, where $\varepsilon^*$ is given by,
\begin{align}\label{eq:the_good_eps_for_lattice_structure}
\varepsilon^* = \min_{i \in M} \{|U_{i,\mu_i}(x_i,y_{\mu_i}) - U_{i,\mu'_i}(x'_i,y'_{\mu'_i})| : (\mu,x,y), (\mu',x',y') \in \Pi \}
\end{align}
Then, if the identification property holds for any two $\varepsilon$-externally stable matching profiles, $\Pi$ can be endowed with the partial order $ \bi_M$ defined by: $\forall (\mu,x,y), (\mu',x',y') \in \Pi$, $(\mu,x,y) \bi_M (\mu',x',y')$ if and only if $u_i(\mu,x,y) \geq u_i(\mu',x',y'),\forall i \in M$, and at least one man increases his payoff by at least $\varepsilon$.
Whenever the identification property holds and $\varepsilon \in (0,\varepsilon^*)$, all maximal elements of $(\Pi, \bi_M)$ correspond to a unique matching profile, denoted $\pi_M$. Although strategy profiles between two maximal elements may differ, the difference between each man's utilities is not greater than $\varepsilon$. Hence, considering equivalent all matching profiles where men's utilities do not differ by more than $\varepsilon$, the best $\varepsilon$-externally stable matching payoff for men is well defined and it is ($\varepsilon$-)unique. As in Gale-Shapley, we prove that Algorithm \ref{Algo:Propose_dispose_algo_general_case}, in which men propose and women dispose, outputs a maximal element $\pi_M$ of $(\Pi, \bi_M)$. To prove this property, given a man $i \in M$, consider his \textbf{best stable woman} $w_i \in W_0$ defined by,
\begin{align}\label{def:best_stable_woman}
(w_i, x_i, y_i) \in \argmax \{U_{i,j}(x'_i,y'_j): ((i,j), x'_i, y'_j) \in (\mu,x,y) \in \Pi \}.
\end{align}
From the uniqueness of the maximal matching in $\Pi$, we obtain the following lemma.
\begin{lemma}\label{lemma:different_men_have_different_optimal_stable_women}
If the identification property holds and $\varepsilon \in (0, \varepsilon^*)$, the following two properties hold as well:
\begin{itemize}[leftmargin = 0.7cm]\setlength\itemsep{0em}
\item[(1)] For any two different men $i,k \in M$, it holds that $w_i \neq w_k$.
\item[(2)] Any matching profile $\pi = (\mu,x,y)$ in which at least for one man $i$ it holds $U_i(\pi) \bi U_i(\pi_M) + \varepsilon$, is not $\varepsilon$-externally stable.
\end{itemize}
\end{lemma}
\begin{lemma}\label{lemma:men_can_always_propose_to_their_best_stable_women}
Suppose the identification property holds in $\Pi$ and $\varepsilon \in (0,\varepsilon^*)$. Let $t$ be an iteration of Algorithm \ref{Algo:Propose_dispose_algo_general_case} and $i \in M$ be the current proposer man. Let $w_i \in W$ be $i$'s stable woman $(\ref{def:best_stable_woman})$. Then, there always exists a strategy profile $(x,y) \in C_{i,w_i}$ such that $V_{i,w_i}(x,y) \geq V_{w_i}^t + \varepsilon$, with $V_{w_i}^t$ the payoff of $w_i$ at time $t$.
\end{lemma}
\begin{proof}
Suppose there exists an iteration $t \geq 1$ such that for any $(x,y) \in C_{i,w_i}$, $V_{i,w_i}(x ,y) \sm V_{w_i}^t + \varepsilon$, for some proposer man $i$. In particular, $w_i$ must be matched with someone, as her current payoff is not $\underline{v}_{w_i}$. Let $k \in M$ be $w_i$'s partner at time $t$. Since $k$ is matched with $w_i$, in particular he is not matched with his best stable woman $w_k$. Let $r \sm t$ be the iteration when $k$ proposed to $w_i$. Note that he did it either because he achieved a higher payoff than with $w_k$, or because $w_k$ was not a feasible solution. If the first case holds, there exists a contract $(x',y') \in X_k \times Y_{w_i}$ such that $U_{k,w_i}(x',y') \bi U_{k,w_k}(\pi_M) + \varepsilon$ (by the identification property and as $\varepsilon \sm \varepsilon^*$). Thus, any proposition made to $w_i$ between iterations $r$ and $t$ was won by $k$, meaning that $w_i$ continued being his best option (in particular better than $w_k$), and it increased the payoff of $w_i$. We arrive at a contradiction. Indeed, if $(x'',y'')$ is the strategy profile played by $k$ and $w_i$ at iteration $t$, then $(k,w_i)$ is an $\varepsilon$-blocking pair of $\pi_M$ with respect to $(x'',y'')$. Indeed, $k$ gets a payoff $\varepsilon$ higher than with $w_k$ by matching with $w_i$ and playing $(x'',y'')$, and for any $(x,y) \in C_{i,w_i}$, $V_{i,w_i}(x ,y) \sm V_{k,w_i}(x'',y'') + \varepsilon$. Therefore, it must hold that $w_k$ was not a feasible solution at time $r$, since she was matched with someone else, and was getting a payoff too high. Inductively, we obtain a sequence of men $\{i_1,...,i_{m}\}$ (ordered chronologically) such that none of them was able to propose to his best stable woman, because they had a payoff too high at the moment. Consider the first of them, $i_1$, and let $k$ be the partner of $w_{i_1}$ when $i_1$ proposed. 
Then, $k$ proposed to $w_{i_1}$ and not to $w_k$ because he got a better payoff with her than with $w_k$, who was single at that time, as before $i_1$ all men proposed to their best stable woman. As before, this contradicts the $\varepsilon$-external stability of $\pi_M$, as $(k, w_{i_1})$ would block it.
\end{proof}
\subsection{Strictly competitive games.}\label{sec:appendix_proofs_strictly_competitive_games}
The minimum of two matching profiles is well defined for strictly competitive games in $\mathcal{S}$. Indeed, given two matching profiles $\pi = (\mu,x,y)$ and $\pi' = (\mu',x',y')$, consider the minimum matching $\mu^{\wedge}$ defined by,
\begin{align*}
\mu^{\wedge}_i &:= \argmin\{U_{i,\mu_i}(x_i,y_{\mu_i}) ; U_{i,\mu'_i}(x'_i,y'_{\mu'_i})\}, \forall i \in M \\
&= \argmin\{\varphi^{-1}(g_{i,\mu_i}(x_i,y_{\mu_i})) ; \varphi^{-1}(g_{i,\mu'_i}(x'_i,y'_{\mu'_i}))\}, \forall i \in M \\
&= \varphi^{-1}(\argmin\{g_{i,\mu_i}(x_i,y_{\mu_i})) ; (g_{i,\mu'_i}(x'_i,y'_{\mu'_i}))\}), \forall i \in M
\end{align*}
\begin{align*}
&= \varphi^{-1}(\argmin\{-\phi(V_{i,\mu_i}(x_i,y_{\mu_i})) ; -\phi(V_{i,\mu'_i}(x'_i,y'_{\mu'_i})\})), \forall i \in M \\
&= \varphi^{-1}(\argmax\{\phi(V_{i,\mu_i}(x_i,y_{\mu_i})) ; \phi(V_{i,\mu'_i}(x'_i,y'_{\mu'_i}))\}), \forall i \in M \\
&= \varphi^{-1} \circ \phi( \argmax\{V_{i,\mu_i}(x_i,y_{\mu_i}) ; V_{i,\mu'_i}(x'_i,y'_{\mu'_i})\}), \forall i \in M \\
&= \argmax\{V_{i,\mu_i}(x_i,y_{\mu_i}) ; V_{i,\mu'_i}(x'_i,y'_{\mu'_i})\}, \forall i \in M \\
&= \nu_i^{\vee}, \forall i \in M
\end{align*}
As $\nu^{\vee}$ is always well defined, $\mu^{\wedge}$ is indeed, a proper matching. Moreover, $\pi^{\vee}$ remains externally stable and we recover the duality between $\nu^{\vee}$ and $\mu^{\wedge}$.
\section{Model with commitment: external-internal stability}\label{sec:appendix_external_internal_stability}
\begin{theorem}\label{teo:the_outside_options_of_a_blocking_pair_are_higher_than_their_payoff}
Let $\pi = (\mu,x,y)$ be a matching profile. Given $\varepsilon \bi 0$, consider the following outside options, for any $(i,j) \in M \times W$,
\begin{align}
\begin{split}\label{eq:eps_outside_options}
u_i^{\varepsilon} &:= \max \{U_{i,j'}(x',y') : V_{i,j'}(x',y') \bi v_{j'}(\pi) + \varepsilon, j' \in W_0 \setminus\{\mu_i\}, x' \in X_i, y' \in Y_{j'}\}, \\
v_j^{\varepsilon} &:= \max \{V_{i',j}(x',y') : U_{i',j}(x',y') \bi u_{i'}(\pi) + \varepsilon, i' \in M_0 \setminus\{\mu_j\}, x' \in X_{i'}, y' \in Y_{j}\}
\end{split}
\end{align}
Then, $\pi$ is $\varepsilon$-externally stable if and only if $u_i^{\varepsilon} \leq u_i(\pi) + \varepsilon$ and $v_j^{\varepsilon} \leq v_j(\pi) + \varepsilon$, for any $(i,j) \in M \times W$.
\end{theorem}
\begin{proof}
Suppose that $\pi$ is $\varepsilon$-externally stable and let $i \in M$ be a man such that $u_i^{\varepsilon} \bi u_i(\pi) + \varepsilon$. Thus, there exists $j \in W_0 \setminus \{\mu_i\}$ and $(x',y') \in X_i \times Y_j$ such that $V_{i,j}(x',y') \bi v_j(\pi) + \varepsilon$ and $u_i^{\varepsilon} = U_{i,j}(x',y')$. It is clear that $(i,j)$ is an $\varepsilon$-blocking pair of $\pi$, so we obtain a contradiction. The same conclusion holds if for any $j \in W$, $v_j^{\varepsilon} \bi v_j(\pi) + \varepsilon$.
Conversely, suppose that for any $(i,j) \in M \times W$, $u_i^{\varepsilon} \leq u_i(\pi) + \varepsilon$ and $v_j^{\varepsilon} \leq v_j(\pi) + \varepsilon$. Let $(i,j) \in M \times W$ be an $\varepsilon$-blocking pair of $\pi$. Then, there exists $(x',y') \in X_i \times Y_j$ such that $U_{i,j}(x',y') \bi u_i(\pi) + \varepsilon$ and $V_{i,j}(x',y') \bi v_j(\pi) + \varepsilon$. In particular, notice that $u_i^{\varepsilon} \geq U_{i,j}(x',y')$ and $v_j^{\varepsilon} \geq V_{i,j}(x',y')$, as each player can offer the other one at least $\varepsilon$ more than their current payoff. We obtain a contradiction.
\end{proof}
\begin{lemma}\label{lemma:outside_options_are_monotone_in_zero_sum_games}
Let $\Gamma$ be a matching game where all strategic games are zero-sum games with a value. Let $\pi = (\mu,x,y)$ be an externally stable matching and $(i,j)$ be a matched couple. Let $w_{i,j}$ be the value of their game. Consider the sequence of outside options of $(i,j)$ denoted by $(u_i^t, v_j^t)_t$, with $t$ being the iterations of Algorithm \ref{Algo:strategy_profiles_modification}. If there exists $t^*$ such that $w_{i,j} \leq u_i^t$ (resp. $w_{i,j} \geq v_j^t$), then the subsequence $(u_i^t)_{t\geq t^*}$ (resp. $(v_j^t)_{t\geq t^*}$) is non-increasing (resp. non-decreasing).
\end{lemma}
\begin{proof}
Suppose that there exists an iteration $t$ in which $w_{i,j} \leq u_i^t \leq v_j^t$, so couple $(i,j)$ switches its payoff to $u_i^t$. Let $(\hat{x}_i, \hat{y}_j)$ be the constrained Nash equilibrium played by $(i,j)$ at iteration $t$. Let $t'$ be the next iteration in which $(i,j)$ moves. Since $(\hat{x}_i, \hat{y}_j)$ must be $(u_i^{t'}, v_j^{t'})$-feasible, in particular it holds $u_i^{t'} \leq g_{i,j}(\hat{x}_i, \hat{y}_j) = u_i^t$. Therefore, the sequence of outside options starting from $t$ is non increasing.
\end{proof}
\section{Q-Concave games and connected games.}\label{sec:appendix_q_concave_games_and_connected_games}
We define the class of connected games, which allows mixing the different feasible games discussed while keeping the convergence of Algorithm \ref{Algo:strategy_profiles_modification}. Consider a couple $(i,j)$ and a two-person game $G_{i,j} = (X_i,Y_j,U_{i,j},V_{i,j})$, with $X_i,Y_j$ compact and convex subsets of topological spaces and $U_{i,j},V_{i,j}$ continuous own-payoff q-concave functions (Definition \ref{def:q_concave_function}).
\begin{proposition}\label{prop:in_q_concave_games_players_achieve_their_outside_options}
Let $(x',y')$ be a $(u_0,v_0)$-constrained Nash equilibrium of the q-concave game $G_{i,j}$, that is not a Nash equilibrium. Then, either $U_{i,j}(x',y') = u_0$ or $V_{i,j}(x',y') = v_0$.
\end{proposition}
\begin{proof}
Let $(x',y')$ be a $(u_0,v_0)$-constrained Nash equilibrium that is not a Nash equilibrium. Then, at least one of the two players has a profitable deviation. Without loss of generality, suppose player $i$ can deviate to $x$ and increase his payoff. In particular, $V_{i,j}(x,y') \sm v_0$. Let $t \in [0,1]$ and define $x_t := tx' + (1-t)x$. Since $U_{i,j}(x,y') \bi U_{i,j}(x',y')$, by the $q$-concavity of $U_{i,j}$, $U_{i,j}(x_t,y') \bi U_{i,j}(x',y')$ for any $0 \sm t \sm 1$. Then, $V_{i,j}(x_t,y') \sm v_0$ for any $0 \sm t \sm 1$. As $V_{i,j}(x',y') \geq v_0$, by continuity of $V_{i,j}$, $\lim_{t \to 1} V_{i,j}(x_t,y') = v_0 = V_{i,j}(x',y')$. Therefore, player $j$ obtains exactly her outside option when playing $(x',y')$. Analogously, if player $j$ has a profitable deviation, then player $i$ gets exactly $u_0$.
\end{proof}
Algorithm \ref{Algo:strategy_profiles_modification} converges for feasible q-concave games that are connected (Definition \ref{def:connected_game}).
\begin{definition}\label{def:connected_game}
A two-person game $G_{i,j} = (X_i,Y_j,U_{i,j},V_{i,j})$ is a \textbf{connected-game} if there exists a Nash equilibrium $(x^*,y^*)$ such that for any pair of outside options $(u_0,v_0)$ with $NE \cap CNE(u_0,v_0) = \emptyset$, there exists a $(u_0,v_0)$-constrained Nash equilibrium $(x_0,y_0)$, such that one of the following properties holds,
\begin{itemize}[leftmargin = 0.6cm]\setlength\itemsep{0em}
\item[(a)] $U_{i,j}(x^*,y^*) \sm u_0 \leq U_{i,j}(x_0,y_0)$, $v_0 \leq V_{i,j}(x_0,y_0)\sm V_{i,j}(x^*,y^*)$ and $j$ has a profitable deviation,
\item[(b)] $V_{i,j}(x^*,y^*) \sm v_0 \leq V_{i,j}(x_0,y_0)$, $u_0 \leq U_{i,j}(x_0,y_0)\sm U_{i,j}(x^*,y^*)$ and $i$ has a profitable deviation.
\end{itemize}
\end{definition}
\begin{theorem}\label{teo:algo_2_converges_for_q_concave_games}
Consider a matching game in which all games $(G_{i,j} : (i,j) \in M \times W)$ are feasible, q-concave, and connected. Then, there exists an oracle under which Algorithm \ref{Algo:strategy_profiles_modification} converges.
\end{theorem}
\begin{proof}
Let $\pi := (\mu,x,y)$ be an externally stable matching profile, $(i,j)$ be a couple in $\mu$ and $t$ an iteration in which they change of strategy profile. Let $(u_0,v_0)$ be their outside options and $(x_i,y_j)$ their current strategy profile. If $NE \cap CNE(u_0,v_0)$ is non-empty, $i$ and $j$ pick a $(u_0,v_0)$-feasible Nash equilibrium and they keep playing it forever. If $NE \cap CNE(u_0,v_0) = \emptyset$, as $G_{i,j}$ is a connected game, there exists a Nash equilibrium $(x_i^*,y_j^*)$ and a $(u_0,v_0)$-constrained Nash equilibrium $(x_0,y_0)$ such that either (a) or (b) holds.
Without loss of generality, suppose that (a) holds. As $G_{i,j}$ is q-concave and $j$ has a profitable deviation from $(x_0,y_0)$, player $i$ gets exactly her outside option (Proposition \ref{prop:in_q_concave_games_players_achieve_their_outside_options}), so $U(x_0,y_0) = u_0$. Let $r$ be the next iteration at which $i$ and $j$ change their strategy profile. Let $(u_1,v_1)$ be their new outside options, and suppose that $NE \cap CNE(u_1,v_1)$ is empty. As $\pi$ is externally stable, it holds that $u_1 \leq U(x_0,y_0) = u_0$ and $v_1 \leq V(x_0,y_0)$ (Theorem \ref{teo:the_outside_options_of_a_blocking_pair_are_higher_than_their_payoff}). Since $G_{i,j}$ is connected, there exists a $(u_1,v_1)$-constrained Nash equilibrium (with respect to the same Nash equilibrium $(x_i^*,y_j^*)$) $(x_1,y_1)$, such that either (a) or (b) holds. However, as $v_1 \leq V(x_0,y_0) \sm V(x_i^*,y_j^*)$, necessarily (a) must hold. Therefore, the new constrained Nash equilibrium satisfies $U(x_i^*,y_j^*) \sm u_1 = U(x_1,y_1)$ and $v_1 \leq V(x_1,y_1) \sm V(x_i^*,y_j^*)$, where player $i$ gets her outside option since $G_{i,j}$ is q-concave. By induction, consider a sequence of iterations in which players $i$ and $j$ change their strategy profile. In particular, they never find a feasible Nash equilibrium, as they keep changing their strategy profile forever. Then, we obtain a sequence of outside options $(u_t,v_t)_t$ such that for each pair, there exists a $(u_t,v_t)$-constrained Nash equilibrium $(x_t,y_t)$ satisfying (1) $U(x_i^*,y_j^*) \sm u_t = U(x_t,y_t),\ \forall t \in \mathbb{N}$, (2) $v_t \leq V(x_t,y_t) \sm V(x_i^*, y_j^*), \forall t \in \mathbb{N}$, (3) $(u_t)_t = (U(x_t,y_t))_t$ is decreasing. Therefore, the sequence $(u_t)_t$ is convergent and, at the limit, the couple plays the Nash equilibrium $(x_i^*,y_j^*)$. As $(i,j)$ is an arbitrary couple, Algorithm \ref{Algo:strategy_profiles_modification} converges.
\end{proof}
The class of connected games is rich enough to include \textbf{zero-sum games} and \textbf{infinitely repeated games}. Indeed, consider a zero-sum game $G = (X,Y,g)$ with value $w$ and $(u_0,v_0)$ the players' outside options (Section \ref{sec:zero_sum_games_are_feasible}). If $NE \cap CNE(u_0,v_0) = \emptyset$, the value of the game is $(u_0,v_0)$-unfeasible, thus either $u_0 \sm v_0 \sm w$, or $w \sm u_0 \sm v_0$. If the first case holds, there exists a constrained Nash equilibrium $(x_0,y_0)$ such that $g(x_0,y_0) = v_0$. Moreover, player $1$ has a profitable deviation since she can always guarantee the value of the game. In conclusion, game $G$ satisfies Property (b) of a connected game. Analogously, if $w \sm u_0 \sm v_0$ holds, game $G$ satisfies Property (a) of a connected game.
For infinitely repeated games (Section \ref{sec:infinitely_repeated_games_are_feasible}), consider a two-person stage game $G$ and players' outside options $(u_0,v_0)$ such that $E \cap E_{u_0,v_0} = \emptyset$. In particular, either $\alpha \leq u_0$ and $v_0 \sm \beta$, or $\alpha \bi u_0$ and $v_0 \geq \beta$. If the first case holds, consider the payoff vector $(u,v)$ defined by the oracle in the proof of Theorem \ref{teo:Class_of_feasible_games} for infinitely repeated games. Moreover, consider the respective strategy profile $\sigma$ in which both players follow a pure plan to the payoff $(u,v)$, such that, if player $1$ deviates, player $2$ punishes her by reducing her payoff to $\alpha$, and if player $2$ deviates, player $1$ ignores it. Note that $\sigma$ is not a uniform equilibrium as $(u,v)$ is not a uniform equilibrium payoff. It holds that $\sigma$ is a $(u_0,v_0)$-constrained Nash equilibrium and satisfies Property (a) of a connected game, with $(\alpha,\beta)$ being the Nash equilibrium payoff. Moreover, as $\sigma$ is not a uniform equilibrium, at least one of the players must have a profitable deviation. As player $1$ cannot deviate and increase her payoff, player $2$ must have a profitable deviation. Analogously, if $\alpha \bi u_0$ and $v_0 \geq \beta$ hold, $G$ satisfies Property (b) of connected games.
Unlike zero-sum games, infinitely repeated games are not necessarily q-concave as players do not get exactly their outside option when their partners have a profitable deviation. Moreover, comparing the proofs of Theorem \ref{teo:strategy_profile_modification_is_correct} for infinitely repeated games and Theorem \ref{teo:algo_2_converges_for_q_concave_games}, the same monotonicity over the outside options is not established. In the first theorem, the outside option of the player whose punishment level is too high is increased. In the second theorem, the outside option of the player whose Nash equilibrium payoff is too low is decreased.
\section{Constrained Nash equilibria and Quasi-Variational Inequalities}\label{sec:appendix_QVI_CNE}
Consider a two-person game $G = (X_i,Y_j,U_{i,j},V_{i,j})$ with $X_i,Y_j$ strategy sets and $U_{i,j},V_{i,j}$ payoff functions. Given two outside options $(u_i^0,v_j^0)$, consider the point-to-set mappings,
\begin{align}\label{eq:feasible_strategies}
K_i(y') := \{x \in X_i : V_{i,j}(x,y' ) \geq v_j^0 \}, \ \ K_j(x') := \{y \in Y_j : U_{i,j}(x',y ) \geq u_i^0 \}
\end{align}
Then, a strategy profile $(x',y')$ is a $(u_i^0,v_j^0)$-constrained Nash equilibrium (CNE) if it solves,
\begin{align}\label{eq:CNE_cond_2_bis}
\begin{split}
&U_{i,j}(x',y') = \max_{s \in K_i(y')} U_{i,j}(s,y'),\\
&V_{i,j}(x',y') = \max_{t \in K_j(x')} V_{i,j}(x',t)
\end{split}
\end{align}
\begin{remark}
Note that for a given strategy profile $(x',y')$, the sets (\ref{eq:feasible_strategies}) are convex if the strategy sets $X_i,Y_j$ are convex as well and the payoff functions are own-concave (or q-concave).
\end{remark}
We aim to express condition (\ref{eq:CNE_cond_2_bis}) as a quasi-variational inequality.
\begin{proposition}\label{prop:QVI_formulation_for_CNE_Appendix}
Let $X_i,Y_j$ be closed convex subsets of $\mathbb{R}^{n}$ and $\mathbb{R}^{m}$, respectively. In addition, suppose that the payoff functions $U_{i,j},V_{i,j}$ are concave and differentiable on the player's own-strategy. Then, a strategy profile $(x',y')$ satisfies condition (\ref{eq:CNE_cond_2_bis}) if and only if it satisfies,
\begin{align}\label{eq:QVI_formulation_for_CNE}
\begin{split}
&\nabla_x U_{i,j}(x',y')^T \cdot (x'-s) \geq 0, \forall s \in K_i(y')\\
&\nabla_y V_{i,j}(x',y')^T \cdot (y'-t) \geq 0, \forall t \in K_j(x')
\end{split}
\end{align}
Setting $K_{i,j}(x',y') = K_i(y') \times K_j(x')$, $F_{i,j} = [\nabla_x U_{i,j}, \nabla_y V_{i,j}]$, a strategy profile $(x',y')$ satisfies condition (\ref{eq:CNE_cond_2_bis}) if and only if it is solution to the QVI($K_{i,j},F_{i,j}$).
\end{proposition}
\begin{proposition}\label{prop:qvi_solutions_are_feasible}
Consider a strategy profile $(x',y')$, solution of QVI($K_{i,j},F_{i,j}$) as defined in Proposition \ref{prop:QVI_formulation_for_CNE_Appendix}. Then, $(x',y')$ is a $(u_i^0,v_j^0)$-feasible contract and therefore, it is a $(u_i^0,v_j^0)$-constrained Nash equilibrium.
\end{proposition}
\begin{proof}
Let $(x',y')$ be a solution of QVI($K_{i,j},F_{i,j}$). In particular it holds, $u(x',y') \geq u_i^0, \text{as } y' \in K_j(x')$, and $v(x',y') \geq v_j^0, \text{as } x' \in K_i(y')$. Therefore, $(x',y')$ is $(u_i^0,v_j^0)$-feasible. As by construction, $(x',y')$ also satisfies condition (\ref{eq:CNE_cond_2_bis}), we conclude that $(x',y')$ is a $(u_i^0,v_j^0)$-constrained Nash equilibrium.
\end{proof}
From Proposition \ref{prop:QVI_formulation_for_CNE_Appendix} and Proposition \ref{prop:qvi_solutions_are_feasible}, the constrained Nash equilibrium problem is a generalized Nash equilibrium problem in which the point-to-set mappings $K_i$ and $K_j$ give the feasible deviations of each player.
Considering convex and compact strategy sets $X_i,Y_j$, and continuously differentiable concave payoff functions $U_{i,j},V_{i,j}$ (standard assumptions in game theory), most of the hypotheses of the existence theorems for QVI solutions are satisfied \cite{facchinei2007finite,harker1991generalized,noor1988quasi}. Moreover, since $X_i,Y_j$ are convex sets and $U_{i,j},V_{i,j}$ are continuous own-payoff concave functions, the mappings $K_i$ and $K_j$ are closed- and convex-valued. By choosing outside options that are not too restrictive, we can easily obtain the non-emptiness of the point-to-set mappings. The upper semicontinuity is a consequence of the payoff functions' continuity and the fact that $K_i$ and $K_j$, for some given strategy profile, are polytopes. Indeed, consider $(y_n) \subseteq Y_j$, $x \in X_i$ and $x_n \in K_i(y_n), \forall n \geq 0$. Consider in addition that $y_n \to y$ and $x_n \to x$. In particular, for any $n \geq 0$, $V_{i,j}(x_n,y_n) \geq v_j^0$, as $x_n \in K_i(y_n)$. Then, taking $n \to \infty$, as $V_{i,j}$ is a continuous function, $v_j^0 \leq V_{i,j}(x_n,y_n) \to V_{i,j}(x,y)$ and therefore, $x \in K_i(y)$. The only missing assumption is the lower semicontinuity of the point-to-set mappings, which cannot be guaranteed.
\section{A non feasible game.}\label{sec:appendix_non_feasible_game}
Consider the following matrix game $G$
\begin{align*}
\begin{tabular}{c|c|c|c|}
& L & M& R \\
\hline
\ T\ & 2,1 & -10,-10 & 3,0 \\
\hline
M & 3,0 & 2,1 & -10,-10 \\
\hline
B & -10,-10 & 3,0 & 2,1 \\
\hline
\end{tabular}
\end{align*}
Game $G$ has only one Nash equilibrium, in mixed strategies, in which agents play the three strategies with the same probability, getting a payoff of $-5/3$ and $-3$ respectively. Consider outside options $u_0, v_0$ equal to $0$ and let $Z_{1,2}$ be the set of feasible contracts for players $1$ and $2$. Note that $Z_{1,2}$ is non-empty since $(M,M) \in Z_{1,2}$. We claim that there are no constrained equilibria in $Z_{1,2}$. To prove this, consider an acceptable and feasible payoff $(u',v')$. Let $(x,y) = ((x_1,x_2,x_3),(y_1,y_2,y_3))$ be a mixed strategy profile that achieves the payoff $(u',v')$, in which each coordinate corresponds to playing Top, Medium and Bottom respectively for player $1$, and Left, Medium and Right for player $2$. Note that if any of the players is playing a pure strategy, the other player can improve his/her payoff by a unilateral deviation, respecting the outside option of the first player. For example, if player $1$ is playing $x = (1,0,0)$, that is, he/she is playing Top, player $2$ can deviate to play Left with probability $1$, increasing his/her payoff and still giving player $1$ at least his/her outside option. In the same way, if player $2$ plays Left, player $1$ can deviate to play Medium. So no pure strategy is a constrained equilibrium. Going even further, if a player does not play the three actions with positive probability, the other can always deviate and increase his/her payoff. For example, if player $1$ plays $(x_1,x_2,0)$, player $2$ can deviate and play Left with probability $1$ if $x_1$ is large enough, or a mixed strategy mixing only Left and Medium, if $x_2$ is large enough. In any of the two cases, players converge to play pure strategies, deviating all the time and never reaching a constrained Nash equilibrium.
Consider that both players play all strategies with a positive probability. Note that none of them will play the three actions with equal probability, since the Nash equilibrium does not belong to $Z_{1,2}$. Without loss of generality, let us assume that $x_1 \bi 1/3 \geq x_2$. Recalling that $x_3 = 1 - x_1 - x_2$ and $y_3 = 1 - y_1 - y_2$, the expected payoff of player $2$ is given by,
\begin{align*}
v' &= v(x,y) = y_1(12x_1 + 21x_2 - 11) + y_2(-9x_1 + 12x_2 - 1) + (1-x_1 - 11x_2)
\end{align*}
It holds that $x_1,x_2,y_1,y_2$ are positive and $x_1 + x_2 \sm 1$, $y_1 + y_2 \sm 1$, since players have full support. Then, $-9x_1 + 12x_2 - 1 \sm 0$, so player $2$ can deviate and increase his/her payoff by decreasing $y_2$. Consider now the payoff of player $1$,
\begin{align*}
u' &= u(x,y) = y_1(11x_1 + 25x_2 - 12) + y_2(-14x_1 + 11x_2 + 1) + (x_1 - 12x_2 + 2)
\end{align*}
It holds $-14x_1 + 11x_2 + 1 \sm 0$, so player $1$ increases his/her payoff if $y_2$ decreases. Therefore, player $2$ has a profitable deviation that still guarantees to player $1$ his/her outside option. Intuitively, since player $1$ is more likely to play Top, it makes sense that both players improve their payoff if player $2$ decreases the probability of playing Medium, so they avoid getting $-10$ as a payoff. We conclude that $(u',v')$ is not the payoff of a constrained equilibrium payoff, and therefore, the game $G$ is not feasible.
\section{Extensive form games with perfect information.}\label{sec:extensive_form_games_are_feasible}
Extensive form games with perfect information are also feasible games, as proved in this section. Consider a finite set of players $I$ and a set $P$ of nodes of a tree. For each $p \in P$, we define its set of successor nodes in the tree by $S(p)$. For each $i \in I$ we consider a set $P^i$ of nodes of the tree where $i$ has to play. A \textbf{strategy} $\sigma^i$ for player $i$ is an application on $P^i$ which associates to each position $p \in P^i$ a successor node in $S(p)$. The set of terminal nodes or results is denoted by $R$, and the payoff of player $i$ is given by $u^i(r)$ with $r \in R$.
Consider outside options $u^i_0 \in \mathbb{R}$ for all players $i \in I$. For player $i$ we consider the set of terminal nodes $R_0^i$ such that his/her payoff is at least his/her outside option. We call the game $G$ \textbf{admissible} if there exists at least one strategy $\sigma$ such that $F[\sigma]$, the final node reached by $\sigma$, belongs to $\bigcap_{i \in I} R_0^i$, so players can obtain at least their outside options. It is straightforward to prove (by induction on the length) that the game is admissible if and only if $\bigcap_{i \in I} R_0^i$ is non-empty.
\begin{definition}\label{def:constrained_Nash_equilibria_tree_games}
Let $G$ be an admissible game and let $\sigma = (\sigma^i)_{i \in I}$ be a strategy profile such that $F[\sigma] \in \bigcap_{i \in I} R_0^i$. $\sigma$ is a \textbf{constrained equilibrium} if for any $j \in I$ and for any strategy $\tau^j$ of player $j$, if $u^j(F[\tau^j, \sigma^{-j}]) \bi u^j(F[\sigma])$ then $F[\tau^j, \sigma^{-j}] \notin \bigcap_{i \in I} R_0^i$, that is, every time a player has a profitable deviation from $\sigma$, there is another player for whom the terminal node is not feasible.
\end{definition}
A strategy profile $\sigma$ naturally induces a strategy in the subgames $G[p]$, where $p$ is a node of the tree and $G[p]$ the game starting from $p$. As with subgame-perfect equilibrium, we can define the notion of constrained subgame-perfect equilibrium.
\begin{definition}\label{def:constrained_subgame_perfect_equilibrium}
A strategy profile $\sigma$ is a \textbf{constrained subgame-perfect equilibrium} if for each position $p$ such that $G[p]$ is admissible, the continuation strategy $\sigma[p]$ induced by $\sigma$ is a constrained equilibrium of $G[p]$.
\end{definition}
\begin{definition}\label{def:tree_game_feasible}
A finite perfect information game $G$ is called \textbf{feasible} if any time that there exists a strategy profile $\sigma$ such that $F[\sigma] \in \bigcap_{i \in I} R_0^i$, then there exists $\tau$ a constrained subgame-perfect equilibrium such that $F[\tau] \in \bigcap_{i \in I} R_0^i$.
\end{definition}
\begin{theorem}\label{teo:finite_perfect_information_games_are_feasible}
Every finite perfect information game is feasible.
\end{theorem}
\begin{proof}
We make the proof by \textbf{forward induction} over the length $n$ of the game. Let $i$ be the player who plays at the first node $r$, the root of the tree. For $n = 1$, note that $G$ corresponds to a game where only $i$ plays and then the outputs are announced. Player $i$ will choose the action that maximizes his/her payoff subject to choosing an ending node in $\bigcap_{i \in I} R_0^i$, obtaining a strategy $\sigma$ in which nobody can deviate without violating the constraint. Therefore, $\sigma$ is a constrained equilibrium that is subgame perfect because the game has no subgame.
Suppose that any perfect information game of length $n$ is feasible, and let $G$ be a game of length $n+1$. Suppose $G$ is admissible, so there exists a path from the root to a terminal node that gives all players a payoff greater than or equal to their outside options. We aim to prove that $G$ has a constrained subgame-perfect equilibrium.
Let $S(r)$ be the set of all possible nodes that $i$ can choose when he/she plays for the first time, that is, $S(r)$ is the set of successors of the root of the tree. Since $G$ is admissible, there is at least one node $p \in S(r)$ such that $G[p]$ is also admissible. Let $S' \subseteq S(r)$ be the set of all nodes $p$ in $S(r)$ such that $G[p]$ is admissible. By induction, all subgames $G[p]$ with $p \in S'$ are feasible, so for each of them there exists $\sigma'_p$ a constrained subgame-perfect equilibrium.
Consider the strategies $\sigma_p := (p, \sigma'_p)$ in which player $i$ chooses a node $p \in S'$ in his/her first turn and then, in the subgame $G[p]$, players follow the constrained equilibrium $\sigma'_p$. From all strategies $\sigma_p$ with $p \in S'$, consider the one that maximizes $i$'s payoff, called $\sigma_{p^*}$. We claim that $\sigma_{p^*}$ is a constrained subgame-perfect equilibrium of game $G$. Indeed, for $p^*$ the induced strategy $\sigma_{p^*}[p^*]$ coincides with $\sigma'_{p^*}$ so it corresponds to a constrained subgame-perfect equilibrium. Moreover, since $i$ chose the best node $p \in S'$ for him/her, he/she has no incentive to deviate to any other node in $S'$. Therefore, the strategy $\sigma_{p^*}$ is a constrained subgame-perfect equilibrium, since choosing any other node in $S(r) \setminus S'$ leads to a non-admissible subgame.
We also present an alternative proof by \textbf{backward induction} in which we construct a constrained subgame-perfect equilibrium or we prove that the tree is not admissible. Consider the longest path from the root $r$ to some terminal node $t$ in $R$. Note that there is a unique path from $r$ to $t$ since the tree is an acyclic graph. Let $i_t$ be the last player to play in this path and consider the set $R_{i_t}$ of all the terminal nodes that $i_t$ can choose. The set is non-empty since $t \in R_{i_t}$. Next, consider the set of all nodes in $R_{i_t}$ that belong to $\bigcap_{i \in I} R_0^i$. If the intersection $\bigcap_{i \in I} R_0^i \cap R_{i_t}$ is non-empty, player $i_t$ can choose a final node that gives all players at least their outside option. If this is the case, $i_t$ picks the one that maximizes his/her payoff. On the other hand, if the intersection is empty, player $i_t$ still picks a node maximizing his/her own payoff. Note that, in the first case player $i_t$ has no incentive to deviate to any other node in $\bigcap_{i \in I} R_0^i$, so if he/she has a profitable deviation, there will be a player who receives less than his/her outside option. In the second case, player $i_t$ is playing in a non-admissible game.
Once $i_t$ has chosen his/her node, we erase all the nodes of $R_{i_t}$ from the tree and replace the node in which $i_t$ had to play by the terminal node in which all players receive the payoff related to the choice of $i_t$. Once this is done, we come back to the first step of finding the longest path from the root to some terminal node. We iterate until reducing the tree to just the root $r$.
Since each time a player has to decide the final node to reach in the tree, either he/she chooses a constrained equilibrium or a non-admissible game, the final result of this procedure leads to either finding a constrained subgame-perfect equilibrium of the game $G$, or proving that $G$ is a non-admissible game, so we conclude that $G$ is feasible.
\end{proof}
\section{Other notions of external stability.}\label{sec:other_notions_of_external_stability}
\begin{definition}\label{def:unilateral_externally_stable_matching_prof}
A matching profile $\pi=(\mu,x,y)$ is \textbf{unilateral externally stable} if, \begin{itemize}[leftmargin = 0.6cm]\setlength\itemsep{0em}
\item[(a)] For any $i \in M$ and $j\in W$, $u_i(\pi) \geq \underline{u}_i \text{ and } v_j(\pi) \geq \underline{v}_j$.
\item[(b)] There is no $(i,j) \in M \times W$, not matched by $\mu$, and no $(s_i,t_j)\in X_i \times Y_j$ such that either $U_{i,j}(s_i,y_j) > u_i(\pi)$ and $V_{i,j}(s_i,y_j)>v_j(\pi)$ or $U_{i,j}(x_i,t_j) > u_i(\pi)$ and $V_{i,j}(x_i,t_j)>v_j(\pi)$.
\end{itemize}
\end{definition}
Being unilateral externally stable is indeed weaker than being externally stable, as any unilateral externally blocking pair is an externally blocking pair as well. The following notion, weaker than the ones already defined, assumes that players cannot change actions if they want to move from one partner to another.
\begin{definition}\label{def:weak_externally_stable_matching_prof}
A matching profile $\pi=(\mu,x,y)$ is \textbf{weakly externally stable} if,
\begin{itemize}[leftmargin = 0.6cm]\setlength\itemsep{0em}
\item[(a)] For any $i \in M$ and $j \in W$, $u_i(\pi) \geq \underline{u}_i$ and $v_j(\pi) \geq \underline{v}_j$,
\item[(b)] There is no $(i,j)\in M \times W$, not matched by $\mu$ s.t. $U_{i,j}(x_i,y_j) > u_i(\pi)$ and $V_{i,j}(x_i,y_j)>v_j(\pi)$.
\end{itemize}
\end{definition}
\begin{remark}
A matching profile is weakly externally stable if after fixing the chosen actions, the matching is pairwise stable in Gale-Shapley's sense. Therefore, if actions are interpreted as investments decided before players are matched, as in N\"{o}ldeke-Samuelson's setting \cite{noldeke2015investment}, our unilateral/weak external stability notions and their stability notion are related.
\end{remark}
Consider that all couples play the same fixed game $G = (X,Y, U,V)$.
\begin{theorem}
Consider a matching profile $\pi = (\mu, x, y)$ in which all couples play the same Nash equilibrium $(x,y)$. Then, $\pi$ is unilateral externally stable and internally Nash stable.
\end{theorem}
The proof is direct: in the setting in which all couples play the same game and the same strategy profile, if a matching profile is internally Nash stable, then it is unilateral externally stable. Therefore, if all couples play the same two-person game $G$ with at least one Nash equilibrium, we obtain the existence of matching profiles that are unilateral externally stable and internally Nash stable. Going even further, since unilateral externally stable matchings are also weakly externally stable, we obtain the existence of weak externally stable and Nash stable matchings.
This last combination was studied by Jackson and Wolinsky \cite{jackson1996strategic} and Bich and Morhain \cite{bich2017existence} in $2017$, in network formation games. In such a model, agents can create links with all partners and they can choose actions. The payoff function of an agent depends on the links created and the actions played by all the agents. Their concept of \textit{Nash-pairwise stability} corresponds in our model to the combination of weak external and internal Nash stability.
\section{A numerical example}\label{sec:appendix_numerical_example}
Consider a matching game with three men $M = \{i_1,i_2,i_3\}$ and three women $W= \{j_1,j_2,j_3\}$, who have the following preferences,
\begin{align*}
A = \begin{tabular}{|c|c|c|}
\hline
83 & 85 & 99 \\
\hline
74 & 13 & 15 \\
\hline
58 & 49 & 54 \\
\hline
\end{tabular} \ \ \ B = \begin{tabular}{|c|c|c|}
\hline
69 & 6 & 28 \\
\hline
88 & 2 & 70 \\
\hline
72 & 18 & 9 \\
\hline
\end{tabular}
\end{align*}
$A(i,j)$ (resp. $B(i,j)$) represents the fixed utility that $i \in M$ (resp. $j \in W$) receives if he (resp. she) matches with $j$ (resp. $i$). Suppose that all agents have null IRPs, i.e. $(\underline{u}_i, \underline{v}_j) = (0,0), \forall (i,j) \in M \times W$. Suppose in addition that if a couple $(i,j)$ is created, as in Example \ref{ex:one_couple_example}, each agent can decide for a linear monetary transfer to the partner. Formally, if $x_i,y_j \geq 0$ are $i$ and $j$ respective monetary transfers, their utilities are $U_{i,j}(x_i,y_j) = A(i,j) - x_i + y_j$ and $V_{i,j}(x_i,y_j) = B(i,j) + x_i - y_j$.
\subsection{Computing a Nash stable allocation}
As already discussed, internal Nash stable allocations for matching with transfer problems correspond to matching profiles in which no agent makes a positive transfer. Due to this, computing a Nash stable allocation is equivalent to Gale-Shapley's stable allocations where the ordinal preferences are deduced from the matrices $A$ and $B$. Algorithm \ref{Algo:Propose_dispose_algo_general_case} is equivalent to Gale-Shapley's algorithm in which men are proposing, as competitions are reduced to preference comparisons. We can summarize Algorithm \ref{Algo:Propose_dispose_algo_general_case} in that case in the following pseudo-code.
\begin{algorithm}[H]
Set $M' \leftarrow \{i_1,i_2,i_3\}$ as the set of single men and $v_j(\pi) = 0, \forall j \in W$
\While{$M' \neq \emptyset$}{
Let $i \in M'$ and $j \in \argmax \{ A(i,j') : j' \in W,\ B(i,j') \bi v_{j'}(\pi)\}$
\If{$j$ is single}{$i$ is automatically accepted}
\Else{$j$ chooses $\argmax\{B(i,j) ; B(\mu_j,j)\}$ and the loser is included in $M'$}}
\caption{Propose-dispose algorithm}
\end{algorithm}
\noindent Let us run Algorithm \ref{Algo:Propose_dispose_algo_general_case} iteration by iteration.
\begin{itemize}[leftmargin = 0cm]\setlength\itemsep{0em}
\item[] \textbf{Iter 1}. Suppose $i_1$ is the first proposer man. Looking at the first row of $A$, we see that $i_1$ prefers to propose to $j_2$, who accepts him.
\item[] \textbf{Iter 2}. Suppose $i_3$ is the second proposer. From $A(i_3,\cdot)$, $i_3$ proposes to $j_1$, who accepts him as she is single.
\item[] \textbf{Iter 3}. Finally, $i_2$ proposes as he is the only remaining man. From the payoff matrix, $i_2$ proposes to $j_1$ and has to compete against $i_3$. From the first column of $B$, the winner is $i_2$. $i_3$ becomes single and proposes again.
\item[] \textbf{Iter 4}. $i_3$ computes his optimal proposal. As $B(i_3, j_1) \leq v_{j_1}(\pi)$, $i_3$ proposes to his second best option, $j_3$. However, $j_3$ is already matched with $i_1$, who is better ranked than $i_3$ according to $j_3$'s preferences, so $i_3$ is rejected.
\item[] \textbf{Iter 5}. Finally, $i_3$ proposes to $j_2$ as she is his only feasible proposal, who accepts him, and the algorithm ends.
\end{itemize}
\noindent The output is $\mu = ((i_1, j_3),(i_2,j_1),(i_3,j_2))$. We see that $i_1$ and $i_2$ are matched with their best possible options, so they will never prefer to change to another partner. Regarding $i_3$, although he would prefer to change to another partner, neither $j_1$ nor $j_3$ would accept him, as each of them prefers their actual partners. We conclude that $\pi = (\mu,0,0)$ is a Nash stable matching profile where $(0,0)$ means that the transfer vector $(x,y)$ is equal to zero.
\subsection{Computing an externally stable allocation}
Unlike Nash stability, as will be seen now, the external stability concept outputs a matching profile in which (some) agents make a positive transfer. First, let us recall the payoff matrices and the propose-dispose algorithm.
\begin{align*}
A = \begin{tabular}{|c|c|c|}
\hline
83 & 85 & 99 \\
\hline
74 & 13 & 15 \\
\hline
58 & 49 & 54 \\
\hline
\end{tabular} \ \ \ B = \begin{tabular}{|c|c|c|}
\hline
69 & 6 & 28 \\
\hline
88 & 2 & 70 \\
\hline
72 & 18 & 9 \\
\hline
\end{tabular}
\end{align*}
\begin{algorithm}[H]
Set $M' \leftarrow \{i_1,i_2,i_3\}$ as the set of single men and $v_j(\pi) = 0, \forall j \in W$
\While{$M' \neq \emptyset$}{
Let $i \in M'$ and $(j, x_i, y_j) \in \argmax \{ A(i,j') - x_i + y_{j'} : B(i,j') + x_i - y_{j'} \geq v_{j'}(\pi) + \varepsilon,\ j' \in \{j_1,j_2,j_3\}\}$
\If{$j$ is single}{$i$ is automatically accepted}
\Else{$i$ and $\mu_i$ compete for $j$: Each of them computes $$\max\{B(i,j) + x_i - y_j : A(i,j) - x_i + y_j \geq \beta_i\}$$ with $\beta_i$ being $i$'s reservation price. The highest bid wins and the winner decreases the bid until matching the one of the loser}}
\caption{Propose-dispose algorithm}
\end{algorithm}
Let $\varepsilon = 1$ and let us consider the same order as in the previous section:
\begin{itemize}[leftmargin = 0cm]\setlength\itemsep{0em}
\item[] \textbf{Iter 1}. Suppose $i_1$ is the first proposer man. He solves \begin{align*}
&\max\{A(i_1,j) - x_{i_1} + y_j : B(i_1,j) + x_{i_1} - y_j \geq \varepsilon, j \in W\} \\
\Longleftrightarrow &\max\{A(i_1,j) + B(i_1,j) - \varepsilon : j \in W\} = \max\{83 + 69 - 1, 85 + 6 - 1, 99 + 28 - 1\} = 151
\end{align*}
Therefore, the optimal proposal is $(j_1, 0, 68)$\footnote{Any transfer profile $(x_i,y_j)$ satisfying $-x_i + y_j = 68$ is also an optimal proposal. Taking, in particular, an optimal proposal in which one of the agents makes a null transfer will be useful for the next section.}, i.e. $i_1$ proposes to $j_1$ and takes the highest possible profit from their transfer game by offering to $j_1$ exactly her IRP. Since $j_1$ is single, he accepts him automatically.
\item[] \textbf{Iter 2}. Suppose $i_3$ proposes next. Similarly, he solves the problem (notice that all women still have $0$ payoff),
\begin{align*}
&\max\{A(i_3,j) - x_{i_3} + y_j : B(i_3,j) + x_{i_3} - y_j \geq \varepsilon, j \in W\} \\
\Longleftrightarrow &\max\{A(i_3,j) + B(i_3,j) - \varepsilon : j \in W\} = \max\{58 +72 - 1, 49 + 18 - 1, 54 + 9 - 1\} = 129
\end{align*}
Therefore, the optimal proposal for $i_3$ is $(j_1,0,71)$. However, since $j_1$ is already matched, $i_1$ and $i_3$ compete. First of all, notice that $i_3$'s reservation price is equal to $66$, as it corresponds to the second highest value in the calculation of his optimal proposal. Similarly, $i_1$'s reservation price is equal to $126$. Let us compute the bid of $i_1$,
\begin{align*}
&\lambda_{i_1} = \max\{B(i_1,j_1) + x_{i_1} - y_{j_1} : A(i_1,j_1) - x_{i_1} + y_{j_1} \geq 126\}\\
\Longleftrightarrow & \lambda_{i_1} = \max\{69 + x_{i_1} - y_{j_1} : 83 - x_{i_1} + y_{j_1} \geq 126\}\\
\Longleftrightarrow & \lambda_{i_1} = \max\{69 + x_{i_1} - y_{j_1} : x_{i_1} - y_{j_1} \leq - 43 \} = 69 - 43 = 26
\end{align*}
Similarly for $i_3$,
\begin{align*}
&\lambda_{i_3} = \max\{72 + x_{i_3} - y_{j_1} : 58 - x_{i_3} + y_{j_1} \geq 66\}\\
\Longleftrightarrow & \lambda_{i_3} = \max\{72 + x_{i_3} - y_{j_1} : x_{i_3} - y_{j_1} \leq -8 \} = 72 - 8 = 64
\end{align*}
Since $i_3$'s bid is the highest, he wins the competition. Finally, he decreases his bid until matching the one of $i_1$,
\begin{align*}
&\max\{A(i_3,j_1) - x_{i_3} + y_{j_1} : B(i_3,j_1) + x_{i_3} - y_{j_1} \geq \lambda_{i_1}\}\\
\Longleftrightarrow &\max\{58 - x_{i_3} + y_{j_1} : 72 + x_{i_3} - y_{j_1} \geq 26\}\\
\Longleftrightarrow & \max\{58 - x_{i_3} + y_{j_1} : -x_{i_3} + y_{j_1} \leq 46 \} = 58 + 46 = 104
\end{align*}
Therefore, the final transfer profile between $i_3$ and $j_1$ is $(x_{i_3},y_{j_1}) = (0,46)$. Notice that $j_1$ reaches exactly the previous bid of $i_1$. As this is the highest payoff that $i_1$ is willing to offer her, he will not propose to her if he is the next proposer man.
\item[] \textbf{Iter 3}. Suppose $i_2$ proposes next. His optimal proposal is,
\begin{align*}
&\max\{A(i_2,j) - x_{i_2} + y_j : B(i_2,j) + x_{i_2} - y_j \geq \varepsilon, j \in \{j_2,j_3\}, B(i_2,j_1) + x_{i_2} - y_{j_1} \geq 26 + \varepsilon\} \\
\Longleftrightarrow &\max\{A(i_2,j_1) + B(i_2,j_1) - 26 - \varepsilon, A(i_2,j_2) + B(i_2,j_2) - \varepsilon, A(i_2,j_3) + B(i_2,j_3) - \varepsilon\}\\
\Longleftrightarrow &\max\{74 + 88 - 26 - 1, 13 + 2 - 1, 15 + 70 - 1\} = 135
\end{align*}
Thus, $i_2$ proposes to $j_1$ the transfer profile $(0, 61)$, and has to compete against $i_3$. The reservation prices of the men are $\beta_{i_2} = 94$ and $\beta_{i_3} = 66$. Then, the bids are equal to,
\begin{align*}
&\lambda_{i_2} = \max\{B(i_2,j_1) + x_{i_2} - y_{j_1} : A(i_2,j_1) - x_{i_2} + y_{j_1} \geq \beta_{i_2}\} = 68 \\
&\lambda_{i_3} = \max\{B(i_3,j_1) + x_{i_3} - y_{j_1} : A(i_3,j_1) - x_{i_3} + y_{j_1} \geq \beta_{i_3}\} = 64
\end{align*}
Since $\lambda_{i_2}$ is the highest value, $i_2$ is the winner of the competition. Finally, he decreases his offer for matching the one of $i_3$,
\begin{align*}
&\max\{A(i_2,j_1) - x_{i_2} + y_{j_1} : B(i_2,j_1) + x_{i_2} - y_{j_1} \geq \lambda_{i_3}\} = 98
\end{align*}
and so, the final transfer profile between $i_2$ and $j_1$ is $(x_{i_2}, y_{j_1}) = (0,24)$.
\item[] \textbf{Iter 4}. Suppose $i_1$ proposes next. Since $j_1$'s payoff is too high for offering her a profitable (for $i_1$) contract, $i_1$ proposes to his next best option, $j_3$ the transfer profile $(x_{i_1}, y_{j_3}) = (0,27)$. Since $j_3$ is single, she accepts automatically.
\item[] \textbf{Iter 5}. Finally, $i_3$ proposes again as he is the only single man. Since he lost the competition for $j_1$, he is not able to propose her again and increase her payoff by $\varepsilon$ without violating his own reservation price. Thus, he offers to his second-best option $j_2$ the transfer profile $(0,17)$. Since she is single, $i_3$ is automatically accepted and the algorithm stops.
\end{itemize}
The output of Algorithm \ref{Algo:Propose_dispose_algo_general_case} is $\pi = (\mu,x,y)$ with $\mu = ((i_1,j_3),(i_2,j_1),(i_3,j_2))$ and $(x,y) = ((0,0,0), (24,17,27))$. The final payoffs of the players are, $(u(\pi),v(\pi)) = ((126,98,66),(64,1,1))$. For checking the $\varepsilon$-external stability of $\pi$, we compute the outside options of one side and check that none of them is $\varepsilon$ higher than the players' payoff. Formally, for $i \in M$ we check that,
\begin{align*}
&u_i^{\varepsilon} = \max\{A(i,j) - x_{i} + y_{j} : B(i,j) + x_{i} - y_j \geq v_j(\pi) + \varepsilon, j \in W \setminus\{\mu_i\}\} \leq u_i(\pi) + \varepsilon\\
\Longleftrightarrow & u_i^{\varepsilon} = \max\{A(i,j) + B(i,j) - v_j(\pi) - \varepsilon, j \in W \setminus\{\mu_i\}\} \leq u_i(\pi) + \varepsilon
\end{align*}
We obtain that $u^{\varepsilon} = (u_{i_1}^{\varepsilon}, u_{i_2}^{\varepsilon}, u_{i_3}^{\varepsilon})$ = $(89, 83, 65)$. Since men's payoff in $\pi$ are strictly higher than their outside options, none of them has the incentive to change of partner. Therefore, we conclude that $\pi$ is $\varepsilon$-externally stable, for $\varepsilon = 1$\footnote{As the payoff matrices have integer values, the solution found is indeed a $0$-externally stable allocation}.
\subsection{Computing an externally-internally stable allocation}
The externally stable matching profile $\pi$ found by Algorithm \ref{Algo:Propose_dispose_algo_general_case} in the previous section is known to be the stable allocation most preferred by the proposer side. However, there is a continuum of strategy profiles that achieve the same payoffs as $\pi$. Indeed, for any $\delta \in [0, \min\{y_{j_1},y_{j_2},y_{j_3}\}]$, the shifted matching profile $\pi_{\delta} = (\mu, x+\delta, y-\delta)$ gives the same payoffs to the agents and therefore remains $\varepsilon$-externally stable. However, within this continuum of solutions, only one is internally stable.
Let us start this section by studying the constrained Nash equilibria. Recall that $(x_i,y_j) \in \mathbb{R}_+^2$ is an $\varepsilon$-$(u_i^{\varepsilon}, v_j^{\varepsilon})$-CNE if and only if,
\begin{align*}
U_{i,j}(x_i,y_j) + \varepsilon \geq \max \{U_{i,j}(s,y_j): V_{i,j}(s,y_j) + \varepsilon \geq v_j^{\varepsilon}, s \geq 0\} &\Longleftrightarrow V_{i,j}(x_i,y_j) \leq v_j^{\varepsilon} \\
V_{i,j}(x_i,y_j) + \varepsilon \geq \max\{V_{i,j}(x_i,t): U_{i,j}(x_i,t) + \varepsilon \geq u_i^{\varepsilon}, t \geq 0\} &\Longleftrightarrow U_{i,j}(x_i,y_j) \leq u_i^{\varepsilon}
\end{align*}
In words, as players' profitable deviations are equivalent to decreasing their transfers, a couple plays a constrained Nash equilibrium if each of them is receiving no more than their outside option, so transfers are shifted to the minimum possible without breaking the external stability\footnote{In reality, as we are working with $\varepsilon$-external stability, players' payoffs can be decreased until $\varepsilon$ less than their outside options}. We state the pseudo-code of Algorithm \ref{Algo:strategy_profiles_modification} for this problem.
\begin{algorithm}[H]
\Repeat{Convergence}{
\For{$(i,j) \in \mu$}{
Compute $(u_i^{\varepsilon}, v_j^{\varepsilon})$ and reduce $(x_i,y_j)$ s.t. $U_{i,j}(x_i,y_j) = u_i^{\varepsilon}$ and $V_{i,j}(x_i,y_j) = v_j^{\varepsilon}$.
}}
\caption{Strategy profiles modification, $\varepsilon$-version}
\end{algorithm}
Let us run the strategy profiles modification algorithm iteration by iteration.
\begin{itemize}[leftmargin = 0cm]\setlength\itemsep{0em}
\item[] \textbf{Iter 1.} Let $(i_1,j_3)$ be the first couple. Since $i_1$ is not making any transfer, only $j_3$ decreases hers. Recall $u_{i_1}^{\varepsilon} = 65$, therefore $j_3$ passes to transfer $y_{j_3} : A(i_1,j_3) + y_{j_3} = u_{i_1}^{\varepsilon} \Longleftrightarrow y_{j_3} = 65 - 99 = -34$. Since transfers are non-negative, $j_3$ decreases hers until $0$. The new payoffs of the couple become $(u_{i_1}(\pi),v_{j_3}(\pi)) = (99,28)$. Notice that the couple has passed to play the Nash equilibrium of their game, as this one is a feasible strategy profile.
\item[] \textbf{Iter 2.} Consider the second couple $(i_2,j_1)$. Again, only $j_1$ can deviate as $i_2$ already plays a best reply. We obtain that $u_{i_2}^{\varepsilon} = 15$ thus, $j_1$ decreases $y_{j_1}$ such that $A(i_2,j_1) + y_{j_1} = 15$, so $y_{j_1}$ becomes $0$. The new payoffs of the couple are $(u_{i_2}(\pi),v_{j_1}(\pi)) = (74,88)$, which corresponds to their Nash equilibrium payoff.
\item[] \textbf{Iter 3.} Consider the last couple $(i_3,j_2)$. It follows $u_{i_3}^{\varepsilon} = 41$ so
$j_2$ decreases $y_{j_2}$ such that $A(i_3,j_2) + y_{j_2} = 41$. Thus, $y_{j_2} = 0$ and the third couple also passes to play a Nash equilibrium, getting as payoffs $(u_{i_3}(\pi),v_{j_2}(\pi)) = (49, 18)$.
\end{itemize}
As all couples switch to play the Nash equilibrium of their game, the algorithm stops. Hence, starting from the best externally stable matching for men, when we apply the strategy profiles modification algorithm, we find the best solution for men in the model without transfers (e.g. the Gale-Shapley original model). This property is not always true: in Example 1, in the unique externally-internally stable matching, the man offers a positive transfer $\delta$ to the woman.
\end{document}
|
\begin{document}
\title[]{Optimal feedback control of dynamical systems via value-function approximation}
\author {Karl Kunisch\textsuperscript{$*$}}
\thanks{\textsuperscript{$*$}University of Graz, Institute of Mathematics and Scientific
Computing, Heinrichstr. 36, A-8010 Graz, Austria and Johann Radon Institute for Computational and Applied Mathematics
(RICAM), Austrian Academy of Sciences, Altenberger Stra\ss{}e 69, 4040 Linz, Austria, ({\tt
[email protected]}).}
\author{Daniel Walter\textsuperscript{$\dagger$}}
\thanks{\textsuperscript{$\dagger$}Institut f\"ur Mathematik, Humboldt-Universit\"at~zu~Berlin, Rudower Chaussee 25, 10117 Berlin, Germany,({\tt
[email protected]}).}
\maketitle
\begin{abstract}
A self-learning approach for optimal feedback gains for finite-horizon nonlinear continuous time control systems is proposed and analysed. It relies on parameter dependent approximations to the optimal value function obtained from a family of universal approximators. The cost functional for the training of an approximate optimal feedback law incorporates two main features. First, it contains the average over the objective functional values of the parametrized feedback control for an ensemble of initial values. Second, it is adapted to exploit the relationship between the maximum principle and dynamic programming.
Based on universal approximation properties, existence, convergence and first order optimality conditions for optimal neural network feedback controllers are proved.
\end{abstract}
{\em{ Keywords:}}
optimal feedback control, neural networks, Hamilton-Jacobi-Bellman equation, self-learning, reinforcement learning.
{\em{AMS classification:}}
49J15,
49N35,
68Q32,
93B52,
93D15.
\section{Introduction} \label{sec:Introduction}
In this paper we focus on optimal feedback control for problems of the form
\begin{equation} \label{def:refproblem}\tag{$P$}
\left\{
\begin{aligned}
\quad &\inf_{y,u} J(y,u) \coloneqq \frac{1}{2}\int_{0}^T \left
( |Q_1(y(t)-y_d(t))|^2+ \beta |u(t)|^2 \right)~\mathrm{d} t +\frac{1}{2}|Q_2(y(T)-y^T_d)|^2\\
&s.t. \quad \dot{y}= {f}(y)+g(y)u, \quad y(0)=y_0, \text{ and } u \in L^2(0,T; \mathbb{R}^m),
\end{aligned}
\right.
\end{equation}
with nonlinear dynamics described by $f:[0,T]\times \mathbb{R}^n \to \mathbb{R}^n$. The system can be influenced by choosing a control input~$u$ which enters through a control operator $g:\mathbb{R}^n\to \mathbb{R}^{n \times m}$. We assess the performance of a given control by its objective functional value which comprises the (weighted) distance between the associated state trajectory~$y$ and a given desired state~$y_d$ as well as the norm of the control for some cost parameter~$\beta>0$. The weighting matrices $Q_i$, for $i=1,2$, are assumed to be symmetric positive semi-definite. Searching for an optimal control $u^*$ in feedback form requires finding a function $F^*:[0,T]\times \mathbb{R}^n \to \mathbb{R}^m$ such that
$$
u^*(t)= F^*(t,y^*(t)), \text{ for } t\in (0,T).
$$
Here $(u^*,y^*)$ denotes an optimal control-trajectory pair associated to \eqref{def:refproblem}.
Under appropriate conditions, see e.g. \cite{FS06}, the feedback mapping can be expressed as
\begin{equation}\label{eq:intro1}
F^*(t,y) = -\frac{1}{\beta}g^\top(t,y) \partial_y V^*(t,y),
\end{equation}
where $V^*$ stands for the value function associated to \eqref{def:refproblem}, i.e. for $(T_0,y_0)\in [0,T]\times \mathbb{R}^n$:
$$
V^*(T_0,y_0)= \min_{y,u} J_{T_0}(y,u), \text{ subject to } \dot{y}= {f}(y)+g(y)u, \quad y(T_0)=y_0,
$$
and
$$
J_{T_0}(y,u)=\frac{1}{2}\int_{T_0}^T \left
( |Q_1(y(t)-y_d(t))|^2+ \beta |u(t)|^2 \right)~\mathrm{d} t +\frac{1}{2}|Q_2(y(T)-y^T_d)|^2.
$$
The value function $V^*$ satisfies a Hamilton-Jacobi-Bellman (HJB) equation which is a time-dependent first order hyperbolic equation of spatial dimension $n$. Numerical realisations, therefore, are plagued by the curse of dimensionality. Indeed a direct solution of the HJB equation already becomes computationally prohibitive for moderate dimensions~$n$.
Therefore, for practical realization, the interest in alternative techniques arises. In many situations of practical relevance researchers have relied on linear approximations to the nonlinear dynamical system and have treated the resulting linear-quadratic problem by Riccati techniques. Much research has concentrated on validating this approach locally around a reference trajectory. Globally such a strategy may fail, see for instance \cite{dkk19, KW2020}.
In this paper we follow an approach, possibly first proposed in \cite{KW2020}, circumventing the construction of the value function on the basis of solving the HJB equation. Rather the feedback mapping is constructed by an unsupervised self-learning technique. In practice, this requires the approximation of~$V^*$ by a family of functions~$V_\theta$ which are parametrized by a finite dimensional vector~$\theta$ and satisfy a uniform approximation property. Possible families of universal approximators include, e.g., neural networks or piecewise polynomial approximations. Subsequently, in view of \eqref{eq:intro1}, we introduce the corresponding feedback law
\begin{equation}\label{eq:intro2}
F_\theta(t,y) = -\frac{1}{\beta}g^\top(y) \partial_y V_\theta(t,y), \text{ for } (t,y)\in [0,\infty) \times \mathbb{R}^n,
\end{equation}
as approximation to $F^*$. An ``optimal'' parametrized feedback law is then determined by a variant of the following self-learning, structure preserving, variational problem:
\begin{equation}\label{eq:intro3}
\begin{array}l
\min_{\theta}\; J(y,\mathcal{F}_{\theta}(y))\\[1.5ex]
\qquad \; +\frac{1}{2}\int^T_0 {\gamma_1} |V_{\theta}(t,y(t))-J_t(y,F_{\theta}(\cdot,y))|^2 + {\gamma_2}|\partial_y V_{\theta}(t,y(t))-p(t)|^2~\mathrm{d}t +\frac{\gamma_\varepsilon}{2} {|\theta|^2}\\[1.8ex]
\text{s.t. } \quad \dot{y}= {f}(y)+g(y) F_{\theta}(y), \quad y(0)=y_0, \quad p(T)= Q^\top_2 Q_2(y(T)-y^T_d)\\[1.5ex]
\qquad \;- \dot p= {f}(y)^\top p+\lbrack D {g}(y)^\top F_\theta (y)\rbrack p+Q_1^\top Q_1(y-y_d).
\end{array}
\end{equation}
In this problem, minimization with respect to $u$ is replaced by minimizing with respect to the parameters $\theta$ which characterize $V_\theta$ and~$F_\theta$. The cost functional of problem \eqref{eq:intro3} consists of four parts: The first term represents the objective functional of \eqref{def:refproblem} where the control~$u$ is replaced by the closed loop expression~$F_\theta(y)$. The next two terms realize the fact that $V_\theta$ is constructed as approximation to the value function associated to \eqref{def:refproblem} and exploit the well-known property that, under certain conditions, the gradient of the value function coincides with the solution of a suitable adjoint equation, see e.g. \cite[page 21]{FS06}. The final term penalizes the norm of the structural parameters.
We point out that $V_\theta$ and $F_\theta$ are learned along the
orbit $\mathcal {O}= \{y(t;y_0): t\in (0,\infty)\}$ within the state space $\mathbb{R}^n$.
To accommodate the case that one trajectory does not provide enough information, we
propose to involve an ensemble of orbits departing from a set $Y_0$ of initial
conditions, and to reformulate problem \eqref{eq:intro3} accordingly. This will be done in Section 4 below.
In our earlier work on learning a feedback function, \cite{KW2020}, we considered infinite horizon optimal control problems. In that case, the time-dependent HJB equation results in a stationary one. There we had not yet incorporated the structure preserving terms involving $V_\theta$ and $\partial_y V_\theta$ into the cost. Moreover we directly constructed an approximation $F_\theta$ to the vector valued function $F^*$, rather than approximating the scalar valued function $V^*$ and subsequently using \eqref{eq:intro2}. In the present paper we provide the theoretical foundations for the learning based technique that we propose to construct an approximation to the optimal feedback function for \eqref{def:refproblem}. Recently in \cite{onlfor2020} a variant of the approach as in \cite{KW2020} was used for interesting numerical investigations to construct optimal feedback functions for finite horizon multi-agent optimal control problems.
Let us very briefly mention some of the vast literature
on solving the HJB equations. Semi-Lagrangian schemes and finite difference methods have been deeply investigated to directly solve HJB equations, see e.g. \cite{bggk13, ff16, kkr18}.
Significant progress was made in solving high
dimensional HJB equations by the use of policy iterations combined with tensor calculus techniques, \cite{dkk19,
kk18, foss2020}. The use of Hopf formulas was proposed in e.g. \cite{lr86,
cloy18}. Interpolation techniques, utilizing ensembles of open loop
solutions have been analyzed in the works of \cite{akk2020, ngk19}, for example.
Finally we mention that optimal feedback control is intimately
related to reinforcement learning, see e.g. the monograph
\cite{bert19}, and also the survey articles \cite{LV09, recht18,
vls14}.
The manuscript is structured as follows. Some pertinent notation is gathered in Section 2.
In Section 3 concepts of optimal feedback control, semi-global with respect to the initial condition $y_0$, are gathered. Section 4 is devoted to describing the learning technique that we propose to approximate the optimal feedback function. In Section 5 the required assumptions on approximating subspaces are checked for a class of neural networks and a class of piecewise polynomials. Existence of solutions to the approximating learning problems is proved in Section 6. Their convergence is analyzed in Section 7. The case of learning from finitely many orbits is the focus of Section 8. Section 9 provides an example illustrating the numerical feasibility of the proposed
method. We do not aim for sophistication in this respect.
The appendix details the proofs of several necessary technical results.
\section{Notation}
For ~$I:=(0,T)$, with $T>0$, we define
$
W_T= \{\,y \in L^2(I; \mathbb{R}^n)\;|\;\dot{y}\in L^2(I; \mathbb{R}^n)\,\},
$
where the temporal derivative is understood in the distributional sense. We equip~$W_T$ with the norm induced by the inner product
\begin{align*}
(y_1,y_2)_{W_T}=(\dot{y}_1,\dot{y}_2)_{L^2(I;\mathbb{R}^n)}+(y_1,y_2)_{L^2(I;\mathbb{R}^n)} \quad \text{for } y_1, y_2 \in W_T,
\end{align*}
making it a Hilbert space.
We recall that $W_T$ embeds continuously into $C(\bar I; \mathbb{R}^n)$.
For a compact metric space~$X$ we denote the space of continuous functions between~$X$ and~$Y$ by~$\mathcal{C}(X;Y)$ which we endow with
$
\|\varphi\|_{\mathcal{C}(X;Y)}= \max_{x \in X} \|\varphi(x)\|_Y$ as norm. By $Y_0$ we denote a compact set of initial conditions in $\mathbb{R}^n$. When arising as index, the space $\mathcal{C}(Y_0;W_T)$ will frequently be abbreviated by $\mathcal{C}$. The space~$\mathcal{C}^1(X;Y)$ of continuously differentiable functions is defined analogously.
Open balls of radius $\varepsilon$ in a Banach space $X$ with center $x$ will be denoted by $B_{\varepsilon}(x)$.
The space of bounded linear operators between Banach spaces~$X$ and~$Y$, endowed with the canonical norm, is denoted by~$\mathcal{B}(X,Y)$.
We further abbreviate~$\mathcal{B}(X):=\mathcal{B}(X,X)$.
\section{Semi-global optimal feedback control}
\label{sec:o}
Consider the controlled nonlinear dynamical system of the form
\begin{align}
\label{eq:openloop}
\dot{y}= \mathbf{f}(y)+\mathbf{g}(y) u \quad \text{in}~L^2(I;\mathbb{R}^n), \quad y(0)=y_0,
\end{align}
described by Nemitsky operators
\begin{equation}\label{eq:spaces}
\begin{array}l
\mathbf{f}\colon W_T \to L^2(I;\mathbb{R}^n), \quad \mathbf{f}(y)(t)= f(t,y(t)) \\[1.3ex]
\mathbf{g} \colon W_T \to \mathcal{L}(L^2(I;\mathbb{R}^m);L^2(I;\mathbb{R}^n)), \quad \mathbf{g}(y)(t)= g(t,y(t))
\end{array}
\end{equation}
for~$a.e.~t\in I$,~$f \colon I \times \mathbb{R}^n \to \mathbb{R}^n$ and~$g \colon I \times \mathbb{R}^n \to \mathbb{R}^{n \times m}$. The smoothness requirements on~$f$ and~$g$ will be detailed in Assumption~\ref{ass:feedbacklaw} below. Our aim is to choose a control input~$u^*\in L^2(I;\mathbb{R}^m)$ which keeps the associated solution~$y^* \in W_T$ close to a known reference trajectory~$y_d$, while keeping the control effort small. This is formulated as the constrained minimization problem
\begin{equation} \label{def:openloopproblem}\tag{$P_{y_0}$}
\left\{
\begin{aligned}
\quad &\inf_{y \in W_T,\, u \in L^2(I; \mathbb{R}^m)} J(y,u) \\
&s.t. \quad \dot{y}= \mathbf{f}(y)+\mathbf{g}(y) u, \quad y(0)=y_0,
\end{aligned}
\right.
\end{equation}
where
\begin{align*}
J(y,u)=\frac{1}{2}\int_{I} \left ( |Q_1(y(t)-y_d(t))|^2+ \beta |u(t)|^2 \right )~\mathrm{d} t+\frac{1}{2}|Q_2(y(T)-y^T_d)|^2,
\end{align*}
which incorporates the weighted misfit between the trajectory ~$y$ within the time horizon $I=(0,T)$ and at the terminal time to desired states~$y_d\in L^2(I;\mathbb{R}^n)$ and $y_d^T \in \mathbb{R}^n$, as well as the norm of the control~$u$. While this ~\textit{open loop} optimal control problem captures well the objective formulated above, it comes with several disadvantages. First, its solution is a function of time only, and does not include the current state $y(t)$. This makes the open loop approach susceptible to possible perturbations in the dynamical system. Second, determining the control action for a new initial condition requires to solve~\eqref{def:openloopproblem} from the start.
The aforementioned limitations of open loop optimal controls motivate the study of~\textit{semi-global optimal feedback control} approaches to \eqref{def:openloopproblem}. More precisely, given a compact set~$Y_0 \subset \mathbb{R}^n $, we look for a feedback function~$F^* \colon I \times \mathbb{R}^n \to\mathbb{R}^m$ which induces a Nemitsky operator
\begin{align*}
\mathcal{F}^* \colon W_T \to {L^2}(I; \mathbb{R}^m) , \quad \mathcal{F}^*(y)(t)=F^*(t,y(t)) \quad \text{for a.e.}~t \in I,
\end{align*}
such that for every~$y_0 \in Y_0$ the~\textit{closed loop system}
\begin{align} \label{eq:cloloop}
\dot{y}= \mathbf{f}(y)+ \mathbf{g}(y) \mathcal{F}^*(y), \quad y(0)=y_0,
\end{align}
admits a unique solution~$y^*( y_0 ) \in W_T$ and~$(y^*( y_0), \mathcal{F}^*(y^*(y_0 )))$ is a minimizing pair of~\eqref{def:openloopproblem}.
The determination of an optimal feedback function usually rests on the computation of the value function to~\eqref{def:openloopproblem} which is defined as
\begin{align} \label{def:valuefunc}
V^*(T_0,y_0):= \min_{\substack{y \in H^1(T_0,T;\mathbb{R}^n), \\ u \in L^2(T_0,T; \mathbb{R}^m)}} J_{T_0}(y,u) \quad s.t. \quad \dot{y}= \mathbf{f}(y)+ \mathbf{g}(y) u,~ \quad y(T_0)=y_0,
\end{align}
\noindent
where~$(T_0,y_0)\in I \times \mathbb{R}^n$, and $J_{T_0}(y,u)$ is defined as
\begin{align*}
J_{T_0} (y,u)= \frac{1}{2}\int^T_{T_0} \left ( |Q_1(y(t)-y_d(t))|^2+ \beta |u(t)|^2 \right )~\mathrm{d} t+\frac{1}{2}|Q_2(y(T)-y_d(T))|^2.
\end{align*}
By construction $V^*$ satisfies the final time boundary condition
\begin{align*}
V^*(T,y_0)=\frac{1}{2} |Q_2(y_0-y_d(T))|^2 \quad \forall y_0 \in \mathbb{R}^n.
\end{align*}
If~$V^*$ is continuously differentiable in a neighborhood of some~$(t,y_0) \in I \times \mathbb{R}^n$ then it solves
the instationary~\textit{Hamilton-Jacobi-Bellman (HJB) equation}
\begin{align} \label{eq:HJB}
\partial_t V^*(t,y_0) +(f(y_0), \partial_y V^*(t,y_0))_{\mathbb{R}^n}- \frac{1}{2\beta}|g(t,y_0)^\top\partial_y V^*(t,y_0)|^2+ \frac{1}{2} |Q_1 (y_0-y_d(t))|^2=0
\end{align}
in the classical sense there, see e.g. \cite{ff14,FS06}. Here~$\partial_t V^*$ denotes the partial derivative of the value function with respect to~$t$ and~$\partial_y V^*$ is the gradient of~$V^*$ with respect to the~$y$-variable.
An optimal control for~\eqref{def:openloopproblem} in feedback form is then given by
$u^*=-\frac{1}{\beta} \mathbf{g}(y^*)^\top\partial_y \mathcal{V}^*(y^*)$ where~$\partial_y \mathcal{V}^*(y^*)(t)=\partial_y V^*(t,y^*(t))$ for every~$t\in I$, and~$y^*=y^*(y_0) \in W_T$ solves the closed loop system
\begin{align*}
\dot{y}=\mathbf{f}(y)- \frac{1}{\beta}\mathbf{g}(y) \mathbf{g}(y)^\top \partial_y \mathcal{V}^*(y), \quad y(0)=y_0.
\end{align*}
Thus
\begin{align*}
\left(y^*(y_0), -\frac{1}{\beta}\mathbf{g}(y^*(y_0))^\top \partial_y \mathcal{V}^*(y^*(y_0))\right) \in \argmin \eqref{def:openloopproblem}
\end{align*}
and the function
\begin{align*}
F^*(\cdot,\cdot)=-\frac{1}{\beta} g(\cdot,\cdot)^\top \partial_y V^*(\cdot,\cdot)
\end{align*}
is an optimal feedback law.
Realizing the optimal feedback in this way requires a solution to \eqref{eq:HJB} which is a partial differential equation on~$\mathbb{R}^n$. This can be extremely challenging or even impossible depending on the dimension $n$ and the computational facilities at hand. Similarly to our previous manuscript \cite{KW2020} we take a different approach by formulating minimization problem over a suitable set of feedback functions involving the closed loop system as a constraint. This relates to a learning problem, within which the feedback functions are trained to achieve optimal stabilization. This makes the problem computationally amenable.
The procedure just described will be formalized in the following section. Here we first summarize the assumptions on the nonlinear dynamical system that we refer to throughout the paper.
\begin{assumption} \label{ass:feedbacklaw}
\leavevmode
\begin{itemize}
\item[\textbf{A.1}] The functions~$f \colon I \times \mathbb{R}^n \to \mathbb{R}^n$ and~$g \colon I \times \mathbb{R}^{n} \to \mathbb{R}^{n \times m}$ are twice continuously differentiable. Their Jacobians and Hessians with respect to the second variable, denoted by~$D_{y}f, D_{yy}f$, and~$D_{y}g, D_{yy}g$, respectively, are Lipschitz continuous on compact sets, uniformly for $t\in I$.
\item [\textbf{A.2}] There exists a constant~$M_{Y_0}>0$ such that the value function~$V(\cdot ,\cdot)\colon I \times \mathbb{R}^n \to \mathbb{R}$ for~\eqref{def:openloopproblem} is twice continuously differentiable on~$I \times \bar{B}_{2\widehat{M}}(0)$ with Lipschitz continuous gradient and Hessian (w.r.t. $y$ uniformly in~$t \in I$) where
\begin{align}\label{eq:aux1}
\widehat M = M_{Y_0} \, \|\imath\|_{\mathcal{B}(W_T,\, \mathcal{C}(I;\mathbb{R}^n))},
\end{align}
and $\imath$ denotes the embedding of $W_T$ into $\mathcal{C}(I;\mathbb{R}^n))$.
\end{itemize}
\end{assumption}
As a consequence of (\textbf{A.1}), the Nemitsky operators~$\mathbf{f},~\mathbf{g}$ are at least two times continuously differentiable with domains and ranges as defined in \eqref{eq:spaces}. Their derivatives, denoted by~$D \mathbf{f}$ and~$D \mathbf{g}$, are the Nemitsky operators induced by~$D_y f$ and~$D_y g$. We point out~$D \mathbf{g}(y)\in \mathcal{B}(W_T; L^2(I;\mathbb{R}^m); L^2(I;\mathbb{R}^n))$. Moreover~$ \mathbf{f}, D\mathbf{f}, \mathbf{g},D \mathbf{g}$ are Lipschitz continuous and bounded, on bounded subsets of $L^\infty(I;\mathbb{R}^n)$, and thus in particular on~$\mathcal{Y}_{ad}\subset W_T$, where
\begin{align}\label{eq:kk13}
\mathcal{Y}_{ad}:= \left \{\,y \in W_T\;|\;\wnorm{y}\leq 2 M_{Y_0}\,\right\}.
\end{align}
Finally $D\mathbf{f}^\top \in{\mathcal B}(W_T,L^2(I;\mathbb{R}^n))$ denotes the Nemitsky operator associated to $D_y f^\top$.
Analogously, due to (\textbf{A.2}),~$V^*$ induces a twice Lipschitz continuously Fr\'echet differentiable Nemitsky operator~$\mathcal{V}^*: \mathcal{Y}_{ad}\subset W_T \to L^{2}(I)$.
Moreover~$\mathcal{V}^*$ and its first derivative~$D\mathcal{V}^* $ are weak-to-strong continuous.
Define the Nemitsky operator
\begin{align} \label{def:optfeddnemitsk}
\mathcal{F}^* \colon \mathcal{Y}_{ad}\to L^2(I;\mathbb{R}^m), \quad \mathcal{F}^*(y)=-\frac{1}{\beta} \mathbf{g}(y)^\top \partial_y \mathcal{V}^*(y),
\end{align}
where~$\partial_y \mathcal{V}^*$ is the Nemitsky operator induced by the gradient~$\partial_y V^*=D_y V (\cdot,\cdot)^\top$.
Note also that $\mathcal{F}^* \in C^1(W_T; (L^2(I;\mathbb{R}^m);L^2(I;\mathbb{R}^n)))$.
We further assume the following:
\begin{itemize}
\item [\textbf{A.3}]For every~$y_0 \in Y_0$ there exists a unique function~$y=\mathbf{y}^*(y_0)\in W_T$ satisfying
\begin{align*}
\dot{y}= \mathbf{f}(y)+ \mathbf{g}(y) \mathcal{F}^*(y), \quad y(0)=y_0, \quad \wnorm{y}\leq M_{Y_0}.
\end{align*}
Moreover we have
\begin{align*}
(y^*(y_0), \mathcal{F}^*(y^*(y_0))) \in \argmin \eqref{def:openloopproblem} \quad \forall y_0 \in Y_0.
\end{align*}
\end{itemize}
When referring to Assumption 1 we mean (\textbf{A.1})-(\textbf{A.3}).
We emphasize that the constant $M_{Y_0}$ appearing in (\textbf{A.2}) and (\textbf{A.3}) is assumed to be the same.
Note further that as a consequence of (\textbf{A.3}) problem \eqref{def:openloopproblem} admits a solution for each $y_0 \in Y_0$, with the optimal control given by $u^*= \mathcal{F}^*(y^*(y_0))$.
\begin{remark}\label{rem3}
Using $(\mathbf{A.1})$, $(\mathbf{A.3})$ as well as the implicit function theorem it can readily be verified that the mapping~$\mathbf{y}^*\colon Y_0 \to W_T$ from~($\mathbf{A.3}$) is continuously differentiable. Given~$\delta y_0 \in \mathbb{R}^n$ the directional derivative~$\delta y\coloneqq \partial\mathbf{y}^*(y_0)(\delta y_0)$ of~$\mathbf{y}^*$ at~$y_0 \in Y_0$ in direction $\delta y_0$ satisfies the linearized ODE system
\begin{align*}
\dot{\delta y}= D \mathbf{f}(\mathbf{y}^*(y_0))\delta y+ \lbrack D\mathbf{g}(\mathbf{y}^*(y_0))\delta y\rbrack \mathcal{F}^*(\mathbf{y}^*(y_0)) +\mathbf{g}(\mathbf{y}^*(y_0))D\mathcal{F}^*(\mathbf{y}^*(y_0))\delta y,\, \delta y(0)= \delta y_0.
\end{align*}
Here $D \mathbf{g}$ is induced by $D_yg$ which is given by
\begin{align*}
\left \lbrack D_y g(t,y) \delta y \right\rbrack_{ij}= \left( \sum^n_{k=1} \partial_k g_{ij}(t,y) \delta y_k \right) \quad \forall \delta y \in \mathbb{R}^n,
\end{align*}
where~$g(y)=(g_{ij})$ and~``$\partial_k$'' denotes the partial derivative w.r.t.\ the~$k$-th component of~$y$. The transposed~$D\mathbf{g}(y)^\top$, which will arise in the adjoint equation below, is induced by the tensor~$D_y g(t,\cdot)^\top=(D_y g(t,\cdot)_{kji})\in \mathbb{R}^{n\times n\times m}$, with $t\in I$. In particular, we readily verify that ~$D \mathbf{g}(\cdot)^\top \in \mathcal{B}(L^2(I;\mathbb{R}^m); \mathcal{B}(W_T; L^2(I;\mathbb{R}^n)))$.
\end{remark}
To end this section we collect structural information on the relation between the adjoint state, denoted by ${p}$ below, the optimal value function~$V^*$, and the induced optimal feedback law~$\mathcal{F}^*$.
\begin{prop} \label{prop:structure}
Let Assumption~\ref{ass:feedbacklaw} hold. Then there exists a unique continuous mapping~$\mathbf{p}^* \colon Y_0 \to W_T$ such that for each~$y_0 \in Y_0$ the tuple~$(y,p)=(\mathbf{y}^*(y_0),\mathbf{p}^*(y_0))$ satisfies
\begin{align}
\frac{d}{dt} y&=\mathbf{f}(y)+\mathbf{g}(y)\mathcal{F}^*(y),~ y(0)=y_0, \label{eq:stateprop} \\
-\frac{d}{dt} p&= D\mathbf{f}(y)^\top p+\lbrack D \mathbf{g}(y)^\top\mathcal{F}^*(y)\rbrack p+Q_1^\top Q_1(y-y_d),~ p(T)= Q^\top_2 Q_2(y(T)-y^T_d), \label{eq:adjointprop} \\
\mathcal{F}^*(y)&=- \frac{1}{\beta} \mathbf{g}(y)^\top p. \label{eq:gradienteqprop}
\end{align}
Moreover we have
\begin{align} \label{eq:dynamicalprog}
V^*(t,y(t))=J_t(y(t),F^*(t,y(t))),~p(t)= \partial_y V^*(t,y(t)) \quad \forall t\in [0,T].
\end{align}
\end{prop}
\begin{proof}[Proof of Proposition \ref{prop:structure}]
By (\textbf{A.3}) problem \eqref{def:openloopproblem} admits a solution for each $y_0 \in Y_0$. Then (\textbf{A.1})-(\textbf{A.2}) guarantee that
~\eqref{eq:adjointprop}, with $y=y(y_0)\in W_T$ the state component of a solution to \eqref{def:openloopproblem}, admits a unique solution $p$ in~$W_T$ which continuously depends on~$y\in W_T$.
Moreover \eqref{eq:stateprop} - \eqref{eq:gradienteqprop} represent the first order necessary optimality condition for~\eqref{def:openloopproblem} with the optimal control $u(t)= \mathcal{F}^*(y(t))$. Since $\mathbf{y}^*\colon Y_0 \to W_T$ is continuous as mentioned in Remark \ref{rem3} and the solution to \eqref{eq:adjointprop} depends continuously on $y\in W_T$, the claimed continuity $\mathbf{p}^* \colon Y_0 \to W_T$ follows.
Equation~\eqref{eq:dynamicalprog} is a direct consequence of the dynamic programming principle, and (\textbf{A.3}).
\end{proof}
\section{Optimal feedback control by value function approximation}\label{sec:learnfeedback}
This section is devoted to introducing a family of computationally tractable minimization problems from which we will ``learn'' approximations of optimal feedback laws. Our approach rests on two main pillars. First, given~$\varepsilon>0$, we consider a family of functions $V^\varepsilon_{\theta} \in \mathcal{C}(I\times \mathbb{R}^n) $ which are finitely parametrized by~$\theta \in \mathcal{R}_\varepsilon \simeq \mathbb{R}^{N_\varepsilon}$,~$N_\varepsilon \in \mathbb{N}$. These serve as ``discrete'' approximations of the optimal value function~$V^*$. The following a priori estimate is assumed, for some fixed $\varepsilon_0 >0$:
\begin{assumption} \label{ass:approxsmoothness}
For every~$0 <\varepsilon \leq \varepsilon_0$ there holds~$V_{\cdot}^\varepsilon \in\mathcal{C}^4(\mathcal{R}_\varepsilon \times \mathbb{R} \times \mathbb{R}^n)$ and~$V^\varepsilon_{\theta}(T,y_0)=\frac{1}{2} |Q_2(y_0-y_d(T))|^2$ for every~$y_0 \in \mathbb{R}^n$ and~$\theta \in \mathcal{R}_\varepsilon$.
Moreover there exists~$\theta_\varepsilon \in \mathcal{R}_{\varepsilon}$ with
\begin{align} \label{eq:approxcapa}
\max_{\substack{ t \in I, \\ |y| \leq 2 \widehat{M} }} |V^\varepsilon_{\theta_\varepsilon}(t,y)-V^*(t,y)|+|\partial_y (V^\varepsilon_{\theta_\varepsilon}(t,y)-V^*(t,y))| +\|\partial_{yy}(V^\varepsilon_{\theta_\varepsilon}(t,y)-V^*(t,y))\| \leq c \varepsilon
\end{align}
for some~$c>0$ independent of~$\varepsilon\in (0,\varepsilon_0]$.
\end{assumption}
Now recall from ~\eqref{def:optfeddnemitsk}
that the optimal feedback law~$\mathcal{F}^*$ is the superposition operator induced by~$F^*(t,y)=-(1/\beta)g(t,y)^\top \partial_y V^*(t,y) $. With the aim of preserving the dependence of the feedback law on the value function in our approximation, we define a set of parametrized feedback laws~$\mathcal{F}^\varepsilon_\theta$ associated to~$V^\varepsilon_\theta$,~$\theta \in \mathcal{R}_\varepsilon$, by
\begin{align*}
\mathcal{F}^\varepsilon_\theta(y)(t)= F^\varepsilon_\theta(t,y(t))= -\frac{1}{\beta} g(t,y(t))^\top \partial_y V^\varepsilon_\theta(t,y(t))
\end{align*}
for all~$y\in W_T$,~$t \in \bar{I}$ and~$\theta \in \mathcal{R}_\varepsilon$. A first approach to obtain an
optimal feedback law in the form~$\mathcal{F}^\varepsilon_\theta$ can then be found by replacing the open loop control~$u$ in~\eqref{def:openloopproblem} by the closed loop expression~$\mathcal{F}^\varepsilon_\theta(y)$ and minimizing for~$\theta \in \mathcal{R}_\varepsilon$:
\begin{align}\label{eq:aux7}
\min_{y \in W_T, \theta \in \mathcal{R}_\varepsilon } J(y, F^\varepsilon_\theta(y))+ \frac{\gamma_\varepsilon}{2} \|\theta\|^2_{\mathcal{R}_\varepsilon} \quad \text{s.t.} \quad \dot{y}= \mathbf{f}(y)+\mathbf{g}(y) \mathcal{F}^\varepsilon_\theta(y),~y(0)=y_0,
\end{align}
where~$\|\cdot\|_{\mathcal{R}_\varepsilon}$ denotes a Hilbert space norm on~$\mathcal{R}_\varepsilon$,~$\gamma_\varepsilon > 0$ and~$y_0 \in Y_0$ is fixed. This represents the goal of finding a feedback law~$\mathcal{F}^\varepsilon_\theta$ together with a trajectory~$y\in W_T$ which satisfy~$(y, \mathcal{F}^\varepsilon_\theta(y))\in \argmin \eqref{def:openloopproblem} $. However, this approach falls short in several aspects. First, we cannot hope to recover a solution of the semiglobal optimal feedback control problem for all $y_0\in Y_0$, since the minimization in \eqref{eq:aux7} is associated with a single initial condition only. Second, it fails to impose properties that would guide ${\mathcal F}^\varepsilon_\theta(y)$ to be close to $\mathcal{V}^*$, and it does not exploit the relation between the adjoint state~$p$, see~\eqref{eq:adjointprop}, and the gradient of the value function~$\partial_y \mathcal{V}^*$. Incorporating this information into the problem can, potentially, lead to improved learning results and improved parameterized feedback laws which behave similarly to~$\mathcal{F}^*$.
These considerations lead to the second pillar of our approach, namely a succinct choice of the cost for the learning problem. For this purpose we use all of $Y_0$ as ``learning set'' for initial conditions.
It is endowed with the normalized Lebesgue measure~$\mathcal{L}$.
Moreover we define the augmented objective
\begin{equation}\label{eq:aux2}
\begin{array}l
J_\varepsilon(y,p,\theta)= J(y,\mathcal{F}^\varepsilon_{\theta}(y))\\[1.5ex]
+\int^T_0 \frac{\gamma_1}{2} |V^\varepsilon_{\theta}(t,y(t))-J_t(y,\mathcal{F}^\varepsilon_{\theta}(y))|^2 + \frac{\gamma_2}{2}|\partial_y V^\varepsilon_{\theta}(t,y(t))-p(t)|^2~\mathrm{d}t
\end{array}
\end{equation}
for penalty parameters~$\gamma_1, \gamma_2 \geq 0$. The arguments in $J_t$ are the restriction of the solution $y$ to the equation in \eqref{eq:aux7}
and the feedback $\mathcal{F}^\varepsilon_{\theta}(y)$ to $[t,T]$.
The additional terms in this new objective functional penalize the violation of the cost and its gradient by means of the approximation based on $V_\theta^\varepsilon$, i.e. they penalize the differences between
$J_t(y,\mathcal{F}^\varepsilon_\theta(y))$ and $V^\varepsilon_\theta(t,y(t))$, as well as
$p(t)$ and~$\partial_y V^\varepsilon_\theta(t,y(t))$.
\color{black} Given a strictly positive weight function~$\omega \in L^\infty(Y_0);~0<c \leq \omega$ a.e., we thus propose to find a feedback law~$\mathcal{F}^\varepsilon_\theta$ by solving the ensemble control problem
\begin{align}\label{def:approxfeedprop}
\min_{\substack{\mathbf{y}\in \mathbf{Y}_{ad},\\ \mathcal{F}^\varepsilon_\theta(\mathbf{y}) \in \mathbf{U}_{ad},\\ \mathbf{p}\in \mathcal{C}(Y_0;W_T) \\ \theta \in \mathcal{R}_{\varepsilon}}}\mathcal{J}_\varepsilon(\mathbf{y},\mathbf{p},\theta)\coloneqq \int_{Y_0} \omega(y_0)\, J_\varepsilon(\mathbf{y}(y_0),\mathbf{p}(y_0),\theta)~\mathrm{d} \mathcal{L}(y_0)+ \frac{\gamma_\varepsilon}{2} \|\theta\|^2_{\mathcal{R}_\varepsilon} \tag{$\mathcal{P}_\varepsilon$}
\end{align}
subject to the system of closed loop state~\textit{and} adjoint equations
\begin{align}
\dot{\mathbf{y}}(y_0)=\mathbf{f}(\mathbf{y}(y_0))+\mathbf{g}(\mathbf{y}(y_0))\mathcal{F}^\varepsilon_\theta(\mathbf{y}(y_0)) \label{eq:statepropapprox} \\
-\dot{\mathbf{p}}(y_0)= D\mathbf{f}(\mathbf{y}(y_0))^\top\mathbf{p}(y_0)+ \lbrack D\mathbf{g}(\mathbf{y}(y_0))^\top\mathcal{F}^\varepsilon_\theta(\mathbf{y}(y_0))\rbrack \mathbf{p}(y_0)+\mathbf{Q}_1^\top \mathbf{Q}_1(\mathbf{y}(y_0)-y_d) \label{eq:adjointpropapprox} \\
\mathbf{y}(y_0)(0)=y_0,~\mathbf{p}(y_0)(T)= Q^\top_2 Q_2(\mathbf{y}(y_0)(T)-y_d^T),~\mathbf{y}(y_0)\in \mathcal{Y}_{ad} \label{eq:constraintprop}
\end{align}
for~$\mathcal{L}$-a.e.~$y_0\in Y_0$. Above~$\mathbf{Y}_{ad} \subset \mathcal{C}(Y_0;W_T)$ and~$\mathbf{U}_{ad} \subset L^2(Y_0;L^2(I;\mathbb{R}^m))$ denote the admissible sets of ensemble state trajectories and admissible controls. They will be specified in section \ref{sec:existence}.
\section{Examples} \label{sec:examples}
In this section we discuss two particular examples for the parameterized mappings~$V^\varepsilon$: deep residual networks and piecewise polynomial functions of sufficiently high degree.
\subsection{Residual networks} \label{subsec:residual}
To explain the approximation of the value function by residual neural
networks, we first fix some notation.
Let~$L_\varepsilon\in \mathbb{N}$,~$L_\varepsilon \geq 2$, as well as~$N^{\varepsilon}_{i}\in
\mathbb{N}$,~$i=1, \dots,L_\varepsilon -1$ be given. We set~$N^{\varepsilon}_0=n+1$
and~$N^{\varepsilon}_L=1$. Furthermore define
\begin{align*}
\mathcal{R}_\varepsilon= \bigtimes^{L_\varepsilon-1}_{i=1} \left ( \mathbb{R}^{N^\varepsilon_{i}
\times N^\varepsilon_{i-1}} \times \mathbb{R}^{N^\varepsilon_{i} \times N^\varepsilon_{i-1}} \times
\mathbb{R}^{N^\varepsilon_i} \right ) \times \mathbb{R}^{N^\varepsilon_{L} \times N^\varepsilon_{L-1}}.
\end{align*}
The space~$\mathcal{R}_\varepsilon$ is uniquely determined by
its~\textit{architecture}
\begin{align*}
\text{arch}(\mathcal{R}_\varepsilon)=\left( N^\varepsilon_0, N^\varepsilon_1, \dots ,
N^\varepsilon_L\right)\in \mathbb{N}^{L_\varepsilon+1}.
\end{align*}
A set of parameters~$\theta \in \mathcal{R}_\varepsilon$ given by
\begin{align*}
\theta=\left( W_{11}, W_{12}, b_1,\dots, W_{{L_\varepsilon}}
\right)
\end{align*}
is called a~\textit{neural network} with~$L_\varepsilon$~\textit{layers}.
Moreover let~$\sigma \in \mathcal{C}^4(\mathbb{R})$ be given and assume that~$\sigma$ is not a polynomial.
The function
\begin{equation}\label{eq:shift}
\begin{array}{l}
V^\varepsilon_{\theta}(t,y)= \frac{1}{2} |Q_2(y-y_d(T))|^2 \\[1.5ex]
\qquad \qquad + f^{\sigma}_{L_\varepsilon, \theta} \circ
f^{\sigma}_{L_\varepsilon-1, \theta} \circ \cdots \circ f^{\sigma}_{1,
\theta}((t,y))-f^{\sigma}_{L_\varepsilon, \theta} \circ f^{\sigma}_{L_\varepsilon-1,
\theta} \circ \cdots \circ f^{\sigma}_{1, \theta}((T,y))
\end{array}
\end{equation}
for~$(t,y)\in \mathbb{R} \times \mathbb{R}^n$ where
\begin{align*}
f^{\sigma}_{L_\varepsilon, \theta}(x)= W_{L_\varepsilon} x \quad \forall x
\in \mathbb{R}^{N^\varepsilon_{L-1}}
\end{align*}
as well as
\begin{align*}
f^{\sigma}_{i, \theta}(x)= \sigma(W_{i1}x+b_i)+ W_{i2}x \quad \forall x
\in \mathbb{R}^{N^\varepsilon_{i-1}},~i=1, \dots, L_\varepsilon-1
\end{align*}
is called the~\textit{realization} of~$\theta$ with~\textit{activation
function}~$\sigma$.
Here the application of~$\sigma$ is defined to act componentwise, i.e.,
given an index~$i \in \{1,\dots,L_\varepsilon-1\}$ and~$x \in \mathbb{R}^{N^\varepsilon_i}$ we set
\begin{align*}
\sigma(x)=(\sigma(x_1), \dots, \sigma(x_{N^\varepsilon_i}))^\top.
\end{align*}
By construction,~$V^\varepsilon_\theta$ satisfies the terminal condition
\begin{align*}
V^\varepsilon_\theta(T,y)=\frac{1}{2} |Q_2(y-y_d(T))|^2 \quad \forall y\in
\mathbb{R}^n.
\end{align*}
Moreover Assumption~\ref{ass:approxsmoothness} is fulfilled as confirmed by the following result.
\begin{theorem} \label{thm:approxbynetworks}
For every~$\varepsilon>0$ there exist
architectures~$\mathcal{R}_\varepsilon$ and~$\theta_\varepsilon\in
\mathcal{R}_\varepsilon$ such that~$V^\varepsilon \in \mathcal{C}^4(\mathcal{R}_\varepsilon
\times \mathbb{R} \times \mathbb{R}^n)$ and~$V^\varepsilon_{\theta_\varepsilon}$
satisfies~\eqref{eq:approxcapa}.
\end{theorem}
\begin{proof}
Let us set $h(t,y)= V^*(t,y)$ for $(t,y) \in I \times \bar
B_{2\widehat M}(0)$. Then $h$ is twice continuously differentiable on $I
\times \bar B_{2\widehat M}(0)$ and $h(T,y)= \frac{1}{2} |Q_2(y-y^T_d)|^2$. The
universal approximation theorem implies that for all $\varepsilon >0 $
there exists $\tilde h_\varepsilon \in \mathcal{M}_{\text{net}}$ such that
\begin{equation}\label{eq:aux10}
\|h-\tilde h_\varepsilon \|_{C^2(I\times \bar B_{2\widehat M}(0))} \le
\frac{\varepsilon}{2},
\end{equation}
where $\mathcal{M}_{\text{net}}= \text{span} \{\sigma( \tilde w\cdot x+ \tilde b ):
\tilde w \in \mathbb{R}^{n+1},\, \tilde b\in\mathbb{R} \}$, see, e.g., \cite[Theorem 4.1]{P99},
\cite{hornik91}. Let us observe that $\tilde h_\varepsilon$ can be
expressed as a residual network. Indeed, since
$$
\tilde h_\varepsilon = \sum_{i=1}^M \tilde c_i \sigma(\tilde w_i \cdot x
+\tilde b_i)
$$
for some $M\in \mathbb{N}$, $\tilde w_i \in \mathbb{R}^{n+1}, \tilde b_i, \tilde
c_i \in \mathbb{R}$, choosing $L_\varepsilon=2, W_{11}\in \mathbb{R}^{M\times (n+1)}$ with
rows $\{\tilde w_i\}_{i=1}^M$,
\begin{equation*}
b_1=\text{col}(\tilde b_1,\dots, \tilde b_M), \,W_2=(\tilde
c_1,\dots, \tilde c_M), \, W_{12}=0,
\end{equation*}
we have $\tilde h_\varepsilon = f^\sigma_{2,\theta}\circ
f^\sigma_{1,\theta}$. Moreover, $\tilde h_\varepsilon\in
C^4(I\times \bar B_{2\widehat M})$. Following \eqref{eq:shift} we define
\begin{align*}
V^\varepsilon_{\theta_\varepsilon}(t,y)= \frac{1}{2} |Q_2(y-y^T_d)|^2 + \tilde h_\varepsilon(t,y) -
\tilde h_\varepsilon(T,y)\in C^4(I\times \bar B_{2\widehat M})
\end{align*}
and estimate
\begin{align*}
\| V^\varepsilon_{\theta_\varepsilon}(t,y) - V^*(t,y)\|_{C^2} &=
\| \tilde h_{\varepsilon}(t,y) - \tilde h_{\varepsilon}(T,y) + V^*(T,y) -
V^*(t,y)\|_{C^2} \\
& \leq 2 \|h-\tilde{h}_\varepsilon\|_{C^2} \leq \varepsilon,
\end{align*}
where all norms are taken over $I\times \bar B_{2\widehat M}(0)$. This
ends the proof.
\end{proof}
\subsection{Piecewise polynomials} \label{subsec:polynomials}
Fix~$\varepsilon_0>0$, and let $\varepsilon\in (0, \varepsilon_0]$ be arbitrarily fixed. Throughout this subsection we assume ~$(\mathbf{A.2})$ and in particular we shall make use of the global Lipschitz continuity of $D^2 V^*$ on $\bar K= \bar I \times \bar B_{2\widehat M}(0)$. Since $\bar K$ is compact and hence totally bounded, there exist $n_\varepsilon \in \mathbb{N}$ and $\{(\bar{t}_i,\bar{y}^i_0)\}_{i=1}^{n_\varepsilon}\subset \mathbb{R}^{n+1}$ such that
\begin{align*}
\bar{K} \subset \bigcup^{n_\varepsilon}_{i=1} K_i \quad \text{where} \quad K_i= B_{\varepsilon}( (\bar{t}_i,\bar{y}^i_0)) .
\end{align*}
Note that we do not highlight the dependence of $(\bar{t}_i,\bar{y}^i_0)$ and $K_i$ on $\varepsilon$.
For each~$i$ define the parametrized polynomial
\begin{align*}
V^\varepsilon_i(A,b,c,t,y)= (t-\bar{t}_i, y-\bar{y}^i_0)^\top A(t-\bar{t}_i, y-\bar{y}^i_0)+ b^\top (t-\bar{t}_i, y-\bar{y}^i_0)+c
\end{align*}
with
\begin{align*}
(A,b,c,t,y) \in \operatorname{Sym}(n+1) \times \mathbb{R}^{n+1} \times \mathbb{R} \times \mathbb{R}^{n+1},
\end{align*}
where $\operatorname{Sym}(n)$ denotes the space of real symmetric $ n \times n$ matrices. Note that~$V^\varepsilon_i $ is infinitely differentiable in all of its arguments.
For each $\varepsilon\in (0,\varepsilon_0]$ we define a special partition of unity $\{\varphi_i\}_{i=1}^{n_\varepsilon}$ subordinate to~$K_i$ with
$\varphi_i \colon \mathbb{R} \times \mathbb{R}^n \to [0,1]$, of class~$\mathcal{C}^4$, and
\begin{equation}\label{eq:kk10}
\left\{
\begin{array}{c}
\supp \varphi_i = \bar K_i, \qquad ~\sum^{n_\varepsilon}_{i=1} \varphi_i(t,y)=1, \, \forall (t,y) \in \bar K, \\[1.7ex]
\|D^{j}\varphi_i\|_{C(\bar K_i\cap \bar K)} =\bar \mu \varepsilon^{-j}, \forall i=1,\dots, n_\varepsilon, \; \text{ and } j\in\{1,2\},\\[1.7ex]
\text{card } \{i: \varphi_i(t,y) \neq 0 \} \le \mathfrak{m} \quad \forall (t,y)\in \bar K, \varepsilon\in (0,\varepsilon_0],
\end{array}
\right.
\end{equation}
with $\bar \mu$ and $\mathfrak{m}$ positive constants independent of $i, (t,y)\in \bar K, \varepsilon\in (0,\varepsilon_0]$.
Finally we define
\begin{align*}
\mathcal{R}_\varepsilon= \bigtimes^{n_\varepsilon}_{i=1} ( \operatorname{Sym}(n+1) \times \mathbb{R}^{n+1} \times \mathbb{R} ),
\end{align*}
and introduce the family of parameterized functions on $\mathbb{R}^{n+1}$ by
\begin{align} \label{eq:taylorfunctionchoice}
V^\varepsilon_\theta (t,y)= \frac{1}{2} |Q_2(y-y_d(T))|^2+ \sum^{n_\varepsilon}_{i=1} \varphi_i(t,y)\left( V^\varepsilon_i(A_i,b_i,c_i,t,y)-V^\varepsilon_i(A_i,b_i,c_i,T,y) \right)
\end{align}
for~$\theta=(A_1,b_1,c_1,\dots, A_{n_\varepsilon},b_{n_\varepsilon},c_{n_\varepsilon})\in\mathcal{R}_\varepsilon$. Obviously we have~$V^\varepsilon_{\cdot}(\cdot) \in\mathcal{C}^4(\mathcal{R}_\varepsilon \times \mathbb{R} \times \mathbb{R}^n)$ and
\begin{align*}
V^\varepsilon_\theta (T,y)=\frac{1}{2} |Q_2(y-y^T_d)|^2 \quad \forall y \in \mathbb{R}^n.
\end{align*}
Thus the final time condition in the HJB equation is fulfilled. Next we show that $V^\varepsilon_{\theta}$ satisfies the approximation property in Assumption~\ref{ass:approxsmoothness} for the particular choice of
\begin{equation} \label{eq:taylorparachoice}
\begin{array}{ll}
\theta_\varepsilon = &\big(\partial_{yy} V^*(\bar{t}_1,\bar{y}^1_0), \partial_y V^*(\bar{t}_1,\bar{y}^1_0),V^*(\bar{t}_1,\bar{y}^1_0), \\[1.4ex]
&\dots, \partial_{yy} V^*(\bar{t}_{n_\varepsilon},\bar{y}^{n_\varepsilon}_0), \partial_y V^*(\bar{t}_{n_\varepsilon},\bar{y}^{n_\varepsilon}_0),V^*(\bar{t}_{n_\varepsilon},\bar{y}^{n_\varepsilon}_0)\big),
\end{array}
\end{equation}
i.e. $V^\varepsilon_i$ in \eqref{eq:taylorfunctionchoice} are chosen with
\begin{equation}\label{eq:kk11}
(\bar{A}_i,\bar{b}_i,\bar{c}_i)=(\partial_{yy} V^*(\bar{t}_i,\bar{y}^i_0), \partial_y V^*(\bar{t}_i,\bar{y}^i_0),V^*(\bar{t}_i,\bar{y}^i_0)) \quad i=1,\dots,n_\varepsilon.
\end{equation}
\begin{theorem}
\label{thm:Taylor}
Let~$V^\varepsilon$ and~$\theta_\varepsilon$ be chosen according to~\eqref{eq:taylorfunctionchoice} and~\eqref{eq:taylorparachoice}, respectively, and suppose that ~$(\mathbf{A.2})$ and \eqref{eq:kk10} are satisfied. Then Assumption~\ref{ass:approxsmoothness} holds.
\end{theorem}
\begin{proof}
We already argued that~$V_\theta^\varepsilon$ has the desired regularity. It remains to prove the required approximation capabilities.
For abbreviation set~$V^\varepsilon_i(t,y)=V^\varepsilon_i({\bar A_i},{\bar b_i},{\bar c_i},t,y)$, with $({\bar A_i},{\bar b_i},{\bar c_i})$ as in \eqref{eq:kk11}.
Since~$V^\varepsilon_i$ is the second order Taylor expansion of~$V^*$ at~$(\bar{t}_i,\bar{y}^i_0)$ we conclude that
\begin{equation}\label{eq:kk12}
\|V^*-V^\varepsilon_i\|_{C^{2-j}(\bar K_i \cap\bar K)} \leq \bar c\varepsilon^{j+1}, \quad \text{for } j\in\{0,1,2\},
\end{equation}
for some~$\bar c>0$ depending on the global Lipschitz constant of $D^2 V^*$ on $\bar K$, and independent of~$\varepsilon \in (0,\varepsilon_0]$ and~$i$. Recall also that the sets $K_i$ depend on $\varepsilon$.
To estimate $V^*(t,y) -V^\varepsilon_\theta(t,y)$ we recall that $V^*(T,y)=\frac{1}{2} |Q_2(y-y^T_d)|^2$, and express $V^*(t,y)$ as $V^*(t,y)= V^*(T,y)+ V^*(t,y)- V^*(T,y)$. This leads to
\begin{equation*}
\begin{array} l
V^*(t,y)-V^\varepsilon_\theta(t,y) \\[1.5ex]
\;= \sum_{i\in \{1,\dots,n_\varepsilon\}} \varphi_i(t,y) (V^*(t,y)-V^\varepsilon_i(t,y)) +\sum_{i\in \{1,\dots,n_\varepsilon\}}\varphi_i(T,y)\ (V^*(T,y)-V^\varepsilon_i(T,y)),
\end{array}
\end{equation*}
for $(t,y) \in \bar K$.
From \eqref{eq:kk10} and \eqref{eq:kk12} we deduce that $\|V^*(t,y) -V^\varepsilon_\theta(t,y)\|_{C(\bar K)}\le 2 \bar c \varepsilon^3$.
For the gradient with respect to~$y$ we proceed similarly. Fixing ~$(t,y)\in \bar K $ we estimate
\begin{align*}
|\partial_y V^*(t,y)&-\partial_y V^\varepsilon_{\theta_\varepsilon}(t,y)| \leq D_1+D_2
\end{align*}
where
\begin{align*}
D_1 &=\sum_{i \in \{1,\dots,n_\varepsilon\}} \left \lbrack \varphi_i(t,y) |\partial_y V^*(t,y)-\partial_y V^\varepsilon_i(t,y)|+ | V^\varepsilon_i(t,y)-V^*(t,y)| |\partial_y \varphi_i(t,y)| \right \rbrack \\
D_2 &=\sum_{i \in \{1,\dots,n_\varepsilon\}} \left \lbrack \varphi_i(T,y) |\partial_y V^*(T,y)-\partial_y V^\varepsilon_i(T,y)|+ | V^\varepsilon_i(T,y)-V^*(T,y)| |\partial_y \varphi_i(t,y)| \right \rbrack.
\end{align*}
By \eqref{eq:kk12} with $j=1$ the first terms in $D_1$ and $D_2$ can be estimated by $\bar c \varepsilon ^2$.
Using \eqref{eq:kk10} and \eqref{eq:kk12} the second terms in $D_1$ and $D_2$ can be bounded by $\mathfrak{m} \bar \mu \varepsilon ^2$. Combining these estimates we arrive at
\begin{equation*}
\|\partial_y V^*(t,y) -\partial_y V^\varepsilon_\theta(t,y)\|_{C(\bar K)}\le 2 \varepsilon^2( \bar c + \mathfrak{m}\, \bar \mu).
\end{equation*}
In an analogous manner one can obtain a bound of the order $O(\varepsilon)$ on the difference of the Hessians of $V^*$ and $V^\varepsilon_\theta$. This finishes the proof.
\end{proof}
In Appendix \ref{app1} it is shown how standard mollifiers can be used so that \eqref{eq:kk10} is satisfied. This requires some extra attention due to the required bounds on the derivatives of $\varphi_i$.
\section{Existence of minimizers to~\eqref{def:approxfeedprop}} \label{sec:existence}
This section is devoted to proving the existence of minimizing triples to~\eqref{def:approxfeedprop}. Throughout this section $c$ will denote a generic constant independent of $\varepsilon>0$ and $y_0\in Y_0$.
\subsection{Existence of admissible points} \label{subsec:exstofmin}
Recall from Assumption~\ref{ass:feedbacklaw} and Remark \ref{rem3} that the optimal ensemble state~$\mathbf{y}^* \in \mathcal{C}(Y_0;W_T)$ satisfies~$\|{\mathbf{y}^*}\|_{{\mathcal C}}\leq M_{Y_0}$. Accordingly we define the set of admissible states and admissible controls as
\begin{align*}
\mathbf{Y}_{ad} = \left\{\, \mathbf{y} \in \mathcal{C}(Y_0;W_T)\;|\;\|\mathbf{y}\|_{\mathcal{C}} \leq 2 M_{Y_0}\, \right\}, \quad \mathbf{U}_{ad} \coloneqq L^2(Y_0;L^2(I;\mathbb{R}^m)).
\end{align*}
We also recall the definition $\mathcal{Y}_{ad}$ in \eqref{eq:kk13}.
To prove the existence of minimizers to~\eqref{def:approxfeedprop} we first argue that the admissible set
\begin{align} \label{def:admissibleset}
\mathcal{N}^{\varepsilon}_{ad}= \left\{(\mathbf{y},\mathbf{p},\theta)\!\in\! \mathbf{Y}_{ad}\!\times\!\mathcal{C}(Y_0;W_T) \!\times\! \mathcal{R}_{\varepsilon}|(\mathbf{y},\mathbf{p},\theta)~\text{satisfies}\!~\eqref{eq:statepropapprox}\!
-\!\eqref{eq:constraintprop}, \mathcal{F}^\varepsilon_{\theta}(\mathbf{y})\!\in\! \mathbf{U}_{ad}\right\}
\end{align}
is nonempty for~$\varepsilon$ small enough.
For this purpose consider the family~$\theta_\varepsilon \in \mathcal{R}_{\varepsilon}$,~$0< \varepsilon \leq \varepsilon_0$, from Assumption~\ref{ass:approxsmoothness} as well as the associated closed loop system of state and adjoint equations
\begin{align} \label{eq:neuralnetstate}
\dot{y}_\varepsilon&= \mathbf{f}(y_\varepsilon)+ \mathbf{g}(y_\varepsilon)
\mathcal{F}^\varepsilon_{\theta_\varepsilon} (y_\varepsilon), \\
-\dot{p}_\varepsilon&= D\mathbf{f}(y_\varepsilon)^\top p_\varepsilon + \lbrack D\mathbf{g}(y_\varepsilon)^\top\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y_\varepsilon)\rbrack p_\varepsilon+\mathbf{Q}_1^\top \mathbf{Q}_1(y_\varepsilon-y_d), \label{eq:neuraladjoint}
\end{align}
subject to the following initial and terminal conditions
\begin{align*}
y_\varepsilon(0)=y_0,~p_\varepsilon(T)= Q^\top_2 Q_2(y_\varepsilon(T)-y_d^T),
\end{align*}
for every~$y_0 \in Y_0$.
We first prove the following approximation result.
\begin{theorem} \label{thm:existenceneuralnetwork2}
Let Assumptions~\ref{ass:feedbacklaw} and~\ref{ass:approxsmoothness} hold. There exists a constant $c$ such that for all~$\varepsilon>0$ small enough and for all $y_0\in Y_0$
the system \eqref{eq:neuralnetstate} and~\eqref{eq:neuraladjoint}
admits unique solutions~$y_\varepsilon=\mathbf{y}_\varepsilon (y_0) \in \mathcal{Y}_{ad}$ and~$p_\varepsilon=\mathbf{p}_\varepsilon (y_0) \in W_T$. Furthermore ~$\mathbf{y}_\varepsilon \in \mathcal{C}^1 (Y_0;W_T)$,~$\mathbf{p}_\varepsilon \in \mathcal{C} (Y_0;W_T)$, and $\mathcal{F}^*(\mathbf{y}^*) \in \mathcal{C} (Y_0;L^2(I;\mathbb{R}^m))$ hold and
\begin{align*}
\|\mathbf{y}_\varepsilon-\mathbf{y}^*\|_{\mathcal{C}^1(Y_0;W_T)}+
\|\mathbf{p}_\varepsilon-\mathbf{p}^*\|_{\mathcal{C}(Y_0;W_T)}+ \|\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon)-\mathcal{F}^*(\mathbf{y}^*)\|_{\mathcal{C}(Y_0;L^2(I;\mathbb{R}^m))} \leq c \varepsilon.
\end{align*}
In particular,~$(\mathbf{y}_\varepsilon,\mathbf{p}_\varepsilon,\theta_\varepsilon)\in \mathcal{N}^\varepsilon_{ad}$ for all~$\varepsilon>0$ small enough.
\end{theorem}
In order to prove this we require several auxiliary results.
\begin{lemma} \label{lem:lipschitzoffeed}
There exists a constant $c$ such that for all $\varepsilon$ small enough there holds
\begin{align*}
\|(\mathcal{F}^*(y_1)-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y_1))-(\mathcal{F}^*(y_2)-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y_2))\|_{L^2(I;\mathbb{R}^m)}\leq c\varepsilon \wnorm{y_1-y_2}, \quad \forall y_1,y_2 \in \mathcal{Y}_{ad}.
\end{align*}
\end{lemma}
\begin{proof}
According to the definition of~$\mathcal{F}^*$ and~$\mathcal{F}^\varepsilon_{\theta_\varepsilon}$ we split
\begin{align*}
\|(\mathcal{F}^*(y_1)-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y_1))-(\mathcal{F}^*(y_2)-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y_2))\|_{L^2(I;\mathbb{R}^m)} \leq D_1+D_2
\end{align*}
with
\begin{align*}
D_1&= 1/\beta \, \|\mathbf{g}(y_1)^\top\|_{\mathcal{B}(L^2(I;\mathbb{R}^n),L^2(I;\mathbb{R}^m))} \|\partial_y((\mathcal{V}^*(y_1)\!-\!\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_1))-(\mathcal{V}^*(y_2)\!-\!\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_2)))\|_{L^2(I;\mathbb{R}^n)} \\
D_2 &= 1/\beta \, \|\mathbf{g}(y_1)^\top-\mathbf{g}(y_2)^\top\|_{\mathcal{B}(L^2(I;\mathbb{R}^n),L^2(I;\mathbb{R}^m))} \|\partial_y(\mathcal{V}^*(y_2)-\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_2))\|_{L^2(I;\mathbb{R}^n)}.
\end{align*}
Applying the integral mean value theorem yields
\begin{align*}
\notag
\|\partial_y((\mathcal{V}^*(y_1)-\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_1))&-(\mathcal{V}^*(y_2)-\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_2)))\|_{L^2(I;\mathbb{R}^n)}
\\[1.5ex]
& \leq
\sup_{s\in
[0,1]}\|\partial_{yy}(\mathcal{V}^*(y_1+sh)-\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_1+sh))\|_{\mathcal{B}(W_T,L^2(I;\mathbb{R}^n))}
\wnorm{h}
\end{align*}
with~$h=y_2-y_1\in W_T$. Note that~$y_1+sh \in \mathcal{Y}_{ad}$ for all~$s \in [0,1]$. Thus we can use Assumption ~\ref{ass:approxsmoothness} for every~$s \in [0,1]$ and~$\delta y \in W_\infty$ and estimate
\begin{align*}
\|\partial_{yy}(\mathcal{V}^*(y_1+sh)&-\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_1+sh))\delta y\|_{L^2(I;\mathbb{R}^n)}
\\&\leq \sqrt{\int_0^T~|\partial_{yy}(V^*(t,y_1(t)+sh(t))-V^\varepsilon_{\theta_\varepsilon}(t,y_1(t)+sh(t)))|^2_{\mathbb{R}^{n
\times n}}|\delta y(t)|^2\mathrm{d} t}
\\&\leq c \varepsilon \|\delta y\|_{L^2(I;\mathbb{R}^n)} \leq \varepsilon c \wnorm{\delta y}.
\end{align*}
Similarly we obtain
\begin{align*}
\|\partial_{y}(\mathcal{V}^*(y_2)-\mathcal{V}^\varepsilon_{\theta_\varepsilon}(y_2))\|_{L^2(I;\mathbb{R}^n)}= \sqrt{\int^T_0 |\partial_{y}({V}^*(t,y_2(t))-{V}^\varepsilon_{\theta_\varepsilon}(t,y_2(t)))|^2~\mathrm{d} t} \leq \sqrt{T}c \varepsilon.
\end{align*}
Lastly, recall that~$\mathbf{g}$ is Lipschitz continuous and uniformly bounded on~$\mathcal{Y}_{ad}$.
Combining these facts yields the desired statement.
\end{proof}
With the same arguments the following a priori estimate can be obtained. For the sake of brevity
its proof is omitted.
\begin{coroll}\label{lem:calmness}
There exists a constant $c$ such that for all $\varepsilon$ small enough there holds
\begin{align*}
\|\mathcal{F}^*(y)-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y)\|_{L^2(I;\mathbb{R}^m)}
\leq c \varepsilon \wnorm{y}, \quad \forall y \in \mathcal{Y}_{ad}.
\end{align*}
\end{coroll}
Next we establish existence of a unique solution to~\eqref{eq:neuralnetstate} as well as a first approximation result.
\begin{prop} \label{thm:existenceneuralnetwork}
Let Assumptions~\ref{ass:feedbacklaw} and~\ref{ass:approxsmoothness} hold. Then for all~$\varepsilon>0$ small enough
there is a unique~$\mathbf{y}_\varepsilon \in \mathcal{C}^1(Y_0;W_T)$ such that~$y_\varepsilon\coloneqq\mathbf{y}_\varepsilon(y_0) \in \mathcal{Y}_{ad}$ satisfies~\eqref{eq:neuralnetstate} for all $y_0 \in Y_0$.
Moreover there exists a constant $c$ independent of $\varepsilon$ such that
\begin{align*}
\|\mathbf{y}^*-\mathbf{y}_\varepsilon\|_{\textcolor{black}{\mathcal{C}(Y_0;W_T)}}+ \|\mathcal{F}^*(\mathbf{y}^*)-\mathcal{F}^{\varepsilon}_{\theta_\varepsilon}(\mathbf{y}_\varepsilon)\|_{\textcolor{black}{\mathcal{C}(Y_0; L^2(I;\mathbb{R}^m))}} \leq c\varepsilon.
\end{align*}
In particular we have $\|\mathbf{y}_\varepsilon\|_{\mathcal{C}(Y_0;W_T)} \leq 2 M_{Y_0}$ for all sufficiently small $\varepsilon$.
\end{prop}
\begin{proof}
The proof is based on a fixed-point argument. Let~$y_0 \in
Y_0$ be arbitrary but fixed. Define the set
\begin{align*}
\mathcal{M}= \left\{\, y \in W_T \;|\;\wnorm{y} \leq \frac{3}{2}
M_{Y_0}\,\right\} \subset \mathcal{Y}_{ad}.
\end{align*}
On~$\mathcal{M}$ we consider the mapping~$\mathcal{Z} \colon \mathcal{M}
\to W_T$, where~$z=\mathcal{Z}(y)\in \mathcal{Y}_{ad}$ is the unique
solution of
\begin{align} \label{eq:auxeqfixpoint}
\dot{z}= \mathbf{f}(z)+ \mathbf{g}(z)\mathcal{F}^*(z)+
\mathbf{g}(y)\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y)-\mathbf{g}(y) \mathcal{F}^*(y),
\quad z(0)=y_0.
\end{align}
It is well-defined since the
perturbation function~$v=\mathbf{g}(y)\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y)-
\mathbf{g}(y) \mathcal{F}^*(y)\in L^2(I;\mathbb{R}^n)$ satisfies
\begin{align*}
\|v\|_{L^2}\leq \|\mathbf{g}(y)\|_{\mathcal{B}(L^2(I;\mathbb{R}^m),L^2(I;\mathbb{R}^n))}
\|\mathcal{F}^*(y)-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y)\|_{L^2} \leq \frac{3}{2} c \varepsilon M_{Y_0} \|\mathbf{g}(y)\|_{\mathcal{B}(L^2(I;\mathbb{R}^m),L^2(I;\mathbb{R}^n))}
\end{align*}
where we use Corollary~\ref{lem:calmness} and the definition of~$\mathcal{M}$. Hence $\|v\|_{L^2}\leq c \varepsilon$.
Here and below $c$ denotes a generic constant which is independent of $y_0\in Y_0$ and all $\varepsilon>0$ sufficiently small.
We may invoke Proposition~\ref{thm:existspert}
and Corollary~\ref{coroll:locallipschitzofstate} from the Appendix, to assert the existence of a
unique solution~$z\in \mathcal{Y}_{ad}$ to~\eqref{eq:auxeqfixpoint} with
\begin{align*}
\wnorm{z}& \leq M_{Y_0}+c\|v\|_{L^2} \leq \frac{3}{2} M_{Y_0},\quad \forall y_0 \in Y_0,
\end{align*}
if~$\varepsilon>0$ is chosen small enough.
From this we particularly conclude~$\mathcal{Z}(\mathcal{M})\subset \mathcal{M}$ for
all~$y_0 \in Y_0$ and~$\varepsilon >0$ small. It remains to prove
that~$\mathcal{Z}$ is a contraction. To this end let~$y_1,~y_2 \in
\mathcal{M}$ be given. Applying
Corollary~\ref{coroll:locallipschitzofstate} yields the first inequality in
\begin{align*}
\wnorm{\mathcal{Z}(y_1)-\mathcal{Z}(y_2)} &\leq c
\|\mathcal{F}^*(y_1)-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y_1)-\mathcal{F}^*(y_2)+\mathcal{F}^\varepsilon_{\theta_\varepsilon}(y_2)\|_{L^2} \leq c \varepsilon \wnorm{y_1-y_2}
\end{align*}
with a constant~$c>0$ independent of~$y_1, y_2 \in \mathcal{M}$ as well
as of~$y_0 \in Y_0$, and $\varepsilon$ sufficiently small.
The last inequality follows from
Lemma~\ref{lem:lipschitzoffeed}.
Choosing~$\varepsilon>0$ small enough we conclude that~$\mathcal{Z}$ admits a
unique fixed point~$y_\varepsilon=\mathcal{Z}(y_\varepsilon) \in W_T$
on~$\mathcal{M}$. Clearly, the function~$ \mathbf{y}_\varepsilon(y_0):=y_\varepsilon$
satisfies~\eqref{eq:neuralnetstate},~$y_\varepsilon \in {\mathcal{M}} \subset\mathcal{Y}_{ad}$ as well as
\begin{align*}
\wnorm{\mathbf{y}_\varepsilon(y_0)-\mathbf{y}^*(y_0)}&=\wnorm{\mathcal{Z}(\mathbf{y}_\varepsilon(y_0))-\mathcal{Z}(0)}\leq
c\varepsilon \wnorm{y_\varepsilon}
\leq c \varepsilon \frac{3}{2} M_{Y_0},
\end{align*}
and by Corollary \ref{lem:calmness}
\begin{align*}
\|\mathcal{F}^*(\mathbf{y}^*(y_0))&-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))\|_{L^2} \\&\leq \|\mathcal{F}^*(\mathbf{y}^*(y_0))-\mathcal{F}^*(\mathbf{y}_\varepsilon(y_0))\|_{L^2} + \|\mathcal{F}^*(\mathbf{y}_\varepsilon(y_0))-\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))\|_{L^2} \\ & \leq
c \wnorm{\mathbf{y}^*(y_0)-\mathbf{y}_\varepsilon(y_0)}+c \varepsilon \wnorm{\mathbf{y}_\varepsilon(y_0)} \leq c \varepsilon.
\end{align*}
Finally according to Proposition~\ref{thm:existspert} the solution $\mathbf{y}_\varepsilon(y_0) $ is unique and the mapping~$\mathbf{y}_\varepsilon $ is at least of class~$\mathcal{C}^1$ .
\end{proof}
Next we estimate the~$W^{1,2}$ difference between~$\mathbf{y}_\varepsilon$ and~$\mathbf{y}^*$.
\begin{prop} \label{thm:aprioriW12}
The mapping~$\mathbf{y}_\varepsilon \in \mathcal{C}^1(Y_0;W_T)$ from Proposition~\ref{thm:existenceneuralnetwork} satisfies
\begin{align*}
\|\mathbf{y}_\varepsilon-\mathbf{y}^*\|_{\mathcal{C}^1(Y_0;W_T)} \leq c\varepsilon
\end{align*}
for~$c>0$ independently of~$\varepsilon$ small enough.
\end{prop}
\begin{proof}
By the previous proposition the estimate is already known for ${\mathcal{C}^1(Y_0;W_T)}$ replaced by ${\mathcal{C}(Y_0;W_T)}$.
Now fix~$y_0 \in Y_0$ and~$i \in \{1, \dots,n\}$. By the inverse mapping theorem the partial derivatives of~$\mathbf{y}^*$ and~$\mathbf{y}_\varepsilon$ at~$y_0$ are given by~$\partial_i \mathbf{y}^*(y_0)= T_*(y_0)^{-1}(0,e_i)$,~$\partial_i \mathbf{y}_\varepsilon(y_0)= T_\varepsilon (y_0)^{-1}(0,e_i)$. Here,~$e_i$ denotes the $i$-th canonical basis vector in~$\mathbb{R}^n$ and
\begin{align*}
T_*(y_0)^{-1}, T_\varepsilon(y_0)^{-1} \colon L^2(I;\mathbb{R}^n) \times \mathbb{R}^n \to W_T
\end{align*}
denote the linear continuous inverses of
\begin{align*}
T_*(y_0)\delta y\!=\!\left(
\begin{array}{c}
\!\!\!\dot{\delta y}-D\mathbf{f}(\mathbf{y}^*(y_0))\delta y-\lbrack D\mathbf{g}(\mathbf{y}^*(y_0))\delta y\rbrack\mathcal{F}^*(\mathbf{y}^*(y_0))-\mathbf{g}(\mathbf{y}^*(y_0))D\mathcal{F}^*(\mathbf{y}^*(y_0))\delta y\\
\delta y(0)\\
\end{array}
\!\!\!\right)
\end{align*}
and
\begin{align*}
T_\varepsilon(y_0)\delta y\!=\!\left(
\begin{array}{c}
\!\!\!\dot{\delta y}-D\mathbf{f}(\mathbf{y}_\varepsilon(y_0))\delta y-\lbrack D\mathbf{g}(\mathbf{y}_\varepsilon(y_0))\delta y\rbrack\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-\mathbf{g}(\mathbf{y}_\varepsilon(y_0))D\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))\delta y\\
\delta y(0)\\
\end{array}
\!\!\!\right).
\end{align*}
Using Gronwall's inequality, we readily verify that
\begin{align} \label{eq:uniformT}
\max \left\{ \|T_\varepsilon (y_0)^{-1}(\delta v, \delta y_0 )\|_{W_T},\|T_*(y_0)^{-1}(\delta v, \delta y_0 )\|_{W_T} \right\} \leq C(\|\delta v\|_{L^2(I;\mathbb{R}^n)}+|\delta y_0|_{\mathbb{R}^n})
\end{align}
for all~$\delta v \in L^2(I;\mathbb{R}^n)$,~$\delta y_0 \in \mathbb{R}^n$,~$y_0 \in Y_0$ and some~$C>0$ independent of~$y_0, \delta v, \delta y_0$. Now we recall that~$\mathbf{y}_\varepsilon(y_0) , \mathbf{y}^*(y_0)\in \mathcal{Y}_{ad}$ and that~$D \mathbf{f}, D \mathbf{g},\mathbf{g}$ are Lipschitz continuous, and thus in particular bounded, on~$\mathcal{Y}_{ad}$, see Assumption~\ref{ass:feedbacklaw} $\mathbf{A.1}$. Together with the boundedness of $\{ \|\mathcal{F}^*(\mathbf{y}^*(y_0))\|_{L^2}: y_0 \in Y_0\}$, Corollary~\ref{lem:calmness} and Theorem~\ref{thm:existenceneuralnetwork} we conclude
\begin{align} \label{eq:kk14}
\|(T_*(y_0)-T_\varepsilon(y_0))\delta y\|_{L^2(I;\mathbb{R}^n)\times \mathbb{R}^n} \leq c \varepsilon \wnorm{\delta y} \quad \forall \delta y \in W_T
\end{align}
for some~$c>0$ again independent of~$y_0 \in Y_0$. Recalling that $B^{-1} - A^{-1}=A^{-1}(A-B)B^{-1}$ for invertible bounded linear operators $A$ and $B$, we obtain
\begin{align*}
\wnorm{\partial_i\mathbf{y}_\varepsilon(y_0)-\partial_i \mathbf{y}^*(y_0)}&= \wnorm{T_\varepsilon (y_0)^{-1}(0,e_i)-T_* (y_0)^{-1}(0,e_i)} \\ & \leq C^2 \sup_{\|\delta y\|_{W_T} \leq 1} \|(T_*(y_0)-T_\varepsilon(y_0))\delta y\|_{L^2(I;\mathbb{R}^n)\times \mathbb{R}^n} \leq c \varepsilon,
\end{align*}
where~$C>0$ is the constant from~\eqref{eq:uniformT}. Since all involved constants are independent of~$y_0 \in Y_0$ we obtain the desired estimate
$
\|\partial_i \mathbf{y}_\varepsilon-\partial_i \mathbf{y}^*\|_{\mathcal{C}} \leq c \varepsilon.
$
\end{proof}
Next we address the solvability of the adjoint equation~\eqref{eq:adjointpropapprox}.
\begin{prop} \label{prop:solvofadjoint}
There exists a constant $c$ such that for all $\varepsilon$ small enough
there exists~$\mathbf{p}_\varepsilon \in \mathcal{C}(Y_0;W_T)$ such that~$p_\varepsilon \coloneqq \mathbf{p}_\varepsilon(y_0)\in W_T$ satisfies~\eqref{eq:neuraladjoint} for all~$y_0 \in Y_0$ and
\begin{align*}
\|\mathbf{p}_\varepsilon-\mathbf{p}^*\|_{\mathcal{C}} \leq c \varepsilon.
\end{align*}
\end{prop}
\begin{proof}
Given~$y\in \mathcal{Y}_{ad}$ consider the linear ordinary differential equation
\begin{align*}
-\dot{p}= D\mathbf{f}(y)p+\lbrack D \mathbf{g}(y)^\top \mathcal{F}^\varepsilon_{\theta_\varepsilon}(y)\rbrack p+\mathbf{Q}_1^\top \mathbf{Q}_1(y-y_d),~p(T)= Q^\top_2 Q_2(y(T)-y^T_d).
\end{align*}
It admits a unique solution~$p=P(y)\in W_T$ which is bounded independently of~$y\in \mathcal{Y}_{ad}$. Moreover the mapping~$P \colon W_T \to W_T$ is continuous on~$\mathcal{Y}_{ad}$ in virtue of the Gronwall lemma and Assumption~\ref{ass:feedbacklaw}. The existence of a mapping~$\mathbf{p}_\varepsilon$ which satisfies~\eqref{eq:neuraladjoint} then follows by setting~$\mathbf{p}_\varepsilon=P \circ \mathbf{y}_\varepsilon$.
It remains to prove the estimate for the difference between~$\mathbf{p}_\varepsilon$ satisfying \eqref{eq:neuraladjoint} and~$\mathbf{p}^*$ satisfying \eqref{eq:adjointprop}. For this purpose
we can use the same technique as in the proof of Proposition \ref{thm:aprioriW12} and therefore we only give the main estimates.
Recall that~$D \mathbf{f}(\cdot)^\top ,D \mathbf{g}(\cdot)^\top$ are Lipschitz continuous on~$\mathcal{Y}_{ad}$.
The most involved term in the estimate analogous to \eqref{eq:kk14} is
\begin{align*}
\|\lbrack D \mathbf{g}(\mathbf{y}_\varepsilon(y_0))^\top &\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-D \mathbf{g}(\mathbf{y}^*(y_0))^\top \mathcal{F}^*(\mathbf{y}^*(y_0))\rbrack \delta p\|_{L^2}\\& \leq c(\wnorm{\mathbf{y}_\varepsilon(y_0)-\mathbf{y}^*(y_0)}+\|\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-\mathcal{F}^*(\mathbf{y}^*(y_0))\|_{L^2}) \wnorm{\delta p}
\end{align*}
with~$c>0$ independent of~$\varepsilon>0$ and $\delta p \in W_T$. Now a perturbation argument as in the proof of Proposition \ref{thm:aprioriW12} provides us with
\begin{align*}
\wnorm{&\mathbf{p}_\varepsilon(y_0)-\mathbf{p}^*(y_0)}\\& \leq c (\wnorm{\mathbf{y}_\varepsilon(y_0)-\mathbf{y}^*(y_0)}+\|\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-\mathcal{F}^*(\mathbf{y}^*(y_0))\|_{L^2}+|\mathbf{y}_\varepsilon(y_0)(T)-\mathbf{y}^*(y_0)(T)|) \\ & \leq c(\wnorm{\mathbf{y}_\varepsilon(y_0)-\mathbf{y}^*(y_0)}+\|\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-\mathcal{F}^*(\mathbf{y}^*(y_0))\|_{L^2}) \leq c \varepsilon
\end{align*}
where~$W_T \hookrightarrow \mathcal{C}(\bar{I};\mathbb{R}^n)$ is used
in the second inequality, and Proposition ~\ref{thm:aprioriW12} and Corollary \ref{lem:calmness} are utilized in the final one. Since all involved constants are again independent of~$y_0 \in Y_0$, this finishes the proof.
\end{proof}
Summarizing all previous observations we arrive at the proof of Theorem~\ref{thm:existenceneuralnetwork2}.
\begin{proof}[Proof of Theorem~\ref{thm:existenceneuralnetwork2}]
This follows directly by combining Proposition~\ref{thm:existenceneuralnetwork}, Proposition~\ref{thm:aprioriW12}, and Proposition~\ref{prop:solvofadjoint}.
\end{proof}
\subsection{Closedness of~$\mathcal{N}^\varepsilon_{ad}$} \label{subsec:closedandexist}
As a last prerequisite for proving existence to~\eqref{def:approxfeedprop} we argue that the admissible set~$\mathcal{N}^\varepsilon_{ad}$ is closed. The existence of at least one minimizing triple to~\eqref{def:approxfeedprop} then follows by variational arguments. From here on we always assume that~$\mathcal{N}^\varepsilon_{ad}$ from \eqref{def:admissibleset} is nonempty, i.e. that $\varepsilon$ is sufficiently small.
\begin{prop} \label{prop:closedofNad}
Let~$(\mathbf{y}_k,\mathbf{p}_k,\theta_k)_{k\in\mathbb{N}} \subset \mathcal{N}^\varepsilon_{ad} $ be a sequence with weak limit~$(\mathbf{y},\mathbf{p},\theta)$ in~$L^2(Y_0;W_T)^2 \times \mathcal{R}_\varepsilon$. Then
$(\mathbf{y},\mathbf{p},\theta) \in \mathcal{N}^\varepsilon_{ad} $ and we have
\begin{align*}
(\mathbf{y},\mathbf{p}) \in \mathcal{C}(Y_0;W_T)^2,~\lim_{k\rightarrow \infty} \mathbf{y}_k(y_0)=\mathbf{y}(y_0) \text{ and}~\lim_{k\rightarrow \infty} \mathbf{p}_k(y_0)=\mathbf{p}(y_0) \text{ in } W_T, \quad \forall~y_0 \in Y_0.
\end{align*}
\end{prop}
The proof builds upon the following two lemmas.
\begin{lemma} \label{lem:weakclosedstate}
Let the sequence $(\mathbf{y}_k,\mathbf{p}_k,\theta_k)_{k\in\mathbb{N}} \subset \mathcal{N}^\varepsilon_{ad} $ satisfy the prerequisites of Proposition~\ref{prop:closedofNad}. Then~$\mathbf{y} \in \mathbf{Y}_{ad}$, $\mathbf{y}_k(y_0)\rightarrow \mathbf{y}(y_0)\text{ in}~W_T,$ $\mathcal{F}^\varepsilon_{\theta_k}(\mathbf{y}_k(y_0))\rightarrow \mathcal{F}^\varepsilon_{\theta}({\mathbf{y}}(y_0))~\text{in}~L^\infty (I;\mathbb{R}^m)$, and
\begin{align}\label{eq:aux8}
\dot{\mathbf{y}}(y_0)= \mathbf{f}({\mathbf{y}}(y_0))+ \mathbf{g}({\mathbf{y}}(y_0))\mathcal{F}^\varepsilon_{\theta}({\mathbf{y}}(y_0)),~{\mathbf{y}}(y_0)(0)=y_0,
\end{align}
for all~$y_0 \in Y_0$.
\end{lemma}
\begin{proof}
By assumption we have~$\mathbf{y}_k \in \mathbf{Y}_{ad}$, and hence $\|\mathbf{y}_k(y_0)\|_{W_T} \le 2M_{Y_0}$ for all $k \in \mathbb{N}$ and $y_0 \in Y_0$, and $\mathbf{y}_k \in \mathcal{C}^1(Y_0;W_T)$ for all~$k \in \mathbb{N}$, see Proposition \ref{thm:existenceneuralnetwork}. Let us fix an arbitrary $y_0 \in Y_0$
and set~$y_k \coloneqq \mathbf{y}_k(y_0)$ for abbreviation. Then there exists a subsequence, denoted by the same index, and~$\tilde y\in W_T$ such that~$y_k \rightharpoonup \tilde y $ in $W_T$. Since~$W_T \hookrightarrow_c \mathcal{C}(\bar{I};\mathbb{R}^n) \hookrightarrow L^p(I;\mathbb{R}^n)$,~$1\leq p \leq +\infty$, we immediately get
\begin{align*}
y_k(0) \rightarrow \tilde y(0)~\text{in}~\mathbb{R}^n,~\mathbf{f}(y_k) \rightarrow \mathbf{f}(\tilde y)~\text{in}~L^2(I;\mathbb{R}^n),~\mathbf{g}(y_k) \rightarrow \mathbf{g}(\tilde y)~\text{in}~\mathcal{B}(L^2(I;\mathbb{R}^m),L^2(I;\mathbb{R}^n))
\end{align*}
as well as
$\mathcal{F}^\varepsilon_{\theta}(y_k) \rightarrow \mathcal{F}^\varepsilon_{\theta}(\tilde y)~\text{in}~L^\infty(I;\mathbb{R}^m). $
Moreover by Assumption~\ref{ass:approxsmoothness} for every~$\delta >0$ there exists~$K_\delta\in \mathbb{N}$ such that
\begin{align}\label{eq:kk15}
|\partial_y V^\varepsilon_{\theta_k}(t,y)-\partial_y V^\varepsilon_{\theta}(t,y)|\leq \delta \quad \forall (t,y)\in \bar{I} \times \bar{B}_{2\widehat{M}}(0)
\end{align}
for all~$k\geq K_\delta$. Here~$\widehat{M}$ denotes the constant from Assumption~\ref{ass:feedbacklaw}~$\mathbf{A.2}$. For all such~$k$ we get utilizing \eqref{eq:kk15} for a constant $c$ independent of $k$
\begin{align*}
\|\mathcal{F}^\varepsilon_{\theta_k}(y_k)-\mathcal{F}^\varepsilon_{\theta}(\tilde y)\|_{L^\infty} &\leq c\|\mathcal{F}^\varepsilon_{\theta_k}(y_k)-\mathcal{F}^\varepsilon_{\theta}(y_k)\|_{L^\infty}+\|\mathcal{F}^\varepsilon_{\theta}(y_k)-\mathcal{F}^\varepsilon_{\theta}(\tilde y)\|_{L^\infty} \\ & \leq c \delta + \|\mathcal{F}^\varepsilon_{\theta}(y_k)-\mathcal{F}^\varepsilon_{\theta}(\tilde y)\|_{L^\infty}.
\end{align*}
This implies that~$\lim_{k \to \infty} \mathcal{F}^\varepsilon_{\theta_k}(y_k)=\mathcal{F}^\varepsilon_{\theta}(\tilde y)$ in~$L^\infty(I;\mathbb{R}^m)$.
These observations imply
\begin{align*}
\dot{y}_k= \mathbf{f}(y_k)+\mathbf{g}(y_k)\mathcal{F}^\varepsilon_{\theta_k}(y_k) \rightarrow \mathbf{f}(\tilde y)+\mathbf{g}(\tilde y)\mathcal{F}^\varepsilon_{\theta}(\tilde y).
\end{align*}
Together with $y_k \rightharpoonup \tilde y $ in $W_T$ this implies that
$\mathbf{y}_k(y_0)=y_k \rightarrow \tilde y$ in~$W_T$ and
\begin{align} \label{eq:closedloopaux14}
\dot{\tilde y}=\mathbf{f}(\tilde y)+\mathbf{g}(\tilde y)\mathcal{F}^\varepsilon_{\theta}(\tilde y) ,~\tilde y(0)=y_0.
\end{align}
Since the solution to this equation is unique, every weak accumulation point of~$y_k$ satisfies~\eqref{eq:closedloopaux14} and we have
$\mathbf{y}_k(y_0)\rightarrow \tilde y~\text{in}~W_T$
for the whole sequence. We repeat this construction for all $y_0\in Y_0$. This defines a function $ \tilde {\bf{y}}:Y_0 \to W_T$ such that $\mathbf{y}_k(y_0)\rightarrow \tilde{\bf{y}}(y_0)~\text{in}~W_T$ and such that \eqref{eq:closedloopaux14} is satisfied with $\tilde y= \tilde {\bf{y}}(y_0)$ for each $y_0\in Y_0$.
By Proposition \ref{thm:existenceneuralnetwork} it is the unique solution to \eqref{eq:aux8}.
Lebesgue's dominated convergence theorem for Bochner integrals \cite[pg 45]{DU77} implies that $\mathbf{y}_k\rightarrow \tilde {\bf{y}}$ in $L^1(Y_0;W_T)$, and by boundedness of $\{\|\mathbf{y}_k\|_{\mathcal{C}}\}_{k=1}^\infty$ also in $L^2(Y_0;W_T)$.
By assumption ${\bf{y}}_k$ converges weakly in $L^2(Y_0;W_T)$ to $\bf{y}$. Thus we have
$\bf{y}=\tilde {\bf{y}}$. Moreover $\|\mathbf{y}\|_{\mathcal{C}} \le 2M_{Y_0}$ and hence $\mathbf{y} \in \mathbf{Y}_{ad}$.
\end{proof}
Next we consider the behavior of the adjoint states~$\mathbf{p}_k$.
\begin{lemma} \label{lem:weakclosedadjoint}
Let $(\mathbf{y}_k,\mathbf{p}_k,\theta_k)_{k\in\mathbb{N}} \subset \mathcal{N}^\varepsilon_{ad} $ be a sequence with weak limit~$(\mathbf{y},\mathbf{p},\theta)$ satisfying the prerequisites of Proposition~\ref{prop:closedofNad}. Then~$\|\mathbf{p}_k\|_{\mathcal{C}}\leq C$ for some~$C>0$ and all~$k \in\mathbb{N}$ large enough, and~$\mathbf{p} \in \mathcal{C}(Y_0;W_T)$. Moreover~$\mathbf{p}_k(y_0)\rightarrow \mathbf{p}(y_0)~\text{in}~W_T$, and
\begin{equation} \label{eq:adjointlim}
\begin{array}{ll}
-\dot{\mathbf{p}}(y_0) = D\mathbf{f}(\mathbf{y}(y_0))^\top\mathbf{p}(y_0)+ [D\mathbf{g}(\mathbf{y}(y_0))^\top\mathcal{F}^\varepsilon_{\theta}(\mathbf{y}(y_0))]{\mathbf{p}}(y_0)+ \mathbf{Q}_1^\top \mathbf{Q}_1(\mathbf{y}(y_0)-y_d),\\[1.6ex]
\;\mathbf{p}(y_0)(T)= Q^\top_2 Q_2(\mathbf{y}(y_0)(T)-y^T_d),
\end{array}
\end{equation}
for all~$y_0 \in Y_0$.
\end{lemma}
\begin{proof}
From Lemma \ref{lem:weakclosedstate} recall that for~$y_k\coloneqq \mathbf{y}_k(y_0) \in \mathcal{Y}_{ad}$ and~$y \coloneqq \mathbf{y}(y_0)$ we have for each $y_0\in Y_0$
\begin{align*}
y_k \rightarrow y~\text{in}~ W_T,~ \mathcal{F}^\varepsilon_{\theta_k}(y_k) \rightarrow \mathcal{F}^\varepsilon_{\theta}(y)~\text{in}~L^\infty(I;\mathbb{R}^m).
\end{align*}
Further for each~$k\in\mathbb{N}$ and~$y_0 \in Y_0 $, the element $p_k \coloneqq \mathbf{p}_k(y_0) \in W_T$ satisfies
\begin{align} \label{eq:adjointaux}
-\dot{p}_k= D\mathbf{f}(y_k)^\top p_k+\lbrack D \mathbf{g}(y_k)^\top \mathcal{F}^\varepsilon_{\theta_k}(y_k)\rbrack p_k+\mathbf{Q}_1^\top \mathbf{Q}_1(y_k-y_d),~p_k(T)= Q^\top_2 Q_2(y_k(T)-y^T_d).
\end{align}
Recall from Assumption~\ref{ass:approxsmoothness} that
~$\partial_y V_{\cdot}^\varepsilon$ is uniformly continuous on compact sets. Thus for every~$\delta>0$ there is~$K_\delta \in \mathbb{N}$ such that
\begin{align*}
|F^\varepsilon_{\theta_k}(t,x)| \leq |F^\varepsilon_{\theta_k}(t,x)-F^\varepsilon_{\theta}(t,x)|+ |F^\varepsilon_{\theta}(t,x)| \leq \delta+ \max_{(t,x)\in I \times \bar{B}_{2 \widehat{M}}(0)} |F^\varepsilon_{\theta}(t,x)| < \infty
\end{align*}
for all~$(t,x)\in I \times \bar{B}_{2 \widehat{M}}(0)$ and~$k\geq K_\delta$. Consequently we obtain
\begin{align*}
\sup_{k \ge K_\delta} \max_{(t,x)\in I \times \bar{B}_{2 \widehat{M}}(0)} \|A_k(t,x)\|_{\mathbb{R}^{n\times n}} < \infty, \text{ where }
A_k(t,x)= Df(t,x)^{\top}+ D g(t,x)^\top F^\varepsilon_{\theta_k}(t,x).
\end{align*}
Applying Proposition~\ref{prop:s} to the time-reversed equation \eqref{eq:adjointaux} implies that
\begin{align*}
\wnorm{p_k} \leq c \left( \|\mathbf{Q}_1^\top \mathbf{Q}_1(y_k-{y}_d)\|_{L^2}+ |y_k(T)-y^T_d| \right)
\end{align*}
for some~$c>0$ independent of~$y_0 \in Y_0$ and all sufficiently large $k$. Since~$\|\mathbf{y}_k\|_{\mathcal{C}}\leq 2M_{Y_0}$ we finally conclude~$\|\mathbf{p}_k\|_{\mathcal{C}} \leq C$ for some~$C>0$ independent of~$k$ sufficiently large.
We are now prepared to pass to the limit in \eqref{eq:adjointaux}.
For this purpose we proceed as in the proof of Lemma \ref{lem:weakclosedstate} and use
\begin{align*}
D \mathbf{f}(y_k)+ D \mathbf{g}(y_k)^\top \mathcal{F}^\varepsilon_{\theta_k}(y_k) \rightarrow D \mathbf{f}(y)+ D \mathbf{g}(y)^\top \mathcal{F}^\varepsilon_{\theta}(y)~\text{in}~\mathcal{B}(L^2(I;\mathbb{R}^n)),
\end{align*}
as well as
\begin{align*}
\mathbf{Q}_1^\top \mathbf{Q}_1(y_k-{y}_d) \rightarrow \mathbf{Q}_1^\top \mathbf{Q}_1(y-{y}_d)~\text{in}~L^2(I;\mathbb{R}^n),
\end{align*}
and
\begin{align*}
Q^\top_2 Q_2(y_k(T)-y^T_d) \rightarrow Q^\top_2 Q_2(y(T)-y^T_d)~\text{in}~\mathbb{R}^n
\end{align*}
to show that every weak accumulation point~$\tilde p \in W_T$ of~$p_k$ is in fact a strong accumulation point and satisfies the differential equation in~\eqref{eq:adjointlim}. Since the solution to this equation is unique we get~$p_k \rightarrow \tilde p $ in~$W_T$ for the whole sequence. Finally utilizing~$\|\mathbf{p}_k\|_{\mathcal{C}} \leq C$ and Lebesgue's dominated convergence theorem we conclude~$\tilde p=\mathbf{p}(y_0)$ for all $y_0 \in Y_0$.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{prop:closedofNad}]
This is a direct consequence of Lemma~\ref{lem:weakclosedstate} and Lemma~\ref{lem:weakclosedadjoint}.
\end{proof}
\subsection{Existence of minimizers}
Finally we prove the existence of at least one minimizing triplet to~\eqref{def:approxfeedprop}.
\begin{theorem} \label{thm:existence}
Let Assumption~\ref{ass:feedbacklaw} and~\ref{ass:approxsmoothness} hold. Then for all~$\varepsilon>0$ small enough, Problem~\eqref{def:approxfeedprop} admits at least one minimizing triplet~$(\mathbf{y}^*_\varepsilon, \mathbf{p}^*_\varepsilon, \theta^*_\varepsilon ) \in \mathcal{C}(Y_0;W_T)^2 \times \mathcal{R}_\varepsilon $.
\end{theorem}
\begin{proof}
According to Theorem~\ref{thm:existenceneuralnetwork2}, the admissible set~$\mathcal{N}^\varepsilon_{ad}$ is nonempty for~$\varepsilon>0$ small enough. Fix such an~$\varepsilon>0$ and let
~$(\mathbf{y}_k,\mathbf{p}_k, \theta_k) \in \mathcal{N}^\varepsilon_{ad}$ denote a minimizing sequence for~$\mathcal{J}_\varepsilon$, i.e.
\begin{align*}
\mathcal{J}_\varepsilon(\mathbf{y}_k,\mathbf{p}_k, \theta_k) \rightarrow \inf_{(\mathbf{y},\mathbf{p}, \theta) \in \mathcal{N}^\varepsilon_{ad}} \mathcal{J}_\varepsilon(\mathbf{y},\mathbf{p}, \theta).
\end{align*}
Since~$\mathbf{y}_k \in \mathbf{Y}_{ad}$ and
$
\frac{\gamma_\varepsilon}{2} \|\theta_k\|^2_{\mathcal{R}_\varepsilon} \leq \mathcal{J}_\varepsilon(\mathbf{y}_k,\mathbf{p}_k, \theta_k),
$ for all $k\in\mathbb{N}$,
the sequence~$\{(\mathbf{y}_k,\theta_k)\} \subset L^2(Y_0;W_T)\times \mathcal{R}_\varepsilon$ is bounded. Thus it admits at least one subsequence, denoted by the same index, with
\begin{align*}
(\mathbf{y}_k,\theta_k) \rightharpoonup (\mathbf{y}^*_\varepsilon,\theta^*_\varepsilon) ~\text{in}~L^2(Y_0;W_T) \times \mathcal{R}_\varepsilon
\end{align*}
for some~$(\mathbf{y}^*_\varepsilon,\theta^*_\varepsilon)$. As in the proof of Lemma~\ref{lem:weakclosedadjoint} we verify that ~$\|\mathbf{y}_k\|_{\mathcal{C}} \leq C$ and ~$\|\mathbf{p}_k\|_{\mathcal{C}} \leq C$ for some~$C>0$ independent of~$k\in\mathbb{N}$. Consequently, by possibly taking another subsequence we arrive at
\begin{align*}
(\mathbf{y}_k,\mathbf{p}_k,\theta_k) \rightharpoonup (\mathbf{y}^*_\varepsilon,\mathbf{p}^*_\varepsilon,\theta^*_\varepsilon) ~\text{in}~L^2(Y_0;W_T)^2 \times \mathcal{R}_\varepsilon
\end{align*}
for some~$(\mathbf{y}^*_\varepsilon,\mathbf{p}^*_\varepsilon,\theta^*_\varepsilon)\in \mathcal{N}^\varepsilon_{ad}$. For the following estimates it will be convenient to recall the augmented functional~$J_\varepsilon$, see~\eqref{eq:aux2}, which arises in the running cost of ~\eqref{def:approxfeedprop} in compact form:
\begin{align}\label{eq:aux2a}
J_\varepsilon(y,p,\theta)= J(y, \mathcal{F}^\varepsilon_\theta(y))+ \frac{\gamma_1}{2} \|\mathcal{V}(y)-J_\bullet (y, \mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(y))\|^2_{L^2(I;\mathbb{R})}+\frac{\gamma_2}{2}\|p-\partial_y \mathcal{V}(y)\|^2_{L^2(I;\mathbb{R}^n)},
\end{align}
where $J_t$ was defined below \eqref{def:valuefunc}.
Now fix an arbitrary~$y_0\in Y_0$ and set
\begin{align*}
y_k \coloneqq \mathbf{y}_k(y_0),~p_k \coloneqq \mathbf{p}_k(y_0),~\tilde y \coloneqq \mathbf{y}^*_\varepsilon (y_0),~p \coloneqq \mathbf{p}^*_\varepsilon (y_0).
\end{align*}
From Lemma~\ref{lem:weakclosedstate} and Lemma~\ref{lem:weakclosedadjoint} we get
\begin{align*}
y_k \rightarrow \tilde y,~p_k \rightarrow p~\text{in}~W_T,~\mathcal{F}^\varepsilon_{\theta_k}(y_k) \rightarrow \mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\tilde y)~\text{in} ~L^2(I;\mathbb{R}^n)
\end{align*}
and, again using the uniform continuity of~$V_{\bullet}^\varepsilon$ and~$\partial_y V_{\bullet}^\varepsilon$, we conclude
\begin{align*}
\mathcal{V}^\varepsilon_{\theta_k}(y_k) \rightarrow \mathcal{V}^\varepsilon_{\theta^*_\varepsilon}(\tilde y)~\text{in}~L^2(I),~ \partial_y \mathcal{V}^\varepsilon_{\theta_k}(y_k) \rightarrow \partial_y \mathcal{V}^\varepsilon_{\theta^*_\varepsilon}(\tilde y)~\text{in}~L^2(I;\mathbb{R}^n),
\end{align*}
as well as the uniform boundedness of~$\mathcal{V}^\varepsilon_{\theta_k}(\mathbf{y}_k)$ and~$\partial_y \mathcal{V}^\varepsilon_{\theta_k}(\mathbf{y}_k)$ in~$\mathcal{C}(Y_0;L^2(I))$ and $\mathcal{C}(Y_0;L^2(I;\mathbb{R}^n))$, respectively.
Moreover we readily verify that
\begin{align*}
|J_t(y_k, \mathcal{F}^\varepsilon_{\theta_k}(y_k))\!-\!J_t(\tilde y, \mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\tilde y))|\! \leq\! c\!\left(\|y_k-\tilde y\|_{L^2}+\|\mathcal{F}^\varepsilon_{\theta_k}(y_k)\!-\!\mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\tilde y)\|_{L^2}+ |y_k(T)- \tilde y(T)| \right),
\end{align*}
for some~$c>0$ independent of~$y_0 \in Y_0$, $t\in (0,T)$, and~$k\in \mathbb{N}$. Thus we arrive at
\begin{align*}
J_\bullet (y_k, \mathcal{F}^\varepsilon_{\theta_k}(y_k)) \rightarrow J_\bullet (\tilde y, \mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\tilde y))~\text{in}~L^\infty(I).
\end{align*}
Summarizing the previous findings there holds
\begin{align*}
\|\mathcal{V}(y_k)\!-\!J_\bullet (y_k, \mathcal{F}^\varepsilon_{\theta_k}(y_k))\|^2_{L^2}\!+\!\|p_k\!-\!\partial_y \mathcal{V}(y_k)\|^2_{L^2}
&\to \|\mathcal{V}(\tilde y)\!-\!J_\bullet (\tilde y, \mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\tilde y))\|^2_{L^2}+\|p\!-\!\partial_y \mathcal{V}(\tilde y)\|^2_{L^2}
\\ J(y_k,\mathcal{F}^\varepsilon_{\theta_k}(y_k)) & \rightarrow J(\tilde y,\mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\tilde y)).
\end{align*}
Using these expressions in ~$J_\varepsilon$ as given in ~\eqref{eq:aux2a},
and the boundedness of~$\|y_k\|_{L^2},|y_k(0)|,$
$\|p_k\|_{L^2}~\|\mathcal{F}^\varepsilon_{\theta_k}(y_k)\|_{L^2},~\|\mathcal{V}^\varepsilon_{\theta_k}(y_k)\|_{L^2}$ independent of~$k \in \mathbb{N}$ and~$y_0 \in Y_0$ we finally get by using Lebesgue's dominated convergence theorem
\begin{align*}
\mathcal{J}_{\varepsilon}(\mathbf{y}_k,\mathbf{p}_k, \theta_k) \rightarrow \mathcal{J}_{\varepsilon}(\mathbf{y}^*_\varepsilon,\mathbf{p}^*_\varepsilon, \theta^*_\varepsilon)= \inf_{(\mathbf{y},\mathbf{p}, \theta) \in \mathcal{N}^\varepsilon_{ad}} \mathcal{J}_\varepsilon(\mathbf{y},\mathbf{p}, \theta).
\end{align*}
\end{proof}
\section{Convergence towards optimal controls} \label{sec:convergence}
In Proposition \ref{thm:existenceneuralnetwork} and \ref{prop:solvofadjoint} it was established that the ensemble triple ~$(\mathbf{y}^*, \mathcal{F}(\mathbf{y^*}) , \mathbf{p}^*)$ can be approximated by ensemble triples ~$(\mathbf{y}_\varepsilon, \mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon), \mathbf{p}_\varepsilon)$ in the order $O(\varepsilon)$.
In this section, the convergence of solutions to~\eqref{def:approxfeedprop} as~$\varepsilon \rightarrow 0$ is addressed. We first consider the terms in the definition of~$\mathcal{J}_\varepsilon$, see \eqref{eq:aux2}. To obtain the desired asymptotic behavior a smallness condition on the regularisation parameter $\gamma_\varepsilon$, in relation to the norm of the parameters $\theta_\varepsilon$ describing the approximation quality, is required.
\begin{theorem} \label{thm:convofobj}
Let Assumptions~\ref{ass:feedbacklaw} and~\ref{ass:approxsmoothness} hold, the latter with~$\theta_\varepsilon \in \mathcal{R}_\varepsilon$, and let~$(\mathbf{y}^*_\varepsilon, \mathbf{p}^*_\varepsilon,\theta^*_\varepsilon)$ denote an optimal triple to~\eqref{def:approxfeedprop} for all $\varepsilon>0$ small enough. If additionally $\gamma_\varepsilon \|\theta_\varepsilon\|^2_{\mathcal{R}_\varepsilon} = O(\varepsilon)$, then
\begin{align*}
0 \leq \int_{Y_0} \omega(y_0) \left \lbrack J(\mathbf{y}^*_\varepsilon(y_0),\mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\mathbf{y}^*_\varepsilon(y_0)))- V^*(0,y_0) \right \rbrack~\mathrm{d} \mathcal{L}(y_0) \leq c\, \varepsilon
\end{align*}
holds and, if~$\gamma_1,\gamma_2 >0$, we also have
\begin{align*}
\int_{Y_0}\!\! \omega(y_0) (\| V^\varepsilon_{\theta^*_\varepsilon}(t,\mathbf{y}^*_\varepsilon(y_0))\!-\!J_\bullet (\mathbf{y}^*_\varepsilon(y_0), \mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\mathbf{y}^*_\varepsilon(y_0)))\|^2_{L^2}&+ \|\partial_y V^\varepsilon_{\theta^*_\varepsilon}(t,\mathbf{y}^*_\varepsilon(y_0))\!-\!\mathbf{p}^*_{\varepsilon}(y_0)\|^2_{L^2}) \mathrm{d} \mathcal{L}(y_0)\\ & \leq c \, \varepsilon
\end{align*}
for some~$c>0$ independent of~$\varepsilon$.
\end{theorem}
\begin{proof}
Let~$\mathbf{y}_\varepsilon,\mathbf{p}_\varepsilon$ denote the ensembles of state and adjoint trajectories associated to~$\theta_\varepsilon$, see Theorem~\ref{thm:existenceneuralnetwork2}, for~$\varepsilon>0$ small enough. Then we have
\begin{align*}
\big|& J(\mathbf{y}_\varepsilon(y_0),\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0)))- V^*(0,y_0) \big|
\\&\leq C \big ( \|\mathbf{y}_\varepsilon(y_0)-\mathbf{y}^*(y_0)\|_{W_T}+ \|\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-\mathcal{F}^*(\mathbf{y}^*(y_0))\|_{L^2(I;\mathbb{R}^m)} \big ) \leq c \varepsilon
\end{align*}
for some~$C>0$ independent of~$\varepsilon$.
Here we have used~$V^*(0,y_0)=J(\mathbf{y}^*(y_0),\mathcal{F}^*(\mathbf{y}^*(y_0)))$ for all~$y_0 \in Y_0$, the embedding~$W_T \hookrightarrow \mathcal{C}(\bar{I};\mathbb{R}^n)$ as well as the a priori estimates of Proposition~\ref{thm:existenceneuralnetwork}. Next we utilize~$\mathbf{p}^*(y_0)=\partial_y \mathcal{V}^*(\mathbf{y}^*(y_0))$,~$y_0\in Y_0$, to estimate
\begin{align*}
\|\partial_y \mathcal{V}^\varepsilon_{\theta_\varepsilon}&(\mathbf{y}_\varepsilon(y_0))-\mathbf{p}_{\varepsilon}(y_0)\|^2_{L^2(I;\mathbb{R}^n)} \\& \leq 2\left( \|\partial_y \mathcal{V}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-\partial_y \mathcal{V}^*(\mathbf{y}^*(y_0))\|^2_{L^2(I;\mathbb{R}^n)}+\|\mathbf{p}_{\varepsilon}(y_0)-\mathbf{p}^*(y_0)\|^2_{L^2(I;\mathbb{R}^n)}\right) \\&
\leq c\varepsilon^2,
\end{align*}
where the last inequality is deduced from Proposition~\ref{thm:existenceneuralnetwork} and Proposition~\ref{prop:solvofadjoint}. Proceeding analogously and using~$V^*(t,\mathbf{y}^*(y_0)(t))=J_t(\mathbf{y}^*(y_0),\mathcal{F}^*(\mathbf{y}^*(y_0)))$ for all~$y_0 \in Y_0$,~$t\in I$, we obtain
\begin{align*}
\int_{Y_0} \omega(y_0) \int^T_0 |V^\varepsilon_{\theta_\varepsilon}(t,\mathbf{y}_\varepsilon(y_0)(t))-J_t(\mathbf{y}_\varepsilon(y_0),\mathcal{F}^\varepsilon_{\theta_\varepsilon}
(\mathbf{y}_\varepsilon(y_0)))|^2~\mathrm{d}t\,\mathrm{d}\mathcal{L}(y_0)\leq D_1+D_2,
\end{align*}
where, using Assumption~\ref{ass:approxsmoothness} and again Proposition ~\ref{thm:existenceneuralnetwork}
\begin{align*}
D_1 &\coloneqq \int_{Y_0} \omega(y_0) \|\mathcal{V}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0))-\mathcal{V}^*(\mathbf{y}^*(y_0))\|^2_{L^2(I)}~\mathrm{d} \mathcal{L}(y_0) \leq c \varepsilon^2, \\
D_2 & \coloneqq \int_{Y_0}\omega(y_0)\int^T_0 |J_t(\mathbf{y}^*(y_0),\mathcal{F}^*(\mathbf{y}^*(y_0)))-J_t(\mathbf{y}_\varepsilon(y_0),\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon(y_0)))|^2~\mathrm{d}t\,\mathrm{d}\mathcal{L}(y_0) \leq c \varepsilon^2.
\end{align*}
Combining the previous estimates with the optimality of~$(\mathbf{y}^*_\varepsilon,\mathbf{p}^*_\varepsilon,\theta^*_\varepsilon)$, and the assumption on the asymptotic behavior of $\gamma_\varepsilon$ we deduce that
\begin{align*}
0 &\leq \int_{Y_0} \omega(y_0)\left \lbrack J_\varepsilon(\mathbf{y}^*_\varepsilon(y_0),\mathbf{p}^*_\varepsilon(y_0),\theta^*_\varepsilon)- V^*(0,y_0) \right \rbrack~\mathrm{d} \mathcal{L}(y_0) +\frac{\gamma_\varepsilon}{2} \|\theta^*_\varepsilon\|^2_{\mathcal{R}_\varepsilon}
\\ &\leq \int_{Y_0} \omega(y_0) \left \lbrack J_\varepsilon(\mathbf{y}_\varepsilon(y_0),\mathbf{p}_\varepsilon(y_0),\theta_\varepsilon)- V^*(0,y_0) \right \rbrack~\mathrm{d} \mathcal{L}(y_0) +\frac{\gamma_\varepsilon}{2} \|\theta_\varepsilon\|^2_{\mathcal{R}_\varepsilon}
\leq c\, \varepsilon.
\end{align*}
Recalling the definition of $J_\varepsilon$, this yields all claimed estimates and finishes the proof.
\end{proof}
Next the convergence of the ensemble trajectories~$(\mathbf{y}^*_\varepsilon, \mathbf{p}^*_\varepsilon)$, the feedback controls~$\mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\mathbf{y}^*_\varepsilon)$ as well as the approximate value function~$\mathcal{V}^\varepsilon_{\theta^*_\varepsilon}$ are analyzed. For this purpose we make use of the additional regularity of ensemble solutions to the closed loop system, see Proposition~\ref{thm:aprioriW12}, and introduce further constraints to~\eqref{def:approxfeedprop}. Without changing the notation we henceforth set
\begin{align} \label{def:Yadstrict}
\mathbf{\hat Y}_{ad} = \left\{\, \mathbf{y} \in \mathcal{C}^1(Y_0;W_T)\;|\;\|\mathbf{y}\|_{\mathcal{C}} \leq 2 M_{Y_0},~\|\mathbf{y}\|_{W^{1,2}}\leq 2 M_{W^{1,2}}\, \right\},
\end{align}
where~$M_{W^{1,2}}>0$ is a constant with~$\|\mathbf{y}^*\|_{W^{1,2}} \leq M_{W^{1,2}} $, the function $\mathbf{y}^*$ was introduced in \textbf{A.3}, and $W^{1,2}= \{\mathbf{y} \in L^2(Y_0;W_T): \partial_i \mathbf{y} \in L^2(Y_0;W_T), i\in \{1,\dots,n\} \}$ endowed with the natural norm. Next we note that
\begin{align*}
\frac{\beta}{2} \|\mathcal{F}^*(\mathbf{y}^*(y_0))\|^2_{L^2} \leq J(\mathbf{y}^*(y_0),\mathcal{F}^*(\mathbf{y}^*(y_0)))=V^*(0,y_0)
\end{align*}
for all~$y_0 \in Y_0$. Thus, due to the continuity of the value function~$V^*$, see Assumption~\ref{ass:feedbacklaw} $\mathbf{A.2}$, there is~$M_U >0$ with~$\|\mathcal{F}^*(\mathbf{y}^*)\|_{L^\infty} \leq M_U$. Correspondingly we set
\begin{align} \label{def:Uadstricter}
\mathbf{\hat U}_{ad} = \left\{\, \mathbf{u} \in L^\infty(Y_0;L^2(I;\mathbb{R}^m))\;|\; \|\mathbf{u}\|_{L^\infty}\leq 2 M_U \,\right\}.
\end{align}
We point out that Theorem~\ref{thm:existence} remains valid despite the additional restriction of the set of admissible states and controls. Problem~\eqref{def:approxfeedprop} with $\mathbf{ Y}_{ad}, \mathbf{ U}_{ad}$ replaced by $\mathbf{\hat Y}_{ad}, \mathbf{\hat U}_{ad}$ will be denoted by $(\mathcal{\hat P}_\varepsilon)$.
\begin{prop} \label{prop:existencestrict}
Let Assumptions~\ref{ass:feedbacklaw} and~\ref{ass:approxsmoothness} hold. Then for all~$\varepsilon>0$ small enough, Problem $(\mathcal{\hat P}_\varepsilon)$ admits at least one minimizing triple.
\end{prop}
\begin{proof}
Let~$(\mathbf{y}_\varepsilon,\mathbf{p}_\varepsilon, \theta_\varepsilon)$ be defined as in Theorem~\ref{thm:existenceneuralnetwork2}. Then we have~$\mathbf{y}_\varepsilon \in \mathbf{\hat Y}_{ad}$, see Proposition~\ref{thm:existenceneuralnetwork} and Proposition~\ref{thm:aprioriW12}, as well as~$\mathcal{F}^\varepsilon_{\theta_\varepsilon}(\mathbf{y}_\varepsilon) \in \mathbf{\hat U}_{ad}$, according to Proposition~\ref{thm:existenceneuralnetwork}, for all~$\varepsilon>0$ small enough. Hence the admissible set of~$(\mathcal{\hat P}_\varepsilon)$ is not empty. The existence of a minimizing triple then follows by repeating the arguments of the proof of Theorem~\ref{thm:existence} noting that the admissible set
\begin{align*}
\left\{(\mathbf{y},\mathbf{p},\theta)\!\in\! \mathbf{\hat Y}_{ad}\!\times\!\mathcal{C}(Y_0;W_T) \!\times\! \mathcal{R}_{\varepsilon}|(\mathbf{y},\mathbf{p},\theta)~\text{satisfies}\!~\eqref{eq:statepropapprox}\!
-\!\eqref{eq:constraintprop}, \mathcal{F}^\varepsilon_{\theta}(\mathbf{y})\!\in\! \mathbf{\hat U}_{ad}\right\}
\end{align*}
is closed w.r.t to the weak topology on~$L^2(Y_0;W_T)^2 \times \mathcal{R}_\varepsilon$.
\end{proof}
Let us next address the convergence of the optimal ensemble states~$\mathbf{y}^*_\varepsilon$, adjoint states~$\mathbf{p}_\varepsilon$ and the associated feedback controls~$\mathcal{F}^\varepsilon_{\theta^*_\varepsilon}(\mathbf{y}^*_\varepsilon)$ as $\varepsilon$ tends to 0.
\begin{theorem} \label{thm:convoftraj}
Let the prerequisites of Theorem~\ref{thm:convofobj} hold, and let $\varepsilon_k >0$ be a strictly decreasing null sequence such that $(\mathcal{\hat P}_{\varepsilon_k})$ admits a minimizing triple $(\mathbf{y}^*_k, \mathbf{p}^*_k, \theta^*_k)$.
Then $(\mathbf{y}^*_{k}, \mathbf{p}^*_{k}, \mathcal{F}^{\varepsilon_k}_{\theta^*_{k}}(\mathbf{y}^*_{k}))$ contains at least one accumulation point~$(\bar{\mathbf{y}}, \bar{\mathbf{p}}, \bar{\mathbf{u}}) \in L^\infty(Y_0; W_T)^2 \times L^\infty(Y_0; L^2(I;\mathbb{R}^m))$ w.r.t the strong topology on~$L^2(Y_0;W_T)^2 \times L^2(Y_0;L^2(I;\mathbb{R}^m))$. For each accumulation point and~$\mathcal{L}$-a.e.~$y_0 \in Y_0$ we have that~$(\bar{y}, \bar{p}, \bar{u}) \coloneqq (\bar{\mathbf{y}}(y_0),\bar{\mathbf{p}}(y_0),\bar{\mathbf{u}}(y_0))$ satisfies
\begin{align*}
(\bar{y}, \bar{u}) \in \argmin \eqref{def:openloopproblem}
\end{align*}
as well as
\begin{align*}
\dot{\bar{y}}&=\mathbf{f}(\bar{y})+\mathbf{g}(\bar{y})\bar{u},~ \bar{y}(0)=y_0, \\
-\dot{\bar{p}}&= D\mathbf{f}(\bar{y})^\top \bar{p}+\lbrack D \mathbf{g}(\bar{y})^\top\bar{u}\rbrack \bar{p}+\mathbf{Q}_1^\top \mathbf{Q}_1(\bar{y}-y_d),~ \bar{p}(T)=Q^\top_2 Q_2(\bar{y}(T)-y^T_d).
\end{align*}
\end{theorem}
\begin{proof}
By choice of the admissible sets~$\mathbf{\hat Y}_{ad}$ and~$\mathbf{\hat U}_{ad}$ we have that~$\{(\mathbf{y}^*_k, \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k))\}_{k=1}^{\infty}$ is bounded in~$(W^{1,2}(Y_0;W_T) \cap L^\infty(Y_0;W_T))\times L^\infty(Y_0;L^2(I;\mathbb{R}^m))$. By Gronwall's inequality we can argue that~$\{\mathbf{p}^*_k\}_{k=1}^\infty$ is also bounded in~$L^\infty(Y_0;W_T)$. Thus, due to the Banach--Alaoglu theorem, there is a subsequence, denoted by the same index, and
~$(\bar{\mathbf{y}},\bar{\mathbf{p}},\bar{\mathbf{u}})\in L^\infty(Y_0;W_T)^2 \times L^\infty(Y_0;L^2(I;\mathbb{R}^m))$ such that
\begin{align*}
(\mathbf{y}^*_k, \mathbf{p}^*_k, \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)) \rightharpoonup^* (\bar{\mathbf{y}},\bar{\mathbf{p}},\bar{\mathbf{u}})~\text{in}~L^\infty(Y_0;W_T)^2 \times L^\infty(Y_0;L^2(I;\mathbb{R}^m)),
\end{align*}
and $\dot{\mathbf{y}}^*_k \rightharpoonup \dot{\bar{\mathbf{y}}}$ in $L^2(Y_0;L^2(I;\mathbb{R}^n))$. By the compact embedding of $W^{1,2}(Y_0;W_T)$ into $L^2(Y_0;\mathcal{C}(I;\mathbb{R}^n))$, see \cite[Theorem 5.3]{AK18}, the subsequence can be chosen such that $\mathbf{y}^*_k \to \bar{\mathbf{y}}$ strongly in $L^2(Y_0;\mathcal{C}(I;\mathbb{R}^n))$.
These properties imply that~$(\bar{y},\bar{u})\coloneqq (\bar{\mathbf{y}}(y_0),\bar{\mathbf{u}}(y_0))$ satisfies
\begin{equation}\label{eq:aux9}
\dot{\bar{y}}=\mathbf{f}(\bar{y})+\mathbf{g}(\bar{y})\bar{u},~ \bar{y}(0)=y_0,
\end{equation}
for~$\mathcal{L}$-a.e.~$y_0 \in Y_0$. This also implies~$V^*(0,y_0) \leq J(\bar{y},\bar{u}) $ and thus, together with
\begin{align*}
J(\mathbf{y}^*_k, \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)) \rightarrow V^*(0,\cdot)~\text{in}~L^1(Y_0),
\end{align*}
see Theorem~\ref{thm:convofobj},
we have~$(\bar{y},\bar{u}) \in \argmin \eqref{def:openloopproblem}$ for~$\mathcal{L}$-a.e.~$y_0 \in Y_0$. Moreover, again using the strong convergence of $\mathbf{y}^*_k$ in $L^2(Y_0;\mathcal{C}(I;\mathbb{R}^n))$ and recalling the definition of~$J(\cdot,\cdot)$ as
\begin{align*}
J(y,u)= (1/2) \|\mathbf{Q}_1(y-y_d)\|^2_{L^2(I;\mathbb{R}^n)}+(\beta/2) \|u\|^2_{L^2(I;\mathbb{R}^m)}+(1/2) |{Q}_2(y(T)-y^T_d)|^2,
\end{align*}
for all~$y \in W_T,~u \in L^2(I;\mathbb{R}^m)$,
we also conclude the convergence of the~$L^2(Y_0;L^2(I;\mathbb{R}^m))$ norm of~$\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)$ towards the norm of~$\bar{\mathbf{u}}$. Thus~$\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k) \rightarrow \bar{\mathbf{u}}$ strongly in~$L^2(Y_0;L^2(I;\mathbb{R}^m))$, and $\mathbf{y}^*_k \to \bar{\mathbf{y}}$ strongly in $L^2(Y_0;W_T)$, by Lebesgue's bounded convergence theorem.
It remains to address the strong convergence of~$\mathbf{p}_k$. For this purpose we show that the functions~$\lbrack D\mathbf{g}(\mathbf{y}^*_k)^\top \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(\cdot))\rbrack \mathbf{p}^*_k(\cdot) $ converge weakly to~$\lbrack D\mathbf{g}(\bar{\mathbf{y}}(\cdot))^\top \bar{\mathbf{u}}(\cdot)\rbrack \bar{\mathbf{p}}(\cdot) $ in~$L^2(Y_0;L^2(I;\mathbb{R}^n))$. Fixing a test function~$\varphi \in L^2(Y_0;L^2(I;\mathbb{R}^n))$ we first note that
\begin{align*}
\lim_{k\rightarrow \infty}\big(\varphi&, \lbrack D\mathbf{g}(\bar{\mathbf{y}}(\cdot))^\top \bar{\mathbf{u}}(\cdot)\rbrack (\mathbf{p}^*_k(\cdot)-\bar{\mathbf{p}}) \big) _{L^2(Y_0;L^2(I;\mathbb{R}^n))}=0.
\end{align*}
Second, for~$\mathcal{L}$-a.e.~$y_0 \in Y_0$ we estimate
\begin{align*}
&\big(\varphi(y_0),\lbrack D\mathbf{g}(\mathbf{y}^*_k(y_0))^\top \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0))-D\mathbf{g}(\bar{\mathbf{y}}(y_0))^\top \bar{\mathbf{u}}(y_0)\rbrack \mathbf{p}^*_k(y_0)\big)_{L^2(I;\mathbb{R}^n)} \\
&\leq \! C \|\varphi(y_0)\|_{L^2} \wnorm{\mathbf{p}^*_k(y_0)}\! \left( \|\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0))\|_{L^2}\wnorm{\mathbf{y}^*_k(y_0)\!-\!\bar{\mathbf{y}}(y_0)}\!+
\!\|\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0))\!-\!\bar{\mathbf{u}}(y_0)\|_{L^2}\!\right) \\
& \leq \!
C \|\varphi(y_0)\|_{L^2} \left( \wnorm{\mathbf{y}^*_k(y_0)-\bar{\mathbf{y}}(y_0)}+\|\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0))-\bar{\mathbf{u}}(y_0)\|_{L^2}\right)
\end{align*}
for some~$C>0$ independent of~$k\in\mathbb{N}$ and~$y_0$. Here we made use of the boundedness of~$\{\mathbf{y}^*_k\}_{k=1}^\infty$ and~$\{\mathbf{p}^*_k\}_{k=1}^\infty$ in~$L^\infty(Y_0;W_T)$, and of~$\{\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)\}_{k=1}^\infty$ in~$L^\infty(Y_0;L^2(I;\mathbb{R}^m))$. Integrating both sides of the inequality w.r.t.~$\mathcal{L}$ and utilizing the strong convergence of~$\mathbf{y}^*_k$ and~$\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)$ we finally arrive at
\begin{align*}
\lim_{k \rightarrow \infty}\big(\varphi,\lbrack D\mathbf{g}(\mathbf{y}^*_k(\cdot))^\top \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(\cdot))-D\mathbf{g}(\bar{\mathbf{y}}(\cdot))^\top \bar{\mathbf{u}}(\cdot)\rbrack \mathbf{p}^*_k(\cdot)\big)_{L^2(Y_0,L^2(I;\mathbb{R}^n))}=0.
\end{align*}
By repeating this argument for the different terms appearing in the adjoint equation we get that~$(\bar{y}, \bar{p}, \bar{u}) \coloneqq (\bar{\mathbf{y}}(y_0),\bar{\mathbf{p}}(y_0),\bar{\mathbf{u}}(y_0))$ satisfies
\begin{align*}
-\dot{\bar{p}}&= D\mathbf{f}(\bar{y})^\top \bar{p}+\lbrack D \mathbf{g}(\bar{y})^\top \bar{u}\rbrack \bar{p}+\mathbf{Q}_1^\top \mathbf{Q}_1(\bar{y}-y_d),~ \bar{p}(T)= Q^\top_2 Q_2(\bar{y}(T)-y^T_d)
\end{align*}
for~$\mathcal{L}$-a.e.~$y_0 \in Y_0$. Applying Gronwall's inequality we deduce
\begin{align*}
\wnorm{\mathbf{p}^*_k(y_0)-\bar{\mathbf{p}}(y_0)} \leq C \left( \wnorm{\mathbf{y}^*_k(y_0)-\bar{\mathbf{y}}(y_0)}+\|\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0))-\bar{\mathbf{u}}(y_0)\|_{L^2} \right)
\end{align*}
for~$\mathcal{L}$-a.e.~$y_0 \in Y_0$ and~$C>0$ independent of~$y_0$ and~$k$. This yields~$\mathbf{p}^*_k \rightarrow \bar{\mathbf{p}}$ strongly in~$L^2(Y_0;W_T)$. Since the weakly convergent subsequence was chosen arbitrarily in the beginning, this finishes the proof.
\end{proof}
\begin{remark}
If~$\mathbf{g}(y(t))=B \in \mathbb{R}^{n \times m}$ then the statement of the previous theorem also holds~\emph{without} constraints on the control (i.e.\ for~$\mathbf{U}_{ad}=L^2(Y_0;L^2(I;\mathbb{R}^m))$). In this particular case, the uniform boundedness of~$\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)$ in~$L^2(Y_0;L^2(I;\mathbb{R}^m))$ follows from
\begin{align*}
\frac{\beta}{2} \|\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)\|^2_{L^2} \leq c \int_{Y_0} \omega(y_0) J(\mathbf{y}^*_k(y_0),\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0)))~\mathrm{d}\mathcal{L}(y_0) \leq C,
\end{align*}
see Theorem~\ref{thm:convofobj}. Moreover the adjoint equation no longer depends on the control. Repeating the arguments of the last proof yields the subsequential convergence of~$(\mathbf{y}^*_k,\mathbf{p}^*_k,\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k))$ towards an element~$(\bar{\mathbf{y}},\bar{\mathbf{p}},\bar{\mathbf{u}})\in L^\infty(Y_0;W_T)^2 \times L^2(Y_0;L^2(I;\mathbb{R}^m))$ such that~$(\bar{y},\bar{p},\bar{u})\coloneqq(\bar{\mathbf{y}}(y_0),\bar{\mathbf{p}}(y_0),\bar{\mathbf{u}}(y_0))$ satisfy the system of state and adjoint equations as well as~$(\bar{y},\bar{u})\in \argmin \eqref{def:openloopproblem}$ for~$\mathcal{L}$-a.e.~$y_0 \in Y_0$. Then it only remains to argue the additional regularity~$\bar{\mathbf{u}}\in L^\infty(Y_0;L^2(I;\mathbb{R}^m))$. This is, however, a direct consequence of the first order necessary optimality condition~$\bar{\mathbf{u}}=(-1/\beta)B^\top\bar{p}$ for~\eqref{def:openloopproblem}, see Proposition~\ref{prop:structure}.
\end{remark}
We point out that the statement of Theorem~\ref{thm:convoftraj} holds independently of the values of the penalty parameters~$\gamma_1,\gamma_2$. If~$\gamma_1,\gamma_2 >0$ then we additionally obtain the following convergence results for the approximate value function~$\mathcal{V}^\varepsilon_{\theta^*_k}$ and its derivative~$\partial_y \mathcal{V}^\varepsilon_{\theta^*_k}$ along optimal state trajectories.
\begin{prop} \label{prop18}
Let the prerequisites of Theorem~\ref{thm:convofobj} hold and
let~$(\mathbf{y}^*_k, \mathbf{p}^*_k, \theta^*_k)$ denote a sequence of minimizing triplets as described in Theorem~\ref{thm:convoftraj}. Assume that~$(\mathbf{y}^*_{k}, \mathbf{p}^*_{k}, \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k))$ converges to~$(\bar{\mathbf{y}},\bar{\mathbf{p}}, \bar{\mathbf{u}})$ in~$L^2(Y_0;W_T)^2 \times L^2(Y_0; L^2(I;\mathbb{R}^m))$ and~$\gamma_1,\gamma_2 >0$. Then we also have
\begin{align*}
\mathcal{V}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k) \rightarrow \mathcal{V}^*(\bar{\mathbf{y}})~\text{in}~L^2(Y_0;L^2(I)),~\partial_y \mathcal{V}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k) \rightarrow \bar{\mathbf{p}}~\text{in}~L^2(Y_0;L^2(I;\mathbb{R}^n)).
\end{align*}
\end{prop}
\begin{proof}
Due to the convergence of~$\mathbf{y}^*_k \to \bar{\mathbf{y}}$ in $L^2(Y_0;L^2(I;\mathbb{R}^n))$ and~$\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k) \to \bar{\mathbf{u}}$ in $L^2(Y_0;L^2(I;\mathbb{R}^m))$, we conclude that
\begin{align*}
J_\bullet (\mathbf{y}^*_k,\mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)) \rightarrow J_\bullet (\bar{\mathbf{y}},\bar{\mathbf{u}})= \mathcal{V}^*(\bar{\mathbf{y}})~\text{in}~L^2(Y_0;L^2(I)).
\end{align*}
Together with
\begin{align*}
\lim_{k\rightarrow \infty} \int_{Y_0} \omega(y_0) \| V^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0))-J_\bullet (\mathbf{y}^*_k(y_0), \mathcal{F}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k(y_0)))\|^2_{L^2}~\mathrm{d}\mathcal{L}(y_0)=0,
\end{align*}
see Theorem~\ref{thm:convofobj}, we arrive at~$\mathcal{V}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k) \rightarrow \mathcal{V}^*(\bar{\mathbf{y}})$ in~$L^2(Y_0;L^2(I))$. The statement on the convergence of~$\partial_y \mathcal{V}^{\varepsilon_k}_{\theta^*_k}(\mathbf{y}^*_k)$ follows similarly from the strong convergence of~$\mathbf{p}_k$.
\end{proof}
\section{Learning from a finite training set}
We turn to analysing a discrete version of \eqref{def:approxfeedprop}.
In this case we can proceed without the state-space constraint
$\mathbf{y} \in \textbf{Y}_{ad}$ provided certain growth bounds on
$\mathbf{f}$ and $\mathbf{g}$ are satisfied. The numerical realization of \eqref{def:approxfeedprop} will always rely on such a discrete
approximation. Henceforth we fix a finite ensemble of initial conditions
$\{y_0^i: i=1,\dots, N\} \subset Y_0$. For positive weights $\omega_i$, $i=1,\dots,N$, and $\varepsilon>0$ we consider
\begin{align} \label{eq:learningprobfinite}
\inf_{y_i, p_i \in W_T, \theta \in \mathcal{R}_\varepsilon} \left
\lbrack\sum^N_{i=1} \omega_i J_\varepsilon(y_i,p_i, \theta)+
\frac{\gamma_\varepsilon}{2} \|\theta\|^2_{\mathcal{R}_\varepsilon} \right \rbrack
\tag{$\mathcal{P}^N_\varepsilon$}
\end{align}
subject to
\begin{align*}
\dot{y}_i
&=\mathbf{f}(y_i)+\mathbf{g}(y_i)\mathcal{F}^\varepsilon_\theta(y_i),~y_i(0)=y^i_0 \\
-\dot{p}_i&= D\mathbf{f}(y_i)^\top p_i+ \lbrack
D\mathbf{g}(y_i)^\top\mathcal{F}^\varepsilon_\theta(y_i)\rbrack p_i+\mathbf{Q}_1^\top
\mathbf{Q}_1(y_i-y_d),~p_i(T)= Q^\top_2 Q_2(y_i(T)-y_d^T).
\end{align*}
Throughout this section, Assumptions~\ref{ass:feedbacklaw}
and~\ref{ass:approxsmoothness} are supposed to hold. Further $\varepsilon$ is
supposed to be sufficiently small so that the set of admissible
solutions for \eqref{eq:learningprobfinite} is nonempty, compare
Theorem~\ref{thm:existenceneuralnetwork2}. It will be convenient to introduce $\mathbf{y}=
\text{col}(y_1,\dots,y_N)$, and $\mathbf{p}=
\text{col}(p_1,\dots,p_N)$, which replace the ensemble states and
costates from the previous sections.
\begin{prop}\label{prop:existencefinite}
Let~$\varepsilon>0$ be sufficiently small and let $(\mathbf{y}^k, \mathbf{p}^k,
\theta_k)\in W_T^{2N}\times \mathcal{R}_\varepsilon$
denote an infimizing sequence for \eqref{eq:learningprobfinite}.
If ~$\max_{i} \|y^k_i\|_{L^\infty(I;\mathbb{R}^n)} \leq M_\infty$ for
some~${M_\infty}>0$ independent of~$k \in \mathbb{N}$, then
Problem~\eqref{eq:learningprobfinite} admits at least one
minimizer~$(\mathbf{y}^*, \mathbf{p}^*, \theta^*)$.
\end{prop}
\begin{proof}
Since by assumption $(\mathbf{y}^k, \mathbf{p}^k, \theta_k)$
is an infimizing sequence for \eqref{eq:learningprobfinite} and since ~$\beta >0$ we have
\begin{align} \label{eq:dimdepend}
\max_{i} \|\mathbf{Q}_1 y^k_i\|^2_{L^2}+ \max_{i}
\|\mathcal{F}^\varepsilon_{\theta_k}(y^k_i)\|^2_{L^2} \leq C_N
\end{align}
for some~$C_N >0$ depending on~$N$. Moreover there holds
\begin{align*}
\|\dot{y}^k_i\|_{L^2} \leq \|\mathbf{f}(y^k_i)\|_{L^2}+
\|\mathbf{g}(y^k_i)\mathcal{F}^\varepsilon_{\theta_k}(y^k_i)\|_{L^2} \leq
C(\mathbf{f},\mathbf{g}) M_\infty (1+C_N )
\end{align*}
using the uniform~$L^\infty$ and~$L^2$ boundedness of~$y^k_i$
and~$\mathcal{F}^\varepsilon_\theta(y^k_i)$, respectively. Thus we also
have~$\wnorm{y^k_i}\leq \widehat{C}_N$ for all~$k \in \mathbb{N}$, for some
~$\widehat{C}_N>0$ which depends on~$N$ but not on~$k$ and~$i$. The
proof can now be completed by the same steps as
Theorem~\ref{thm:existence}.
\end{proof}
\begin{remark}\label{rem:linftybound}
The~$L^\infty$-boundedness of the minimizing sequence~$y^k_i$ in
Proposition~\ref{prop:existencefinite} can be ensured by additional
assumptions on the dynamics of the problem. These include:
\begin{itemize}
\item Add an additional state constraint~$\|y_i\|_{L^\infty} \leq
\widehat{M}$ to~\eqref{eq:learningprobfinite}.
\item Assume that there are~$a_1,a_2,a_3>0$ such that
\begin{align*}
|f(x)| \leq a_1+ a_2 |x|+ a_3 |x|^2,~\|g(x)\| \leq a_1+a_2|x| \quad
\forall x \in \mathbb{R}^n,
\end{align*}
and that $Q_1$ is positive definite. Then by \eqref{eq:dimdepend} the family $\{y^k_i\}$ is
bounded in $L^2(I;\mathbb{R}^n)$, uniformly w.r.t.\ $i\in\{1,\dots,N\}$ and $k\in\mathbb{N}$.
Further we can readily verify that
\begin{align*}
\|\dot{y}^k_i\|_{L^1} &\leq \|\mathbf{f}(y^k_i)\|_{L^1}+
\|\mathbf{g}(y^k_i)\|_{L^1}
\\ & \leq 2a_1 T+ a_2 \|y^k_i\|_{L^1}+ a_3 \|y^k_i\|^2_{L^2}+ a_2
\|y^k_i\|_{L^2} \|\mathcal{F}^\varepsilon_{\theta_k}(y^k_i)\|^2_{L^2} \leq
M_N
\end{align*}
for an~$N$-dependent bound~$M_N>0$. Here we made use of the
$L^2$-boundedness of~$y^k_i$ and~$\mathcal{F}^\varepsilon_{\theta_k}(y^k_i)$ which follows from \eqref{eq:dimdepend} in the
proof of Proposition~\ref{prop:existencefinite}, and the assumption that $Q_1>0$. Consequently~$y^k_i$ is uniformly bounded
in~$W^{1,1}(I; \mathbb{R}^n)$ and thus also in~$L^\infty(I;\mathbb{R}^n)$.
\item Assume that~$f(x)=Ax-h(x)$ where~$A \in \mathbb{R}^{n \times n}$ and~$h$
is monotone i.e.~$(x,h(x))_{\mathbb{R}^n} \geq 0$ for all~$x\in\mathbb{R}^n$. Moreover
assume that $Q_1$ is positive definite and that
\begin{align*}
\|g(x)\| \leq a_1+a_2|x| \quad \forall x \in \mathbb{R}^n.
\end{align*}
In this case, testing the equation satisfied by $y_i$ with $y_i$, and a
Gronwall argument yields
\begin{align*}
|y^k_i(t)|^2 \leq C_N \left( |y^i_0|^2+ \|y^k_i\|^2_{L^2} +
\|\mathcal{F}^\varepsilon_{\theta_k}(y^k_i)\|^2_{L^2} \right)
\end{align*}
for some~$N$-dependent~$C_N>0$ and all~$t\in I$. Thus, the uniform
boundedness of~$y^k_i$ in~$L^\infty(I;\mathbb{R}^n)$ follows again from
the~$L^2$-estimates on~$y^k_i$ and~$\mathcal{F}^\varepsilon_{\theta_k}(y^k_i)$
in~\eqref{eq:dimdepend}.
\end{itemize}
\end{remark}
The convergence result as $\varepsilon\to 0^+$ of
Theorem~\ref{thm:convoftraj} can be transferred to the finite training
set setting as well.
\begin{prop} \label{prop:convergencefinite}
Let the regularisation parameters satisfy $\gamma_\varepsilon
\|\theta_\varepsilon\|^2_{\mathcal{R}_\varepsilon} = O(\varepsilon)$.
Further let ~$\varepsilon_k>0$ be a positive null sequence such that
for each~$k \in \mathbb{N}$ there exists a
solution~$(\mathbf{y}^k,\mathbf{p}^k,\theta_k) \in W_T^{2N} \times
\mathcal{R}_{\varepsilon_k}$ to~$(\mathcal{P}^N_{\varepsilon_k})$.
If there is~${M_\infty}>0$ with~$\max_{i} \|y^k_i\|_{L^\infty} \leq
M_\infty$ for all~$k \in \mathbb{N}$,
then~$(\mathbf{y}^k,\mathbf{p}^k,
\mathbf{\mathcal{F}}^{\varepsilon_k}_{\theta_k}(\mathbf{y}^k))$ admits at least one
strong accumulation point~$(\mathbf{\bar{y}}, \mathbf{\bar{p}},
\mathbf{\bar{u}})$ in~$W^{2N}_T \times L^2(I;\mathbb{R}^m)^N$. Each such point
satisfies
\begin{align*}
(\bar{y}_i, \bar{u}_i)\in \argmin (P^{y^i_0}_\beta), \quad i=1, \dots,N,
\end{align*}
as well as
\begin{align*}
\dot{\bar{y}}_i
&=\mathbf{f}(\bar{y}_i)+\mathbf{g}(\bar{y}_i)\bar{u}_i,~\bar{y}_i(0)=y^i_0
\\
-\dot{\bar{p}}_i&= D\mathbf{f}(\bar{y}_i)^\top \bar{p}_i+ \lbrack
D\mathbf{g}(\bar{y}_i)^\top\bar{u}_i\rbrack \bar{p}_i+\mathbf{Q}_1^\top
\mathbf{Q}_1(\bar{y}_i-y_d),~\bar{p}_i(T)= Q^\top_2
Q_2(\bar{y}_i(T)-y_d^T).
\end{align*}
\end{prop}
\begin{proof}
For every~$\varepsilon_k$, with $k$ sufficiently large, denote
by~$\theta_{\varepsilon_k}\in \mathcal{R}_{\varepsilon_k}$ the corresponding
parameters from Assumption~\ref{ass:approxsmoothness},
by~$\mathbf{y}_{\varepsilon_{k}}$ the associated ensemble solution, see
Theorem~\ref{thm:existenceneuralnetwork2}, and
by~$\mathbf{p}_{\varepsilon_{k}}$ the adjoint states. For abbreviation we
set~$y^{\varepsilon_k}_i \coloneqq \mathbf{y}_{\varepsilon_k}(y^i_0)$ and
$p^{\varepsilon_k}_i \coloneqq \mathbf{p}_{\varepsilon_k}(y^i_0)$. Then, by
optimality, we have
\begin{align}\label{eq:aux11}
\sum^N_{i=1} \omega_i J(y^k_i, \mathcal{F}^{\varepsilon_k}_{\theta_k}(y^k_i)) \leq
\sum^N_{i=1} \omega_i J_\varepsilon(y^{\varepsilon_k}_i, p^{\varepsilon_k}_i,
\theta_{\varepsilon_k}) + \frac{\gamma_{\varepsilon_k}}{2} \|\theta_{\varepsilon_k}\|^2_{\mathcal{R}_{\varepsilon_k}}.
\end{align}
As in the proof of Theorem~\ref{thm:convofobj} we see that the
right-hand side of this inequality converges to~$\sum^N_{i=1} \omega_i
V^*(0,y^i_0)$ as~$k \rightarrow +\infty$. Thus it is bounded
independently of~$k \in \mathbb{N}$. Similarly to
Proposition~\ref{prop:existencefinite} we then conclude the existence
of~$C_N >0$ depending on~$N$, but not on~$k$, such that
\begin{align*}
\max_{i} \|\mathbf{Q}_1 y^k_i\|^2_{L^2}+ \max_{i}
\|\mathcal{F}^{\varepsilon_k}_{\theta_k}(y^k_i)\|^2_{L^2} \leq C_N.
\end{align*}
Utilizing the state equation this can be improved to a~$k$-independent
bound on the~$W_T$-norm of~$y^k_i$. By a Gronwall-type argument the same
can be shown for the adjoint states~$p^k_i$. Now fix an arbitrary
index~$i \in \{1,\dots,N\}$. Summarizing the previous observations we
get the uniform boundedness of~$(y^k_i, p^k_i, \mathcal{F}^{\varepsilon_k}_{\theta_k}(y^k_i)
)$ in~$W_T^2 \times L^2(I; \mathbb{R}^m) $ w.r.t. $k$, for each $i=1,\dots,N$. Each of its
weak accumulation points~$(\bar{y}_i, \bar{p}_i, \bar{u}_i) \in W_T^2
\times L^2(I; \mathbb{R}^m)$ satisfies
\begin{align*}
\dot{\bar{y}}_i&=\mathbf{f}(\bar{y}_i)+\mathbf{g}(\bar{y}_i)\bar{u}_i,~
\bar{y}_i(0)=y^i_0.
\end{align*}
From this we conclude that
\begin{align*}
0\le \sum^N_{i=1} \omega_i V^*(0,y^i_0) \leq \sum^N_{i=1} \omega_i
J(\bar y_i,\bar u_i) \le \lim_{k\to \infty} \sum^N_{i=1} \omega_i
J(y^k_i, \mathcal{F}^{\varepsilon_k}_{\theta_k}(y^k_i))\le \sum^N_{i=1} \omega_i
V^*(0,y^i_0).
\end{align*}
Since the second and third of the above inequalities also hold for each
summand we conclude that~$\lim_{k\to \infty}J(y^k_i,
\mathcal{F}^{\varepsilon_k}_{\theta_k}(y^k_i))= J(\bar{y}_i, \bar{u}_i)$ as
well as~$J(\bar{y}_i, \bar{u}_i)=V^*(0,y^i_0)$. Hence
\begin{align*}
(\bar{y}_i, \bar{u}_i)\in \argmin (P^{y^i_0}_\beta).
\end{align*}
The proof can now be concluded with minor adaptations to the proof of
Theorem~\ref{thm:convoftraj}.
\end{proof}
A result analogous to that of Proposition \ref{prop18} can also be
obtained for Problem \eqref{eq:learningprobfinite}. For the sake of
brevity we do not present the details.
\subsection{The reduced objective functional}
In order to compute a solution to~\eqref{eq:learningprobfinite} we will rely on gradient-based optimization methods. For this purpose we introduce a~\textit{reduced objective functional} by eliminating the state and adjoint equations in~\eqref{eq:learningprobfinite}. Subsequently, we characterize the derivative of the reduced functional by means of adjoint techniques. To simplify the presentation we fix an arbitrary index~$i\in\{1,\dots,N\}$ in the following. Moreover, for abbreviation, we define the mapping
\begin{align*}
\mathbf{A} \colon W_T \times \mathcal{R}_\varepsilon \to \mathcal{B}(W_T;L^2(I;\mathbb{R}^n)),~\mathbf{A}(y,\theta)=D\mathbf{f}(y)^\top + \lbrack D\mathbf{g}(y)^\top\mathcal{F}^\varepsilon_\theta(y)\rbrack.
\end{align*}
Using this notation, the adjoint equation in~\eqref{eq:learningprobfinite} can be expressed compactly as
\begin{align*}
-\dot{p}_i= \mathbf{A}(y_i,\theta)p_i+\mathbf{Q}_1^\top \mathbf{Q}_1(y_i-y_d),~p_i(T)=Q^\top_2 Q_2(y_i(T)-y_d^T).
\end{align*}
First, we argue the existence of~\textit{parameter-to-state operators} for the adjoint and the state equation.
\begin{lemma} \label{lem:existenceparametertostate}
Define~$G_i \colon W_T \times W_T \times \mathcal{R}_\varepsilon \to L^2(I; \mathbb{R}^n) \times L^2(I; \mathbb{R}^n) \times \mathbb{R}^n \times \mathbb{R}^n$ by
\begin{align*}
G_i(y,p, \theta)= \left(
\begin{array}{c}
\dot{y} -\mathbf{f}(y)-\mathbf{g}(y)\mathcal{F}^\varepsilon_\theta(y) \\
-\dot{p}- \mathbf{A}(y,\theta)p-\mathbf{Q}_1^\top \mathbf{Q}_1(y-y_d)\\
y(0)-y^i_0 \\
p(T)- Q^\top_2 Q_2(y(T)-y_d^T)
\end{array}
\right).
\end{align*}
Let~$(\tilde{y},\tilde{p},\tilde{\theta})\in W_T \times W_T \times \mathcal{R}_\varepsilon$ satisfy~$G_i(\tilde{y},\tilde{p},\tilde{\theta})=0$. Then there exists a neighbourhood~$\mathcal{N}_i(\tilde{y})\times \mathcal{N}_i(\tilde{p}) \times \mathcal{N}_i(\tilde{\theta})$ as well as~$\mathcal{C}^1$-mappings~$Y_i \colon \mathcal{N}_i(\tilde{\theta})\to \mathcal{N}_i(\tilde{y})\subset W_T$,~$P_i \colon \mathcal{N}_i(\tilde{\theta})\to \mathcal{N}_i(\tilde{p}) \subset W_T$ such that
\begin{align*}
G_i(Y_i(\theta),P_i(\theta),\theta)=0 \quad \forall \theta \in \mathcal{N}_i(\tilde{\theta}).
\end{align*}
Given $y_i\coloneqq Y_i(\theta)$ and~$p_i\coloneqq P_i(\theta)$, the Fr\'{e}chet derivatives of~$Y_i$ and~$P_i$ at~$\theta \in \mathcal{N}_i(\tilde{\theta})$, in direction~$\delta \theta \in \mathcal{R}_\varepsilon$, denoted by~$\delta Y_i \coloneqq Y'_i(\theta)(\delta \theta)$,~$\delta P_i \coloneqq P'_i(\theta)(\delta \theta)$, satisfy
\begin{align*}
&\dot{\delta Y_i}- \mathbf{A}(y_i,\theta)^\top \delta Y_i- \mathbf{g}(y_i) D_y \mathcal{F}^\varepsilon_\theta(y_i)\delta Y_i= \mathbf{g}(y_i) D_\theta \mathcal{F}^\varepsilon_\theta(y_i)\delta \theta, \\
&-\dot{\delta P_i}-\mathbf{A}(y_i,\theta)\delta P_i =\lbrack D_y\mathbf{A}(y_i,\theta)\delta Y_i \rbrack p_i+ \mathbf{Q}_1^\top \mathbf{Q}_1 \delta Y_i+\lbrack \partial_\theta \mathbf{A}(y_i,\theta) \delta \theta \rbrack p_i,\\
&~\delta Y_i(0)=0, ~\delta P_i(T)= Q_2^\top Q_2 \delta Y_i(T).
\end{align*}
\end{lemma}
\begin{proof}
This is a direct consequence of the implicit function theorem applied to~$G_i$, noting that the directional derivatives satisfy
\begin{align*}
\begin{pmatrix}
\partial_y G_i(y,p,\theta) & \partial_p G_i(y,p,\theta)
\end{pmatrix}
\begin{pmatrix}
\delta Y \\ \delta P
\end{pmatrix}
=- \partial_\theta G_i(y,p,\theta) \delta \theta.
\end{align*}
\end{proof}
Now consider an admissible point~$(\tilde{\mathbf{y}}, \tilde{\mathbf{p}}, \theta) \in W^{2N}_T \times \mathcal{R}_\varepsilon$ for~\eqref{eq:learningprobfinite}. For every~$i=1,\dots,N$, let~$\mathcal{N}_i(\tilde{\theta})$ and~$Y_i,P_i$ denote the corresponding neighbourhoods and operators from Lemma~\ref{lem:existenceparametertostate}. Setting~$\mathcal{N}(\tilde{\theta})= \bigcap^N_{i=1} \mathcal{N}_i(\tilde{\theta})$ define the reduced objective functional
\begin{align} \label{eq:reducedobj}
\mathcal{J}_N \colon \mathcal{N}(\tilde{\theta}) \to [0,+\infty),~\mathcal{J}_N(\theta)= \sum^N_{i=1} \omega_i J_\varepsilon(Y_i(\theta),P_i(\theta), \theta)+ \frac{\gamma_\varepsilon}{2}\|\theta\|^2_{\mathcal{R}_\varepsilon},
\end{align}
and set
\begin{align*}
\Phi_i(t)= \int^t_0 (V^\varepsilon_\theta(s,y_i(s))-J_s(y_i,\mathcal{F}^\varepsilon_\theta(y_i)))~\mathrm{d}s.
\end{align*}
\begin{prop} \label{prop:direcderiv}
The functional~$\mathcal{J}_N$ from~\eqref{eq:reducedobj} is at least of class~$\mathcal{C}^1$ on~$\mathcal{N}(\tilde{\theta})$. Given~$ \theta \in \mathcal{N}(\tilde{\theta})$, set~$y_i \coloneqq Y_i(\theta)$,~$p_i \coloneqq P_i(\theta)$ as well as~$\delta Y_i \coloneqq Y'_i(\theta)(\delta \theta)$,~$ \delta P_i \coloneqq P'_i(\theta)(\delta \theta)$. The directional derivative of~$\mathcal{J}_N$ at~$\theta$ in the direction of~$\delta \theta \in \mathcal{R}_\varepsilon$ is given by
\begin{align*}
\mathcal{J}'_N(\theta)( \delta \theta)= \sum^N_{i=1} \omega_i \left( (\widehat{y}_i, \delta Y_i)_{L^2}+(\widehat{y}^T_i, \delta Y_i(T))_{\mathbb{R}^n}+(\widehat{p}_i, \delta P_i)_{L^2}+(\widehat{\theta}_i,\delta \theta)_{\mathcal{R}_\varepsilon}\right)+\gamma_\varepsilon (\theta,\delta \theta)_{\mathcal{R}_\varepsilon}
\end{align*}
with
\begin{align*}
\widehat{y}_i=(1-\gamma_1\Phi_i)& \mathbf{Q}_1^\top \mathbf{Q}_1 (y_i-y_d)+ \beta(1-\gamma_1\Phi_i) D_y \mathcal{F}^\varepsilon_\theta(y_i)^\top\mathcal{F}^\varepsilon_\theta(y_i)\\&+\gamma_1 (\mathcal{V}^\varepsilon_\theta(y_i)-J_\bullet(y_i,\mathcal{F}^\varepsilon_\theta(y_i)))\partial_y \mathcal{V}^\varepsilon_\theta(y_i)+\gamma_2 D_{yy}\mathcal{V}^\varepsilon_\theta(y_i)(\partial_{y}\mathcal{V}^\varepsilon_\theta(y_i)-p_i),
\end{align*}
and
\begin{align*}
\widehat{y}^T_i=(1-\gamma_1\Phi_i(0)) Q^\top_2 Q_2 (y_i(T)-y^T_d),
\end{align*}
as well as
\begin{align*}
\widehat{p}_i=\gamma_2(p_i-\partial_y \mathcal{V}^\varepsilon_\theta(y_i)),
\end{align*}
and
\begin{align*}
\widehat{\theta}_i&= \gamma_1 \int^T_0 D_\theta V^\varepsilon_\theta(t,y_i(t))^\top(V^\varepsilon_\theta(t,y_i(t))-J_t(y_i,\mathcal{F}^\varepsilon_\theta(y_i)))~\mathrm{d} t \\
&+\int^T_0 \!\lbrack \beta (1\!-\!\gamma_1\Phi_i(t)) D_\theta F^\varepsilon_\theta(t,y_i(t))^\top F^\varepsilon_\theta(t,y_i(t))\!+\!\gamma_2 D_{y \theta} V^\varepsilon_\theta(t,y_i(t))^\top(\partial_{y}{V}^\varepsilon_\theta(t,y_i(t))\!-\!p_i(t)) \rbrack~\mathrm{d} t.
\end{align*}
\end{prop}
\begin{proof}
The regularity of~$\mathcal{J}_N$ follows immediately from Lemma~\ref{lem:existenceparametertostate} and the chain rule. In order to compute the directional derivative we abbreviate
\begin{equation*}
\begin{array}{l}
F_1(y,u,\theta)=\frac{\gamma_1}{2}\int^T_0 |V^\varepsilon_\theta(t,y(t))-J_t(y,u)|^2~\mathrm{d} t,\\[1.4ex]
F_2(y,p,\theta)=\frac{\gamma_2}{2}\int^T_0 |\partial_y V^\varepsilon_\theta(t,y(t))-p(t)|^2~\mathrm{d} t
\end{array}
\end{equation*}
in the following.
Thus we have
\begin{align*}
J_\varepsilon(Y_i(\theta),P_i(\theta),\theta)&= J(Y_i(\theta),\mathcal{F}^\varepsilon_\theta(Y_i(\theta)))+F_1(Y_i(\theta),\mathcal{F}^\varepsilon_\theta(Y_i(\theta)),\theta)+F_2(Y_i(\theta),P_i(\theta),\theta)\\&=G_1(\theta)+G_2(\theta)+G_3(\theta).
\end{align*}
We readily verify
\begin{align*}
G'_1(\theta)(\delta \theta)=&(\mathbf{Q}_1^\top \mathbf{Q}_1 (y_i-y_d),\delta Y_i )_{L^2}+ \beta (D_y \mathcal{F}^\varepsilon_\theta(y_i)^\top\mathcal{F}^\varepsilon_\theta(y_i),\delta Y_i)_{L^2}\\&+ \beta (D_\theta \mathcal{F}^\varepsilon_\theta(y_i)^\top\mathcal{F}^\varepsilon_\theta(y_i),\delta \theta)_{\mathcal{R}_\varepsilon}+ (Q^\top_2 Q_2 (y_i(T)-y^T_d),\delta Y_i(T))_{\mathbb{R}^n}.
\end{align*}
Recalling the definition of $\Phi_i$ we get
\begin{align*}
G'_2(\theta)& (\delta \theta)=\gamma_1 (E_1+E_2+E_3+E_4),
\end{align*}
where
\begin{align*}
E_1 &= ((\mathcal{V}^\varepsilon_\theta(y_i)\!-\!J_\bullet(y_i,\mathcal{F}^\varepsilon_\theta(y_i))) \partial_y \mathcal{V}^\varepsilon_\theta(y_i), \delta Y_i)_{L^2} +(D_\theta \mathcal{V}^\varepsilon_\theta(y_i)^\top(\mathcal{V}^\varepsilon_\theta(y_i)\!-\!J_\bullet(y_i,\mathcal{F}^\varepsilon_\theta(y_i))), \delta \theta)_{\mathcal{R}_\varepsilon} \\
&= ((\mathcal{V}^\varepsilon_\theta(y_i)\!-\!J_\bullet(y_i,\mathcal{F}^\varepsilon_\theta(y_i))) \partial_y \mathcal{V}^\varepsilon_\theta(y_i), \delta Y_i)_{L^2} \\
&\qquad +\left( \int^T_0 D_\theta V^\varepsilon_\theta(t,y_i(t))^\top(V^\varepsilon_\theta(t,y_i(t))-J_t(y_i,\mathcal{F}^\varepsilon_\theta(y_i)))~\mathrm{d} t, \delta \theta \right)_{\mathcal{R}_\varepsilon},
\end{align*}
\begin{align*}
E_2 =- \int^T_0 &(V^\varepsilon_\theta(t,y(t))-J_t(y,u))\\
&\left(\int^T_t (Q^\top_1 Q_1(y(s)-y_d(s)),\delta y(s))~\mathrm{d} s+(Q^\top_2 Q_2(y(T)-y^T_d),\delta y(T))_{\mathbb{R}^n} \right) ~\mathrm{d} t \\
&=-(\Phi_i Q^\top_1 Q_1(y-y_d),\delta y)_{L^2}- \Phi_i(0) (Q^\top_2 Q_2(y(T)-y^T_d),\delta y(T))_{\mathbb{R}^n},
\end{align*}
as well as
\begin{align*}
E_3&=- \int^T_0 (V^\varepsilon_\theta(t,y(t))-J_t(y,u)) \left( \beta \int^T_t (D_y F^\varepsilon_\theta(s,y_i(s))^\top F^\varepsilon_\theta(s,y_i(s)),\delta Y_i(s))_{\mathbb{R}^n}~\mathrm{d} s\right) ~\mathrm{d} t \\ &=- \beta(\Phi_i D_y \mathcal{F}^\varepsilon_\theta(y_i)^\top \mathcal{F}^\varepsilon_\theta(y_i), \delta Y_i )_{L^2},
\end{align*}
and
\begin{align*}
E_4&=- \int^T_0 (V^\varepsilon_\theta(t,y(t))-J_t(y,u)) \left( \beta \int^T_t (D_\theta F^\varepsilon_\theta(s,y_i(s))^\top F^\varepsilon_\theta(s,y_i(s)),\delta \theta)_{\mathcal{R}_\varepsilon}~\mathrm{d} s\right) ~\mathrm{d} t \\ &=- \beta \left(\int^T_0 \Phi_i(t) D_\theta F^\varepsilon_\theta(t,y_i(t))^\top F^\varepsilon_\theta(t,y_i(t))~\mathrm{d} t, \delta \theta \right)_{\mathcal{R}_\varepsilon},
\end{align*}
by means of partial integration. Finally we calculate
\begin{align*}
G'_3(\theta)(\delta \theta)= \gamma_2 (D_{yy}&\mathcal{V}^\varepsilon_\theta(y_i)(\partial_{y}\mathcal{V}^\varepsilon_\theta(y_i)-p_i), \delta Y_i)_{L^2}- \gamma_2(\partial_{y}\mathcal{V}^\varepsilon_\theta(y_i)-p_i, \delta P_i)_{L^2} \\&
+\gamma_2\left(\int^T_0 D_{y \theta} V^\varepsilon_\theta(t,y_i(t))^\top(\partial_{y}{V}^\varepsilon_\theta(t,y_i(t))-p_i(t))~\mathrm{d} t, \delta \theta \right)_{\mathcal{R}_\varepsilon}.
\end{align*}
Summarizing the previous observations, we arrive at the claimed characterization.
\end{proof}
Applying a gradient method to~\eqref{eq:learningprobfinite} requires the computation of the gradient~$\nabla \mathcal{J}_{N}(\theta)\in \mathcal{R}_\varepsilon$ which satisfies
\begin{align*}
\mathcal{J}'_N(\theta)( \delta \theta)=(\nabla \mathcal{J}_N(\theta), \delta \theta)_{\mathcal{R}_\varepsilon} \quad \forall \delta \theta \in \mathcal{R}_\varepsilon.
\end{align*}
This can be done by computing~$\mathcal{J}'_N(\theta)( e_j)$ for the canonical basis~$\{e_j\}^{N_\varepsilon}_{j=1}\subset \mathcal{R}_\varepsilon$. However, such reasoning leads to the necessity to solve ~$2\operatorname{dim}(\mathcal{R}_\varepsilon) N$ additional ODEs in order to compute the sensitivities~$Y'_i(\theta)(e_j)$ and~$P'_i(\theta)(e_j)$, respectively. Introducing suitable costate equations, this can be reduced to~$2N$ additional equation solves.
\begin{lemma} \label{lem:adjoint}
Let~$\widehat{y}_i,\widehat{y}^T_i, \widehat{p}_i$ as well as~$\delta Y_i, \delta P_i$ be defined as in Proposition~\ref{prop:direcderiv}. Then there holds
\begin{align*}
(\widehat{y}_i,\delta Y_i )_{L^2}+ (\widehat{y}^T_i,\delta Y_i (T) )_{\mathbb{R}^n}+(\widehat{p}_i,\delta P_i)_{L^2}=(D_\theta \mathcal{F}^\varepsilon_\theta(y_i)^\top(\mathbf{g}(y_i)^\top \zeta_i+\lbrack D\mathbf{g}(y_i) \kappa_i \rbrack^\top p_i), \delta \theta)_{\mathcal{R}_\varepsilon}
\end{align*}
where~$\zeta_i, \kappa_i \in W_T$ satisfy
\begin{align*}
-\dot{\zeta}_i&=\mathbf{A}(y_i,\theta)\zeta_i+ D_y \mathcal{F}^\varepsilon_\theta(y_i)^\top \mathbf{g}(y_i)^\top\zeta_i + \lbrack D_y \mathbf{A}(y_i,\theta)^\top p_i\rbrack\kappa_i+\mathbf{Q}_1^\top \mathbf{Q}_1\kappa_i+\widehat{y}_i \\
\dot{\kappa}_i&=\mathbf{A}(y_i,\theta)^\top \kappa_i+\widehat{p}_i,\\
\zeta_i(T)&= Q_2^\top Q_2 \kappa_i (T)+\widehat{y}^T_i,~\kappa_i(0)=0.
\end{align*}
\end{lemma}
\begin{proof}
For the sake of readability, we drop the subscript~$i$ in the following. By partial integration and Lemma \ref{lem:existenceparametertostate} we obtain
\begin{align*}
(\widehat{p},\mathrm{d}lta P)_{L^2}&=(\dot{\kappa}-\mathbf{A}(y,\theta)^\top \kappa,\mathrm{d}lta P)=(-\dot{\mathrm{d}lta P}-\mathbf{A}(y,\theta) \mathrm{d}lta P,\kappa)+( Q_2^\top Q_2 \mathrm{d}lta Y(T),\kappa(T))_{\mathbb{R}^n}\\ &=(\lbrack D_y\mathbf{A}(y,\theta)\mathrm{d}lta Y \rbrack p+ \mathbf{Q}_1^\top \mathbf{Q}_1 \mathrm{d}lta Y+\lbrack \partial_\theta \mathbf{A}(y,\theta) \mathrm{d}lta \theta \rbrack p,\kappa) +( \mathrm{d}lta Y(T),\zeta(T)- \hat y_i^T)_{\mathbb{R}^n}
\end{align*}
and
\begin{align*}
&(\hat y_i,\mathrm{d}lta Y )_{L^2}+ (\hat y_i^T,\mathrm{d}lta Y (T) )_{\mathbb{R}^n}\\&=(-\dot{\zeta}\!-\!\mathbf{A}(y,\theta)\zeta\!-\! D_y \mathcal{F}^\varepsilon_\theta(y)^\top \mathbf{g}(y)^\top\zeta- \lbrack D_y \mathbf{A}(y,\theta)^\top p\rbrack\kappa-\mathbf{Q}_1^\top \mathbf{Q}_1\kappa, \mathrm{d}lta Y)_{L^2}+ (\hat y^T_i,\mathrm{d}lta Y (T) )_{\mathbb{R}^n}\\
&=(\dot{\mathrm{d}lta Y}-\mathbf{A}(y,\theta)^\top\mathrm{d}lta Y- \mathbf{g}(y) D_y \mathcal{F}^\varepsilon_\theta(y) \mathrm{d}lta Y ,\zeta)_{L^2}\\
&\quad -( \lbrack D_y \mathbf{A}(y,\theta)^\top p\rbrack\kappa+\mathbf{Q}_1^\top \mathbf{Q}_1\kappa, \mathrm{d}lta Y)_{L^2} - (\mathrm{d}lta Y (T), \zeta(T) )_{\mathbb{R}^n} \\
&=(\mathbf{g}(y) D_\theta \mathcal{F}^\varepsilon_\theta(y)\mathrm{d}lta \theta,\zeta)_{L^2} -( \lbrack D_y \mathbf{A}(y,\theta)^\top p\rbrack\kappa+\mathbf{Q}_1^\top \mathbf{Q}_1\kappa, \mathrm{d}lta Y)_{L^2} - (\mathrm{d}lta Y (T), \zeta(T)- \hat y_i^T )_{\mathbb{R}^n}.
\end{align*}
Adding both equations finally yields
\begin{align*}
(\hat y_i,\mathrm{d}lta Y )_{L^2}+ (\hat y_i^T,\mathrm{d}lta Y (T) )_{\mathbb{R}^n}+(\hat p_i,\mathrm{d}lta P)_{L^2}&=(\mathbf{g}(y) D_\theta \mathcal{F}^\varepsilon_\theta(y)\mathrm{d}lta \theta,\zeta)_{L^2} +(\lbrack \partial_\theta \mathbf{A}(y,\theta) \mathrm{d}lta \theta \rbrack p,\kappa)_{L^2} \\
&=(D_\theta \mathcal{F}^\varepsilon_\theta(y)^\top(\mathbf{g}(y)^\top \zeta+\lbrack D\mathbf{g}(y) \kappa \rbrack^\top p), \mathrm{d}lta \theta)_{\mathcal{R}_\varepsilon}
\end{align*}
which ends the proof.
\end{proof}
We arrive at the following characterization of the gradient~$\nabla \mathcal{J}_N(\theta)$.
\begin{theorem} \label{thm:gradient}
Let~$y_i,p_i,\zeta_i, \kappa_i \in W_T,~\widehat{\theta}_i \in \mathcal{R}_\varepsilon$ be defined as in~Proposition~\ref{prop:direcderiv} and Lemma~\ref{lem:adjoint}. The gradient of~$\mathcal{J}_N$ at~$\theta$ is given by
\begin{align*}
\nabla \mathcal{J}_N(\theta)= \sum^N_{i=1} \omega_i \left( D_\theta \mathcal{F}^\varepsilon_\theta(y_i)^\top(\mathbf{g}(y_i)^\top \zeta_i+\lbrack D\mathbf{g}(y_i) \kappa_i \rbrack^\top p_i) +\widehat{\theta}_i\right)+ \gamma_\varepsilon \theta.
\end{align*}
\end{theorem}
\section{Numerical example}\label{sec:numericalexampl}
We finish this paper by applying the proposed learning approach to one particular instance of Problem~\eqref{def:openloopproblem}. Setting~$I=(0,T)$ and~$\Omega=(0,2\pi)$, we consider the parabolic bilinear optimal control problem
\begin{align*}
\min_{\mathcal{Y} \in L^2(I \times \Omega), u \in L^2(I;\mathbb{R}^3)} \left \lbrack \frac{1}{2} \int_I \|\mathcal{Y}(t)-\mathcal{Y}_d(t)\|^2_{L^2(\Omega)}+ \frac{\beta}{2}|u(t)|^2_{\mathbb{R}^3}~\mathrm{d}t \right \rbrack+\frac{\alpha}{2} \|\mathcal{Y}(T)-\mathcal{Y}_d(T)\|^2_{L^2(\Omega)}
\end{align*}
subject to
\begin{align} \label{eq:bilinearPDE}
\partial_t \mathcal{Y}-\bigtriangleup \mathcal{Y}+ \left( u_1 \chi_1 + u_2 \chi_2+ u_3 \chi_3 \right) \mathcal{Y}= 0,
\end{align}
as well as
\begin{align*}
\mathcal{Y}(t,x)=0 \quad \text{on}~I \times \partial \Omega,~\mathcal{Y}(0,x)= \mathcal{Y}_0(x) \quad \text{on}~\Omega.
\end{align*}
Here~$\alpha>0, \beta>0$, and~$\mathcal{Y}_d$ denotes a given desired state. The dynamics of this infinite-dimensional system can be influenced by choosing a time-dependent three-dimensional control input~$u \in L^2(I; \mathbb{R}^3) $ which acts on the subdomains~$\Omega_1=(0.5,1)$,~$\Omega_2=(2,2.5)$ and $\Omega_3=(4,4.5)$, respectively. The associated characteristic functions are denoted by~$\chi_i$,~$i=1,\dots,3$.
In order to fit this problem into the setting of the current manuscript, let~$\{\lambda_i, \varphi_i\} \in \mathbb{R}_+ \times L^2(\Omega)$ denote the first $n \in \mathbb{N}$ normalized eigenpairs of the Dirichlet Laplacian on~$\Omega$. Approximating the state dynamics~$\mathcal{Y}$ as well as the desired state by
\begin{align*}
\mathcal{Y}(t,x) \approx \sum^n_{i=1} Y_i(t) \varphi_i (x),~\mathcal{Y}_d(t,x) \approx \sum^n_{i=1} Y^i_d(t) \varphi_i (x),
\end{align*}
we end up with
\begin{align} \label{def:discreteexample}
\min_{{Y} \in L^2(I;\mathbb{R}^{10}), u \in L^2(I;\mathbb{R}^3)} \left \lbrack \frac{1}{2} \int_I |Y(t)-Y_d(t)|^2+ \frac{\beta}{2}|u(t)|^2_{\mathbb{R}^3}~\mathrm{d}t+ \frac{\alpha}{2}|Y(T)-Y_d(T)|^2_{\mathbb{R}^{10}} \right \rbrack
\end{align}
subject to
\begin{align*}
\dot{Y}(t)+ AY(t)+ \sum^3_{i=1} u_i M_i Y(t) =0,~Y(0)=Y_0.
\end{align*}
where~$(Y_0)_i= (\mathcal{Y}_0, \varphi_i)_{L^2}$,~$i=1,\dots,n$, and the symmetric matrices~$A,M_i \in \mathbb{R}^{n\times n}$ are given by
\begin{align*}
A_{jk}= \begin{cases} 0 & j \neq k \\
\lambda_j & \text{else}
\end{cases},~ (M_i)_{jk}=\int_\Omega \varphi_j \varphi_k \chi_i(x)~\mathrm{d}x, \quad i=1,2,3,~j,k= 1,\dots, n.
\end{align*}
\subsection{Learning \& validation setup}
In the following, we determine an approximate optimal feedback law for~\eqref{def:discreteexample} by applying the learning approach detailed in Section~\ref{sec:learnfeedback}. The parametrized model~$V^\varepsilon_\theta$ for the value function is given by realizations of residual networks, as described in Section~\ref{subsec:residual}, with~$L_\varepsilon=2$ layers,~$\operatorname{arch}(\theta)=(11,60,1)$ and activation function~$\sigma$ given by
\begin{align*}
\sigma(x)= \sin(x)+\cos(x).
\end{align*}
This yields a total of~$1440$ trainable parameters. We emphasize that the architecture as well as the activation function were chosen based on numerical testing. In particular, the present tests should not be mistaken as a \emph{quantitative} survey but as a \emph{proof of concept} which highlights the potential of learned feedbacks for optimal control and puts a focus on the role played by the penalty parameters~$\gamma_1$ and~$\gamma_2$.
Given a fixed reference vector~$\bar{Y}_0 $, we randomly generate a set~$\mathbf{Y}_0$ of~$130$ initial conditions by sampling uniformly from the closure of~$B_1(\bar{Y}_0)$.
Subsequently, these are split into a training set~$\mathbf{Y}^t_0$ of $N=30$ initial conditions, which is used in the learning problem~\eqref{eq:learningprobfinite} together with uniform weights~$w_j=1/N$, and a validation set~$\mathbf{Y}^v_0= \mathbf{Y}_0 \setminus \mathbf{Y}^t_0$ which we later utilize to assess the performance of the obtained feedback.
In order to obtain a candidate for the optimal network parameters~$\theta^*_\varepsilon$, a Barzilai--Borwein method~\cite{BaBo19} is applied to the learning problems \eqref{eq:learningprobfinite}, based on the reduced objective functional introduced in~\eqref{eq:reducedobj} as well as the characterization of its gradient in Theorem~\ref{thm:gradient}. For every~$Y_0 \in \mathbf{Y}^t_0$, this approach entails the computation of the state~$Y \coloneqq Y_\theta (Y_0)$ and the adjoint state~$P \coloneqq P_\theta (Y_0)$ which satisfy
\begin{align} \label{def:exampleneuralequations}
\dot{Y} (t)+ \left(A+ \sum^3_{i=1} {F}^\varepsilon_\theta(t,Y(t))_i M_i \right) Y(t) &=0,~Y(0)=Y_0 \notag \\
-\dot{P} (t) + \left( A+ \sum^3_{i=1} {F}^\varepsilon_\theta(t,Y(t))_i M_i \right) P(t) &= Y(t)-Y_d(t),~P(T)= Y(T)-Y_d(T)
\end{align}
as well as the costates~$K \coloneqq K_\theta(Y_0)$ and~$Z \coloneqq Z_\theta(Y_0)$ with
\begin{align*}
\dot{K}(t)+ \left( A+ \sum^3_{i=1} {F}^\varepsilon_\theta(t,Y(t))_i M_i \right) K(t)= \widehat{P}(t)
\end{align*}
and
\begin{multline*}
- \dot{Z} (t)+ \left( A+ \sum^3_{i=1} {F}^\varepsilon_\theta(t,Y(t))_i M_i + D_y {F}^\varepsilon_\theta(t,Y(t))^\top \begin{pmatrix}
Y(t)^\top M_1 \\
Y(t)^\top M_2 \\
Y(t)^\top M_3
\end{pmatrix} \right) Z(t) \\
=- D_y {F}^\varepsilon_\theta(t,Y(t))^\top \begin{pmatrix}
Y(t)^\top M_1 \\
Y(t)^\top M_2 \\
Y(t)^\top M_3
\end{pmatrix} Z (t) +K (t)+ \widehat{Y} (t)
\end{multline*}
equipped with the boundary conditions
\begin{align*}
K(0)=0, \quad Z(T)= \alpha K(T)+ \widehat{Y}^T
\end{align*}
where~$\widehat{Y},\widehat{Y}^T$ and~$\widehat{P}$ are defined in analogy to Proposition~\ref{prop:direcderiv}. Note that this system is not fully coupled, i.e. in practice, we first solve the nonlinear closed-loop equation using a Radau time-stepping scheme and then, successively treat the adjoint and costate equations by an implicit Euler method. This can be done in parallel for various initial conditions to achieve additional speed-up. Moreover, the adjoint state~$P$ and costate~$K$ only need to be computed if~$\gamma_2 >0$. The gradient of the reduced objective functional~$\mathcal{J}_N$ in~\eqref{eq:learningprobfinite} at an admissible~$\theta$ is then obtained as
\begin{align*}
\frac{1}{30} \sum_{Y_0 \in \mathbf{Y}^t_0} \left( \int_I D_\theta F^\varepsilon_\theta(t,Y_\theta (Y_0)(t))^\top \left( B^\theta_Y (t) Z_\theta (Y_0) (t)+ B^\theta_K(t) P_\theta (Y_0) (t) \right)~\mathrm{d}t +\widehat{\theta}(Y_0)\right),
\end{align*}
where we set
\begin{align*}
B^\theta_Y (t) \coloneqq
\begin{pmatrix}
Y_\theta (Y_0)(t)^\top M_1 \\
Y_\theta (Y_0)(t)^\top M_2 \\
Y_\theta (Y_0)(t)^\top M_3
\end{pmatrix}
, \quad
B^\theta_K(t) \coloneqq
\begin{pmatrix}
K_\theta (Y_0)(t)^\top M_1 \\
K_\theta (Y_0)(t)^\top M_2 \\
K_\theta (Y_0)(t)^\top M_3
\end{pmatrix},
\end{align*}
integration has to be understood componentwise and~$\widehat{\theta}(Y_0)$ is as in Proposition~\ref{prop:direcderiv}.
Once the network is determined, we compute the state~$Y_{\theta} (Y_0)$ and adjoint~$P_\theta (Y_0)$ for every~$Y_0 \in \mathbf{Y}_0$ from~\eqref{def:exampleneuralequations} and set~$U_\theta(Y_0)\coloneqq\mathcal{F}^\varepsilon_{\bar{\theta}}(Y_\theta(Y_0))$.
Subsequently we determine a stationary point~$(\bar{Y}(Y_0), \bar{U}(Y_0))$ of~\eqref{def:discreteexample},~$Y_0 \in \mathbf{Y}_0$, by applying a Barzilai-Borwein gradient method to its control-reduced formulation. The associated adjoint state is denoted by $\bar{P}(Y_0)$. At this point, it should be stressed that both, the open loop as well as the feedback learning problem, are nonconvex. As a consequence, we cannot ensure global optimality of the computed stationary points and, in particular, both methods might provide different results. For the present example, open loop and learned feedback controls are comparable. Moreover, for every~$Y_0 \in \mathbf{Y}_0$, we have~$J(\bar Y(Y_0), \bar U(Y_0) ) \geq J( Y_\theta (Y_0), U_\theta (Y_0) )$. In order to assess the performance of open loop and feedback controls, let~$Y^{ad}_{0}\subset \mathbf{Y}_0$ be either~$Y^{ad}_{0}=\mathbf{Y}^t_0$ or~$Y^{ad}_{0}=\mathbf{Y}^v_0$ and consider the relative difference between the averaged objective functional values:
\begin{align*}
\operatorname{Err}_{\mathcal{J}} \coloneqq \frac{\sum_{Y_0 \in Y_{ad}} J(Y_\theta (Y_0) , U_\theta (Y_0))-\sum_{Y_0 \in Y_{ad}} J(\bar Y (Y_0) ,\bar U (Y_0)) }{\sum_{Y_0 \in Y_{ad}} J(\bar Y (Y_0) ,\bar U (Y_0))}
\end{align*}
as well as the associated normalized mean squared error of~$J(Y_\theta (\cdot) , U_\theta (\cdot))$:
\begin{align*}
\operatorname{Err}_{J} \coloneqq \frac{\sum_{Y_0 \in Y_{ad}} (J(Y_\theta (Y_0) , U_\theta (Y_0))- J(\bar Y (Y_0) ,\bar U (Y_0)))^2 }{\sum_{Y_0 \in Y_{ad}} J(\bar Y (Y_0) ,\bar U (Y_0))^2}.
\end{align*}
The normalized mean-squared errors of the state, $\operatorname{Err}_{Y}$, adjoint, $\operatorname{Err}_{P}$, and of the control,~$\operatorname{Err}_{U}$, are defined analogously. Moreover, to quantify the influence of the penalty parameters~$\gamma_1$ and~$\gamma_2$, we define
\begin{align*}
\operatorname{Err}_{V} \coloneqq \frac{\sum_{Y_0 \in Y_{ad}} \int_I |V^\varepsilon_\theta (t,Y_\theta(Y_0)(t))- J_t(Y_\theta(Y_0),U_\theta(Y_0)(t))|^2 ~\mathrm{d}t }{\sum_{Y_0 \in Y_{ad}} \int_I | J_t(Y_\theta(Y_0),U_\theta(Y_0)(t))|^2 ~\mathrm{d}t}.
\end{align*}
as well as
\begin{align*}
\operatorname{Err}_{\partial V} \coloneqq \frac{\sum_{Y_0 \in Y_{ad}} \int_I |\partial_y V^\varepsilon_\theta (t,Y_\theta(Y_0)(t))- P_\theta (Y_0)(t)|^2 ~\mathrm{d}t }{\sum_{Y_0 \in Y_{ad}} \int_I | P_\theta (Y_0)(t)|^2 ~\mathrm{d}t}.
\end{align*}
For $Y^{ad}_0= Y^t_0$, these terms correspond to the relative sizes of the additional penalties in~\eqref{eq:learningprobfinite}.
Finally, we also want to compare~$V^\varepsilon_\theta$ with the optimal value function~$V^*$. Of course,~$V^*$ can neither be given analytically nor can it be computed exactly. As a remedy, we recall that if~$V^*$ is sufficiently regular and~$(\bar{Y}(Y_0), \bar{U}(Y_0) )$ is a minimizing pair of~\eqref{def:discreteexample} with adjoint state~$\bar{P}(Y_0)$, we have
\begin{align*}
V^*(t, \bar{Y}(Y_0)(t))= J_t(\bar{Y}(Y_0),\bar{U}(Y_0)) \quad \text{as well as} \quad \partial_y V^*(t, \bar{Y}(Y_0)(t))= \bar{P}(Y_0)(t)
\end{align*}
for all~$t \in I$. As a consequence, setting
\begin{align*}
d (V^*, V^\varepsilon_\theta)=\frac{\sum_{Y_0 \in Y_{ad}} \int^T_0 | V^\varepsilon_\theta(t, \bar{Y}(Y_0)(t))- J_t(\bar{Y}(Y_0),\bar{U}(Y_0)) |^2 ~\mathrm{d}t}{\sum_{Y_0 \in Y_{ad}} \int^T_0 |J_t(\bar{Y}(Y_0),\bar{U}(Y_0)) |^2 ~\mathrm{d}t}.
\end{align*}
as well as
\begin{align*}
d (\partial V^*, \partial V^\varepsilon_\theta)=\frac{\sum_{Y_0 \in Y_{ad}} \int^T_0 | \partial_y V^\varepsilon_\theta(t, \bar{Y}(Y_0)(t))- \bar{P}(Y_0)(t) |^2 ~\mathrm{d}t}{\sum_{Y_0 \in Y_{ad}} \int^T_0 |\bar{P}(Y_0)(t) |^2 ~\mathrm{d}t}.
\end{align*}
provides a suitable ``distance'' for the comparison of~$V^*$ and~$V^\varepsilon_\theta$.
\subsection{Validation results}
As a concrete example, we set~$T=2,~\beta=0.01,~\alpha=0.25 $ and~$\mathcal{Y}_d(t,x)= x^2/10$, i.e., we try to steer the system towards a parabola. Note that there is no control input~$u \in L^2(I;\mathbb{R}^3)$ such that the corresponding solution~$\mathcal{Y}$ of the PDE~\eqref{eq:bilinearPDE} satisfies~$\mathcal{Y}(t)= \mathcal{Y}_d$. The parabolic bilinear control problem is approximated using~$n=10$ eigenfunctions. All computations were carried out in Matlab 2019 on a notebook with~$32$ GB RAM and an Intel\textregistered{} Core\texttrademark~i7-10870H CPU @ 2.20\,GHz.
In order to compute an approximately optimal feedback law for this problem, we solve~\eqref{eq:learningprobfinite} for various penalty parameter configurations~$\gamma_1, \gamma_2 \in \{0,0.1,1\}$. The resulting normalized errors can be found in Table~\ref{tab:val1}, for~$Y^{ad}_0=\mathbf{Y}^t_0$, and Table~\ref{tab:val2}, for~$Y^{ad}_0=\mathbf{Y}^v_0$. Comparing their individual entries, we observe that there is (almost) no difference in performance between the training and the validation sets. This means that, while the utilized networks are rather simple and only comprise a small number of trainable parameters, the corresponding learned feedback controls generalize well to initial conditions which are not contained in the training set.
Indeed, on the one hand \emph{all} computed networks provide feedback controls which perform similarly to their open loop counterparts. This is manifested in very small averaged errors for the objective functional, i.e.~$\operatorname{Err}_{\mathcal{J}}$ and $\operatorname{Err}_{{J}}$, the states and adjoint states,~$\operatorname{Err}_{{Y}}$ and $\operatorname{Err}_{{P}}$, as well as the controls,~$\operatorname{Err}_{{U}}$. These start to (slowly) deteriorate as~$\gamma_1$ and/or~$\gamma_2$ grow. However, cf. the explanation in Section~\ref{sec:learnfeedback}, this is expected: For~$\gamma_1>0$ and/or~$\gamma_2 >0$, the learned feedback has to strike a balance between minimizing~$J(Y_\theta (\cdot), U_\theta(\cdot))$ and keeping the penalty terms small, hence the slightly larger error.
On the other hand, the picture looks different once we consider the errors associated to the approximation of the value function, i.e.,~$\operatorname{Err}_{V}$,~$\operatorname{Err}_{\partial V}$ as well as $d(V^*, V^\varepsilon_\theta)$ and $d (\partial V^*, \partial V^\varepsilon_\theta)$. Here $\gamma_1>0$ and/or~$\gamma_2 >0$ have a significant influence on $d( V^*, V^\varepsilon_\theta)$ and~$d(\partial V^*, \partial V^\varepsilon_\theta)$ while
the other normalized mean squared errors remain relatively small. Moreover, we have~$\operatorname{Err}_{V} \approx d ( V^*, V^\varepsilon_\theta) $ and~$\operatorname{Err}_{\partial V} \approx d (\partial V^*, \partial V^\varepsilon_\theta) $ on the test as well as on the validation set. Hence, large values for these terms are a reliable indicator for structural differences between~$V^\varepsilon_\theta$ and~$V^*$ and/or~$\partial_y V^\varepsilon_\theta$ and~$ \partial_y V^*$, respectively.
Now, while~$\gamma_1=\gamma_2=0$ provides a very good approximation to the open loop optimal control, it performs the worst in terms of approximating the optimal value function and its derivative. This is related to two observations. First, in this case, the learning problem~\eqref{eq:learningprobfinite} only depends on the derivative~$\partial_y V^\theta_\varepsilon$ but \emph{not} on the value function~$V^\varepsilon_\theta$. Since primitives are not unique, approximating~$V^*$ by~$V^\varepsilon_\theta$ is unlikely.
Second, due to the absence of~$V^\varepsilon_\theta$ in the problem, some of the parameters in the model are not trainable. In fact, for $\gamma_1=\gamma_2=0$, there holds~$\partial_{W_{12}} \mathcal{J}_N(\theta)=0$ for every admissible~$\theta$.
Once we increase~$\gamma_1$ and~$\gamma_2$, this is no longer the case. Hence, we observe rapid decrease for $d( V^*, V^\varepsilon_\theta)$ and $d(\partial V^*,\partial V^\varepsilon_\theta)$. Most remarkably, the improvement for both is, to some extent, already visible for~$\gamma_1>0$ and~$\gamma_2=0$. In this setting, applying the gradient method neither requires computing the adjoint state~$P$ nor the costate~$K$ which limits the cost of every gradient step to~$2N=60$ ODE solves. Quite the contrary, increasing~$\gamma_2 >0$ but keeping~$\gamma_1=0$ fixed, there is \emph{no} improvement for $d(V^*, V^\varepsilon_\theta)$. This further backs up our reasoning given for the case of~$\gamma_1=\gamma_2=0$.
Consequently, the computed results indicate that the best balance between finding an optimal control and approximating the value function is achieved by a careful choice of~$\gamma_1,\gamma_2 >0$. Moreover, they highlight two important points: First, the presented learning approach indeed allows to compute semiglobal optimal feedback laws~$F^\varepsilon_\theta$ for higher dimensional problems and, thus, to some extent, alleviates the curse of dimensionality. Second, incorporating additional terms into the learning problem penalizing the violation of the dynamic programming principles~\eqref{eq:dynamicalprog}, allows to compute a good approximation~$V^\varepsilon_\theta$ of the optimal value function on the fly. As stated initially, the present example should be understood as a proof of concept and, following these first promising results, we believe that this approach to feedback learning deserves further investigations, both, from the theoretical and the numerical side. For example, it would be interesting to explore systematic ways of choosing the penalty parameters~$\gamma_1,\gamma_2$.
However, this goes beyond the scope of the current paper and is left for future work.
\begin{table}[!htb]
\centering
\begin{tabular}{ l c c c c c }
\toprule
$\qquad \text{Penalty}$ & $\text{Err}_{\mathcal{J}}$ & $\text{Err}_{Y}$ & $\text{Err}_{P}$ & $\text{Err}_{U}$\\
\midrule
$\gamma_1= 0, \gamma_2=0$ & $ 0.15 \%$ & $0.04 \%$& $ 0.12 \%$ & $ 2.4 \%$ \\
\midrule
$\gamma_1= 0.1, \gamma_2=0.1$ & $0.36 \%$ & $0.1 \%$ & $0.24 \%$ & $5.5 \%$
\\
\midrule
$\gamma_1= 0.1, \gamma_2=0$ & $0.29 \%$ & $0.1 \%$ & $0.85 \%$ & $ 4.4 \%$
\\
\midrule
$\gamma_1= 1, \gamma_2=1$ & $ 0.64 \%$ & $ 0.25 \%$& $1 \%$ & $8.65 \%$ \\
\midrule
$\gamma_1= 0, \gamma_2=1$ & $ 0.1 \%$ & $ 0.05 \%$& $0.26 \%$ & $2.1 \%$ \\
\bottomrule
\end{tabular}
\centering
\begin{tabular}{ l c c c c c }
\toprule
$\qquad \text{Penalty}$ & $\text{Err}_{J}$ & $\text{Err}_{V}$ & $\text{Err}_{\partial V}$ & $d(V^\varepsilon_\theta;V^*)$ & $d(\partial_y V^\varepsilon_\theta;\partial_y V^*)$ \\
\midrule
$\gamma_1= 0, \gamma_2=0$ & $ 0.0003 \%$ & $ 79 \%$& $33 \%$ & $78.8 \%$ & $33.5 \%$ \\
\midrule
$\gamma_1= 0.1, \gamma_2=0.1$ & $ 0.001 \%$ & $0.03 \%$ & $7.4 \%$ & $0.03 \%$ & $7 \%$
\\
\midrule
$\gamma_1= 0.1, \gamma_2=0$ & $ 0.001 \%$ & $0.02 \%$ & $12.5 \%$ & $0.02 \%$ & $12.1 \%$
\\ \midrule
$\gamma_1= 1, \gamma_2=1$ & $ 0.005 \%$ & $ 0.007 \%$& $4.5 \%$ & $0.01 \%$ & $3.5 \%$
\\ \midrule
$\gamma_1= 0, \gamma_2=1$ & $ 0.003 \%$ & $ 88.8 \%$& $6.4 \%$ & $88.5 \%$ & $6.4 \%$\\
\bottomrule
\end{tabular}
\caption{Results on training set i.e.~$Y^{ad}_0=\mathbf{Y}^t_0$.}
\label{tab:val1}
\end{table}
\begin{table}[!htb]
\centering
\begin{tabular}{ l c c c c c }
\toprule
$\qquad \text{Penalty}$ & $\text{Err}_{\mathcal{J}}$ & $\text{Err}_{Y}$ & $\text{Err}_{P}$ & $\text{Err}_{U}$ \\
\midrule
$\gamma_1= 0, \gamma_2=0$ & $ 0.23 \%$ & $0.06 \%$& $ 0.57 \%$ & $ 4.42 \%$ \\
\midrule
$\gamma_1= 0.1, \gamma_2=0.1$ & $0.51 \%$ & $0.15 \%$ & $1.1 \%$ & $9.2 \%$
\\ \midrule
$\gamma_1= 0.1, \gamma_2=0$ & $0.47 \%$ & $0.16 \%$ & $2.8 \%$ & $8.7 \%$
\\ \midrule
$\gamma_1= 1, \gamma_2=1$ & $ 0.85 \%$ & $ 0.35 \%$ & $ 6\%$ & $13.5 \%$
\\ \midrule
$\gamma_1= 0, \gamma_2=1$ & $ 0.25 \%$ & $ 0.1 \%$ & $ 1.3\%$ & $4.8 \%$\\
\bottomrule
\end{tabular}
\centering
\begin{tabular}{ l c c c c c }
\toprule
$\qquad \text{Penalty}$ & $\text{Err}_{J}$ & $\text{Err}_{V}$ & $\text{Err}_{\partial V}$ & $d(V^\varepsilon_\theta;V^*)$ & $d( \partial_y V^\varepsilon_\theta;\partial_y V^*)$ \\
\midrule
$\gamma_1= 0, \gamma_2=0$ & $ 0.002 \%$ & $ 78.7 \%$& $33.6 \%$ & $78.6 \%$ & $33.9 \%$ \\
\midrule
$\gamma_1= 0.1, \gamma_2=0.1$ & $0.007 \%$ & $0.03 \%$ & $8.9 \%$ & $0.03 \%$ & $7.9 \%$ \\
\midrule
$\gamma_1= 0.1, \gamma_2=0$ & $0.008 \%$ & $0.02 \%$ & $15.1 \%$ & $0.02 \%$ & $13.1 \%$ \\ \midrule
$\gamma_1= 1, \gamma_2=1$ & $ 0.02 \%$ & $0.009 \%$ & $ 9.8 \%$ & $ 0.01 \%$ & $ 4.2 \%$ \\
\midrule
$\gamma_1= 0, \gamma_2=1$ & $ 0.002 \%$ & $ 88 \%$ & $ 8.6 \%$ & $ 88 \%$ & $ 7.1 \%$ \\
\bottomrule
\end{tabular}
\caption{Results on validation set i.e.~$Y^{ad}_0=\mathbf{Y}^v_0$.}
\label{tab:val2}
\end{table}
\appendix
\section{Condition \eqref{eq:kk10}} \label{app1}
Here we address condition \eqref{eq:kk10}.
Define $N_\varepsilon=\lceil \frac{2\widehat M}{\varepsilon} \rceil,\,\tilde M:=\varepsilon N_\varepsilon$ and introduce the equidistant grid $G=\{-\tilde M, (1-N_\varepsilon)\varepsilon, \dots,-\varepsilon,0,\varepsilon, \dots,(N_\varepsilon-1)\varepsilon, \tilde M\}$. Next endow the hypercube $[-\tilde M, \tilde M]^{n+1}$ with the $(n+1)$-dimensional product of the grid $G$. These grid points define
$\{Q_i\}_{i=1}^{(2N_\varepsilon)^{n+1}}$ closed subhypercubes of dimension $\varepsilon^n$ whose union covers $\bar K=[0,T] \times \bar B_{2\tilde M}(0)$.
We extend this $(n+1)$-dimensional grid by adding $k\ge \lceil\frac{1}{2}\sqrt{n}\rceil+1$ layers (again all of dimension $\varepsilon^n$) to the surfaces of the preexisting grid, resulting in $\tilde N_\varepsilon=(2N_\varepsilon +2k)^{n+1}$ hypercubes whose union covers $ [- \tilde M-k\varepsilon,\tilde M+k\varepsilon]^{n+1}$. The subhypercubes are ordered in such a manner that the interior ones $\{Q_i\}_{i=1}^{(2N_\varepsilon +2(k-1))^{n+1}}$ are assembled first and the ones with a boundary face $\{Q_i\}_{i=(2N_\varepsilon +2(k-1))^{n+1}+1}^{(2N_\varepsilon +2k)^{n+1}}$ come last. The set of indices corresponding to interior hypercubes is denoted by $\mathcal{I}$, that corresponding to boundary hypercubes by $\mathcal{F}$.
Next we introduce a staggered grid and place a node $x_i=(t_i,y_i)$ at the barycenter of each of the $Q_i,\, i=1, \dots,(2N_\varepsilon +2k)^{n+1}$. We shall use the standard mollifier of radius $r_\varepsilon$ defined by
\begin{equation*}
\psi(x)= \left\{
\begin{array}{ll}
\exp(\frac{1}{|\frac{x}{r_\varepsilon}|^2-1}), & \text{ for } |x|\le r_\varepsilon \\[1.4ex]
0, & \text{ for } |x| > r_\varepsilon,
\end{array}
\right.
\end{equation*}
where $r_\varepsilon=\varepsilon(\frac{1}{2}\sqrt{n}+.1)$. Note that by adding $.1$ in the previous expression the cube $[-\frac{\varepsilon}{2},\frac{\varepsilon}{2}]^n$ is contained in the interior of the support of $\psi$. Finally we introduce
$\psi_j(x)= \psi(x-x_j)$, for ${j\in \mathcal{I}\cup \mathcal{F}}$ and
\begin{equation*}
\varphi_j= \frac{\psi_j}{\sum_{i\in \mathcal{I}\cup \mathcal{F}} \psi_i}, \text{ for } j \in \mathcal{I}.
\end{equation*}
Let us deduce the following properties:
\begin{itemize}
\item[(i)] For each ${j\in \mathcal{I}\cup \mathcal{F}}$ we have $supp\, \psi_j=\bar K_j$ where $K_j = \{x: |x-x_j|< r_\varepsilon\}$.
\item[(ii)] By construction there exists $\mathfrak{m} $ such that $\text{card} \{j: \psi_j(x) \neq 0 \} \le \mathfrak{m}, \quad \forall x \in \mathbb{R}^{n+1}, \text{ and } \forall {j\in \mathcal{I}\cup \mathcal{F}}$, for each $\varepsilon\in (0,\varepsilon_0]$.
\item[(iii)] For each $j\in \mathcal{I}$ the denominator in the definition of $\varphi_j$ is different from zero. Hence $\varphi_j$ is well-defined
with $supp\, \varphi_j=\bar K_j$ for $j\in \mathcal{I}$ and $\varphi_j \colon\mathbb{R}^{n+1} \to [0,1]$, and it is $\mathcal{C}^\infty$ smooth.
\item[(iv)] $\bar{K} \subset \bigcup_{j\in \mathcal{I}} Q_j\subset \bigcup_{j\in \mathcal{I}} K_j$.
\item[(v)] $\text{card} \{j: \varphi_j(x) \neq 0 \} \le \mathfrak{m}, \quad \forall x \in \mathbb{R}^{n+1}, \text{ and } \forall {j\in \mathcal{I}}$, for each $\varepsilon\in (0,\varepsilon_0]$.
\item[(vi)] Due to the choice of $r_\varepsilon$ the functions $\psi$ are uniformly bounded from below on $Q_j$ for each $j$, independent of $\varepsilon\in (0,\varepsilon_0]$. Moreover due to the boundedness of $\psi$ and by the definition of $\mathfrak{m}$, there exists $\nu>0$ such that
\begin{equation*}
\sum_{i\in \mathcal{I}\cup \mathcal{F}} \psi_i(x) \ge \nu , \forall x \in Q_j, \text{ with } j \in \mathcal{I}\cup \mathcal{F},
\end{equation*}
and thus in particular $\sum_{i\in \mathcal{I}\cup \mathcal{F}} \psi_i(x) \ge \nu , \forall x \in \bar K$.
\item[(vii)] $$ supp\; \varphi_j \cap \bar K = \emptyset\; \forall j \in \mathcal{F}. $$
This is a consequence of the fact that for $j \in \mathcal{F}$ we have
$dist(x_j, \partial ([-\tilde M, \tilde M]^n))= \varepsilon[(k-1)+\frac{1}{2}]$ and thus
$dist(\partial K_j, \partial ([-\tilde M, \tilde M]^n)) \ge \varepsilon[(k-1)+\frac{1}{2}]-r_\varepsilon = \varepsilon\big(k-\frac{1}{2}(1+\sqrt{n})-.1\big)>\varepsilon (k-1 - \frac{1}{2}\sqrt{n})>0$.
\item[(viii)] $\sum_{i\in \mathcal{I}}\varphi_i =1, \; \forall x \in \bar K$. This is a consequence of $(vii)$ and the definition of $\varphi_j$.
\item[(ix)]
$\|D^{j}\varphi_i\|_{C(\bar K_i\cap \bar K)} \le\bar \mu \varepsilon^{-j}, \text{ for some } \bar \mu \text{ independent of } i\in \mathcal{I}, \; \text{ and } j\in\{1,2\}.$
\end{itemize}
Once we have verified $(ix)$, all the properties demanded in
\eqref{eq:kk10} on the partition of unity $\{\varphi_i\}_{i\in \mathcal{I}}$ subordinate to~$K_i$ will be satisfied.
In the following calculations we repeatedly use that $ \nabla \sum _{i\in \mathcal{I}}\varphi_i(x)=0$ for $x\in \bar K$. This follows from $(viii)$. A short calculation shows that for each $j\in \mathcal{I}$, each $x\in \bar K$, and $k,\ell \in \{1,\dots,n\}$
\begin{equation*}
\partial_{x_k} \varphi_j(x) = \frac{\partial_{x_k} \psi_j(x)}{\sum_{i\in \mathcal{I}} \psi_i(x)}, \quad \partial_{x_\ell} \partial_{x_k} \varphi_j(x)= \frac{\partial_{x_\ell} \partial_{x_k} \psi_j(x) \sum_{i\in \mathcal{I}} \psi_i(x) - \psi_j \sum_{i\in \mathcal{I}} \partial_{x_\ell} \partial_{x_k} \psi_i(x) }{(\sum_{i\in \mathcal{I}} \psi_i(x))^2 },
\end{equation*}
where we use that $\partial_{x_k}\sum_{i\in \mathcal{I}} \psi_i(x)=0$ for $x\in \bar K$.
To obtain the required estimates we introduce for $\eta>0$
\begin{equation*}
\psi_\eta(x)= \left\{
\begin{array}{ll}
\exp(\frac{1}{|\frac{x}{\eta}|^2-1}), & \text{ for } |x|\le \eta \\[1.4ex]
0, & \text{ for } |x| > \eta.
\end{array}
\right.
\end{equation*}
Then we have
\begin{equation*}
\partial_{x_k} \psi_\eta=- \psi_\eta \, \frac{2x_k}{\eta^2 (|\frac{x}{\eta}|^2-1)^2},
\end{equation*}
\begin{equation*}
\begin{array}l
(\partial_{x_k})^2 \psi_\eta=
\frac{2\psi_\eta}{\eta^2(|\frac{x}{\eta}|^2-1)^4}\big[\frac{2x_k^2}{\eta^2}-(|\frac{x}{\eta}|^2-1)^2 +\frac{4x_k^2}{\eta^2}(|\frac{x}{\eta}|^2-1) \big],\\[1.5ex]
\end{array}
\end{equation*}
and for $k\ne \ell$
\begin{equation*}
\partial_{x_\ell} \partial_{x_k}\psi_\eta=
\frac{2\psi_\eta\,x_\ell x_k}{\eta^2(|\frac{x}{\eta}|^2-1)^4} \big[ \frac{2}{\eta^2} + \frac{4}{\eta^2}(|\frac{x}{\eta}|^2 -1)\big]=
\frac{2\psi_\eta\, x_\ell x_k}{\eta^2(|\frac{x}{\eta}|^2-1)^4} \big[ \frac{-2}{\eta^2} + \frac{4}{\eta^4}|x|^2 \big].
\end{equation*}
Considering the behavior of $\partial_{x_k} \psi_\eta$ and $\partial_{x_\ell} \partial_{x_k}\psi_\eta$ separately on the ball $B_{\frac{\eta}{2}}(0)$ and its complement in
$B_{{\eta}}(0)$, it follows that these functions behave like $O(\frac{1}{\eta})$ and $O(\frac{1}{\eta^2})$. Applying these estimates in the
expressions for the first and second derivatives for $\varphi_j$ and using the lower bound established in $(vi)$ we obtain $(ix)$.
\section{Perturbation results} \label{app:perturbation}
Here we collect pertinent existence and stability results for dynamical systems. The constant $\widehat{M}(0)$ appearing below relates to Assumption $\textbf{A.2}$.
\begin{prop} \label{prop:s}
Let~$\mathbf{y} \in \mathcal{C}(Y_0;W_T)$,~$\mathbf{y}(y_0) \in \mathcal{Y}_{ad}$ for all~$y_0 \in Y_0$,~$\mathrm{d}lta \mathbf{v} \in \mathcal{C}(Y_0;L^2(I;\mathbb{R}^n))$, as well as~$\mathrm{d}lta \mathbf{y}_0 \in \mathcal{C}(Y_0;\mathbb{R}^n)$ be given. Moreover let~$A \colon I \times \mathbb{R}^n \to \mathbb{R}^{n \times n} $ be continuous, and denote by~$\mathbf{A} \colon L^\infty(I;\mathbb{R}^n) \to \mathcal{B}(L^2(I;\mathbb{R}^n))$ the induced Nemitsky operator i.e.
\begin{align*}
\mathbf{A}(y) \mathrm{d}lta y = A(t,y(t)) \mathrm{d}lta y(t) \quad \forall \mathrm{d}lta y \in L^2(I;\mathbb{R}^n),~y \in L^\infty(I;\mathbb{R}^n)
\end{align*}
and a.e.~$t \in I$. Then there is~$\mathrm{d}lta \mathbf{y} \in \mathcal{C}(Y_0;W_T)$ such that~
\begin{align} \label{eq:linearensemble}
\dot{\mathrm{d}lta y}= \mathbf{A}(y)\mathrm{d}lta y+\mathrm{d}lta v,~\mathrm{d}lta y (0)= \mathrm{d}lta y_0
\end{align}
for~$y\coloneqq \mathbf{y}(y_0)$,~$\mathrm{d}lta v\coloneqq \mathrm{d}lta \mathbf{v}(y_0) $,~$\mathrm{d}lta y_0 \coloneqq \mathrm{d}lta \mathbf{y}_0(y_0)$ and all~$y_0 \in Y_0$. It satisfies
\begin{align} \label{eq:aprioriindependent}
\|\mathrm{d}lta \mathbf{y}(y_0)\|_{W_T} \leq C \left( \|\mathrm{d}lta \mathbf{v}(y_0)\|_{L^2}+|\mathrm{d}lta \mathbf{y}_0(y_0)| \right)
\end{align}
for some~$C>0$ depending continuously on $\max_{(\tau,y)\in I \times \bar{B}_{2 \widehat{M}(0)} }\|A(\tau,y)\|_{\mathbb{R}^{n \times n}}$, and independent of $y_0\in Y_0$.
\end{prop}
\begin{proof}
Let~$y_0 \in Y_0$ be arbitrary but fixed. Then there is a unique solution~$\delta y \in W_T$ to~\eqref{eq:linearensemble} which satisfies
\begin{align*}
\frac{1}{2} |\delta y (t)|^2 &= \frac{1}{2} |\delta y (0)|^2+ \int^t_0 (\dot{\delta y}(s),\delta y(s)) ~\mathrm{d}s \\
&= \frac{1}{2} |\delta y_0|^2+ \int^t_0 (\delta y(s), A(s,y(s))\delta y(s))+ (\delta v(s), \delta y(s)) ~\mathrm{d}s \\
& \leq \frac{1}{2} |\delta y_0|^2+ \frac{1}{2} |\delta v|^2_{L^2}+ \frac{1}{2}\int^t_0 \left(2 \max_{(\tau,y)\in I \times \bar{B}_{2 \widehat{M}(0)} }\|A(\tau,y)\|_{\mathbb{R}^{n \times n}} +1\right) |\delta y(s)|^2~\mathrm{d}s
\end{align*}
for all~$t \in I$. Setting
\begin{align*}
L \coloneqq \left(2 \max_{(z,y)\in I \times \bar{B}_{2 \widehat{M}(0)} }\|A(z,y)\|_{\mathbb{R}^{n \times n}} +1\right),
\end{align*}
Gronwall's inequality implies that
\begin{align*}
\|\delta y\|_{L^\infty} \leq e^{TL}(|\delta y_0|+ |\delta v|_{L^2}).
\end{align*}
By~\eqref{eq:linearensemble} we further get
$
\|\dot{\delta y}\|_{L^2} \leq L (\|\delta y\|_{L^2} + \|\delta v\|_{L^2}),
$
which implies~\eqref{eq:aprioriindependent}.
Next, let~$y^k_0 \in Y_0$ denote a convergent sequence with limit~$y_0$. For abbreviation set
\begin{align*}
\delta y_k \coloneqq \delta \mathbf{y}(y^k_0),~y_k\coloneqq \mathbf{y}(y^k_0),~\delta v_k \coloneqq \delta \mathbf{v}(y^k_0),~\delta y_{0,k} \coloneqq \delta \mathbf{y}_0(y^k_0)
\end{align*}
as well as
\begin{align*}
y\coloneqq \mathbf{y}(y_0),~\delta v \coloneqq \delta \mathbf{v}(y_0),~\delta y_0 \coloneqq \delta \mathbf{y}_0(y_0).
\end{align*}
Note that~$\delta y_k$ is uniformly bounded in~$W_T$ by \eqref{eq:aprioriindependent}. Thus it admits a subsequence, denoted by the same index, with~$\delta y_k \rightharpoonup \delta y$ in~$W_T$ for some~$\delta y \in W_T$. This implies
\begin{align*}
\delta y_k (0) \rightarrow \delta y(0)~\text{in}~\mathbb{R}^n,~ \delta y_k \rightarrow \delta y~\text{in}~L^\infty(I;\mathbb{R}^n),~\dot{\delta y}_k \rightharpoonup \dot{\delta y}~\text{in}~L^2(I;\mathbb{R}^n).
\end{align*}
Moreover, due to the continuity of~$\mathbf{y}$, $\delta \mathbf{v}$ and~$\delta \mathbf{y}_0$, we get
\begin{align*}
\dot{\delta y}_k=\mathbf{A}(y_k)\delta y_k+ \delta v_k \rightarrow \mathbf{A}(y)\delta y+ \delta v~\text{in}~L^2(I;\mathbb{R}^n),~\delta y_k (0) \rightarrow \delta y_0~\text{in}~\mathbb{R}^n.
\end{align*}
Summarizing the previous observations we conclude that
\begin{align*}
\dot{\delta y}= \mathbf{A}(y)\delta y+\delta v,~\delta y(0)= \delta y_0
\end{align*}
as well as~$\delta y_k \rightarrow \delta y$ in~$W_T$,
and thus~$\delta y= \delta \mathbf{y}(y_0)$. By uniqueness of solutions to the above equation~$\delta \mathbf{y}(y^k_0)\rightarrow \delta \mathbf{y}(y_0)$ for the whole sequence in~$W_T$ follows, and therefore~$\delta \mathbf{y} \in \mathcal{C}(Y_0;W_T)$.
\end{proof}
Next we address nonlinear systems of the form:
\begin{align}
\label{eq:pertstate}
\dot{y_v}= \mathbf{f}(y_v)+ \mathbf{g}(y_v) \mathcal{F}^*(y_v)+v, \quad
y_v(0)=y_0
\end{align}
where~$v \in L^2(I ; \mathbb{R}^n)$ is a perturbation.
\begin{prop} \label{thm:existspert}
Let Assumption~\ref{ass:feedbacklaw} hold.
Then there exist an open neighbourhood~$V_1 \subset L^2(I; \mathbb{R}^n)$ of~$0$
and an open neighbourhood~$\mathbf{Y}_0$ of $Y_0$ such
that~\eqref{eq:pertstate} admits a unique solution~$y_v=\mathbf{y}^v
(y_0)\in \mathcal{Y}_{ad} $ for every pair~$(v,y_0)\in V_1 \times
\mathbf{Y}_0$. Moreover the mapping
\begin{align}
\mathbf{y}^\bullet( {\boldsymbol{\cdot}}) \colon V_1 \times \mathbf{Y}_0 \to
\mathcal{Y}_{ad} , \quad (v, y_0) \mapsto \mathbf{y}^v(y_0)
\end{align}
is continuously Fr\'echet differentiable.
\end{prop}
\begin{proof}
Define the mapping
\begin{align*}
G \colon \mathcal{Y}_{ad} \times \mathbb{R}^n \times L^2(I; \mathbb{R}^n)
\to L^2(I; \mathbb{R}^n) \times \mathbb{R}^n
\end{align*}
with
\begin{align*}
G(y,y_0,v)= \left(
\begin{array}{c}
\dot{y}-\mathbf{f}(y)-\mathbf{g}(y)\mathcal{F}^*(y)-v\\
y(0)-y_0\\
\end{array}
\right).
\end{align*}
Now fix an arbitrary~$\bar{y}_0 \in Y_0$ and, utilizing $(\mathbf{A.3})$, denote by $\bar{y}=\mathbf{y}^*(\bar{y}_0)\in \operatorname{int}
\mathcal{Y}_{ad}$ the unique solution in~$\mathcal{Y}_{ad}$ to the unperturbed closed loop system~$G(\bar{y},\bar{y}_0,0)=0$. Since~$G$ is of
class~$\mathcal{C}^1$ in a neighborhood of~$(\bar{y},\bar{y}_0,0)$ we have
\begin{align*}
D_y G(y,y_0,v)\delta y = \left(
\begin{array}{c}
\dot{\delta y}-D\mathbf{f}(y)\delta y-\lbrack D\mathbf{g}(y) \delta y \rbrack
\mathcal{F}^*(y) - \mathbf{g}(y) \partial_y \mathcal{F}^*(y)\delta y\\
\delta y(0)\\
\end{array}
\right).
\end{align*}
It is straightforward that the linearized equation
\begin{align*}
D_y G(\bar{y},\bar{y}_0,v)\delta y=\left(
\begin{array}{c}
\delta v \\ \delta y_0\\
\end{array}
\right)
\end{align*}
admits a unique solution~$\delta \bar{y}\in W_T$ for every~$\delta v \in L^2(I;\mathbb{R}^n),~\delta y_0 \in \mathbb{R}^n$. Moreover, applying Gronwall's lemma yields~$c>0$ independent of~$\bar{y},~\bar{y}_0$ with
\begin{align*}
\wnorm{\delta \bar{y}} \leq c(\|\delta v\|_{L^2(I;\mathbb{R}^n)}+|\delta y_0|), \quad \forall \delta v \in L^2(I;\mathbb{R}^n),~\delta y_0 \in \mathbb{R}^n.
\end{align*}
Thus from the implicit function theorem we get
constants~$\kappa_1=\kappa_1(\bar{y}_0)$
and~$\kappa_2=\kappa_2(\bar{y}_0)$, such that for every~$y_0\in \mathbb{R}^n$
with~$|y_0-\bar{y}_0|<\kappa_1$ and~$\|v\|_{L^2(I;\mathbb{R}^n)}< \kappa_2$ there
exists~$\mathbf{y}^v(y_0) \in \mathcal{Y}_{ad}$
with~$G(\mathbf{y}^v(y_0),y_0,v)=0$. By $(\textbf{A.1})$ it is the
unique solution to~\eqref{eq:pertstate} in~$\mathcal{Y}_{ad}$. Moreover, the mapping
\begin{align*}
\mathbf{y}^\bullet(\cdot) \colon B_{\kappa_2}(0) \times
B_{\kappa_1}(\bar{y}_0) \to \mathcal{Y}_{ad}, \quad (v,y_0) \mapsto
\mathbf{y}^v(y_0)
\end{align*}
is of class~$\mathcal{C}^1$.
Observe that repeating this argument for every~$y_0 \in Y_0$ yields an open covering of~$Y_0$ i.e.
\begin{align*}
Y_0 \subset \bigcup_{\bar{y}_0 \in Y_0}
B_{\kappa_1(\bar{y}_0)}(\bar{y}_0).
\end{align*}
Since~${Y}_0$ is compact there exists a finite set of initial
conditions~$\{\bar{y}^i_0\}^N_{i=1}\subset Y_0$, including~$0$, such that
\begin{align*}
Y_0 \subset \mathbf{Y}_0 :=\bigcup^N_{i=1}
B_{\kappa_1(\bar{y}^i_0)}(\bar{y}^i_0).
\end{align*}
Set~$V_1= \bigcap^N_{i=1} B_{\kappa_2(\bar{y}^i_0)}(0) \subset
L^2(I;\mathbb{R}^n) $. Summarizing these arguments yields the
existence of a~$\mathcal{C}^1$-mapping
\begin{align*}
\mathbf{y}^\bullet(\cdot) \colon V_1 \times \mathbf{Y}_0 \to
\mathcal{Y}_{ad}, \quad \mathbf{y}^v(y_0)~\text{ uniquely
solves}~\eqref{eq:pertstate}~\text{in}~\mathcal{Y}_{ad}.
\end{align*}
\end{proof}
We use the following consequences of the previous proposition.
\begin{coroll} \label{coroll:locallipschitzofstate}
There exists an open neighborhood~$V_2 \subset V_1 \subset L^2(I;\mathbb{R}^n)$
of~$0$ as well as~$c>0$ such that
\begin{align*}
\wnorm{\mathbf{y}^{v_1}(y_0)-\mathbf{y}^{v_2}(y_0)} \leq c
\|v_1-v_2\|_{L^2(I;\mathbb{R}^n)} \quad \forall y_0 \in Y_0,~v_1 \in V_2, v_2 \in V_2
\end{align*}
and
\begin{align*}
\wnorm{\mathbf{y}^v(y_0)} \leq M_{Y_0} +c \|v\|_{L^2(I;\mathbb{R}^n)} \quad
\forall y_0 \in Y_0,~v \in V_2,
\end{align*}
hold. Here~$M_{Y_0}$ denotes the constant from Assumption $(\mathbf{A.3})$.
\end{coroll}
\begin{proof}
The first assertion follows from the continuous differentiability of $v\to \mathbf{y}^v(y_0)$ and compactness of $Y_0$.
To verify the second we use that $\mathbf{y}^*(y_0) = \mathbf{y}^0(y_0)$ and estimate
\begin{align*}
\wnorm{\mathbf{y}^v(y_0)} &\leq \wnorm{\mathbf{y}^*(y_0)} +
\wnorm{\mathbf{y}^v(y_0)-\mathbf{y}^0(y_0)}.
\end{align*}
The claim now follows from the first inequality and $(\mathbf{A.3})$.
\end{proof}
\end{document}
|
\begin{document}
\title{Variation of the canonical height in a family of rational maps}
\author{D.~Ghioca}
\address{
Dragos Ghioca\\
Department of Mathematics\\
University of British Columbia\\
Vancouver, BC V6T 1Z2\\
Canada
}
\email{[email protected]}
\author{N.~M.~Mavraki}
\address{
Niki Myrto Mavraki\\
Department of Mathematics\\
University of British Columbia\\
Vancouver, BC V6T 1Z2\\
Canada
}
\email{[email protected]}
\begin{abstract}
Let $d\ge 2$ be an integer, let ${\mathbf c}\in \overline{\mathbb Q}(t)$ be a rational map, and let ${\mathbf f}_t(z):=\frac{z^d+t}{z}$ be a family of rational maps indexed by $t$. For each $t=\lambda\in\overline{\mathbb Q}$, we let ${\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))$ be the canonical height of ${\mathbf c}(\lambda)$ with respect to the rational map ${\mathbf f}_\lambda$; also we let ${\widehat h}_{\mathbf f}({\mathbf c})$ be the canonical height of ${\mathbf c}$ on the generic fiber of the above family of rational maps. We prove that there exists a constant $C$ depending only on ${\mathbf c}$ such that for each $\lambda\in\overline{\mathbb Q}$, $\left|{\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))-{\widehat h}_{\mathbf f}({\mathbf c})\cdot h(\lambda)\right|\le C$. This improves a result of Call and Silverman \cite{Call-Silverman} for this family of rational maps.
\end{abstract}
\thanks{2010 AMS Subject Classification: Primary 11G50; Secondary 14G17, 11G10.
The research of the first author was partially supported by an NSERC grant. The second author was partially supported by Onassis Foundation.}
\maketitle
\section{Introduction}
\label{intro}
Let $X$ be a curve defined over $\overline{\mathbb Q}$, let ${\mathcal V}\longrightarrow X$ be an algebraic family of varieties $\{{\mathcal V}_\lambda\}_{\lambda\in X}$, let $\Phi:{\mathcal V}\longrightarrow {\mathcal V}$ be an endomorphism with the property that there exists $d>1$, and there exists a divisor $\cD$ of ${\mathcal V}$ such that $\Phi^*(\cD)= d\cdot \cD$. Then for all but finitely many $\lambda\in X$, there is a well-defined canonical height ${\widehat h}_{{\mathcal V}_\lambda,\cD_\lambda,\Phi_\lambda}$ on the fiber above $\lambda$. Let $P:X\longrightarrow {\mathcal V}$ be an arbitrary section; then for each $\lambda\in X(\overline{\mathbb Q})$, we denote by $P_\lambda$ the corresponding point on ${\mathcal V}_\lambda$. Also, $P$ can be viewed as an element of $V(\overline{\mathbb Q}(X))$ and thus we denote by ${\widehat h}_{V,D,\Phi}(P)$ the canonical height of $P$ with respect to the action of $\Phi$ on the generic fiber $(V,D)$ of $({\mathcal V},\cD)$. Extending a result of Silverman \cite{Sil83} for the variation of the usual canonical height in algebraic families of abelian varieties, Call and Silverman \cite[Theorem 4.1]{Call-Silverman} proved that
\begin{equation}
\label{C-S result}
{\widehat h}_{{\mathcal V}_\lambda,\cD_\lambda,\Phi_\lambda}(P_\lambda)={\widehat h}_{V,D, \Phi}(P)\cdot h(\lambda)+o(h(\lambda)),
\end{equation}
where $h(\lambda)$ is a Weil height on $X$. In the special case ${\mathcal V}\longrightarrow X$ is an elliptic surface, Tate \cite{tate83} improved the error term of \eqref{C-S result} to $O(1)$ (where the implied constant depends on $P$ only, and it is independent of $\lambda$). Furthermore, Silverman \cite{Silverman83, Silverman-2, Silverman-3} proved that the difference of the main terms from \eqref{C-S result}, in addition to being bounded, varies quite regularly as a function
of $\lambda$, breaking up into a finite sum of well-behaved functions at finitely many places. It is natural to ask whether there are other instances when the error term of \eqref{C-S result} can be improved to $O_P(1)$.
In \cite{ingram10}, Ingram showed that when $\Phi_\lambda$ is an algebraic family of polynomials acting on the affine line, then again the error term in \eqref{C-S result} is $O(1)$ (when the parameter space $X$ is the projective line). More precisely, Ingram proved that for an arbitrary parameter curve $X$, there exists $D = D({\mathbf f}, P) \in \operatorname{Pic}(X)\otimes {\mathbb Q}$ of
degree ${\widehat h}_{{\mathbf f}}(P)$ such that $
{\widehat h}_{{\mathbf f}_\lambda}(P_\lambda) = h_D(\lambda) + O(1)$.
This result is an analogue of Tate's theorem~\cite{tate83} in the
setting of arithmetic dynamics. Using this result and applying an
observation of Lang~\cite[Chap.~5, Prop.~5.4]{Lang-diophantine}, the error term can be improved to $O(h(\lambda)^{1/2})$ and
furthermore, in the special case where $X = {\mathbb P}^1$ the error term
can be replaced by $O(1)$. In \cite{prep}, Ghioca, Hsia and Tucker showed that the error term is also uniformly bounded independent of $\lambda\in X$ (an arbitrary projective curve) when $\Phi_\lambda$ is an algebraic family of rational maps satisfying the properties:
\begin{enumerate}
\item[(a)] each $\Phi_\lambda$ is superattracting at infinity, i.e. if $\Phi_\lambda=\frac{P_\lambda}{Q_\lambda}$ for algebraic families of polynomials $P_\lambda,Q_\lambda\in \overline{\mathbb Q}[z]$, then $\deg(P_\lambda)\ge 2+\deg(Q_\lambda)$; and
\item[(b)] the resultant of $P_\lambda$ and $Q_\lambda$ is a nonzero constant.
\end{enumerate}
The condition (a) is very mild for applications; on the other hand condition (b) is restrictive. Essentially condition (b) asks that $\Phi_\lambda$ is a well-defined rational map of same degree as on the generic fiber, i.e., all fibers of $\Phi$ are \emph{good}.
Our main result is to improve the error term of \eqref{C-S result} to $O(1)$ for the algebraic family of rational maps ${\mathbf f}_t(z)=\frac{z^d+t}{z}$ where the parameter $t$ varies on the projective line. We denote by ${\widehat h}_{{\mathbf f}_\lambda}$ the canonical height associated to ${\mathbf f}_\lambda$ for each $t=\lambda\in\overline{\mathbb Q}$, and we denote by ${\widehat h}_{\mathbf f}$ the canonical height on the generic fiber (i.e., with respect to the map ${\mathbf f}_t(z):=\frac{z^d+t}{z}\in\overline{\mathbb Q}(t)(z)$).
\begin{thm}
\label{variation of canonical height}
Let ${\mathbf c}\in\overline{\mathbb Q}(t)$ be a rational map, let $d\ge 2$ be an integer, and let $\{{\mathbf f}_t\}$ be the algebraic family of rational maps given by ${\mathbf f}_t(z):=\frac{z^d+t}{z}$. Then as $t=\lambda$ varies in $\overline{\mathbb Q}$ we have
\begin{equation}
\label{formula variation}
{\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda)) = {\widehat h}_{\mathbf f}({\mathbf c})\cdot h(\lambda) + O(1),
\end{equation}
where the constant in $O(1)$ depends only on ${\mathbf c}$, and it is independent of $\lambda$.
\end{thm}
Alternatively, Theorem~\ref{variation of canonical height} yields that the function $\lambda\mapsto {\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))$ is a Weil height on ${\mathbb P}^1$ associated to the divisor ${\widehat h}_{\mathbf f}({\mathbf c})\cdot \infty\in\operatorname{Pic}({\mathbb P}^1)\otimes{\mathbb Q}$.
We note that on the fiber $\lambda=0$, the corresponding rational map $\Phi_0$ has degree $d-1$ rather than $d$ (which is the generic degree in the family $\Phi_\lambda$). So, our result is the \emph{first} example of an algebraic family of rational maps (which are neither totally ramified at infinity, nor Latt\'es maps, and also admit \emph{bad} fibers) for which the error term in \eqref{C-S result} is $O(1)$. In addition, we mention that the family ${\mathbf f}_t(z)=\frac{z^d+t}{z}$ for $t\in{\mathbb C}$ is interesting also from the complex dynamics point of view. Devaney and Morabito \cite{Devaney} proved that the Julia sets $\{J_t\}_{t\in{\mathbb C}}$ of the above maps converge to the unit disk as $t$ converges to $0$ along the rays ${\rm Arg}(t)=\frac{(2k+1)\pi}{d-1}$ for $k=0,\dots, d-1$, providing thus an example of a family of rational maps whose Julia sets have empty interior, but in the limit, these sets converge to a set with nonempty interior.
A special case of our Theorem~\ref{variation of canonical height} is when the starting point ${\mathbf c}$ is constant; in this case we can give a precise formula for the $O(1)$-constant appearing in \eqref{formula variation}.
\begin{thm}
\label{precise constant}
Let $d\ge 2$ be an integer, let $\alpha$ be an algebraic number, let $K={\mathbb Q}(\alpha)$ and let $\ell$ be the number of non-archimedean places $|\cdot |_v$ of $K$ satisfying $|\alpha|_v\notin\{ 0,1\}$. If $\{{\mathbf f}_t\}$ is the algebraic family of rational maps given by ${\mathbf f}_t(z):=\frac{z^d+t}{z}$, then
$$\left|{\widehat h}_{{\mathbf f}_\lambda}(\alpha) - {\widehat h}_{\mathbf f}(\alpha)\cdot h(\lambda)\right| < 3d\cdot (1+\ell+2h(\alpha)),$$
as $t=\lambda$ varies in $\overline{\mathbb Q}$.
\end{thm}
In particular, Theorem~\ref{precise constant} yields an effective way for determining for any given $\alpha\in\overline{\mathbb Q}$ the set of parameters $\lambda$ contained in a number field of bounded degree such that $\alpha$ is preperiodic for ${\mathbf f}_\lambda$. Indeed, if $\alpha\in\overline{\mathbb Q}$ then either $\alpha=0$ and then it is preperiodic for all ${\mathbf f}_\lambda$, or $\alpha\ne 0$ in which case generically $\alpha$ is not preperiodic and ${\widehat h}_{{\mathbf f}}(\alpha)=\frac{1}{d}$ (see Proposition~\ref{canonical height generic nonzero}). So, if $\alpha\in\overline{\mathbb Q}^*$
is preperiodic for ${\mathbf f}_\lambda$ then ${\widehat h}_{{\mathbf f}_\lambda}(\alpha)=0$ and thus, Theorem~\ref{precise constant} yields that
\begin{equation}
\label{bounded parameter}
h(\lambda)<3d^2\cdot (1+\ell+2h(\alpha)).
\end{equation}
For example, if $\alpha$ is a root of unity, then $h(\lambda)<3d^2$ for all parameters $\lambda\in\overline{\mathbb Q}$ such that $\alpha$ is preperiodic for ${\mathbf f}_\lambda$.
Besides the intrinsic interest in studying the above problem, a very interesting connection was recently discovered between the variation of the canonical height in algebraic families and the problem of unlikely intersections in algebraic dynamics (for a beautiful introduction to this area, please see the book of Zannier \cite{Zannier}). Masser and Zannier \cite{M-Z-1, M-Z-2} proved that for the family of Latt\'es maps ${\mathbf f}_\lambda(z)=\frac{(z^2-\lambda)^2}{4z(z-1)(z-\lambda)}$ there exist at most finitely many $\lambda\in\overline{\mathbb Q}$ such that both $2$ and $3$ are preperiodic for ${\mathbf f}_\lambda$. Geometrically, their result says the following: given the Legendre family of elliptic curves $E_\lambda$ given by the equation $y^2=x(x-1)(x-\lambda)$, there exist at most finitely many $\lambda\in\overline{\mathbb Q}$ such that $P_\lambda:=\left(2,\sqrt{2(2-\lambda)}\right)$ and $Q_\lambda:=\left(3, \sqrt{6(3-\lambda)}\right)$ are simultaneously torsion points for $E_\lambda$. Later Masser and Zannier \cite{M-Z-3} extended their result by proving that for any two sections $P_\lambda$ and $Q_\lambda$ on any elliptic surface $E_\lambda$, if there exist infinitely many $\lambda\in{\mathbb C}$ such that both $P_\lambda$ and $Q_\lambda$ are torsion for $E_\lambda$ then the two sections are linearly dependent over ${\mathbb Z}$. Their proof uses the recent breakthrough results of Pila and Zannier \cite{P-Z}. Moreover, Masser and Zannier exploit in a crucial way the existence of the analytic uniformization map for elliptic curves.
Motivated by a question of Zannier, Baker and DeMarco \cite{Baker-Demarco} showed that for any $a,b\in{\mathbb C}$, if there exist infinitely many $\lambda\in{\mathbb C}$ such that both $a$ and $b$ are preperiodic for ${\mathbf f}_\lambda(z)=z^d+\lambda$ (where $d\ge 2$), then $a^d=b^d$. Later their result was generalized by Ghioca, Hsia and Tucker \cite{prep-0} to arbitrary families of polynomials. The method of proof employed in both \cite{Baker-Demarco} and \cite{prep-0} uses an equidistribution statement (see \cite[Theorem~7.52]{Baker-Rumely} and \cite{Favre-Rivera-0, Favre-Rivera}) for points of small canonical height on Berkovich spaces. Later, using the powerful results of Yuan and Zhang \cite{Yuan, Yuan-Zhang} on metrized line bundles, Ghioca, Hsia and Tucker \cite{prep} proved the first results on unlikely intersections for families of rational maps and also for families of endomorphisms of higher dimensional projective spaces.
The difference between the results of \cite{Baker-Demarco, prep-0, prep} and the results of \cite{M-Z-1, M-Z-2, M-Z-3} is that for arbitrary families of polynomials there is no analytic uniformization map as in the case of the elliptic curves. Instead one needs to employ a more careful analysis of the local canonical heights associated to the family of rational maps. This led the authors of \cite{prep} to prove the error term in \eqref{C-S result} is $O(1)$ for the rational maps satisfying conditions $(a)-(b)$ listed above. Essentially, in order to use the equidistribution results of Baker-Rumely, Favre-Rivera-Letelier, and Yuan-Zhang, one needs to show that certain metrics converge uniformly and in turn this relies on showing that the local canonical heights associated to the corresponding family of rational maps vary uniformly across the various fibers of the family; this leads to improving to $O(1)$ the error term in \eqref{C-S result}. It is of great interest to see whether the results on unlikely intersections can be extended to more general families of rational maps beyond families of Latt\'es maps \cite{M-Z-1, M-Z-2, M-Z-3}, or of polynomials \cite{Baker-Demarco, prep-0}, or of rational maps with good fibers for all points in the parameter space \cite{prep}.
On the other hand, a preliminary step to ensure the strategy from \cite{Baker-Demarco, prep, prep-0} can be employed to proving new results on unlikely intersections in arithmetic dynamics is to improve to $O(1)$ the error term from \eqref{C-S result}. For example, using the exact strategy employed in \cite{prep}, the results of our paper yield that if ${\mathbf c}_1(t),{\mathbf c}_2(t)\in \overline{\mathbb Q}(t)$ have the property that there exist infinitely many $\lambda\in\overline{\mathbb Q}$ such that both ${\mathbf c}_1(\lambda)$ and ${\mathbf c}_2(\lambda)$ are preperiodic under the action of ${\mathbf f}_\lambda(z):= \frac{z^d+\lambda}{z}$, then for \emph{each} $\lambda\in\overline{\mathbb Q}$ we have that ${\mathbf c}_1(\lambda)$ is preperiodic for ${\mathbf f}_\lambda$ if and only if ${\mathbf c}_2(\lambda)$ is preperiodic for ${\mathbf f}_\lambda$. Furthermore, if in addition ${\mathbf c}_1,{\mathbf c}_2$ are constant, then the same argument as in \cite{prep} yields that for \emph{each} $\lambda\in\overline{\mathbb Q}$, we have ${\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}_1)={\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}_2)$. Finally, this condition should yield that ${\mathbf c}_1={\mathbf c}_2$; however finding the exact relation between ${\mathbf c}_1$ and ${\mathbf c}_2$ is usually difficult (see the discussion from \cite{prep-0, prep}).
In our proofs we use in an essential way the decomposition of the (canonical) height in a sum of local (canonical) heights. So, in order
to prove Theorems~\ref{variation of canonical height} and \ref{precise constant} we show first (see Proposition~\ref{each place}) that for all but finitely many places $v$, the contribution of the corresponding local height to $d^2\cdot {\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))$ matches the $v$-adic contribution to the height for the second iterate ${\mathbf f}^2_\lambda({\mathbf c}(\lambda))$. This allows us to conclude that $\left|{\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))-\frac{h({\mathbf f}_\lambda^2({\mathbf c}(\lambda)))}{d^2}\right|$ is uniformly bounded as $\lambda$ varies. Then, using that $\deg_\lambda({\mathbf f}^2_\lambda({\mathbf c}(\lambda)))={\widehat h}_{\mathbf f}({\mathbf c})\cdot d^2$, an application of the height machine finishes our proof. The main difficulty lies in proving that for each place $v$ the corresponding local contribution to $d^2\cdot {\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))$ varies from the $v$-adic contribution to $h({\mathbf f}^2_\lambda({\mathbf c}(\lambda)))$ by an amount bounded solely in terms of $v$ and of ${\mathbf c}$. In order to derive our conclusion we first prove the statement for the special case when ${\mathbf c}$ is constant. Actually, in this latter case we can prove (see Propositions~\ref{e=0} and \ref{e=0 d=2}) that $\left|{\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))-\frac{h({\mathbf f}_\lambda({\mathbf c}(\lambda)))}{d}\right|$ is uniformly bounded as $\lambda$ varies. Then for the general case of Proposition~\ref{each place}, we apply Propositions~\ref{e=0} and \ref{e=0 d=2} to the first iterate of ${\mathbf c}(\lambda)$ under ${\mathbf f}_\lambda$. For our analysis, we split the proof into $3$ cases:
\begin{enumerate}
\item[(i)] $|\lambda|_v$ is much larger than the absolute values of the coefficients of the polynomials $A(t)$ and $B(t)$ defining ${\mathbf c}(t):=\frac{A(t)}{B(t)}$.
\item[(ii)] $|\lambda|_v$ is bounded above and below by constants depending only on the absolute values of the coefficients of $A(t)$ and of $B(t)$.
\item[(iii)] $|\lambda|_v$ is very small.
\end{enumerate}
The cases (i)-(ii) are not very difficult and the same proof is likely to work for more general families of rational maps (especially if $\infty$ is a superattracting point for the rational maps ${\mathbf f}_\lambda$; note that the case $d=2$ for Theorems~\ref{variation of canonical height} and \ref{precise constant} requires a different approach). However, case (iii) is much harder, and here we use in an essential way the general form of our family of maps. It is not surprising that this is the hard case since $\lambda=0$ is the only bad fiber of the family ${\mathbf f}_\lambda$.
We do not know whether the error term of $O(1)$ can be obtained for the variation of the canonical height in more general families of rational maps. It seems that each time $\lambda$ is close to a singularity of the family (i.e., $\lambda$ is close $v$-adically to some $\lambda_0$ for which $\deg(\Phi_{\lambda_0})$ is less than the generic degree in the family) would require a different approach.
The plan of our paper is as follows. In the next section we setup the notation for our paper. Then in Section~\ref{generic fiber section} we compute the height ${\widehat h}_{\mathbf f}({\mathbf c})$ on the generic fiber of our dynamical system. We continue in Section~\ref{reductions section} with a series of reductions of our main results; we reduce Theorem~\ref{variation of canonical height} to proving Proposition~\ref{each place}. We conclude by proving Theorem~\ref{precise constant} in Section~\ref{section e=0}, and then finishing the proof of Proposition~\ref{each place} in Section~\ref{proofs}.
\emph{Acknowledgments.} We thank Joseph Silverman and Laura DeMarco for useful discussions regarding this project.
\section{Notation}
\label{notation}
\subsection{Generalities}
For a rational function $f(z)$, we denote by $f^n(z)$ its $n$-th iterate (for any $n\ge 0$, where $f^0$ is the identity map). We call a point $P$ preperiodic if its orbit under $f$ is finite.
For each real number $x$, we denote $\log^+ x:=\log\max\{1,x\}$.
\subsection{Good reduction for rational maps}
Let $K$ be a number field, let $v$ be a nonarchimedean valuation on
$K$, let $\mathfrak o_v$ be the ring of $v$-adic integers of $K$,
and let $k_v$ be the residue field at $v$. If $A,B\in K[z]$ are coprime polynomials, then $\varphi(z):=A(z)/B(z)$ has good reduction (see \cite{Silverman07}) at all places $v$ satisfying the properties:
\begin{enumerate}
\item[(1)] the coefficients of $A$ and of $B$ are in $\mathfrak o_v$;
\item[(2)] the leading coefficients of $A$ and of $B$ are units in $\mathfrak o_v$; and
\item[(3)] the resultant of the polynomials $A$ and $B$ is a unit in $\mathfrak o_v$.
\end{enumerate}
Clearly, all but finitely many places $v$ of $K$ satisfy the above conditions (1)-(3). In particular this yields that if we reduce modulo $v$ the coefficients of both $A$ and $B$, then the induced rational map $\overline{\varphi}(z):=\overline{A}(z)/\overline{B}(z)$ is a well-defined rational map defined over $k_v$ of same degree as $\varphi$.
\subsection{Absolute values}
We denote by $\Omega_{\mathbb Q}$ the set of all (inequivalent) absolute values of ${\mathbb Q}$ with the usual normalization so that the product formula holds: $\prod_{v\in\Omega_{\mathbb Q}}|x|_v=1$ for each nonzero $x\in{\mathbb Q}$. For each $v\in\Omega_{\mathbb Q}$, we fix an extension of $|\cdot |_v$ to $\overline{\mathbb Q}$.
\subsection{Heights}
\label{heights subsection}
\subsubsection{Number fields}
Let $K$ be a number field. For each $n\ge 1$, if $P:=[x_0:\cdots :x_n]\in {\mathbb P}^n(K)$ then the Weil height of $P$ is
$$h(P):=\frac{1}{[K:{\mathbb Q}]}\cdot \sum_{\sigma:K\longrightarrow\overline{\mathbb Q}}\sum_{v\in\Omega_{\mathbb Q}} \log\max\{|\sigma(x_0)|_v,\cdots ,|\sigma(x_n)|_v\},$$
where the first summation runs over all embeddings $\sigma:K\longrightarrow \overline{\mathbb Q}$.
The definition is independent of the choice of coordinates $x_i$ representing $P$ (by an application of the product formula) and it is also independent of the particular choice of number field $K$ containing the coordinates $x_i$ (by the fact that each place $v\in\Omega_{\mathbb Q}$ is defectless, as defined by \cite{Serre}). In this paper we will be concerned mainly with the height of points in ${\mathbb P}^1$; furthermore, if $x\in \overline{\mathbb Q}$, then we identify $x$ with $[x:1]\in{\mathbb P}^1$ and define its height accordingly. The basic properties for heights which we will use are: for all $x,y\in\overline{\mathbb Q}$ we have
\begin{enumerate}
\item[(1)] $h(x+y)\le h(x)+h(y)+\log(2)$,
\item[(2)] $h(xy)\le h(x)+h(y)$, and
\item[(3)] $h(1/x)=h(x)$.
\end{enumerate}
\subsubsection{Function fields}
We will also work with the height of rational functions (over $\overline{\mathbb Q}$). So, if $L$ is any field, then the Weil height of a rational function $g\in L(t)$ is defined to be its degree.
\subsection{Canonical heights}
\subsubsection{Number fields}
Let $K$ be a number field, and let $f\in K(z)$ be a rational map of degree $d\ge 2$. Following \cite{Call-Silverman} we define the canonical height of a point $x\in {\mathbb P}^1(\overline{\mathbb Q})$ as
\begin{equation}
\label{definition of canonical height}
{\widehat h}_f(x)=\lim_{n\to\infty} \frac{h(f^n(x))}{d^n}.
\end{equation}
As proved in \cite{Call-Silverman}, the difference $|h(x)-{\widehat h}_f(x)|$ is uniformly bounded for all $x\in {\mathbb P}^1(\overline{\mathbb Q})$. Also, ${\widehat h}_f(x)=0$ if and only if $x$ is a preperiodic point for $f$. If $x\in \overline{\mathbb Q}$ then we view it embedded in ${\mathbb P}^1$ as $[x:1]$ and denote by ${\widehat h}_f(x)$ its canonical height under $f$ constructed as above.
\subsubsection{Function fields}
Let $L$ be an arbitrary field, let $f\in L(t)(z)$ be a rational function of degree $d\ge 2$, and let $x\in L(t)$. Then the canonical height ${\widehat h}_f(x):={\widehat h}_f([x:1])$ is defined the same as in \eqref{definition of canonical height}.
\subsection{Canonical heights for points and rational maps as they vary in algebraic families}
\subsubsection{Number fields}
If $\lambda\in \overline{\mathbb Q}^*$,
$x=[A:B]\in {\mathbb P}^1(\overline{\mathbb Q})$ and $f_\lambda(z):=\frac{z^d+\lambda}{z}$, then we can define ${\widehat h}_{f_\lambda}(x)$ alternatively as follows. We let $A_{\lambda,[A:B],0}:=A$ and $B_{\lambda,[A:B],0}:=B$, and for each $n\ge 0$ we let
$$A_{\lambda,[A:B],n+1}:=A_{\lambda,[A:B],n}^d+\lambda\cdot B_{\lambda,[A:B],n}^d\text{ and }B_{\lambda,[A:B],n+1}:=A_{\lambda,[A:B],n}\cdot B_{\lambda,[A:B],n}^{d-1}.$$
Then $f_\lambda^n([A:B])=[A_{\lambda,[A:B],n}:B_{\lambda,[A:B],n}]$ and so,
$${\widehat h}_{f_\lambda}(x)=\lim_{n\to\infty} \frac{h([A_{\lambda,[A:B],n}:B_{\lambda,[A:B],n}])}{d^n}.$$
Also, for each place $v$, we define the local canonical height of $x=[A:B]$ with respect to $f_\lambda$ as
\begin{equation}
\label{defi local canonical height 0}
{\widehat h}_{f_\lambda,v}(x)=\lim_{n\to\infty} \frac{\log\max\left\{\left|A_{\lambda,[A:B],n}\right|_v, \left|B_{\lambda,[A:B],n}\right|_v\right\}}{d^n}.
\end{equation}
If $x\in \overline{\mathbb Q}$ we view it embedded in ${\mathbb P}^1(\overline{\mathbb Q})$ as $[x:1]$ and compute its canonical heights (both global and local) under $f_\lambda$ as above starting with $A_{\lambda,x,0}:=x$ and $B_{\lambda,x,0}:=1$.
For $x=A/B$ with $B\ne 0$, we get that
\begin{equation}
\label{conversion 0}
A_{\lambda,[A:B],n}=A_{\lambda,x,n}\cdot B^{d^n}\text{ and }B_{\lambda,[A:B],n}=B_{\lambda,x,n}\cdot B^{d^n},
\end{equation}
for all $n\ge 0$. If in addition $A\ne 0$, then $B_{\lambda,[A:B],1}=A\cdot B^{d-1}\ne 0$ and then for all $n\ge 0$ we have
\begin{equation}
\label{conversion -1}
A_{\lambda,[A:B],n+1}=A_{\lambda,f_\lambda(x),n}\cdot B_{\lambda,[A:B],1}^{d^n}\text{ and }B_{\lambda,[A:B],n+1}=B_{\lambda,f_\lambda(x),n}\cdot B_{\lambda,[A:B],1}^{d^n}
\end{equation}
and in general, if $B_{\lambda,[A:B],k_0}\ne 0$, then
\begin{equation}
\label{conversion k_0}
A_{\lambda,[A:B],n+k_0}=A_{\lambda,f_\lambda^{k_0}(x),n}\cdot B_{\lambda,[A:B],k_0}^{d^n}\text{ and }B_{\lambda,[A:B],n+k_0}=B_{\lambda,f_\lambda^{k_0}(x),n}\cdot B_{\lambda,[A:B],k_0}^{d^n}.
\end{equation}
We will be interested also in studying the variation of the canonical height of a family of starting points parametrized by a rational map (in $t$) under the family $\{{\mathbf f}_t(z)\}$ of rational maps. As before, ${\mathbf f}_t(z):=\frac{z^d+t}{z}$, and for each $t=\lambda\in\overline{\mathbb Q}$ we get a map in the above family of rational maps. When we want to emphasize the fact that each ${\mathbf f}_\lambda$ (for $\lambda\in\overline{\mathbb Q}^*$) belongs to this family of rational maps (rather than being a single rational map), we will use the boldface letter ${\mathbf f}$ instead of $f$. Also we let ${\mathbf c}(t):=\frac{{\mathbf A}(t)}{{\mathbf B}(t)}$ where ${\mathbf A},{\mathbf B}\in K[t]$ are coprime polynomials defined over a number field $K$. Again, for each $t=\lambda\in\overline{\mathbb Q}$ we get a point ${\mathbf c}(\lambda)\in {\mathbb P}^1(\overline{\mathbb Q})$.
We define ${\mathbf A}_{{\mathbf c},n}(t)\in K[t]$ and ${\mathbf B}_{{\mathbf c},n}(t)\in K[t]$ so that for each $n\ge 0$ we have ${\mathbf f}_t^n({\mathbf c}(t))=[{\mathbf A}_{{\mathbf c}, n}(t):{\mathbf B}_{{\mathbf c}, n}(t)]$. In particular, for each $t=\lambda\in\overline{\mathbb Q}$ we have ${\mathbf f}_\lambda^n({\mathbf c}(\lambda))=[{\mathbf A}_{{\mathbf c}, n}(\lambda):{\mathbf B}_{{\mathbf c}, n}(\lambda)]$.
We let ${\mathbf A}_{{\mathbf c}, 0}(t):={\mathbf A}(t)$ and ${\mathbf B}_{{\mathbf c},0}(t):={\mathbf B}(t)$. Our definition for ${\mathbf A}_{{\mathbf c},n}$ and ${\mathbf B}_{{\mathbf c},n}$ for $n= 1$ will depend on whether ${\mathbf A}(0)$ (or equivalently ${\mathbf c}(0)$) equals $0$ or not. If ${\mathbf A}(0)\ne 0$, then we define
\begin{equation}
\label{1st iterate nonzero}
{\mathbf A}_{{\mathbf c},1}(t):={\mathbf A}(t)^d+t {\mathbf B}(t)^d\text{ and }{\mathbf B}_{{\mathbf c},1}(t):={\mathbf A}(t){\mathbf B}(t)^{d-1},
\end{equation}
while if ${\mathbf c}(0)=0$, then
\begin{equation}
\label{1st iterate zero}
{\mathbf A}_{{\mathbf c},1}(t):=\frac{{\mathbf A}(t)^d+t {\mathbf B}(t)^d}{t}\text{ and }{\mathbf B}_{{\mathbf c},1}(t):=\frac{{\mathbf A}(t){\mathbf B}(t)^{d-1}}{t}.
\end{equation}
Then for each positive integer $n$ we let
\begin{equation}
\label{general iterate defi}
{\mathbf A}_{{\mathbf c},n+1}(t):={\mathbf A}_{{\mathbf c},n}(t)^d+t\cdot {\mathbf B}_{{\mathbf c}, n}(t)^d\text{ and }{\mathbf B}_{{\mathbf c}, n+1}(t):={\mathbf A}_{{\mathbf c}, n}(t)\cdot {\mathbf B}_{{\mathbf c}, n}(t)^{d-1}.
\end{equation}
Whenever it is clear from the context, we will use ${\mathbf A}_n$ and ${\mathbf B}_n$ instead of ${\mathbf A}_{{\mathbf c}, n}$ and ${\mathbf B}_{{\mathbf c}, n}$ respectively. For each $t=\lambda\in\overline{\mathbb Q}$, the canonical height of ${\mathbf c}(\lambda)$ under the action of ${\mathbf f}_\lambda$ may be computed as follows:
$${\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))=\lim_{n\to\infty}\frac{h\left([{\mathbf A}_{{\mathbf c},n}(\lambda): {\mathbf B}_{{\mathbf c},n}(\lambda)]\right)}{d^n}.$$
Also, for each place $v$, we define the local canonical height of ${\mathbf c}(\lambda)$ at $v$ as follows:
\begin{equation}
\label{defi local canonical height}
{\widehat h}_{{\mathbf f}_\lambda,v}({\mathbf c}(\lambda)) = \lim_{n\to\infty} \frac{\log\max\{\left|{\mathbf A}_{{\mathbf c},n}(\lambda)\right|_v, \left|{\mathbf B}_{{\mathbf c},n}(\lambda)\right|_v\}}{d^n}.
\end{equation}
The limit in \eqref{defi local canonical height} exists, as proven in Corollary~\ref{the limit exists}.
The following is a simple observation based on \eqref{conversion k_0}: if $\lambda\in\overline{\mathbb Q}$ and $k_0\ge 0$ are such that ${\mathbf B}_{{\mathbf c}, k_0}(\lambda)\ne 0$, then for each $n\ge 0$ we have
\begin{equation}
\label{conversion}
{\mathbf A}_{{\mathbf c}, n+k_0}(\lambda)={\mathbf B}_{{\mathbf c}, k_0}(\lambda)^{d^n}\cdot A_{\lambda, f^{k_0}_\lambda({\mathbf c}(\lambda)), n}\text{ and }{\mathbf B}_{{\mathbf c}, n+k_0}(\lambda)={\mathbf B}_{{\mathbf c}, k_0}(\lambda)^{d^n}\cdot B_{\lambda,f^{k_0}_\lambda({\mathbf c}(\lambda)), n}.
\end{equation}
\subsubsection{Function fields}
We also compute the canonical height of ${\mathbf c}(t)$ on the generic fiber of the family of rational maps ${\mathbf f}$ with respect to the action of ${\mathbf f}_t(z)=\frac{z^d+t}{z}\in {\mathbb Q}(t)(z)$ as follows
$${\widehat h}_{{\mathbf f}}({\mathbf c}):={\widehat h}_{{\mathbf f}_t}({\mathbf c}(t)):=\lim_{n\to\infty} \frac{h({\mathbf f}_t^n({\mathbf c}(t)))}{d^n} =\lim_{n\to\infty} \frac{\deg_t({\mathbf f}_t^n({\mathbf c}(t)))}{d^n}.$$
\section{Canonical height on the generic fiber}
\label{generic fiber section}
For each $n\ge 0$, the map $t\longrightarrow {\mathbf f}_t^n({\mathbf c}(t))$ is a rational map; so, $\deg({\mathbf f}^n_t({\mathbf c}(t)))$ will always denote its degree. Similarly, letting ${\mathbf f}(z):=\frac{z^d+t}{z}\in\overline{\mathbb Q}(t)(z)$ and ${\mathbf c}(t):=\frac{{\mathbf A}(t)}{{\mathbf B}(t)}$ for coprime polynomials ${\mathbf A},{\mathbf B}\in\overline{\mathbb Q}[t]$, then ${\mathbf f}^n({\mathbf c}(t))$ is a rational function for each $n\ge 0$. In this section we compute ${\widehat h}_{\mathbf f}({\mathbf c})$. It is easier to split the proof into two cases depending on whether ${\mathbf c}(0)=0$ (or equivalently ${\mathbf A}(0)=0$) or not.
\begin{prop}
\label{canonical height generic nonzero}
If ${\mathbf c}(0)\ne 0$, then
$${\widehat h}_{\mathbf f}({\mathbf c})=\frac{\deg({\mathbf f}_t({\mathbf c}(t)))}{d}= \frac{\deg({\mathbf f}^2_t({\mathbf c}(t)))}{d^2}.$$
\end{prop}
\begin{proof}
According to \eqref{1st iterate nonzero} and \eqref{general iterate defi} we have defined ${\mathbf A}_{{\mathbf c},n}(t)$ and ${\mathbf B}_{{\mathbf c},n}(t)$ in this case.
It is easy to prove that $\deg({\mathbf A}_n)>\deg({\mathbf B}_n)$ for all positive integers $n$. Indeed, if $\deg({\mathbf A})> \deg({\mathbf B})$, then an easy induction yields that $\deg({\mathbf A}_n)> \deg({\mathbf B}_n)$ for all $n\ge 0$. If $\deg({\mathbf A})\le \deg({\mathbf B})$, then $\deg({\mathbf A}_1)=1+d\cdot \deg({\mathbf B})> d\cdot \deg({\mathbf B})\ge \deg({\mathbf B}_1)$. Again an easy induction finishes the proof that $\deg({\mathbf A}_n)>\deg({\mathbf B}_n)$ for all $n\ge 1$.
In particular, we get that $\deg({\mathbf A}_n)=d^{n-1}\cdot \deg({\mathbf A}_1)$ for all $n\ge 1$. The following claim will finish our proof.
\begin{claim}
\label{A and B coprime}
For each $n\ge 0$, ${\mathbf A}_n$ and ${\mathbf B}_n$ are coprime.
\end{claim}
\begin{proof}[Proof of Claim~\ref{A and B coprime}.]
The statement is true for $n=0$ by definition. Assume now that it holds for all $n\le N$ and we'll show that ${\mathbf A}_{N+1}$ and ${\mathbf B}_{N+1}$ are coprime.
Assume there exists $\alpha\in\overline{\mathbb Q}$ such that the polynomial $t-\alpha$ divides both ${\mathbf A}_{N+1}(t)$ and ${\mathbf B}_{N+1}(t)$. First we claim that $\alpha\ne 0$. Indeed, if $t$ would divide ${\mathbf A}_{N+1}$, then it would also divide ${\mathbf A}_N$ and inductively we would get that $t\mid {\mathbf A}_0(t)={\mathbf A}(t)$, which is a contradiction since ${\mathbf A}(0)\ne 0$. So, indeed $\alpha\ne 0$. But then from the fact that both ${\mathbf A}_{N+1}(\alpha)=0={\mathbf B}_{N+1}(\alpha)$ (and $\alpha\ne 0$) we obtain from the recursive formula defining $\{{\mathbf A}_n\}_n$ and $\{{\mathbf B}_n\}_n$ that also ${\mathbf A}_N(\alpha)=0$ and ${\mathbf B}_N(\alpha)=0$. However this contradicts the assumption that ${\mathbf A}_N$ and ${\mathbf B}_N$ are coprime. Thus ${\mathbf A}_n$ and ${\mathbf B}_n$ are coprime for all $n\ge 0$.
\end{proof}
Using the definition of ${\widehat h}_{\mathbf f}({\mathbf c})$ we conclude the proof of Proposition~\ref{canonical height generic nonzero}.
\end{proof}
If ${\mathbf c}(0)=0$ (or equivalently ${\mathbf A}(0)=0$) the proof is very similar, only that this time we use \eqref{1st iterate zero} to define ${\mathbf A}_1$ and ${\mathbf B}_1$.
\begin{prop}
\label{canonical height generic zero}
If ${\mathbf c}(0)= 0$, then
$${\widehat h}_{\mathbf f}({\mathbf c})=\frac{\deg({\mathbf f}_t({\mathbf c}(t)))}{d}= \frac{\deg({\mathbf f}^2_t({\mathbf c}(t)))}{d^2}.$$
\end{prop}
\begin{proof}
Since $t\mid {\mathbf A}(t)$ we obtain that ${\mathbf A}_1,{\mathbf B}_1\in\overline{\mathbb Q}[t]$; moreover, they are coprime because ${\mathbf A}$ and ${\mathbf B}$ are coprime. Indeed, $t$ does not divide ${\mathbf B}(t)$ and so, because $t$ divides ${\mathbf A}(t)$ and $d\ge 2$, we conclude that $t$ does not divide ${\mathbf A}_1(t)$. Now, if there exists some $\alpha\in\overline{\mathbb Q}^*$ such that both ${\mathbf A}_1(\alpha)={\mathbf B}_1(\alpha)=0$, then we obtain that also both ${\mathbf A}(\alpha)={\mathbf B}(\alpha)=0$, which is a contradiction.
Using that ${\mathbf A}_1$ and ${\mathbf B}_1$ are coprime, and also that $t\nmid {\mathbf A}_1$, the same reasoning as in the proof of Claim~\ref{A and B coprime} yields that ${\mathbf A}_n$ and ${\mathbf B}_n$ are coprime for each $n\ge 1$.
Also, arguing as in the proof of Proposition~\ref{canonical height generic nonzero}, we obtain that $\deg({\mathbf A}_n)> \deg({\mathbf B}_n)$ for all $n\ge 1$. Hence, $$\deg({\mathbf f}_t^n({\mathbf c}(t)))=\deg_t({\mathbf A}_n(t))=d^{n-2}\cdot \deg({\mathbf f}_t^2({\mathbf c}(t)))= d^{n-1}\cdot \deg({\mathbf f}_t({\mathbf c}(t))),$$
as desired.
\end{proof}
\section{Reductions}
\label{reductions section}
With the above notation, Theorem~\ref{variation of canonical height} is equivalent with showing that
\begin{equation}
\label{restatement}
\lim_{n\to\infty} \frac{h([{\mathbf A}_{{\mathbf c},n}(\lambda):{\mathbf B}_{{\mathbf c},n}(\lambda)])}{d^n} = {\widehat h}_{{\mathbf f}}({\mathbf c})\cdot h(\lambda)+O_{\mathbf c}(1).
\end{equation}
In all of our arguments we assume $\lambda\ne 0$, and also that ${\mathbf A}$ and ${\mathbf B}$ are not identically equal to $0$ (where ${\mathbf c}={\mathbf A}/{\mathbf B}$ with ${\mathbf A},{\mathbf B}\in \overline{\mathbb Q}[t]$ coprime). Obviously excluding the case $\lambda=0$ does not affect the validity of Theorem~\ref{variation of canonical height} (the quantity ${\widehat h}_{{\mathbf f}_0}({\mathbf c}(0))$ can be absorbed into the $O(1)$-constant). In particular, if $\lambda\ne 0$ then the definition of ${\mathbf A}_{{\mathbf c},1}$ and ${\mathbf B}_{{\mathbf c},1}$ (when ${\mathbf c}(0)=0$) makes sense (i.e. we are allowed to divide by $\lambda$). Also, if ${\mathbf A}$ or ${\mathbf B}$ equal $0$ identically, then ${\mathbf c}(\lambda)$ is preperiodic for ${\mathbf f}_\lambda$ for all $\lambda$ and then again Theorem~\ref{variation of canonical height} holds trivially.
\begin{prop}
\label{almost all places are fine}
Let $\lambda\in\overline{\mathbb Q}^*$. Then for all but finitely many $v\in\Omega_{\mathbb Q}$, we have $\log\max\{|{\mathbf A}_{{\mathbf c},n}(\lambda)|_v, |{\mathbf B}_{{\mathbf c},n}(\lambda)|_v\}=0$ for all $n\in\bN$.
\end{prop}
\begin{proof}
First of all, for the sake of simplifying our notation (and noting that ${\mathbf c}$ and $\lambda$ are fixed in this Proposition), we let $A_n:={\mathbf A}_{{\mathbf c},n}(\lambda)$ and $B_n:={\mathbf B}_{{\mathbf c}, n}(\lambda)$.
From the definition of $A_1$ and $B_1$ we see that not both are equal to $0$ (here we use also the fact that $\lambda\ne 0$ which yields that if both $A_1$ and $B_1$ are equal to $0$ then ${\mathbf A}(\lambda)={\mathbf B}(\lambda)=0$, and this contradicts the fact that ${\mathbf A}$ and ${\mathbf B}$ are coprime).
Let $S$ be the set of all non-archimedean places $v\in\Omega_{\mathbb Q}$ such that $|\lambda|_v=1$ and also $\max\{|A_1|_v, |B_1|_v\}=1$. Since not both $A_1$ and $B_1$ equal $0$ (and also $\lambda\ne 0$), then all but finitely many non-archimedean places $v$ satisfy the above conditions.
\begin{claim}
\label{useful claim}
If $v\in S$, then $\max\{|A_n|_v, |B_n|_v\}=1$ for all $n\in\bN$.
\end{claim}
\begin{proof}[Proof of Claim~\ref{useful claim}.]
This claim follows easily by induction on $n$; the case $n=1$ follows by the definition of $S$. Since
$$\max\{|A_n|_v,|B_n|_v\}=1$$
and $|\lambda|_v= 1$
then $\max\{|A_{n+1}|_v, |B_{n+1}|_v\}\le 1$. Now, if $|A_n|_v=|B_n|_v=1$ then $|B_{n+1}|_v=1$. On the other hand, if $\max\{|A_n|_v,|B_n|_v\}=1>\min\{|A_n|_v, |B_n|_v\}$, then $|A_{n+1}|_v=1$ (because $|\lambda|_v=1$).
\end{proof}
Claim~\ref{useful claim} finishes the proof of Proposition~\ref{almost all places are fine}.
\end{proof}
We let $K$ be the finite extension of ${\mathbb Q}$ obtained by adjoining the coefficients of both ${\mathbf A}$ and ${\mathbf B}$ (we recall that ${\mathbf c}(t)={\mathbf A}(t)/{\mathbf B}(t)$). Then ${\mathbf A}_n(\lambda):={\mathbf A}_{{\mathbf c}, n}(\lambda)\text{, }{\mathbf B}_n(\lambda):={\mathbf B}_{{\mathbf c}, n}(\lambda)\in K(\lambda)$ for each $n$ and for each $\lambda$.
Proposition~\ref{almost all places are fine} allows us to invert the limit from the left-hand side of \eqref{restatement} with the following sum
$$h([{\mathbf A}_n(\lambda):{\mathbf B}_n(\lambda)])=\frac{1}{[K(\lambda):{\mathbb Q}]}\cdot \sum_{\sigma:K(\lambda)\longrightarrow \overline{\mathbb Q}}\sum_{v\in\Omega_{\mathbb Q}} \log\max\{|\sigma({\mathbf A}_n(\lambda))|_v, |\sigma({\mathbf B}_n(\lambda))|_v\},$$
because for all but finitely many places $v$, we have $\log\max\{|\sigma({\mathbf A}_n(\lambda))|_v, |\sigma({\mathbf B}_n(\lambda))|_v\}=0$. Also we note that $\sigma({\mathbf A}_{{\mathbf c},n}(\lambda))={\mathbf A}_{{\mathbf c}^\sigma,n}(\sigma(\lambda))$ and $\sigma({\mathbf B}_{{\mathbf c},n}(\lambda))={\mathbf B}_{{\mathbf c}^\sigma,n}(\sigma(\lambda))$, where ${\mathbf c}^\sigma$ is the polynomial whose coefficients are obtained by applying the homomorphism $\sigma\in\Gal(\overline{\mathbb Q}/{\mathbb Q})$ to each coefficient of ${\mathbf c}$. Using the definition of the local canonical height from \eqref{defi local canonical height}, we observe that \eqref{restatement} is equivalent with showing that
\begin{equation}
\label{restatement 2}
\frac{1}{[K(\lambda):{\mathbb Q}]} \sum_{v\in\Omega_{\mathbb Q}}\sum_{\sigma:K(\lambda)\longrightarrow \overline{\mathbb Q}} {\widehat h}_{{\mathbf f}_{\sigma(\lambda)},v}({\mathbf c}^{\sigma}(\sigma(\lambda))) = {\widehat h}_{\mathbf f}({\mathbf c}) h(\lambda)+O_{\mathbf c}(1).
\end{equation}
For each $v\in\Omega_{\mathbb Q}$, and each $n\ge 0$ we let $M_{{\mathbf c},n,v}(\lambda):=\max\{|{\mathbf A}_{{\mathbf c},n}(\lambda)|_v, |{\mathbf B}_{{\mathbf c},n}(\lambda)|_v\}$. When ${\mathbf c}$ is fixed, we will use the notation $M_{n,v}(\lambda):=M_{{\mathbf c},n,v}(\lambda)$; if $\lambda$ is fixed then we will use the notation $M_{n,v}:=M_{n,v}(\lambda)$. If $v$ is also fixed, we will use the notation $M_{n}:=M_{n,v}$.
\begin{prop}
\label{equality for almost all non-archimedean places}
Let $v\in\Omega_{\mathbb Q}$ be a non-archimedean place such that
\begin{enumerate}
\item[(i)] each coefficient of ${\mathbf A}$ and of ${\mathbf B}$ are $v$-adic integers;
\item[(ii)] the resultant of the polynomials ${\mathbf A}$ and ${\mathbf B}$, and the leading coefficients of both ${\mathbf A}$ and of ${\mathbf B}$ are $v$-adic units; and
\item[(iii)] if the constant coefficient $a_0$ of ${\mathbf A}$ is nonzero, then $a_0$ is a $v$-adic unit.
\end{enumerate}
Then for each $\lambda\in\overline{\mathbb Q}^*$ we have $\frac{\log M_{{\mathbf c},n,v}(\lambda)}{d^n} =\frac{\log M_{{\mathbf c},1,v}(\lambda)}{d}$, for all $n\ge 1$.
\end{prop}
\begin{remarks}
\label{geometric interpretation of the condition}
\begin{enumerate}
\item[(1)] Since we assumed ${\mathbf A}$ and ${\mathbf B}$ are nonzero, then conditions (i)-(iii) are satisfied by all but finitely many places $v\in\Omega_{\mathbb Q}$.
\item[(2)] Conditions (i)-(ii) of Proposition~\ref{equality for almost all non-archimedean places} yield that ${\mathbf c}(t)={\mathbf A}(t)/{\mathbf B}(t)$ has good reduction at $v$. On the other hand, if ${\mathbf A}(t)/t{\mathbf B}(t)$ has good reduction at $v$, then condition (iii) must hold.
\end{enumerate}
\end{remarks}
\begin{proof}
Let $\lambda\in\overline{\mathbb Q}^*$, let $|\cdot |:=|\cdot |_v$, let $A_n:={\mathbf A}_{{\mathbf c},n}(\lambda)$, $B_n:={\mathbf B}_{{\mathbf c},n}(\lambda)$, and $M_n:=\max\{|A_n|, |B_n|\}$.
Assume first that $|\lambda|>1$. Using conditions (i)-(ii), then $M_0=|\lambda|^{\deg({\mathbf c})}$. If ${\mathbf c}$ is nonconstant, then $M_0>1$; furthermore, for each $n\ge 1$ we have $|A_n|>|B_n|$ (because $\deg({\mathbf A}_{{\mathbf c},n}(t))>\deg({\mathbf B}_{{\mathbf c},n}(t))$ for $n\ge 1$),
and so, $M_n= M_1^{d^{n-1}}$ for all $n\ge 1$. On the other hand, if ${\mathbf c}$ is constant, then $|{\mathbf A}_1|=|\lambda|>|{\mathbf B}_1|=1$, and then again for each $n\ge 1$ we have $M_n=M_1^{d^{n-1}}$. Hence Proposition~\ref{equality for almost all non-archimedean places} holds when $|\lambda|>1$.
Assume $|\lambda|\le 1$. Then it is immediate that $M_n\le 1$ for all $n\ge 0$. On the other hand, because $v$ is a place of good reduction for ${\mathbf c}$, we get that $M_0=1$. Then, using that $|\lambda|=1$ we obtain
$$|{\mathbf A}_1(\lambda)|=|{\mathbf A}(\lambda)^d+\lambda{\mathbf B}(\lambda)^d|\text{ and }|{\mathbf B}_1(\lambda)|=|{\mathbf A}(\lambda){\mathbf B}(\lambda)^{d-1}|.$$
Then Claim~\ref{useful claim} yields that $M_n=1$ for all $n\ge 1$, and so Proposition~\ref{equality for almost all non-archimedean places} holds when $|\lambda|=1$.
Assume now that $|\lambda|<1$, then either $|{\mathbf A}(\lambda)|=1$ or $|{\mathbf A}(\lambda)|<1$. If the former holds, then first of all we note that ${\mathbf A}(0)\ne 0$ since otherwise $|{\mathbf A}(\lambda)|\le |\lambda|<1$. An easy induction yields that $|A_n|=1$ for all $n\ge 0$ (since $|B_n|\le 1$ and $|\lambda|<1$). Therefore, $M_n=1$ for all $n\ge 0$. Now if $|{\mathbf A}(\lambda)|<1$, using that $|\lambda|<1$, we obtain that $a_0=0$. Indeed, if $a_0$ were nonzero, then $|a_0|=1$ by our hypothesis (iii), and thus $|{\mathbf A}(\lambda)|=|a_0|=1$. So, indeed ${\mathbf A}(0)=0$, which yields that
\begin{equation}
\label{formula for A_1}
A_1=\frac{{\mathbf A}(\lambda)^d}{\lambda} + {\mathbf B}(\lambda)^d.
\end{equation}
On the other hand, since $v$ is a place of good reduction for ${\mathbf c}$, and $|{\mathbf A}(\lambda)|<1$ we conclude that $|{\mathbf B}(\lambda)|=1$. Thus \eqref{formula for A_1} yields that $|A_1|=1$ because $d\ge 2$ and $|{\mathbf A}(\lambda)|\le|\lambda|<1$. Because for each $n\ge 1$ we have $A_{n+1}=A_n^d+\lambda\cdot B_n^d$ and $|\lambda|<1$, while $|B_n|\le 1$, an easy induction yields that $|A_n|=1$ for all $n\ge 1$.
This concludes the proof of Proposition~\ref{equality for almost all non-archimedean places}.
\end{proof}
The following result is the key for our proof of Theorem~\ref{variation of canonical height}.
\begin{prop}
\label{each place}
Let $v\in\Omega_{\mathbb Q}$.
There exists a positive real number $C_{v,{\mathbf c}}$ depending only on $v$, and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ (but independent of $\lambda$) such that
\begin{align*}
& \left|\lim_{n\to\infty} \frac{\log\max\{|{\mathbf A}_{{\mathbf c},n}(\lambda)|_v, |{\mathbf B}_{{\mathbf c},n}(\lambda)|_v\}}{d^n} - \frac{\log\max\{|{\mathbf A}_{{\mathbf c},2}(\lambda)|_v, |{\mathbf B}_{{\mathbf c},2}(\lambda)|_v\}}{d^2}\right|\\
& \le C_{v,{\mathbf c}},
\end{align*}
for all $\lambda\in\overline{\mathbb Q}^*$ such that ${\mathbf c}(\lambda)\ne 0,\infty$.
\end{prop}
Propositions~\ref{equality for almost all non-archimedean places} and \ref{each place} yield Theorem~\ref{variation of canonical height}.
\begin{proof}[Proof of Theorem~\ref{variation of canonical height}.]
First of all we deal with the case that either ${\mathbf A}$ or ${\mathbf B}$ is the zero polynomial, i.e. ${\mathbf c}=0$ or ${\mathbf c}=\infty$ identically. In both cases, we obtain that ${\mathbf B}_{{\mathbf c},n}=0$ for all $n\ge 1$, i.e., ${\mathbf c}$ is preperiodic for ${\mathbf f}$ being always mapped to $\infty$. Then the conclusion of Theorem~\ref{variation of canonical height} holds trivially since ${\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))=0={\widehat h}_{\mathbf f}({\mathbf c})$.
Secondly, assuming that both ${\mathbf A}$ and ${\mathbf B}$ are nonzero polynomials, we deal with the values of $\lambda$ excluded from the conclusion of Proposition~\ref{each place}. Since there are finitely many $\lambda\in\overline{\mathbb Q}$ such that either $\lambda=0$ or ${\mathbf A}(\lambda)=0$ or ${\mathbf B}(\lambda)=0$ we see that the conclusion of Theorem~\ref{variation of canonical height} is not affected by these finitely many values of the parameter $\lambda$; the difference between ${\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda))$ and ${\widehat h}_{\mathbf f}({\mathbf c})\cdot h(\lambda)$ can be absorbed in $O(1)$ for those finitely many values of $\lambda$. So, from now on we assume that $\lambda\in\overline{\mathbb Q}^*$ such that ${\mathbf c}(\lambda)\ne 0,\infty$.
For each $\sigma\in\Gal(\overline{\mathbb Q}/{\mathbb Q})$ let $S_{{\mathbf c}^\sigma}$ be the finite set of places $v\in\Omega_{\mathbb Q}$ such that either $v$ is archimedean, or $v$ does not satisfy the hypothesis of Proposition~\ref{equality for almost all non-archimedean places} with respect to ${\mathbf c}^\sigma$. Let $S=\bigcup S_{{\mathbf c}^\sigma}$, and let $C$ be the maximum of all constants $C_{v,{\mathbf c}^\sigma}$ (from Proposition~\ref{each place}) over all $v\in S$ and all $\sigma\in\Gal(\overline{\mathbb Q}/{\mathbb Q})$. Thus from Propositions~\ref{equality for almost all non-archimedean places} and \ref{each place} we obtain for each $\lambda\in\overline{\mathbb Q}^*$ such that ${\mathbf A}(\lambda),{\mathbf B}(\lambda)\ne 0$ we have
\begin{align*}
& \left|\frac{h([{\mathbf A}_{{\mathbf c},2}(\lambda):{\mathbf B}_{{\mathbf c},2}(\lambda)])}{d^2} - {\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda)) \right| \\
& = \left|\frac{1}{[K(\lambda):{\mathbb Q}]}\sum_{\sigma}\sum_{v\in\Omega_{\mathbb Q}} \frac{\log\max\{|{\mathbf A}_{{\mathbf c}^\sigma, 2}(\sigma(\lambda))|_v, |{\mathbf B}_{{\mathbf c}^\sigma, 2}(\sigma(\lambda))|_v\}}{d^2} - {\widehat h}_{{\mathbf f}_{\sigma(\lambda)},v}({\mathbf c}^\sigma(\sigma(\lambda))) \right|\\
& \le \frac{1}{[K(\lambda):{\mathbb Q}]} \sum_{\sigma} \sum_{v\in S} \left| \frac{\log\max\{|{\mathbf A}_{{\mathbf c}^\sigma, 2}(\sigma(\lambda))|_v, |{\mathbf B}_{{\mathbf c}^\sigma, 2}(\sigma(\lambda))|_v\}}{d^2} - {\widehat h}_{{\mathbf f}_{\sigma(\lambda)},v}({\mathbf c}^\sigma(\sigma(\lambda))) \right|\\
& \le C\cdot |S|,
\end{align*}
where the outer sum is over all embeddings $\sigma:K(\lambda)\longrightarrow\overline{\mathbb Q}$.
Finally, since the rational map $t\mapsto g_2(t):=\frac{{\mathbf A}_{{\mathbf c},2}(t)}{{\mathbf B}_{{\mathbf c},2}(t)}$ has degree $d^2\cdot {\widehat h}_{\mathbf f}({\mathbf c})$ (see Propositions~\ref{canonical height generic nonzero} and \ref{canonical height generic zero}), \cite[Theorem~1.8]{Lang-diophantine} yields that there exists a constant $C_1$ depending only on $g_2$ (and hence only on the coefficients of ${\mathbf c}$) such that for each $\lambda\in\overline{\mathbb Q}$ we have:
\begin{equation}
\label{close to the height}
\left|\frac{h([{\mathbf A}_{{\mathbf c},2}(\lambda):{\mathbf B}_{{\mathbf c},2}(\lambda)])}{d^2} - {\widehat h}_{\mathbf f}({\mathbf c})\cdot h(\lambda)\right|\le C_1.
\end{equation}
Using inequality \eqref{close to the height} together with the inequality
$$\left|\frac{h([{\mathbf A}_{{\mathbf c},2}(\lambda):{\mathbf B}_{{\mathbf c},2}(\lambda)])}{d^2} - {\widehat h}_{{\mathbf f}_\lambda}({\mathbf c}(\lambda)) \right|\le C\cdot |S|,$$
we conclude the proof of Theorem~\ref{variation of canonical height} (note that $S$ depends only on ${\mathbf c}$).
\end{proof}
\section{The case of constant starting point}
\label{section e=0}
In this Section we complete the proof of Proposition~\ref{each place} in the case ${\mathbf c}$ is a nonzero constant, and then proceed to proving Theorem~\ref{precise constant}.
We start with several useful general results (not only for the case ${\mathbf c}$ is constant).
\begin{prop}
\label{fundamental inequality}
Let $m$ and $M$ be positive real numbers, let $d\ge 2$ and $k_0\ge 0$ be integers, and let $\{N_k\}_{k\ge 0}$ be a sequence of positive real numbers. If
$$m\le \frac{N_{k+1}}{N_k^d}\le M$$
for each $k\ge k_0$, then
$$\left|\lim_{k\to\infty}\frac{\log N_k}{d^k} - \frac{\log N_{k_0}}{d^{k_0}}\right| \le \frac{\max\{-\log(m), \log(M)\}}{d^{k_0}(d-1)}.$$
\end{prop}
\begin{proof}
We obtain that for each $k\ge k_0$ we have
$$\left|\frac{\log N_{k+1}}{d^{k+1}} - \frac{\log N_k}{d^k} \right| \le \frac{\max\{-\log(m), \log(M)\}}{d^{k+1}}.$$
The conclusion follows by adding the above inequalities for all $k\ge k_0$.
\end{proof}
We let $|\cdot |_v$ be an absolute value on $\overline{\mathbb Q}$.
As before, for each ${\mathbf c}(t)\in\overline{\mathbb Q}(t)$ and for each $t=\lambda\in\overline{\mathbb Q}$ we let $M_{{\mathbf c},n,v}(\lambda):=\max\{|{\mathbf A}_{{\mathbf c},n}(\lambda)|_v, |{\mathbf B}_{{\mathbf c},n}(\lambda)|_v\}$ for each $n\ge 0$.
\begin{prop}
\label{bounded lambda}
Consider $\lambda\in\overline{\mathbb Q}^*$ and $|\cdot|_v$ an absolute value on $\overline{\mathbb Q}$. Let $m\le 1\le M$ be positive real numbers. If $m\le |\lambda|_v\le M$, then for each $1\le n_0\le n$ we have
$$\left|\frac{\log M_{n,v}(\lambda)}{d^n} - \frac{\log M_{n_0,v}(\lambda)}{d^{n_0}}\right| \le \frac{\log(2M)-\log(m)}{d^{n_0}(d-1)} .$$
\end{prop}
Using the classical telescoping argument, we conclude that for each $\lambda\in\overline{\mathbb Q}^*$, the limit $\lim_{n\to\infty} \frac{\log M_{n,v}}{d^n}$ exists.
\begin{cor}
\label{the limit exists}
Consider $\lambda\in\overline{\mathbb Q}^*$ and $|\cdot|_v$ an absolute value on $\overline{\mathbb Q}$. Then for each $n_0\ge 1$ we have
$$\left|\lim_{n\to\infty} \frac{\log M_{n,v}(\lambda)}{d^n} - \frac{\log M_{n_0,v}(\lambda)}{d^{n_0}}\right| \le \frac{\log(2\max\{1, |\lambda|_v\})-\log(\min\{1, |\lambda|_v\})}{d^{n_0}(d-1)} .$$
\end{cor}
\begin{proof}[Proof of Proposition~\ref{bounded lambda}.]
We let $A_n:={\mathbf A}_{{\mathbf c},n}(\lambda)$, $B_n:={\mathbf B}_{{\mathbf c},n}(\lambda)$ and $M_{n,v}:=M_{n,v}(\lambda)$.
\begin{lemma}
\label{upper bound lemma}
Let $\lambda\in\overline{\mathbb Q}^*$ and let $|\cdot|_v$ be an absolute value on $\overline{\mathbb Q}$. If $|\lambda|_v\le M$, then for each $n\ge 1$, we have $M_{n+1,v}\le (M+1)\cdot M_{n,v}^d$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{upper bound lemma}.]
Since $|\lambda|_v\le M$, we have that for each $n\in\mathbb{N}$, $|A_{n+1}|_v\le (M+1)\cdot M_{n,v}^d$ and also $|B_{n+1}|_v\le M_{n,v}^d$; so
\begin{equation}
\label{upper bound recursive 0}
M_{n+1,v}\le (M+1)\cdot M_{n,v}^d,
\end{equation}
for each $n\ge 1$.
\end{proof}
Because $M\ge 1$, Lemma~\ref{upper bound lemma} yields that
\begin{equation}
\label{uper bound recursive}
M_{n+1,v}\le 2M\cdot M_{n,v}^d.
\end{equation}
The following result will finish our proof.
\begin{lemma}
\label{lower bound in terms of lambda}
If $\lambda\in\overline{\mathbb Q}^*$ and $|\cdot|_v$ is an absolute value on $\overline{\mathbb Q}$, then for each $n\ge 1$ we have $M_{n+1,v}\ge \frac{\min\{|\lambda|_v,1\}}{2\max\{|\lambda|_v,1\}}\cdot M_{n,v}^d$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lower bound in terms of lambda}.]
We let $\ell:=\min\{|\lambda|_v,1\}$ and $L:=\max\{|\lambda|_v,1\}$. Now, if
$$\left(\frac{2L}{\ell}\right)^{\frac{1}{d}}\cdot |B_n|_v \ge |A_n|_v\ge \left(\frac{\ell}{2L}\right)^{\frac{1}{d}}\cdot |B_n|_v,$$
then $M_{n+1,v}\ge |B_{n+1}|_v\ge (\ell/2L)^{(d-1)/d}\cdot M_{n,v}^d$ (note that $\ell<2L$). On the other hand, if
$$\text{either }\left|\frac{A_n}{B_n}\right|_v> \left(\frac{2L}{\ell}\right)^{\frac{1}{d}} \text{ or } \left|\frac{A_n}{B_n}\right|_v< \left(\frac{\ell}{2L}\right)^{\frac{1}{d}}$$
then $M_{n+1,v}\ge |A_{n+1}|_v>(\ell/2L)\cdot M_{n,v}^d$. Indeed, if $|A_n/B_n|_v>(2L/\ell)^{1/d}>1$ then
$$|A_{n+1}|_v> |A_n|_v^d\cdot \left(1-|\lambda|_v\cdot\frac{\ell}{2L}\right)\ge M_{n,v}^d\cdot \left(1-\frac{\ell}{2}\right)\ge \frac{\ell}{2}\cdot M_{n,v}^d\ge \frac{\ell}{2L}\cdot M_{n,v}^d.$$
Similarly, if $|A_n/B_n|_v<(\ell/2L)^{1/d}<1$ then
$$|A_{n+1}|_v> |B_n|_v^d\cdot \left(|\lambda|_v - \frac{\ell}{2L}\right)\ge M_{n,v}^d\cdot \left(\frac{\ell}{L}-\frac{\ell}{2L}\right)=\frac{\ell}{2L}\cdot M_{n,v}^d.$$
In conclusion, we get $\frac{\ell}{2L}\cdot M_{n,v}^d \le M_{n+1,v}$ for all $n$.
\end{proof}
Lemmas \ref{upper bound lemma} and \ref{lower bound in terms of lambda}, and Proposition~\ref{fundamental inequality} finish the proof of Proposition~\ref{bounded lambda}.
\end{proof}
The next result shows that Proposition~\ref{each place} holds when ${\mathbf c}$ is a constant $\alpha$, and moreover $|\alpha|_v$ is large compared to $|\lambda|_v$. In addition, this result holds for $d>2$; the case $d=2$ will be handled later in Lemma~\ref{d=2 large lambda 2}.
\begin{prop}
\label{large lambda archimedean}
Assume $d\ge 3$. Let $M\ge 1$ be a real number, let $|\cdot|_v$ be an absolute value on $\overline{\mathbb Q}$, let $\lambda,\alpha\in\overline{\mathbb Q}$, let $A_n:=A_{\lambda,\alpha,n}$, $B_n:=B_{\lambda,\alpha,n}$ and $M_{n,v}:=\max\{|A_n|_v, |B_n|_v\}$ for $n\ge 0$. Let $n_0$ be a nonnegative integer. If $|\alpha|_v\ge |\lambda|_v/M\ge 2M$ then for $0\le n_0\le n$ we have
$$\left|\frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right| \le \frac{\log(2)}{d^{n_0}(d-1)}.$$
\end{prop}
In particular, since we know that for each given $\lambda$, the limit $\lambdaim_{n\to\infty} \mathfrak rac{\lambdaog M_{n,v}}{d^n}$ exists, we conclude that
$$\lambdaeft|\lambdaim_{n\to\infty}\mathfrak rac{\lambdaog M_{n,v}}{d^n} - \mathfrak rac{\lambdaog M_{n_0,v}}{d^{n_0}}\right| \lambdae \mathfrak rac{\lambdaog(2)}{d^{n_0}(d-1)}.$$
\begin{proof}[Proof of Proposition~\ref{large lambda archimedean}.]
We prove by induction on $n$ the following key result.
\begin{lemma}
\label{growth for large lambda e=0}
For each $n\ge 0$, we have $|A_n|_v\ge \frac{|\lambda|_v}{M}\cdot |B_n|_v$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{growth for large lambda e=0}.]
Set $|\cdot|:=|\cdot|_v$. The case $n=0$ is obvious since $A_0=\alpha$ and $B_0=1$.
Now assume $|A_n|\ge \frac{|\lambda|}{M}\cdot |B_n|$ and we prove the statement for $n+1$. Indeed, using that $|\lambda|\ge 2M^2$ and $d\ge 3$ we obtain
\begin{align*}
& |A_{n+1}|-\frac{|\lambda|}{M}\cdot |B_{n+1}|\\
& \ge |A_n|^d - |\lambda|\cdot |B_n|^d - \frac{|\lambda|}{M}\cdot |A_n|\cdot |B_n|^{d-1}\\
& \ge |A_n|^d\cdot \left(1-|\lambda|\cdot \frac{|B_n|^d}{|A_n|^d} - |\lambda|\cdot \frac{|B_n|^{d-1}}{|A_n|^{d-1}}\right)\\
& \ge |A_n|^d \cdot \left( 1- M^d\cdot |\lambda|^{1-d} - M^{d-1}|\lambda|^{2-d}\right)\\
& \ge |A_n|^d \cdot \left( 1- M^{2-d}\cdot 2^{1-d}-M^{3-d}\cdot 2^{2-d}\right)\\
& \ge |A_n|^d\cdot \left(1-2^{-2}-2^{-1}\right)\\
& \ge 0,
\end{align*}
as desired.
\end{proof}
Lemma~\ref{growth for large lambda e=0} yields that $M_{n,v}=|A_n|_v$ for each $n$ (using that $|\lambda|_v/M\ge 2M>1$). Furthermore, Lemma~\ref{growth for large lambda e=0} yields
$$\left|M_{n+1,v}-M_{n,v}^d\right| \le|\lambda\cdot B_n^d|_v\le M_{n,v}^d\cdot M^d|\lambda|_v^{1-d}\le M_{n,v}^d\cdot M^{2-d}\cdot 2^{1-d}\le \frac{1}{4}\cdot M_{n,v}^d,$$
because $|\lambda|_v\ge 2M^2$, $M\ge 1$ and $d-1\ge 2$.
Thus for each $n\ge 1$ we have
\begin{equation}
\label{growth of iterate large lambda e=0}
\frac{3}{4}\le \frac{M_{n+1,v}}{M_{n,v}^d}\le \frac{5}{4}.
\end{equation}
Then Proposition~\ref{fundamental inequality} yields the desired conclusion.
\end{proof}
The next result yields the conclusion of Proposition~\ref{each place} for when the starting point ${\mathbf c}$ is constant equal to $\alpha$, and $d$ is larger than $2$.
\begin{prop}
\label{e=0}
Assume $d>2$. Let $\alpha,\lambda\in\overline{\mathbb{Q}}^*$, let $|\cdot |_v$ be an absolute value, and for each $n\ge 0$ let $A_n:=A_{\lambda,\alpha,n}$, $B_n:=B_{\lambda,\alpha, n}$ and $M_{n,v}:=\max\{|A_n|_v, |B_n|_v\}$. Consider $L:=\max\{|\alpha|_v, 1/|\alpha|_v\}$. Then for all $n_0\ge 1$ we have
$$\left|\lim_{n\to\infty} \frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right| \le (3d-2)\log(2L).$$
\end{prop}
\begin{proof}
We split our proof into three cases: $|\lambda|_v$ is large compared to $|\alpha|_v$; $|\lambda|_v$ and $|\alpha|_v$ are comparable, and lastly, $|\lambda|_v$ is very small. We start with the case $|\lambda|_v\gg |\alpha|_v$. Firstly, we note $L=\max\left\{|\alpha|_v, |\alpha|_v^{-1}\right\}\ge 1$.
\begin{lemma}
\label{lambda large e=0}
If $|\lambda|_v>8L^d$ then for integers $1\le n_0\le n$ we have
\begin{equation}
\label{inequality for large lambda}
\left|\frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right| \le \frac{\log(2)}{d^{n_0}(d-1)}.
\end{equation}
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lambda large e=0}.]
Since $|\lambda|_v>8L^d$, then $|\alpha|_v^{d-1}<\frac{|\lambda|_v}{2|\alpha|_v}$ and therefore
$$|f_\lambda(\alpha)|_v=\left|\alpha^{d-1}+\frac{\lambda}{\alpha}\right|_v>\frac{|\lambda|_v}{2|\alpha|_v} \ge \frac{|\lambda|_v}{2L}\ge 4L.$$
This allows us to apply Proposition~\ref{large lambda archimedean} coupled with \eqref{conversion} (with $k_0=1$) and obtain that for all $1\le n_0\le n$ we have
\begin{align*}
& \left|\frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right|\\
& = \frac{1}{d}\cdot \left| \frac{\log\max\left\{|A_{\lambda,f_\lambda(\alpha),n-1}|_v, |B_{\lambda,f_\lambda(\alpha),n-1}|_v\right\}}{d^{n-1}} - \frac{\log\max\left\{|A_{\lambda,f_\lambda(\alpha), n_0-1}|_v, |B_{\lambda,f_\lambda(\alpha),n_0-1}|_v\right\}}{d^{n_0-1}}\right|\\
& \le \frac{1}{d}\cdot \frac{\log(2)}{d^{n_0-1}(d-1)},
\end{align*}
as desired.
\end{proof}
Let $R=\frac{1}{4^dL^d}$. If $R\le |\lambda|_v\le 8L^d$, then Proposition~\ref{bounded lambda} yields that for all $1\le n_0\le n$ we have
\begin{equation}
\label{inequality for middle lambda}
\left|\frac{\log M_{n,v}}{d^n}-\frac{\log M_{n_0,v}}{d^{n_0}}\right| \le \frac{2d\log(4L)}{d^{n_0}(d-1)}\le \log(4L).
\end{equation}
So we are left to analyze the range $|\lambda|_v< R$.
\begin{lemma}
\label{no n_1}
If $|\lambda|_v<R$, then $\left| \frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right| \le (3d-2)\log(2L)$ for all integers $0\le n_0\le n$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{no n_1}.]
Firstly we note that since $|\lambda|_v<R<1$, Lemma~\ref{upper bound lemma} yields that $M_{n+1,v}\le 2\cdot M_{n,v}^d$ and arguing as in the proof of Proposition~\ref{fundamental inequality} we obtain that for all $0\le n_0\le n$ we have
\begin{equation}
\label{right hand side is fine}
\frac{\log M_{n,v}}{d^n} -\frac{\log M_{n_0,v}}{d^{n_0}} \le \frac{\log(2)}{d^{n_0}(d-1)}.
\end{equation}
Next, we will establish a lower bound for the main term from \eqref{right hand side is fine}. Since
$$|f_\lambda^0(\alpha)|_v=|\alpha|_v\ge \frac{1}{L}>\sqrt[d]{2R}>\sqrt[d]{2|\lambda|_v},$$
we conclude that the smallest integer $n_1$ (if it exists) satisfying $|f_\lambda^{n_1}(\alpha)|_v<\sqrt[d]{2|\lambda|_v}$ is positive. We will now derive a lower bound for $n_1$ (if $n_1$ exists) in terms of $L$.
We know that for all $n\in\{0,\dots, n_1-1\}$ we have $|f_\lambda^n(\alpha)|_v\ge \sqrt[d]{2|\lambda|_v}$. Hence, for each $0\le n\le n_1-1$ we have
\begin{equation}
\label{(1)}
|A_{n+1}|_v \ge |A_n|_v^d\cdot \left(1- \frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v^d}\right) \ge \frac{|A_n|_v^d}{2}.
\end{equation}
On the other hand,
\begin{equation}
\label{(2)}
\frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v} \le \frac{|f_\lambda^n(\alpha)|_v^{d-1}}{2}.
\end{equation}
So, for each $0\le n\le n_1-1$ we have
\begin{equation}
\label{(3)}
|f_\lambda^{n+1}(\alpha)|_v \ge |f_\lambda^n(\alpha)|_v^{d-1} - \frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v} \ge \frac{|f_\lambda^n(\alpha)|_v^{d-1}}{2}.
\end{equation}
Therefore, repeated applications of \eqref{(3)} yield that for $0\le n\le n_1$ we have
\begin{equation}
\label{(4)}
|f_\lambda^n(\alpha)|_v \ge \frac{|\alpha|_v^{(d-1)^n}}{2^{\frac{(d-1)^n-1}{d-2}}}\ge \frac{1}{L^{(d-1)^n}\cdot 2^{\frac{(d-1)^n-1}{d-2}}}\ge \frac{1}{(2L)^{(d-1)^n}},
\end{equation}
because $|\alpha|_v\ge 1/L$ and $d-2\ge 1$. So, if $|f_\lambda^{n_1}(\alpha)|_v<\sqrt[d]{2|\lambda|_v}$, then
$$\frac{1}{(2L)^{(d-1)^{n_1}}} < \sqrt[d]{2|\lambda|_v}.$$
Using now the fact that $\log(2)< \log(2L)\cdot (d-1)^{n_1}$ and that $d\le (d-1)^2-1$ (since $d\ge 3$) we obtain
\begin{equation}
\label{(9)}
\log\left(\frac{1}{|\lambda|_v}\right) < \log(2L)\cdot (d-1)^{n_1+2}.
\end{equation}
Moreover, inequality \eqref{(4)} yields that for each $0\le n\le n_1-1$, we have
\begin{equation}
\label{(5)}
|B_{n+1}|_v = |B_n|_v^d \cdot |f_\lambda^n(\alpha)|_v \ge |B_n|_v^d \cdot \frac{1}{(2L)^{(d-1)^n}}.
\end{equation}
Combining \eqref{(1)} and \eqref{(5)} we get $M_{n+1,v} \ge \frac{M_{n,v}^d}{(2L)^{(d-1)^n}}$, if $0\le n\le n_1-1$. So,
\begin{equation}
\label{(6)}
\frac{\log M_{n+1,v}}{d^{n+1}} \ge \frac{\log M_{n,v}}{d^n} - \log(2L)\cdot \left(\frac{d-1}{d}\right)^n.
\end{equation}
Summing up \eqref{(6)} starting from $n=n_0$ to $N-1$ for some $N\leq n_1$, and using inequality \eqref{right hand side is fine} we obtain that for $0\le n_0\le n\le n_1$ we have
\begin{equation}
\label{inequality for small lambda}
\left| \frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}} \right|\le d\log(2L).
\end{equation}
Now, for $n\ge n_1$, we use Lemma~\ref{lower bound in terms of lambda} and obtain
\begin{equation}
\label{(12)}
M_{n+1,v} \ge \frac{\min\{|\lambda|_v,1\}}{2\max\{|\lambda|_v,1\}}\cdot M_{n,v}^d= \frac{|\lambda|_v}{2}\cdot M_{n,v}^d,
\end{equation}
because $|\lambda|_v<R<1$.
Inequalities \eqref{right hand side is fine} and \eqref{(12)} yield that for all $n\ge n_0\ge n_1$, we have
\begin{equation}
\label{(13)}
\left|\frac{\log(M_{n,v})}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right| \le \log\left(\frac{2}{|\lambda|_v}\right)\cdot \sum_{k=n_0}^{n-1}\frac{1}{d^{k+1}}<\frac{2\log\left(\frac{1}{|\lambda|_v}\right)}{(d-1)\cdot d^{n_1}}.
\end{equation}
In establishing inequality \eqref{(13)} we also used the fact that $|\lambda|_v<R<1/2$ and so, $\log(2/|\lambda|_v)<2\log(1/|\lambda|_v)$.
Combining inequalities \eqref{(9)}, \eqref{inequality for small lambda} and \eqref{(13)} yields that for all $0\le n_0\le n$ we have
\begin{align*}
& \left|\frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right|\\
& < d\log(2L)+\frac{2\cdot (d-1)^{n_1+2}\log(2L)}{(d-1)\cdot d^{n_1}}\\
& < \log(2L)\cdot (d+2\cdot (d-1))\\
& \le (3d-2)\log(2L),
\end{align*}
as desired.
If on the other hand, we had that $|f_\lambda^n(\alpha)|_v\ge \sqrt[d]{2|\lambda|_v}$ for all $n\in\mathbb{N}$,
we get that equation \eqref{inequality for small lambda} holds for all $n\in\mathbb{N}$. Hence, in this case too, the Lemma follows.
\end{proof}
Lemmas~\ref{lambda large e=0} and \ref{no n_1} and inequality \eqref{inequality for middle lambda} finish our proof.
\end{proof}
For $d=2$ we need a separate argument for proving Proposition~\ref{each place} when ${\mathbf c}$ is constant.
\begin{prop}
\label{e=0 d=2}
Let $d=2$, $\alpha,\lambda\in\overline{\mathbb{Q}}^*$, let $|\cdot |_v$ be an absolute value, and for each $n\ge 0$ let $A_n:=A_{\lambda,\alpha,n}$, $B_n:=B_{\lambda,\alpha, n}$ and $M_{n,v}:=\max\{|A_n|_v, |B_n|_v\}$. Let $L:=\max\{|\alpha|_v, 1/|\alpha|_v\}$. Then for all $1\le n_0\le n$ we have
$$\left|\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right| \le 4\log(2L).$$
\end{prop}
In particular, since we know (by Corollary~\ref{the limit exists}) that the limit $\lim_{n\to\infty} \frac{\log M_{n,v}}{2^n}$ exists, we conclude that
$$\left|\lim_{n\to\infty}\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right| \le 4\log(2L).$$
\begin{proof}[Proof of Proposition~\ref{e=0 d=2}.]
We employ the same strategy as for the proof of Proposition~\ref{e=0}, but there are several technical difficulties for this case. Essentially the problem lies in the fact that $\infty$ is not a superattracting (fixed) point for ${\mathbf f}_\lambda(z)=\frac{z^2+\lambda}{z}$. So the main change is dealing with the case when $|\lambda|_v$ is large, but there are changes also when dealing with the case $|\lambda|_v$ is close to $0$.
\begin{lemma}
\label{d=2 large lambda 2}
Assume $|\lambda|_v>Q:=(2L)^4$. Then for integers $1\le n_0\le n$ we have
$$\left|\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right| <\frac{5}{2}.$$
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{d=2 large lambda 2}.]
Let $k_1$ be the smallest positive integer (if it exists) such that $|f_\lambda^{k_1}(\alpha)|_v< \sqrt{2|\lambda|_v}$. So, we know that $|f_\lambda^n(\alpha)|_v\ge \sqrt{2|\lambda|_v}$ if $1\le n\le k_1-1$. We will show that $k_1>\log_4\left(\frac{|\lambda|_v}{4L^2}\right)\ge 1$ (note that $|\lambda|_v>Q=(2L)^4$).
\begin{claim}
\label{k_1 is large}
For each positive integer $n\le \log_4\left(\frac{|\lambda|_v}{2L}\right)$ we have $|f_\lambda^n(\alpha)|_v\ge \frac{|\lambda|_v}{2^{n}L}$.
\end{claim}
\begin{proof}[Proof of Claim~\ref{k_1 is large}.]
The claim follows by induction on $n$; the case $n=1$ holds since $|\lambda|_v>(2L)^4$ and so,
$$|f_\lambda(\alpha)|_v\ge \frac{|\lambda|_v}{|\alpha|_v}-|\alpha|_v\ge \frac{|\lambda|_v}{L}-L\ge \frac{|\lambda|_v}{2L}.$$
Now, assume for $1\le n\le \log_4\left(\frac{|\lambda|_v}{2L}\right)$ we have $|f_\lambda^n(\alpha)|_v\ge \frac{|\lambda|_v}{2^{n}L}$. Then
$$|f_\lambda^{n+1}(\alpha)|_v\ge |f_\lambda^n(\alpha)|_v-\frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v} \ge \frac{|\lambda|_v}{2^nL}-2^nL\ge \frac{|\lambda|_v}{2^{n+1}L},$$
because $|\lambda|_v\ge 4^n\cdot 2L$ since $n\le \log_4\left(\frac{|\lambda|_v}{2L}\right)$. This concludes our proof.
\end{proof}
Claim~\ref{k_1 is large} yields that for each $1\le n\le \log_4\left(\frac{|\lambda|_v}{4L^2}\right)<
\log_4\left(\frac{|\lambda|_v}{2L}\right)$ we have
$$|f_\lambda^n(\alpha)|_v\ge \frac{|\lambda|_v}{2^nL}\ge \frac{|\lambda|_v}{\sqrt{\frac{|\lambda|_v}{4L^2}}\cdot L}>\sqrt{2|\lambda|_v}.$$
Hence,
\begin{equation}
\label{k_1 is indeed large}
k_1>\log_4\left(\frac{|\lambda|_v}{4L^2}\right).
\end{equation}
Now for each $1\le n\le k_1-1$ we have
\begin{equation}
\label{inequality step n 0}
\frac{|A_n|_v}{|B_n|_v}=|f_\lambda^n(\alpha)|_v\ge \sqrt{2|\lambda|_v}>1,
\end{equation}
because $|\lambda|_v>Q>2$ and so, $M_{n,v}=|A_n|_v$. Furthermore,
$$|f_\lambda^{k_1}(\alpha)|_v\ge |f_\lambda^{k_1-1}(\alpha)|_v - \frac{|\lambda|_v}{|f_\lambda^{k_1-1}(\alpha)|_v} \ge \sqrt{2|\lambda|_v} - \sqrt{\frac{|\lambda|_v}{2}}=\sqrt{\frac{|\lambda|_v}{2}}>1.$$
Hence $M_{k_1,v}=|A_{k_1}|_v$ and therefore, for each $1\le n\le k_1-1$, using \eqref{inequality step n 0} we have
$$|M_{n+1,v}-M_{n,v}^2|\le|\lambda|_v\cdot |B_n|_v^2\le \frac{|A_n|_v^2}{2}=\frac{M_{n,v}^2}{2}.$$
Hence $\frac{M_{n,v}^2}{2}\le M_{n+1,v}\le \frac{3M_{n,v}^2}{2}$, and so
\begin{equation}
\label{d=2 bounds equation}
\left|\frac{\log M_{n+1,v}}{2^{n+1}}-\frac{\log M_{n,v}}{2^n}\right|<\frac{\log(2)}{2^{n+1}},
\end{equation}
for $1\le n\le k_1-1$. The next result establishes a similar inequality to \eqref{d=2 bounds equation} which is valid for all $n\in\mathbb{N}$.
\begin{claim}
\label{general bounds for d=2}
For each $n\ge 1$ we have $\frac{1}{2|\lambda|_v}\le \frac{M_{n+1,v}}{M_{n,v}^2}\le 2|\lambda|_v$.
\end{claim}
\begin{proof}[Proof of Claim~\ref{general bounds for d=2}.]
The lower bound is an immediate consequence of Lemma~\ref{lower bound in terms of lambda} (note that $|\lambda|_v>Q>1$), while the upper bound follows from Lemma~\ref{upper bound lemma}.
\end{proof}
Using Claim~\ref{general bounds for d=2} we obtain that for all $n\ge 1$ we have
\begin{equation}
\label{general bounds equation for d=2}
\left|\frac{\log M_{n+1,v}}{2^{n+1}}-\frac{\log M_{n,v}}{2^n}\right| \le \frac{\log(2|\lambda|_v)}{2^{n+1}}.
\end{equation}
Using inequalities \eqref{k_1 is indeed large}, \eqref{d=2 bounds equation} and \eqref{general bounds equation for d=2} we obtain that for all $1\le n_0\le n$ we have
\begin{align}
\nonumber
& \left| \frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right|\\
\nonumber
& \le \sum_{n=1}^{k_1-1} \frac{\log(2)}{2^{n+1}} + \sum_{n=k_1}^\infty \frac{\log(2|\lambda|_v)}{2^{n+1}}\\
\nonumber
& \le \frac{\log(2)}{2} + \frac{\log(2|\lambda|_v)}{2^{k_1}}\\
\nonumber
& \le \frac{\log(2)}{2} + \frac{\log(2|\lambda|_v)}{\sqrt{\frac{|\lambda|_v}{4L^2}}}\\
\nonumber
& \le \frac{1}{2} + \frac{\log(2|\lambda|_v)}{\sqrt[4]{|\lambda|_v}}\text{ (because $|\lambda|_v>(2L)^4$)}\\
\label{inequality for large lambda 2}
& < \frac{5}{2}\text{ (because $|\lambda|_v>Q\ge 16$),}
\end{align}
as desired.
If on the other hand for all $n\in\mathbb{N}$, we have that $|f_\lambda^n(\alpha)|_v\ge \sqrt{2|\lambda|_v}$, we get that equation \eqref{d=2 bounds equation} holds for all $n\in\mathbb{N}$. Hence, in this case too the Lemma follows.
\end{proof}
Let $R=\frac{1}{4L^2}$. If $R\le |\lambda|_v\le Q$ then for each $n_0\ge 1$, Proposition~\ref{bounded lambda} yields
\begin{equation}
\label{inequality for middle lambda 2}
\left|\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right| \le\frac{\log(2Q)-\log(R)}{2} < \frac{7\log(2L)}{2}<4\log(2L).
\end{equation}
Next we deal with the case $|\lambda|_v$ is small.
\begin{lemma}
\label{no n_1 2}
If $|\lambda|_v<R$, then for all $1\le n_0\le n$, we have $\left|\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right| \le 3\log(2L)$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{no n_1 2}.]
The argument is similar to the proof of Lemma~\ref{d=2 large lambda 2}, only that this time we do not know that $|f_\lambda^n(\alpha)|_v>1$ (and therefore we do not know that $|A_n|_v>|B_n|_v$) because $|\lambda|_v$ is small. Also, the proof is similar to the proof of Lemma~\ref{no n_1}, but there are several changes due to the fact that $d=2$.
We note that since $|\lambda|_v<R<1$ then Lemma~\ref{upper bound lemma} yields
\begin{equation}
\label{right hand side is fine 2}
M_{n+1}\le 2M_n^2.
\end{equation}
Now, let $n_1$ be the smallest integer (if it exists) such that $|f_\lambda^{n_1}(\alpha)|_v<\sqrt{2|\lambda|_v}$. Note that $|f_\lambda^0(\alpha)|_v=|\alpha|_v\ge \frac{1}{L}\ge \sqrt{2|\lambda|_v}$ because $|\lambda|_v<R=\frac{1}{4L^2}$. Hence (if $n_1$ exists), we get that $n_1\ge 1$. In particular, for each $0\le n\le n_1-1$ we have $|f_\lambda^{n}(\alpha)|_v\ge \sqrt{2|\lambda|_v}$ and this yields
\begin{equation}
\label{(1) 2}
|A_{n+1}| \ge |A_n|^2\cdot \left(1- \frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v^2}\right) \ge \frac{|A_n|^2}{2}.
\end{equation}
On the other hand,
\begin{equation}
\label{(2) 2}
\frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v} \le \frac{|f_\lambda^n(\alpha)|_v}{2}.
\end{equation}
So, for each $0\le n\le n_1-1$ we have
\begin{equation}
\label{(3) 2}
|f_\lambda^{n+1}(\alpha)|_v \ge |f_\lambda^n(\alpha)|_v - \frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v} \ge \frac{|f_\lambda^n(\alpha)|_v}{2}.
\end{equation}
Therefore, repeated applications of \eqref{(3) 2} yield for $n\le n_1$ that
\begin{equation}
\label{(4) 2}
|f_\lambda^n(\alpha)|_v \ge \frac{|\alpha|_v}{2^n}\ge \frac{1}{2^nL},
\end{equation}
because $|\alpha|_v\ge 1/L$. So, for each $n\ge 0$ we have
\begin{equation}
\label{(5) 2}
|B_{n+1}|_v = |B_n|_v^2 \cdot |f_\lambda^n(\alpha)|_v \ge |B_n|_v^2 \cdot \frac{1}{2^nL}.
\end{equation}
Combining \eqref{(1) 2} and \eqref{(5) 2} we get
\begin{equation}
\label{left hand side is fine 2}
M_{n+1} \ge \frac{M_n^2}{L\cdot 2^{\max\{1,n\}}}
\end{equation}
for all $n\ge 0$. Using \eqref{right hand side is fine 2} and \eqref{left hand side is fine 2} we obtain for $0\le n\le n_1-1$ that
\begin{equation}
\label{no n_1 equation}
\left|\frac{\log M_{n+1}}{2^{n+1}}-\frac{\log M_n}{2^n}\right| \le \frac{\max\{1,n\}\cdot \log(2)+\log(L)}{2^{n+1}}.
\end{equation}
Summing up \eqref{no n_1 equation} starting from $n=n_0$ to $n=n_1-1$ we obtain that for $1\le n_0\le n\le n_1$ we have
\begin{equation}
\label{inequality for small lambda 22}
\left| \frac{\log M_n}{2^n} - \frac{\log M_{n_0}}{2^{n_0}} \right|\le \sum_{k=n_0}^{n-1} \frac{k\log(2)+\log(L)}{2^{k+1}}< \log(2)+\log(L)=\log(2L).
\end{equation}
Using inequality \eqref{(4) 2} for $n=n_1$ yields $\frac{1}{2^{n_1}L} \le |f_\lambda^{n_1}(\alpha)|_v< \sqrt{2|\lambda|_v}$,
and so,
\begin{equation}
\label{(9) 2}
\frac{1}{|\lambda|_v} < 4^{n_1}\cdot 2L^2.
\end{equation}
Now, for $n\ge n_1$, we use Lemma~\ref{lower bound in terms of lambda} and obtain
\begin{equation}
\label{(12) 2}
M_{n+1} \ge \frac{\min\{|\lambda|_v,1\}}{2\max\{|\lambda|_v,1\}}\cdot M_n^2= \frac{|\lambda|_v}{2}\cdot M_n^2,
\end{equation}
because $|\lambda|_v<R<1$.
Inequality \eqref{(12) 2} coupled with inequality \eqref{right hand side is fine 2} yields that for all $n\ge n_0\ge n_1$, we have
\begin{equation}
\label{(13) 2}
\left|\frac{\log(M_n)}{2^n} - \frac{\log M_{n_0}}{2^{n_0}} \right| < \log\left(\frac{2}{|\lambda|_v}\right)\cdot \sum_{k=n_0}^{n-1}\frac{1}{2^{k+1}}<\frac{\log\left(\frac{2}{|\lambda|_v}\right)}{2^{n_1}}.
\end{equation}
Combining inequalities \eqref{(9) 2}, \eqref{no n_1 equation} and \eqref{(13) 2} yields that for all $1\le n_0\le n$ we have
\begin{align}
\nonumber
& \left|\frac{\log M_n}{2^n} - \frac{\log M_{n_0}}{2^{n_0}}\right|\\
\nonumber
& < \log(2L)+\frac{(n_1+1)\log(4)+2\log(L)}{2^{n_1}}\\
\nonumber
& < \log(2L)+\log(4)+\log(L)\\
\label{inequality for small lambda 222}
& \le 3\log(2L),
\end{align}
as desired.
\end{proof}
Lemmas~\ref{d=2 large lambda 2} and \ref{no n_1 2}, and inequality \eqref{inequality for middle lambda 2} finish our proof.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{precise constant}.]
First we deal with the case $\alpha=0$. In this case, $\alpha=0$ is preperiodic under the action of the family ${\mathbf f}_\lambda$ and so, ${\widehat h}_{{\mathbf f}_\lambda}(\alpha)=0=h(\alpha)$. From now on, assume that $\alpha\ne 0$. Secondly, if $\lambda=0$ (and $d\ge 3$) then ${\widehat h}_{{\mathbf f}_0}(\alpha)=h(\alpha)$ (since ${\mathbf f}_0(z)=z^{d-1}$) and thus
$${\widehat h}_{{\mathbf f}_0}(\alpha)-{\widehat h}_{\mathbf f}(\alpha)\cdot h(\alpha)=\frac{d-1}{d}\cdot h(\alpha)\le 6d\cdot h(\alpha),$$
and so the conclusion of Theorem~\ref{precise constant} holds. So, from now on we assume both $\alpha$ and $\lambda$ are nonzero.
Propositions~\ref{e=0} and \ref{e=0 d=2} allow us to apply the same strategy as in the proof of Theorem~\ref{variation of canonical height} only that this time it suffices to compare ${\widehat h}_{f_\lambda}(\alpha)$ and $h([A_{\lambda,\alpha,1}:B_{\lambda,\alpha,1}])$.
As before, we let $S$ be the set of places of ${\mathbb Q}$ containing the archimedean place and all the non-archimedean places $v$ for which there exists some $\sigma\in\Gal(\overline{\mathbb{Q}}/{\mathbb Q})$ such that $|\sigma(\alpha)|_v\ne 1$. Since $\alpha\ne 0$, we have that $S$ is finite; moreover $|S|\le 1+\ell$. So, applying Proposition~\ref{equality for almost all non-archimedean places} and Propositions~\ref{e=0} and \ref{e=0 d=2} with $n_0=1$ (see also \eqref{defi local canonical height}) we obtain
\begin{align*}
& \left|\frac{h([A_{\lambda,\alpha,1}:B_{\lambda,\alpha,1}])}{d} - {\widehat h}_{f_\lambda}(\alpha) \right|\\
& = \left|\frac{1}{[K(\lambda):{\mathbb Q}]}\sum_{\sigma}\sum_{v\in\Omega_{\mathbb Q}} \frac{\log\max\{|A_{\sigma(\lambda),\sigma(\alpha), 1}|_v, |B_{\sigma(\lambda), \sigma(\alpha), 1}|_v\}}{d} - {\widehat h}_{{\mathbf f}_{\sigma(\lambda)},v}(\sigma(\alpha)) \right|\\
& \le \frac{1}{[K(\lambda):{\mathbb Q}]} \sum_{\sigma} \sum_{v\in S} \left| \frac{\log\max\{|A_{\sigma(\lambda),\sigma(\alpha), 1}|_v, |B_{\sigma(\lambda), \sigma(\alpha), 1}|_v\}}{d} - {\widehat h}_{{\mathbf f}_{\sigma(\lambda)},v}(\sigma(\alpha)) \right|\\
& \le \frac{3d-2}{[K(\lambda):{\mathbb Q}]}\sum_{\sigma:K(\lambda)\longrightarrow \overline{\mathbb{Q}}}\sum_{v\in S}\log\left(2\max\{|\sigma(\alpha)|_v, |\sigma(\alpha)|_v^{-1}\}\right)\\
& \le \frac{3d-2}{[K(\lambda):{\mathbb Q}]}\sum_{\sigma:K(\lambda)\longrightarrow \overline{\mathbb{Q}}}\sum_{v\in S}\left(\log(2)+\log^+|\sigma(\alpha)|_v+ \log^+\left|\frac{1}{\sigma(\alpha)}\right|_v\right)\\
& \le (3d-2)\cdot \left(|S|+h(\alpha)+h\left(\frac{1}{\alpha}\right)\right)\\
& \le (3d-2)\cdot (1+\ell+2h(\alpha)).
\end{align*}
On the other hand, using that ${\widehat h}_{\mathbf f}(\alpha)=1/d$ (by Proposition~\ref{canonical height generic nonzero}) and also using the basic inequalities (1)-(3) for the Weil height from Subsection~\ref{heights subsection} we obtain
\begin{align*}
& \left|\frac{h([A_{\lambda,\alpha,1}:B_{\lambda,\alpha,1}])}{d}-{\widehat h}_{\mathbf f}(\alpha)\cdot h(\lambda)\right|\\
& = \left|\frac{h\left(\alpha^{d-1}+\frac{\lambda}{\alpha}\right)}{d} - \frac{h(\lambda)}{d}\right|\\
& \le \frac{1}{d}\cdot \left(\left| h\left(\alpha^{d-1}+\frac{\lambda}{\alpha}\right) - h\left(\frac{\lambda}{\alpha}\right)\right| + \left| h\left(\frac{\lambda}{\alpha}\right) - h(\lambda)\right|\right)\\
& \le \frac{1}{d}\cdot ((d-1)h(\alpha)+\log(2)+h(\alpha))\\
& < h(\alpha)+1.
\end{align*}
This finishes the proof of Theorem~\ref{precise constant}.
\end{proof}
\begin{remark}
Theorem~\ref{precise constant} yields an effective method for finding all $\lambda\in\overline{\mathbb{Q}}$ such that a given point $\alpha\in\overline{\mathbb{Q}}$ is preperiodic under the action of ${\mathbf f}_\lambda$.
\end{remark}
\section{Proof of our main result}
\label{proofs}
So we are left to proving Proposition~\ref{each place} in full generality. We fix a place $v\in\Omega_{\mathbb Q}$.
Before completing the proof of Proposition~\ref{each place} we need one more result.
\begin{prop}
\label{really small lambda}
Assume $d>2$. Let $\alpha, \lambda\in\overline{\mathbb{Q}}$, and let $|\cdot |_v$ be an absolute value. We let $A_n:=A_{\lambda,\alpha,n}$, $B_n:=B_{\lambda,\alpha,n}$ and $M_{n,v}:=\max\{|A_n|_v, |B_n|_v\}$. If $|\alpha|_v\ge 2$ and $|\lambda|_v\le \frac{1}{2}$ then for each $n_0\ge 0$ we have
$$\left|\displaystyle\lim_{n\to\infty}\frac{\log M_{n,v}}{d^n} - \frac{\log M_{n_0,v}}{d^{n_0}}\right|\le \frac{\log(2)}{d^{n_0}(d-1)} .$$
\end{prop}
\begin{proof}
First we claim that for each $n\ge 0$ we have $|f_\lambda^n(\alpha)|_v\ge 2$. Indeed, for $n=0$ we have $|\alpha|_v\ge 2$ as desired. We assume $|f_\lambda^n(\alpha)|_v\ge 2$ and since $|\lambda|_v\le 1/2$ we get that
$$ |f_\lambda^{n+1}(\alpha)|_v
\ge |f_\lambda^n(\alpha)|_v^{d-1} - \frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v}
\ge 4-\frac{1}{4} > 2.
$$
Hence $M_{n,v}=|A_n|_v$ and we obtain that
$$\left| M_{n+1,v}-M_{n,v}^d\right| \le |\lambda|_v\cdot |B_n|_v^d \le \frac{M_{n,v}^d}{2\cdot \left|\frac{A_n}{B_n}\right|_v^d}= \frac{M_{n,v}^d}{2|f_\lambda^n(\alpha)|_v^d} \le \frac{M_{n,v}^d}{16}$$
because $d\ge 3$.
Thus Proposition~\ref{fundamental inequality} yields the desired conclusion.
\end{proof}
The next result deals with the case $d=2$ in Proposition~\ref{really small lambda}.
\begin{prop}
\label{d=2 really small lambda}
Assume $d=2$. Let $M\ge 1$ be a real number, let $\alpha, \lambda\in\overline{\mathbb{Q}}$, and let $|\cdot |_v$ be an absolute value. We let $A_n:=A_{\lambda,\alpha,n}$, $B_n:=B_{\lambda,\alpha,n}$ and $M_{n,v}:=\max\{|A_n|_v, |B_n|_v\}$. If $|\alpha|_v\ge \frac{1}{M\cdot |\lambda|_v}\ge 2M$ then for each $0\le n_0\le n$ we have
$$\left|\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right|\le 4\log(2) .$$
\end{prop}
In particular, using Corollary~\ref{the limit exists} we obtain that for all $n_0\ge 0$ we have
$$\left|\lim_{n\to\infty}\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right|\le 4\log(2) .$$
\begin{proof}[Proof of Proposition~\ref{d=2 really small lambda}.]
Since $|\lambda|_v\le \frac{1}{2M^2}\le \frac{1}{2}$, using Lemmas~\ref{upper bound lemma} and \ref{lower bound in terms of lambda} we obtain for all $n\ge 0$ that
\begin{equation}
\label{general bound d=2 again}
\left|\frac{\log M_{n+1,v}}{2^{n+1}} - \frac{\log M_{n,v}}{2^{n}}\right|\le \frac{\log\left(\frac{2}{|\lambda|_v}\right)}{2^{n+1}}.
\end{equation}
We need to improve the above bound and in order to do this we prove a sharper inequality when $n$ is small compared to $\frac{1}{|\lambda|_v}$. The strategy is similar to the one employed in the proof of Lemma~\ref{d=2 large lambda 2}.
First of all, since $|\lambda|_v<1$, Lemma~\ref{upper bound lemma} yields that for all $ n\ge 0$ we have
\begin{equation}
\label{easy inequality}
\frac{\log M_{n+1,v}}{2^{n+1}}-\frac{\log M_{n,v}}{2^{n}}\le \frac{\log(2)}{2^{n+1}}.
\end{equation}
We will prove a lower bound for the main term from \eqref{easy inequality} when $n_0$ and $n$ are small compared to $\mathfrak rac{1}{|\lambda|_v}$. First we prove that $|f_\lambda^n(\alpha)|_v$ is large when $n$ is small.
\begin{lemma}
\lambdaabel{the iterates are large in the beginning}
For each integer $n\lambdae \mathfrak rac{3M^2}{4|\lambda|_v}$, we have $|f_\lambda^n(\alpha)|_v\ge \mathfrak rac{3M}{2}$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{the iterates are large in the beginning}.]
We will prove the statement inductively. For $n=0$, we know that $|f_\lambda^0(\alpha)|_v=|\alpha|_v\ge 2M$. If now for some $n\ge0$ we have that $|f_\lambda^n(\alpha)|_v\ge \mathfrak rac{3M}{2}$, then $|f_\lambda^{n+1}(\alpha)|_v\ge |f_\lambda^n(\alpha)|_v-\mathfrak rac{2|\lambda|_v}{3M}$. Therefore, for all $n\lambdae \mathfrak rac{3M^2}{4|\lambda|_v}$ we have
$$|f_\lambda^n(\alpha)|_v\ge |\alpha|_v- \mathfrak rac{n\cdot 2|\lambda|_v}{3M^2}\ge \mathfrak rac{3M}{2},$$
as desired.
\end{proof}
In conclusion, if we let $n_1$ be the smallest positive integer larger than $\frac{3M^2}{4|\lambda|_v}$ we know that for all $0\le n\le n_1-1$ we have $|f_\lambda^n(\alpha)|_v\ge \frac{3}{2}$. In particular,
$$|f_\lambda^{n_1}(\alpha)|_v\ge |f_\lambda^{n_1-1}(\alpha)|_v - \frac{|\lambda|_v}{|f_\lambda^{n_1-1}(\alpha)|_v}\ge \frac{3}{2}-\frac{\frac{1}{2}}{\frac{3}{2}}>1.$$
Therefore, $M_n=|A_n|_v$ for all $0\le n\le n_1$, and moreover for $0\le n\le n_1-1$ we have
\begin{equation}
\label{bound for small n M_n}
M_{n+1}-M_n^2= |A_{n+1}|_v-|A_n^2|_v\ge -|\lambda|_v\cdot |B_n|_v^2=-M_n^2\cdot \frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v^2}\ge -\frac{4M_n^2}{9},
\end{equation}
because $|\lambda|_v<1$ and $|f_\lambda^n(\alpha)|_v\ge \frac{3}{2}$.
Inequality \eqref{bound for small n M_n} coupled with the argument from Proposition~\ref{fundamental inequality} yields that for all $0\le n\le n_1-1$ we have
\begin{equation}
\label{upper bound for small n M_n}
\frac{\log M_{n+1}}{2^{n+1}} - \frac{\log M_{n}}{2^{n}}> -\frac{\log(2)}{2^{n+1}}.
\end{equation}
Using the definition of $n_1$ and inequalities \eqref{general bound d=2 again}, \eqref{easy inequality} and \eqref{upper bound for small n M_n}, we conclude that
\begin{align*}
& \left|\frac{\log M_n}{2^n} - \frac{\log M_{n_0}}{2^{n_0}}\right|\\
& \le \sum_{n=0}^{n_1-1}\frac{\log(2)}{2^{n+1}}+\sum_{n=n_1}^\infty \frac{\log\left(\frac{2}{|\lambda|_v}\right)}{2^{n+1}}\\
& \le \log(2)+ \frac{ \log\left(\frac{8n_1}{3M^2}\right)}{2^{n_1}}\\
& \le 4\log(2),
\end{align*}
for all $0\le n_0\le n$.
\end{proof}
Finally, we will establish the equivalent of Proposition~\ref{large lambda archimedean} for $d=2$.
\begin{prop}
\label{large lambda non-archimedean}
Assume $d=2$. Let $M\ge 1$ be a real number, let $\alpha, \lambda\in\overline{\mathbb{Q}}$, and let $|\cdot |_v$ be an absolute value. We let $A_n:=A_{\lambda,\alpha,n}$, $B_n:=B_{\lambda,\alpha,n}$ and $M_{n,v}:=\max\{|A_n|_v, |B_n|_v\}$. If $|\alpha|_v\ge \frac{|\lambda|_v}{M}\ge 8M$, then for each $0\le n_0\le n$ we have
$$\left|\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right|\le 1+8M .$$
\end{prop}
In particular, using Corollary~\ref{the limit exists} we obtain that for all $n_0\ge 0$ we have
$$\left|\lim_{n\to\infty}\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right|\le 1+8M .$$
\begin{proof}[Proof of Proposition~\ref{large lambda non-archimedean}.]
We know that $|\lambda|_v\ge 8M^2>1$. Thus, Lemmas~\ref{upper bound lemma} and \ref{lower bound in terms of lambda} yield that for all $n\ge 0$ we have
\begin{equation}
\label{non-archimedean 1}
\left|\frac{\log M_{n+1,v}}{2^{n+1}}-\frac{\log M_{n,v}}{2^n}\right|\le \frac{\log(2|\lambda|_v)}{2^{n+1}}.
\end{equation}
As in the proof of Proposition~\ref{d=2 really small lambda}, we will find a sharper inequality for small $n$. Arguing identically as in Claim~\ref{k_1 is large}, we obtain that for $0\le n\le \log_4\left(\frac{|\lambda|_v}{2M}\right)$ we have
\begin{equation}
\label{the first iterates are large non-archimedean}
|f_\lambda^n(\alpha)|_v\ge \frac{|\lambda|_v}{2^nM}\ge 2^{n+1}\ge 2.
\end{equation}
So, let $n_1$ be the smallest integer larger than $\log_4\left(\frac{|\lambda|_v}{2M^2}\right)-1$. Since $|\lambda|_v\ge 8M^2$, we get that $n_1\ge 1$. Also, by its definition, $n_1\le \log_4\left(\frac{|\lambda|_v}{2M}\right)$; so, for each $0\le n\le n_1$, inequality \eqref{the first iterates are large non-archimedean} holds, and thus $M_{n,v}=|A_n|_v$. Moreover, for $0\le n\le n_1-1$ we get that
$$|M_{n+1,v}-M_{n,v}^2|=||A_{n+1}|_v-|A_n^2|_v|\le |\lambda|_v\cdot |B_n|_v^2= M_{n,v}^2\cdot \frac{|\lambda|_v}{|f_\lambda^n(\alpha)|_v^2}\le M_{n,v}^2\cdot \frac{|\lambda|_v}{\frac{|\lambda|_v^2}{4^nM^2}} \le \frac{M_{n,v}^2}{2}.$$
So, using Proposition~\ref{fundamental inequality} we obtain that for all $0\le n\le n_1-1$ we have
\begin{equation}
\label{inequality for small n non-archimedean}
\left|\frac{\log M_{n+1,v}}{2^{n+1}}-\frac{\log M_{n,v}}{2^n}\right| \le \frac{\log(2)}{2^{n+1}}.
\end{equation}
Using the definition of $n_1$ and inequalities \eqref{non-archimedean 1} and \eqref{inequality for small n non-archimedean} we conclude that
\begin{align*}
& \left|\frac{\log M_{n,v}}{2^n} - \frac{\log M_{n_0,v}}{2^{n_0}}\right|\\
& \le \sum_{n=0}^{n_1-1}\frac{\log(2)}{2^{n+1}} + \sum_{n=n_1}^\infty \frac{\log(2|\lambda|_v)}{2^{n+1}}\\
& \le \log(2)+\frac{\log(2|\lambda|_v)}{2^{n_1}}\\
& \le \log(2)+ \frac{2\log(2|\lambda|_v)}{\sqrt{\frac{|\lambda|_v}{2M^2}}}\\
& \le \log(2)+ 4M\cdot \frac{\log(2|\lambda|_v)}{\sqrt{|\lambda|_v}}\\
& < 1+8M\text{ (because $|\lambda|_v\ge 8$),}
\end{align*}
for all $0\le n_0\le n$.
\end{proof}
Our next result completes the proof of Proposition~\ref{each place} by considering the case of nonconstant ${\mathbf c}(t)=\frac{{\mathbf A}(t)}{{\mathbf B}(t)}$, where ${\mathbf A},{\mathbf B}\in\overline{\mathbb{Q}}[t]$ are nonzero coprime polynomials.
\begin{prop}
\label{place in S}
Assume ${\mathbf c}(\lambda)=\frac{{\mathbf A}(\lambda)}{{\mathbf B}(\lambda)}\in\overline{\mathbb{Q}}(\lambda)$ is nonconstant, and let $|\cdot |_v$ be any absolute value on $\overline{\mathbb{Q}}$. Consider $\lambda_0\in\overline{\mathbb{Q}}^*$ such that ${\mathbf c}(\lambda_0)\ne 0,\infty$. For each $n\ge 0$ we let ${\mathbf A}_n:={\mathbf A}_{{\mathbf c}, n}(\lambda_0)$, ${\mathbf B}_n:={\mathbf B}_{{\mathbf c},n}(\lambda_0)$ and $M_{n,v}:=\max\{|{\mathbf A}_n|_v, |{\mathbf B}_n|_v\}$.
Then there exists a constant $C$ depending only on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ (but independent of $\lambda_0$) such that
\begin{equation}
\label{inequality for nonconstant}
\left|\lim_{n\to\infty} \frac{\log M_{n,v}}{d^n} - \frac{\log M_{2,v}}{d^2}\right| \le C.
\end{equation}
\end{prop}
\begin{proof}
We let $\alpha:=f_{\lambda_0}({\mathbf c}(\lambda_0))$. Since $\lambda_0$ is fixed in our proof, so is $\alpha$. On the other hand, we will prove that the constant $C$ appearing in \eqref{inequality for nonconstant} does not depend on $\alpha$ (nor on $\lambda_0$).
We split our proof in three cases depending on $|\lambda_0|_v$. We first deal with the case of large $|\lambda_0|_v$. As proven in Propositions~\ref{canonical height generic nonzero} and \ref{canonical height generic zero}, $\deg_t({\mathbf A}_{{\mathbf c}, 1}(t))-\deg_t({\mathbf B}_{{\mathbf c}, 1}(t))\ge 1$. We let $c_1$ and $c_2$ be the leading coefficients of ${\mathbf A}_{{\mathbf c},1}(t)$ and ${\mathbf B}_{{\mathbf c},1}(t)$ respectively. Then, there exists a positive real number $Q$ depending on $v$ and the coefficients of ${\mathbf A}$ and ${\mathbf B}$ only, such that if $|\lambda|_v>Q$ then
$$\frac{|{\mathbf A}_{{\mathbf c}, 1}(\lambda)|_v}{|{\mathbf B}_{{\mathbf c},1}(\lambda)|_v}\ge \frac{|\lambda|_v\cdot |c_1|_v}{2|c_2|_v}\ge 8M,$$
where $M:=2\max\{1,|c_2/c_1|_v\}$. Our first step is to prove the following result.
\begin{lemma}
\label{really large lambda}
If $|\lambda_0|_v>Q$ then
\begin{equation}
\label{inequality for large lambda final}
\left|\lim_{n\to\infty}\frac{\log M_{n,v}}{d^n} - \frac{\log M_{2,v}}{d^2}\right|\le \frac{1+16\max\left\{1,\left|\frac{c_2}{c_1}\right|_v\right\}}{d}.
\end{equation}
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{really large lambda}.]
We recall that $M:=2\max\{1,|c_2/c_1|_v\}$.
Since $|\lambda_0|_v>Q$, then
$$|\alpha|_v=\frac{|{\mathbf A}_{{\mathbf c},1}(\lambda_0)|_v}{|{\mathbf B}_{{\mathbf c},1}(\lambda_0)|_v}\ge |\lambda_0|_v/M\ge 8M.$$
We apply the conclusion of Propositions~\ref{large lambda archimedean} and \ref{large lambda non-archimedean} with $n_0=1$ and we conclude that
$$\left|\lim_{n\to\infty} \frac{\log\max\{|A_{\lambda_0,\alpha, n}|_v, |B_{\lambda_0,\alpha, n}|_v\}}{d^n} - \frac{\log\max\{|A_{\lambda_0,\alpha, 1}|_v, |B_{\lambda_0,\alpha, 1}|_v\}}{d} \right| \le 1+8M.$$
On the other hand, using \eqref{conversion} with $k_0=1$ (note that by our assumption, ${\mathbf B}_{{\mathbf c},1}(\lambda_0)={\mathbf A}(\lambda_0){\mathbf B}(\lambda_0)^{d-1}\ne 0 $) we obtain
\begin{align*}
& \left|\lim_{n\to\infty}\frac{\log M_{n,v}}{d^n} - \frac{\log M_{2,v}}{d^2}\right|\\
& =\frac{1}{d}\cdot \left| \lim_{n\to\infty} \frac{\log\max\{|A_{\lambda_0,\alpha, n}|_v, |B_{\lambda_0,\alpha, n}|_v\}}{d^n} - \frac{\log\max\{|A_{\lambda_0,\alpha, 1}|_v, |B_{\lambda_0,\alpha, 1}|_v\}}{d} \right|\\
& \le \frac{1+8M}{d},
\end{align*}
as desired.
\end{proof}
We will now deal with the case when $|\lambda_0|_v$ is small. We will define another quantity, $R$, which will depend only on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$, and we will assume that $|\lambda_0|_v<R$. The definition of $R$ is technical since it depends on whether ${\mathbf c}(0)$ equals $0$, $\infty$ or neither. However, the quantity $R$ will depend on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ only (and will not depend on $\lambda_0$ nor on $\alpha=f_{\lambda_0}({\mathbf c}(\lambda_0))$).
Assume ${\mathbf c}(0)\ne 0,\infty$ (i.e., ${\mathbf A}(0)\ne 0$ and ${\mathbf B}(0)\ne 0$). Let $c_3:={\mathbf A}(0)\ne 0$ and $c_4:={\mathbf B}(0)\ne 0$ be the constant coefficients of ${\mathbf A}$ and respectively of ${\mathbf B}$.
Then there exists a positive real number $R$ depending on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ only, such that if $|\lambda|_v<R$, then
$$\frac{|c_3|_v}{2} < |{\mathbf A}(\lambda)|_v<\frac{3|c_3|_v}{2}\text{ and }\frac{|c_4|_v}{2}<|{\mathbf B}(\lambda)|_v<\frac{3|c_4|_v}{2}.$$
Hence $\frac{|c_3|_v}{3|c_4|_v}<|{\mathbf c}(\lambda)|_v<\frac{3|c_3|_v}{|c_4|_v}$.
Then we can apply Propositions~\ref{e=0} and \ref{e=0 d=2} with $n_0=2$ (coupled with \eqref{conversion} for $k_0=0$); we obtain that if $|\lambda_0|_v<R$ then
\begin{align}
\nonumber
& \left|\lim_{n\to\infty}\frac{\log M_{n,v}}{d^n} - \frac{\log M_{2,v}}{d^2} \right| \\
\nonumber
& = \left|\lim_{n\to\infty}\frac{\log\max\{|A_{\lambda_0,{\mathbf c}(\lambda_0),n}|_v, |B_{\lambda_0,{\mathbf c}(\lambda_0),n}|_v\}}{d^n} - \frac{\log\max\{|A_{\lambda_0,{\mathbf c}(\lambda_0),2}|_v, |B_{\lambda_0,{\mathbf c}(\lambda_0),2}|_v\}}{d^2}
\right| \\
\nonumber
& \le (3d-2)\log\left(2\max\{|{\mathbf c}(\lambda_0)|_v, |{\mathbf c}(\lambda_0)|_v^{-1}\}\right)\\
\label{conclusion 1}
& \le (3d-2)\left(2+\log\left(\max\left\{\frac{|c_3|_v}{|c_4|_v}, \frac{|c_4|_v}{|c_3|_v}\right\}\right)\right).
\end{align}
Assume ${\mathbf c}(0)=\infty$. Then ${\mathbf A}(0)\ne 0$ but ${\mathbf B}(0)=0$; in particular $\deg({\mathbf B})\ge 1$ since ${\mathbf B}$ is not identically equal to $0$. We recall that $c_3={\mathbf A}(0)$ is the constant coefficient of ${\mathbf A}$ (we know $c_3\ne 0$). Also, let $c_5$ be the first nonzero coefficient of ${\mathbf B}$. Then there exists a positive real number $R$ depending on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ only, such that if $0<|\lambda|_v<R$ then
$$\frac{|c_3|_v}{2}<|{\mathbf A}(\lambda)|_v\text{ and }|{\mathbf B}(\lambda)|_v<2|c_5|_v\cdot |\lambda|_v,$$
and moreover
$$|{\mathbf c}(\lambda)|_v=\left|\frac{{\mathbf A}(\lambda)}{{\mathbf B}(\lambda)}\right|_v>\frac{1}{M\cdot |\lambda|_v}\ge 2M,$$
where $M=4\max\{1,|c_5/c_3|_v\}$. Then applying Propositions~\ref{really small lambda} and \ref{d=2 really small lambda} with $n_0=2$ (coupled with \eqref{conversion} for $k_0=0$) we conclude that if $|\lambda_0|_v<R$ then
\begin{align}
\nonumber
& \left|\lim_{n\to\infty}\frac{\log M_{n,v}}{d^{n}}-\frac{\log M_{2,v}}{d^2}\right|\\
\nonumber
& = \left| \lim_{n\to\infty}\frac{\log\max\{|A_{\lambda_0,{\mathbf c}(\lambda_0), n}|_v, |B_{\lambda_0,{\mathbf c}(\lambda_0), n}|_v\}}{d^n} -\frac{\log\max\{|A_{\lambda_0,{\mathbf c}(\lambda_0), 2}|_v, |B_{\lambda_0,{\mathbf c}(\lambda_0),2}|_v\}}{d^2}\right|\\
\label{conclusion 3'}
& \le 4\log(2).
\end{align}
Assume ${\mathbf c}(0)=0$. Then ${\mathbf A}(0)=0$ but ${\mathbf B}(0)\ne 0$; in particular $\deg({\mathbf A})\ge 1$ since ${\mathbf A}$ is not identically equal to $0$. So, the constant coefficient of ${\mathbf B}$ is nonzero, i.e., $c_4=c_5={\mathbf B}(0)\ne 0$ in this case. There are two cases: ${\mathbf A}'(0)=0$ or not. First, assume $c_6:={\mathbf A}'(0)\ne 0$. Then there exists a positive real number $R$ depending on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ only such that if $0<|\lambda|_v<R$ then
$$|{\mathbf A}_{{\mathbf c},1}(\lambda)|_v=\left|\frac{{\mathbf A}(\lambda)^d}{\lambda}+{\mathbf B}(\lambda)^d\right|_v\in \left(\frac{|c_4|_v^d}{2}, \frac{3|c_4|_v^d}{2}\right)\text{ and }$$
$$|{\mathbf B}_{{\mathbf c},1}(\lambda)|_v=\left|\frac{{\mathbf A}(\lambda){\mathbf B}(\lambda)^{d-1}}{\lambda}\right|_v\in \left(\frac{\left|c_6c_4^{d-1}\right|_v}{2},\frac{3\left|c_6c_4^{d-1}\right|_v}{2}\right).$$
Hence $\frac{|c_4|_v}{3|c_6|_v}\le |\alpha|_v\le \frac{3|c_4|_v}{|c_6|_v}$ (also note that we are using the fact that $\lambda_0\ne 0$ and so the above inequalities apply to our case). Hence using Propositions~\ref{e=0} and \ref{e=0 d=2} with $n_0=1$ (combined also with \eqref{conversion} for $k_0=1$, which can be used since ${\mathbf B}_{{\mathbf c},1}(\lambda_0)={\mathbf A}(\lambda_0){\mathbf B}(\lambda_0)^{d-1}\ne 0$) we obtain
\begin{align}
\nonumber
& \left|\lim_{n\to\infty} \frac{\log M_{n,v}}{d^n} - \frac{\log M_{2,v}}{d^2}\right|\\
\nonumber
& = \frac{1}{d}\cdot \left|\lim_{n\to\infty}\frac{\log\max\{|A_{\lambda_0,\alpha,n}|_v, |B_{\lambda_0,\alpha,n}|_v\}}{d^n} - \frac{\log\max\{|A_{\lambda_0,\alpha, 1}|_v, |B_{\lambda_0,\alpha, 1}|_v\}}{d}\right|\\
\nonumber
& \le \frac{3d-2}{d}\cdot\log\left(2\max\left\{|\alpha|_v, |\alpha|_v^{-1}\right\}\right)\\
\label{conclusion 2}
& \le 3\cdot\left(2+\log\left(\max\left\{\left|\frac{c_4}{c_6}\right|_v, \left|\frac{c_6}{c_4}\right|_v\right\}\right) \right).
\end{align}
Next assume ${\mathbf A}(0)={\mathbf A}'(0)=0$. So, let $c_7$ be the first nonzero coefficient of ${\mathbf A}$. Also, we recall that $c_4=c_5={\mathbf B}(0)\ne 0$ in this case.
Then there exists a positive real number $R$ depending on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ only such that if $0<|\lambda|_v<R$ then
$$\frac{|c_4|_v^d}{2}< |{\mathbf A}_{{\mathbf c},1}(\lambda)|_v \text{ and }|{\mathbf B}_{{\mathbf c},1}(\lambda)|_v< 2\left|c_7c_4^{d-1}\right|_v\cdot |\lambda|_v,$$
and moreover
$$\left|\frac{{\mathbf A}_{{\mathbf c},1}(\lambda)}{{\mathbf B}_{{\mathbf c},1}(\lambda)}\right|_v>\frac{1}{M\cdot |\lambda|_v}\ge 8M,$$
where $M:=4\max\left\{1,\frac{|c_7|_v}{|c_4|_v}\right\}$. Hence, if $|\lambda_0|_v<R$ (using also that ${\mathbf c}(\lambda_0)\ne 0,\infty$), we obtain
\begin{equation}
\label{last equation for alpha}
|\alpha|_v\ge \frac{1}{M\cdot |\lambda_0|_v}\ge 8M.
\end{equation}
Then Propositions~\ref{really small lambda} and \ref{d=2 really small lambda} with $n_0= 1$ (combined with \eqref{conversion} for $k_0=1$, which can be used since ${\mathbf B}_{{\mathbf c},1}(\lambda_0)\ne 0$) yield
\begin{align}
\nonumber
& \left|\lim_{n\to\infty}\frac{\log M_{n,v}}{d^{n}}-\frac{\log M_{2,v}}{d^2}\right|\\
\nonumber
& =\frac{1}{d}\cdot \left| \lim_{n\to\infty}\frac{\log\max\{|A_{\lambda_0,\alpha, n}|_v, |B_{\lambda_0,\alpha, n}|_v\}}{d^n} -\frac{\log\max\{|A_{\lambda_0,\alpha, 1}|_v, |B_{\lambda_0,\alpha,1}|_v\}}{d}\right|\\
& \le \frac{1+32\max\left\{1,\frac{|c_7|_v}{|c_4|_v}\right\}}{d} .
\label{conclusion 3}
\end{align}
On the other hand, Proposition~\ref{bounded lambda} yields that if $R\le |\lambda_0|_v\le Q$ then
\begin{equation}
\label{inequality for middle lambda final}
\left|\lim_{n\to\infty}\frac{\log M_{n,v}}{d^n} -\frac{\log M_{2,v}}{d^2}\right| \le \frac{\log(2Q)-\log(R)}{18}.
\end{equation}
Noting that $R$ and $Q$ depend on $v$ and on the coefficients of ${\mathbf A}$ and of ${\mathbf B}$ only, inequalities \eqref{inequality for large lambda final}, \eqref{conclusion 1}, \eqref{conclusion 3'}, \eqref{conclusion 2}, \eqref{conclusion 3} and \eqref{inequality for middle lambda final} yield the conclusion of Proposition~\ref{each place}.
\end{proof}
\end{document}
|
\begin{document}
\selectlanguage{english}
\pagenumbering{arabic}
\title{Matrix-free multigrid solvers\\ for phase-field fracture problems}
\author[1]{D. Jodlbauer}
\author[1]{U. Langer}
\author[2,3]{T. Wick}
\affil[1]{Johann Radon Institute for Computational and Applied Mathematics, Austrian Academy of Sciences, Altenbergerstr. 69, A-4040 Linz, Austria}
\affil[2]{Institut f\"ur Angewandte Mathematik, Leibniz Universit\"at Hannover, Welfengarten 1, 30167 Hannover, Germany}
\affil[3]{Cluster of Excellence PhoenixD (Photonics, Optics, and
Engineering - Innovation Across Disciplines), Leibniz Universit\"at Hannover, Germany}
\date{}
\maketitle
\begin{abstract}
In this work, we present a framework for the matrix-free solution to a
monolithic quasi-static phase-field fracture model with geometric multigrid methods.
Using a standard matrix based approach
within the Finite Element Method
requires lots of memory,
which eventually becomes a serious bottleneck.
A matrix-free approach overcomes this problem and greatly reduces the
amount of required memory, allowing one to solve larger problems on available
hardware. One key challenge is concerned with the crack irreversibility
for which a primal-dual active set method is employed. Here, the active
set values of fine meshes must be available on coarser levels of the multigrid
algorithm. The developed multigrid method
provides
a preconditioner for
a generalized minimal residual (GMRES) solver. This method is used
for solving the linear equations inside Newton's method for treating the
overall nonlinear-monolithic discrete displacement/phase-field formulation.
Several numerical examples demonstrate
the performance and robustness of our solution technology.
Mesh refinement studies, variations in the phase-field
regularization parameter,
iteration numbers of the linear and nonlinear solvers, and some parallel
performances are conducted to substantiate the efficiency of the proposed
solver for single fractures, multiple pressurized fractures, and
a L-shaped panel test in three dimensions. \\[1em]
Keywords:
phase-field fracture propagation; matrix-free;
geometric multigrid; primal-dual active set\\
\end{abstract}
\section{Introduction}
Predicting fracture growth can be of big interest in a variety of fields, such as manufacturing processes, engineering sciences, and medical applications.
However, fracture propagation poses several challenges for numerical methods.
A main problem is the fact that cracks are usually lower-dimensional phenomena.
For example, fractures in a sheet of glass are, most often, just thin lines.
This makes it very difficult for standard numerical methods,
like the Finite Element Method (FEM)
to represent such kind of discontinuities.
Another challenge is the irreversibility property, i.e. a fracture
should not be able to heal itself, which introduces time-dependent
constraints on the solution. More difficulties are related to
fracture nucleation, branching, and fracture networks with
curve-linear, complex, crack patterns.
Methods like generalized/extended FEM (GFEM/XFEM) try to overcome these shortcomings of standard FEM by enriching the solution space with discontinuous functions.
This allows the representation of sharper objects like cracks, see
e.g. \cite{BeBl99,MeDu07}. A short overview of various methods is compiled in \cite{WiSiWh16}.
We now focus on a variational phase-field based approach,
which was first introduced for fractures in
\cite{BoFrMa00} based on the fracture model presented in \cite{FrMa98}.
Therein, Griffith's original model for brittle fractures \cite{GrTa21} is rewritten by means of an energy minimization problem.
This numerical approximation introduces an additional scalar-valued indicator variable representing the fracture.
It is then computed as the minimizer of the total energy functional, together with the unknown solid displacement.
Specifically therein, the continuous model is approximated using elliptic Ambrosio-Tortorelli functionals, see \cite{AmTo90}.
Such an approximation yields a smooth representation of the originally sharp crack.
Thus, the solution does no longer jump from broken to unbroken parts, but provides a smoothed transition between them.
The approximation quality is given by the phase-field regularization parameter $\varepsilon$.
Roughly speaking, it determines the size of the diffusive zone around the fracture, see \cref{pic:notation} for an illustration.
This smoothing effect could be seen as a disadvantage of phase-field
methods, as it is no longer possible to accurately localize the
fracture. Indeed, fracture width computations \cite{LeWhWi17} and
posing interface conditions on the fracture boundary are challenging
\cite{MiWhWi15} with no final answer to date.
On the other hand, it comes with some benefits: the problem is reduced
to find the minimizers of a nonlinear energy functional, which is a
common procedure in engineering and applied mathematics.
Thus, it is possible to use well-known Galerkin techniques,
making it easier to find suitable software to assist with the
implementation. Furthermore, the fracture is entirely represented using an additional variable.
In particular, the mesh is fixed and does not need to be updated when
the fracture propagates, as it is the case for methods like XFEM.
The phase-field fracture (PFF) model can be enhanced to include, for
instance,
pressure-driven cracks; see, e.g.,
\cite{MiMaTe15,MiMa16,LeWhWi16,MiWhWi15a,MiWhWi19,HeMa17,WiLa16,YoBo16,ChBoYo19,SiVe18}.
Furthermore, thermodynamically consistent schemes have been developed in \cite{MiHoWe10,MiWeHo10}.
Therein, the elastic energy is split into tensile and compressive parts.
Other splitting strategies exists, e.g. \cite{AmMaMa09} based on
a deviatoric split, or a hybrid strategy as presented in
\cite{AmGeDe15}. The latter also contains a nice comparison of the different splitting techniques.
The most challenging part is related to the nonlinear equations and
the irreversibility condition, which appears in terms of a variational inequality.
For treating the irreversibility constraint, several approaches
(simple penalization, augmented Lagrangian techniques, or a
strain-history field) have
been proposed in the literature. In this work, we adopt
a primal-dual active set method \cite{HiItKu03,ItKu00} that
can be identified as a semi-smooth Newton method \cite{HiItKu03} and
was first applied to phase-field fracture in \cite{HeWhWi15}.
With regard to the nonlinearities in the governing equations
for displacements and phase-field two main approaches can be
distinguished: staggered approaches \cite{Bo07,BuOrSu10,MiHoWe10}
and monolithic methods \cite{GeDe16,Wi17,Wi17b}.
Typically, the nonlinear solvers reduce the problem to the repeated solution of linear systems of equations.
Krylov-subspace based solvers like Conjugate Gradients (CG) or Generalized Minimal Residual (GMRES) \cite{Sa03} have been proven to be very effective methods for solving such large sparse linear systems, if provided with a reasonable preconditioner.
Different possibilities to precondition the PFF problem are shown in \cite{FaMa17}.
A powerful class of solvers/preconditioners that is known to work for many different types of problems are multigrid methods.
These methods smooth the iteration error on the fine grid and correct the iterate by
a coarse-grid correction in a two-grid setting, whereas recurrent application on a sequence of grids
leads to a multigrid procedure; see \cite{Ha85,Br93}.
Multigrid methods have been successfully applied to a wide range of related problems, see e.g.
\cite{Ko94,Ko02,HaMi83,Ho87,HoMi89,Be93,BlBrSu04,ReVoHe13,HeLaNa13,GrSa19,KoGr09}.
Algebraic multigrid methods provided by Trilinos \cite{HeBaHo03,GeSiHu06} have been applied to phase-field fracture in \cite{HeWi18}, yielding a scalable parallel solution scheme; see also
\cite{St01,HaLa02,BrScSc07} for algebraic
multigrid method and their parallelization.
When attempting to solve larger and larger problems, memory soon becomes the limiting factor.
To overcome this, matrix-free methods have been developed.
Instead of assembling and storing the discrete Jacobian, as it is usually done, the necessary information is computed on-the-fly.
This drastically reduces the amount of memory required, but may increase the computational cost.
Hence, a clever implementation is needed to keep up with standard matrix-based methods.
In this work, we rely on the FEM library deal.II \cite{AlArBa18},
which readily includes such an implementation
\cite{KrKo12,KrKo17}.
As indicated in our previous overviews, the main aims of this paper are two-fold:
\begin{itemize}
\item Development of a geometric multigrid preconditioner for the GMRES
iterations inside Newton's method;
\item A matrix-free implementation of the geometric multigrid method.
\end{itemize}
Here, one focus is on quantities that are given naturally on the
finest grid and which may also be required on the coarser levels of the multigrid algorithm, e.g. the current linearization point or the Active-Set.
We will present strategies to handle those problems with relative
ease. Indeed, the FEM library deal.II has gone to great lengths in
providing a rich and flexible interface for the implementation of such sophisticated methods.
The outline of this paper is as follows.
In \cref{sec:problem}, we will describe the governing equations of phase-field fractures and clarify some of the notation used.
Furthermore, we briefly describe some details on the general discretization scheme used.
In \cref{sec:solution}, we discuss our approach for the solution of the phase-field problem.
This includes the non-linear solver (Active-Set) and the linear solver (geometric Multigrid).
The main part of this work is presented in \cref{sec:matrixfree}, which is devoted to the matrix-free realization of this solution approach.
Finally, we present our findings and numerical results for various test cases in \cref{sec:results}.
\section{Problem Description}
\label{sec:problem}
In this section, we introduce the basic notation and the underlying equations.
In the following, let $D \subset \mathbb{R}^d, d=2,3$ the total domain wherein $\mathcal{C}\subset \mathbb{R}^{d-1}$ denotes the fracture and $\Omega \subset \mathbb{R}^d$ {is} the intact domain.
We assume (possibly time-dependent non-homogeneous) Dirichlet conditions on the outer boundary $\partial D$.
\begin{figure}
\caption{Illustration of the notation: sharp fracture $\mathcal{C}$ and its phase-field approximation $\Omega_F$.}
\label{pic:notation}
\end{figure}
In the phase-field approach, the one-dimensional fracture $\mathcal{C}$ is approximated by $\Omega_F \subset \mathbb{R}^d$ with the help of an elliptic (Ambrosio-Tortorelli) functional \cite{AmTo90,AmTo92}.
Using the phase-field variable $\varphi$ (introduced in \cref{sec:problem}), $\Omega_F$ can be defined as
\begin{equation}
\label{eq:omega}
\Omega_F := \{ x\in \mathbb{R}^d \vert \; \varphi(x) < 1 \}.
\end{equation}
The lower-dimensional fracture $\mathcal{C}$ and its phase-field approximation $\Omega_F$ are visualized in \cref{pic:notation}.
The width of the phase-field fracture is determined by $\varepsilon$.
For fracture formulations posed in a variational setting, this has been first proposed in \cite{BoFrMa00}.
The inner fracture boundary is denoted by the smeared $\varepsilon$-dependent boundary $\partial\Omega_F$.
We note that the precise location of $\partial\Omega_F$ is of no
importance in this work
(in contrast to e.g., \cite{MiWhWi15}).
The reader is referred to \cref{pic:notation} for an illustration of the notation.
Finally, we denote the $L^2$ scalar product with $(a,b) := (a,b)_{D}:=\int_{D} a \cdot b \ dx$.
For tensor-valued functions $A,B$, we use $(A,B) := (A,B)_{D}:=\int_{D} A : B \ dx$.
\subsection{Quasi-static phase-field for brittle fracture}
We briefly recapitulate the ingredients for a phase-field model for
solid mechanics and pressurized fractures in brittle materials.
Such a model is based on the variational/phase-field fracture approach of \cite{FrMa98,BoFrMa00}.
Thermodynamically-consistent phase-field techniques using a stress-split into tension and compression have been proposed in \cite{AmMaMa09} and \cite{MiWeHo10}.
The previous formulations start with an energy functional $E(u, \varphi)$ which is minimized with respect to the unknown solution variables $u : D \to \mathbb{R}^d$ (displacements) and a smoothed scalar-valued phase-field function $\varphi: D \to [0,1]$.
The latter one varies in the zone of size $\varepsilon$ from $0$ (fracture) to $1$ (intact material).
The first-order necessary conditions are the Euler-Lagrange equations, which are obtained by differentiation with respect to the two unknowns $u$ and $\varphi$.
Adding a pressure $p : D \to \mathbb{R}$ to the Euler-Lagrange
equations that acts on the fracture boundary has been formulated and
mathematically analyzed in \cite{MiWhWi15a,MiWhWi19} for staggered and
monolithic approaches, respectively.
In all the previous fracture models, the physics of the underlying problem ask to enforce a crack irreversibility condition (the crack can never heal) that is an inequality condition in time:
\begin{align}
\partial_t \varphi \leq 0.
\end{align}
Consequently, modeling of fracture evolution problems leads to a variational inequality system, that is always, due to this constraint, quasi-stationary or time-dependent.
The resulting variational formulation is stated in an incremental (i.e., time-discretized) formulation in which the continuous irreversibility constraint is approximated by \[ \varphi \leq \varphi^{old}. \]
Here, $\varphi^{old}$ will later denote the previous time step solution and $\varphi$ the current solution.
Let $V:=H^1_0(D)$ and
\begin{equation}
\label{W_in}
W_{in}:=\{w\in H^1(D) |\, w\leq \varphi^{old} \leq 1 \text{ a.e. on
} D\}
\end{equation}
be the function spaces we work with here; and for later purposes we also need $W:=H^1(D)$.
The Euler-Lagrange system for pressurized phase-field fractures reads \cite{MiWhWi19}:
\begin{problem}
\label{problem}
Let $p\in L^{\infty}(D)$ be given.
Find vector-valued displacements and a scalar-valued phase-field variable $\{ u, \varphi \}:= \{ u^n, \varphi^n \} \in \{u_D + V\} \times W_{in}$ such that:
\begin{align*}
& \left( g_\kappa (\varphi) \sigma^+(u), e(w) \right) + \left(\sigma^-(u), e(w) \right) + \left(\varphi^2 p, \mbox{div } w \right) = 0 \quad & \forall w \in V, \\
& & \\
& \frac{1}{2} \left( \partial_\varphi g_\kappa(\varphi) E_s^+(e(u)), \psi - \varphi \right) + 2 (\varphi \ p \ \mbox{div } u, \psi - \varphi) & \\
& + G_c \left( -\frac{1}{\varepsilon} ( 1-\varphi, \psi -\varphi ) + \varepsilon \left( \nabla \varphi, \nabla (\psi - \varphi ) \right) \right) \geq 0 \quad & \forall \psi \in W \cap L^\infty(D),
\end{align*}
at each incremental step $n=1,2,3,\ldots$.
\end{problem}
The elastic energy is denoted by $$E_s(e) := \frac{1}{2} \lambda \mbox{tr}(e)^2 + \mu (e, e)$$ with resulting stress tensor $$\sigma := \frac{\partial E_s(e)}{\partial e} = 2\mu e + \lambda \mbox{tr}(e) I.$$
The parameters $\mu$ and $\lambda$ denote the usual Lam\'e coefficients, $e(u) = \frac{1}{2} (\nabla u + \nabla u^T)$ is the linearized strain tensor and $I$ denotes the identity matrix.
The degradation function $g_\kappa$ is defined as $g_\kappa(\varphi) := (1-\kappa) \varphi^2 + \kappa$, with a regularization parameter $\kappa \ll 1$.
Different choices for $g_\kappa$ are possible (see \cite{KuScMl15}), for example $g_\kappa := \varphi^2 + \kappa$ as used in \cite{BoFrMa00}.
Physically, $\kappa$ represents the residual stiffness of the material.
Consequently, since \[ g_\kappa(\varphi) \rightarrow \kappa \quad \text{for } \varphi \to 0, \] the material stiffness decreases while approaching the fracture zone.
The pressure terms $( \varphi^2 p, \mbox{div } w)$ and $2 ( \varphi \ p\; \mbox{div } u, \psi - \varphi )$ have been derived in \cite{MiWhWi15a,MiWhWi19} and are based on an interface law that has been further manipulated using Gauss' divergence theorem.
A superscript $+$ denotes the splitting of energy/stress into two parts, i.e. $E_s = E_s^+ + E_s^-$ and consequently, similar for $\sigma$.
In this work, we mainly considered the case without splitting ($E_s^+ := E_s$) and the Miehe-type splitting $$E_s^+ := \frac{1}{2} \lambda \langle \mbox{tr}(e) \rangle_+^2 + \mu \mbox{tr}(e_+^2); $$ see \cite{MiWeHo10,MiHoWe10}.
The positive part of a scalar variable is defined as $\langle x \rangle_+ := \mbox{max}(0, x)$.
For the strain tensor, the positive part is defined by means of the eigenvalues.
More precisely, $$e_+ := \sum_{i=1}^{d} \langle \lambda_i \rangle_+ n_i \otimes n_i,$$ with $\lambda_i$ and $n_i$ denoting the eigenvalues and eigenvectors of the strain tensor $e$.
Consequently, the stress tensor splitting is defined as $\sigma^+(u) = \partial_e E_s^+$ (independent of the actual splitting used).
A discussion of various splitting laws can be found in \cite[Section 2.2]{BoVeSc12} and \cite{AmGeDe15}.
\subsection{Challenges}
\label{sec:challenges}
In the following, we will briefly describe the main challenges regarding the phase-field fracture propagation problem.
\textbf{Relationship of $\varepsilon - h$}
From the theory of $\Gamma$-convergence (e.g., \cite{AmTo92})
we want to have $h \in o(\varepsilon)$.
Intuitively speaking, we require a mesh-size $h$ small enough to resolve $\varepsilon$.
Hence, we do not only have to investigate convergence properties with
respect to $h$, but also to $\varepsilon$.
Due to the interplay of $h$ and $\varepsilon$, this is a serious
theoretical challenge.
Some first numerical experiments were undertaken in \cite{Wi16a}.
\textbf{Nonlinearities}
\Cref{problem} is nonlinear due to the monolithic formulation, the stress splitting, and the inequality constraint.
The most critical term is the quasi-linearity $\left(g_\kappa(\varphi)
\sigma^+(u), e(w) \right)$, which causes most of the challenges in
designing reliable and efficient solution algorithms because
the corresponding Hessian matrix is indefinite.
In fact, integrating this term with respect to $u$ yields the corresponding term on the energy level:
\[ \left( g_\kappa(\varphi) \sigma^+(u), e(u) \right), \]
which has been well-characterized to be non-convex in both variables
$u$ and $\varphi$ simultaneously in the very early work
\cite{BoFrMa00,Bo07} (without the stress-splitting though).
A very simple
prototype example demonstrating the difficulties was studied in \cite{Wi17b}.
\textbf{Linear solver}
Another major issue is the solution of the linearized systems of equations.
Depending on the linearization strategy used, we will have to solve many linear equations.
Thus, in order to simulate large problems, a fast and robust linear
solver is an absolute necessity.
Here, a crucial issue is the robustness of the linear solver with
respect to discretization, model, and material parameters.
\textbf{Matrix-Free framework}
In this work, we will consider a matrix-free implementation for the geometric multigrid preconditioner.
Implementing such a solver is quite challenging.
For example, quantities given naturally on the finest grid may also be required on the coarser levels of the multigrid algorithm, e.g. the current linearization point or the Active-Set.
In this paper we will present strategies to handle those problems with relative ease.
Furthermore, we would like to mention that the FEM library deal.II has gone to great lengths in providing a rich and flexible interface for the implementation of such sophisticated methods.
\textbf{Adaptivity and parallelization}
Solving ever larger problems towards practical applications requires, at some point, the use of more than one core to speed up the simulations.
Furthermore, adaptive refinement in the regions of interest is of huge importance.
This aims to keep the problem size as small as possible without losing accuracy of the final solution.
On the other hand, adaptivity usually poses additional difficulties towards the implementation and the linear solvers.
\subsection{A quasi-monolithic semi-linear form}
For the solution process, we add both equations in \cref{problem} and define a common semi-linear form $A$.
Here, we use a linear extrapolation \cite{HeWhWi15} in the first term
of the $u$-equation in order to deal with the otherwise non-convex
problem. Thus, we replace $\varphi$ by a time-lagged extrapolation
$\tilde\varphi$ yielding $g_\kappa (\tilde\varphi)$ and $\tilde\varphi^2$; see \cite{HeWhWi15}.
Hence, we obtain
\begin{align}
\begin{split}
\label{eq:semilinear}
A(u, \varphi)(w, \psi - \varphi) :=
&\left( g_\kappa (\tilde\varphi) \sigma^+(u), e(w) \right) + \left(\sigma^-(u), e(w) \right) + \left(\tilde\varphi^2 p, \mbox{div } w \right) \\
&+ \frac{1}{2} \left( \partial_\varphi g_\kappa(\varphi) E_s^+(e(u)), \psi - \varphi \right) + 2 (\varphi \ p \ \mbox{div } u, \psi - \varphi) \\
&+ G_c \left( -\frac{1}{\varepsilon} ( 1-\varphi, \psi -\varphi ) + \varepsilon \left( \nabla \varphi, \nabla (\psi - \varphi ) \right) \right).
\end{split}
\end{align}
Hence, \cref{problem} reduces to the solution of $A(u, \varphi)(w, \psi - \varphi) \geq 0$.
In order to deal with the variational inequality,
the constraint $\varphi \leq \varphi^{old}$ (prescribed in the set
$W_{in}$ in \eqref{W_in})
is treated with a primal-dual Active-Set algorithm, which is described in \cref{sec:active_set}.
\begin{remark}
In the literature, the phase-field approximation described
here is also referred to as AT~2 model (named after Ambrosio/Tortorelli) introduced in
\cite{AmTo90} for the Mumford-Shah problem and the original
variational fracture formulation \cite{BoFrMa00}.
Changing the last line in \cref{problem} leads to another well
known model referred to as AT~1, e.g., \cite{BoMaMa14}.
However, in this work we only consider the AT~2 model.
\end{remark}
\subsection{Discretization}
Further on, we will require discrete counterparts of the semi-linear form (\ref{eq:semilinear}) and its derivative.
For the discretization we use a quadrilateral ($2d$) and hexahedral ($3d$) decomposition of the mesh with $Q_1$ elements for the displacement $u$ and phase-field $\varphi$.
The implementation was done using the C++ FEM library deal.II \cite{BaHaKa07,AlArBa18}.
With the usual FEM approach, we define the discrete version of \eqref{eq:semilinear} as $$A_h(\underline{U}_h)_i := A(U_h)(\Phi_i) \quad \forall i = 1, 2, \dots .$$
Here, $\Phi_i$ denotes the $i$-th shape-function, corresponding to either $u$ or $\varphi$.
The current FE-function, at which the semi-linear form is evaluated, is given by $U_h := (u_h, \varphi_h)$.
Its coefficients are given by $\underline{U}_h$, i.e. $U_h = \sum_j \underline{U}_h^j \Phi_j$.
Similarly, we define the discretization of the Jacobian $$G(U_h)_{ij} := A'(U_h)(\Phi_i, \Phi_j).$$
For the sake of simplicity, we omit the underline and use $U$ instead
of $\underline{U}$ in the remainder of this paper.
\begin{problem}
\label{discreteproblem}
Let $V_h \subset V, W_h \subset W$ be standard conforming FE-spaces,
and let $p \in L^{\infty}(D)$ be given.
Find discrete vector-valued displacements and a scalar-valued phase-field variable $\{ u_h, \varphi_h \} := \{ u^n_h, \varphi^n_h \} \in \{u_D + V_h \} \times W_{h,in}$ such that:
\begin{align*}
& \left( g_\kappa (\varphi_h) \sigma^+(u_h), e(w_h) \right) + \left(\sigma^-(u_h), e(w_h) \right) + \left(\varphi_h^2 p, \mbox{div } w_h \right) = 0 \quad & \forall w_h \in V_h, \\
& & \\
& \frac{1}{2} \left( \partial_\varphi g_\kappa(\varphi_h) E_s^+(e(u_h)), \psi_h - \varphi_h \right) + 2 (\varphi_h \ p \ \mbox{div } u_h, \psi_h - \varphi_h) & \\
& + G_c \left( -\frac{1}{\varepsilon} ( 1-\varphi_h, \psi_h - \varphi_h ) + \varepsilon \left( \nabla \varphi_h, \nabla (\psi_h - \varphi_h ) \right) \right) \geq 0 \quad & \forall \psi_h \in W_h,
\end{align*}
at each incremental step $n = 1, 2, 3, \ldots$.
\end{problem}
\section{Solution Approach}
\label{sec:solution}
In this section, we discuss the question of how to solve \cref{discreteproblem}.
The remaining key challenge is the variational inequality resulting from
the irreversibility condition $\varphi \leq \varphi^{old}$.
Treating this inequality requires specially tailored solution methods.
In this work, we chose a variant of the well-known Primal-Dual Active-Set method \cite{ItKu00, HiItKu03}, which we briefly describe in the next section.
The linear systems of equations arising from the Active-Set method will be solved using the GMRES solver \cite{SaSc86} with geometric multigrid preconditioning.
\Cref{sec:multigrid} describes the details of the multigrid scheme.
\subsection{Primal-Dual Active-Set}
\label{sec:active_set}
We now turn to the description of the nonlinear solver.
We deal with two types of nonlinearities: nonlinear equations
and nonlinear behavior due to the inequality constraint.
The goal is to design an algorithm that treats both
simultaneously\footnote{In this paper, the combined nonlinear
(semi-smooth) Newton solver
is called primal-dual active set or, sometimes shorter, as active set (\cref{sec:results}).}.
The original $2d$ version adapted to the phase-field fracture problem was developed in \cite{HeWhWi15}.
In \cite{LeWhWi16}, and more recently in \cite{HeWi18}, it has been
proven to be computationally reliable also for the $3d$ setting.
In the following, we omit the subscript $h$ to make the notation more readable.
The general algorithm reads:
\begin{algorithm}[H]
\caption{Primal-dual active set}
\label{alg:active_set}
Repeat for $k=0, \dots$ until $\mathcal{A}_k = \mathcal{A}_{k-1}$ and $\Vert \widetilde{R}_k \Vert \leq \varepsilon_{as}$:
\begin{algorithmic}[1]
\State Assemble residual $R_k = -A(U_k)$
\State Assemble matrix $G_k = A'(U_k)$
\State Compute active set $\mathcal{A}_k = \{i \mid (M^{-1} R_k)_i + c \ (U_k - U^{old})_i > 0 \text{ and } i \sim \varphi \}$
\State Eliminate rows and columns in $\mathcal{A}_k$ from $G_k$ and $R_k$ to obtain $\widetilde{G}_k$ and $\widetilde{R}_k$
\State Solve linear system $\widetilde{G}_k \cdot \delta U_k = \widetilde{R}_k$
\State Update $U_{k+1} = U_k + \delta U_k$
\end{algorithmic}
\end{algorithm}
\begin{remark}
Step 3 requires the computation of $M^{-1} R_k$, where $M$ denotes the usual mass-matrix.
For practical purposes, we assemble $M$ using a Gauss-Lobatto quadrature rule.
This leads to a diagonal mass matrix, which is trivial to invert and store.
Note that the active set acts only on dofs associated to the phase-field $\varphi$ (indicated by $i \sim \varphi$).
Hence, it is sufficient to perform the computations in steps 3 and 4 only on the phase-field dofs.
The entries associated to the displacement remain unchanged.
In our simulations we chose $c = 100$ and $\varepsilon_{as} = 10^{-10}$.
\end{remark}
\begin{remark}
\Cref{alg:active_set} presented above switches between the computation of $\mathcal{A}_k$ (step 3) and a single Newton correction step on the inactive set $\mathcal{A}_k^c$ (steps 5-6).
We could also modify this method to perform more Newton corrections before updating the active set again.
However, in our experiments we observed that the bottleneck in
this method is the convergence of the active set, i.e.,
$\mathcal{A}_k = \mathcal{A}_{k-1}$
and not the Newton convergence criterion (see also Fig. 14 in \cite{HeWhWi15}).
\end{remark}
\subsection{Geometric Multigrid}
\label{sec:multigrid}
In this work, we solve the linear equations in Step $5$ in Algorithm
\ref{alg:active_set}
from the Active-Set algorithm using a geometric multigrid approach.
The main idea of multigrid methods is to solve problems on coarser
grids and use
this information to enhance the solution on the finest grid.
Transfer of vectors from one grid level $L$ to another level $l$ is achieved by the canonical restriction and prolongation operators $\mathcal{R}^L_l$ and $\mathcal{P}^L_l$.
However, restricting a function to a coarser grid is subject to heavy losses of accuracy, since we cannot represent highly oscillating parts anymore.
One of the key steps to make multigrid methods work is to ``smooth'' the vector that needs to be transferred.
This reduces the oscillating parts and hence, it can be approximated on a coarser grid without losing too much information.
For more details regarding multigrid methods we refer the reader to the standard literature, see e.g. \cite{Br93,Ha94}.
Smoothing is handled by the Chebyshev-Jacobi method, which will be described in \cref{sec:smoother}.
A slight variation of the smoother is used for the solution on the coarsest grid as explained in \cref{sec:coarse}.
\Cref{sec:precondition} illustrates the use of multigrid methods for systems with block-structure.
\subsubsection{Chebyshev-Jacobi Smoother}
\label{sec:smoother}
Our final goal is to solve \cref{discreteproblem} (i.e., Step 5 in
Algorithm \ref{alg:active_set}) in a matrix-free fashion.
Hence, we do not have the matrix $G$ at our disposal, which narrows the selection of applicable smoothers.
A frequently used smoother in the context of highly parallel matrix-free computations is the Chebyshev-accelerated Jacobi smoother, which works for positive definite and symmetric problems.
The important property of the Chebyshev-Jacobi smoother with respect to a matrix-free formulation is, that it only relies on matrix-vector products and an estimate of the largest eigenvalue.
In particular, an explicit representation of the matrix $G$ is not required.
Chebyshev acceleration is applied on top of a general iterative method with iteration matrix $M$, i.e. $$x_{m+1} = M x_m + b$$ for the solution of $A x = b$.
The accelerated iteration matrix is then given by $p(M)$, with $p(x)$ denoting the (scaled and shifted) Chebyshev polynomials on the interval $[a, b]$, resulting in $$x_{m+1} = p(M) x_m + b.$$
For practical purposes, it makes sense to utilize the well-known three-term recurrence relation for Chebyshev polynomials.
More details can be found in \cite{Va99} and the references provided therein.
The interval $[a,b]$ determines which eigenvalues should be targeted by the acceleration technique.
For example: if it is used as a solver, we would like to reduce the error with respect to all eigenvalues/vectors.
Hence, it would be beneficial to have $a := \lambda_{min}, b := \lambda_{max}$.
However, information about the extremal eigenvalues is (most often) not available.
Luckily, $\lambda_{max}$ is easy to approximate, e.g. by means of a CG method.
In our work, the main purpose for the Chebyshev method is to act as a smoother inside a multigrid scheme.
There, the small eigenvalues are usually handled by the coarse-grid correction.
Hence, we want to focus the action of the Chebyshev acceleration to some of the largest eigenvalues only, i.e. $[c \ \lambda_{max}, \lambda_{max}]$, with $0 < c < 1$.
In our application, we apply this acceleration technique to the Jacobi method, whose iteration matrix is given by $M = (I - D^{-1} A)$.
The matrix diagonal $D$ required here can be easily computed also in
the matrix-free context,
as discussed in \cref{sec:matrixfree}.
A comparison of different smoothers regarding their performance
in HPC (high performance computing) applications is
demonstrated in \cite{AdBrHu03,BaFaKo11}.
Within our numerical experiments presented in Section \ref{sec:results}, we approximate $\lambda_{max}$ using $10$ CG iterations.
A safety factor of $1.2$ is included, to account for possible underestimation of $\lambda_{max}$ by the CG method.
This is required, since the total number of iterations increases a lot, if the smoothing range is smaller than $\lambda_{max}$.
On the other hand, overestimating $\lambda_{max}$ does not significantly alter the number of required iterations.
This behavior is also observed in \cite{AdBrHu03}.
As denoted before, we chose the lower eigenvalue bound as a fraction of the upper bound.
Summarizing, we define the smoothing range as $[1.2 \ \tilde\lambda_{max} / 5, 1.2 \ \tilde\lambda_{max}]$, where $\tilde\lambda_{max}$ is the approximation obtained via the CG method.
Furthermore, we restrict ourselves to Chebyshev polynomials of degree $4$.
\subsubsection{Coarse-Grid Solver}
\label{sec:coarse}
On the coarsest multigrid level, we use the Chebyshev method again but now for solving instead of smoothing.
We adjust the ``smoothing'' range to $[0.9 \ \tilde\lambda_{min}, 1.2 \ \tilde\lambda_{max}]$, i.e. treating the whole (approximate) spectrum.
In our experiments, using more expensive solvers (e.g. LU-factorization) did not yield significant improvements.
\subsection{Preconditioning}
\label{sec:precondition}
We recall that at Step 5 in Algorithm \ref{alg:active_set} we
solve
\[
\widetilde{G} \cdot \delta U = \widetilde{R}
\]
where $\widetilde{G}$ has the block structure
\[
\widetilde{G} =
\begin{pmatrix}
G_{uu} & G_{u\varphi}\\
G_{\varphi u} & G_{\varphi\varphi}
\end{pmatrix}
\]
The block $G_{u\varphi}$ is zero because we use the time-lagged
extrapolation in the displacement equation and therefore the
linearization with respect to $\varphi$ is zero.
The preconditioned system reads:
\[
P^{-1} \widetilde{G} \cdot \delta U = P^{-1} \widetilde{R}
\]
with a preconditioner $P^{-1}$ approximating the inverse of $\widetilde{G}$.
With these preparations, the geometric multigrid can be applied in several ways.
One possibility is to utilize the block-structure in matrix $\widetilde{G}$ and apply a multigrid scheme to each of the diagonal blocks.
This yields the block-diagonal preconditioner $$P_{diag}^{-1} := \begin{bmatrix} MG(\widetilde{U}) & 0 \\ 0 & MG(\widetilde{P}) \end{bmatrix},$$ with Chebyshev-Jacobi smoothers $\mathcal{S}$ used inside each of the multigrid methods $MG$.
A different approach is to apply the multigrid scheme to the entire linear system $\widetilde{G}$, yielding $$P_{full}^{-1} := MG(\widetilde{G}).$$
Within this approach, the block-structure is utilized inside the smoother in a similar manner as before:
$$\mathcal{S}_{full} := \begin{bmatrix} \mathcal{S}(\widetilde{U}) & 0 \\ 0 & \mathcal{S}(\widetilde{P}) \end{bmatrix},$$ with Chebyshev smoothers for the blocks $U$ and $P$.
One could extend the diagonal smoother $\mathcal{S}_{full}$ by adding the off-diagonal block.
However, we did not observe a significant decrease in the number of required iterations.
\section{Matrix-Free}
\label{sec:matrixfree}
Due to ever increasing demand to solve larger and larger problems, memory limitations tend to become more of an issue.
To avoid these limitations, we aim to implement the geometric multigrid solver in a matrix-free fashion.
This allows us to handle large problems without the need for enormous amounts of memory.
In this section, we describe some basic aspects and algorithmic details regarding the matrix-free realization of the geometric multigrid solver.
The main components are the matrix-free vector multiplication on the finest level and the corresponding part on the coarser levels.
We split the discussion into these two parts, where the former contains common aspects of the matrix-free framework, whereas the latter contains some peculiarities regarding the operators on the coarser levels.
\subsection{Matrix-Vector Multiplication in the Matrix-Free Context}
In this part, we briefly illustrate the main principles behind matrix-free techniques.
More details on matrix-free and its integration into deal.II are found in \cite{KrKo12}, and \cite{KrKo17} for discontinuous elements.
Within the matrix-free context, we want to evaluate $v = G \cdot u$ without actually assembling the FE-matrix $G := G(U_h)$.
This product may be rewritten in an element-wise fashion $$G \cdot u = \sum_{k=1}^{n_e} C^T P_k^T G_k (P_k C u).$$
Here, $n_e$ denotes the number of elements, $C$ defines possible constraints on dofs, $P_k$ is the element-wise global-to-local mapping and $G_k$ are the local stiffness matrices.
We can further rewrite the local contributions by transforming it to the reference element.
For simplicity, we illustrate this for the Laplace problem $(\kappa(x) \nabla u , \nabla v )$.
Then we have $$G_k = B^T_R J^{-1}_k D_k J_k^{-T} B_R,$$ with $J_k$ denoting the reference element transformation, $B_R$ the gradient matrix on the reference element and $D_k$ a diagonal matrix representing the coefficient $\kappa$.
The precise definition of all quantities is given in \cite{KrKo12}.
An important further optimization consists of utilizing the tensor-product structure for computation of $B_R$ on the reference element $[0,1]^d$.
This changes the computational complexity from $\mathcal{O}(p^{2d})$ to $\mathcal{O}(p^{d+1} d^2)$ per element.
\begin{remark}
Similar derivations can be carried out for different bilinear forms.
Luckily, this is already included in deal.II for operators of the form $(\cdot, \Phi)$, $(\cdot, \nabla \Phi)$ and $(\cdot, \Delta \Phi)$, i.e. arbitrary terms tested with the test-function $\Phi$, its gradient or its Laplacian.
This is more than sufficient to cover most partial differential equations.
Furthermore, deal.II implements vectorization with respect to the elements, i.e. computations are carried out on multiple elements at the same time, if run on a reasonably modern CPU \cite{KrKo12,KrKo17}.
\end{remark}
\begin{remark}
In a similar manner, one can explicitly compute the vector of diagonal entries of the matrix $A$.
This is required for the Jacobi-smoother in the multigrid method.
Furthermore, the matrix-free framework can also be used to compute the residual $R_k$ required within the nonlinear solver.
\end{remark}
\subsection{Coarse-Level Operators}
The operators on the coarse levels are implemented in a matrix-free fashion as well.
Most parts of the implementation match those on the fine level $L$; however, there are some important differences.
First of all, the active set, linearization point and old solutions naturally live on the finest grid.
In order to apply the matrix-free operator, we need to transfer this information to the coarser levels $l$ as well.
By $\mathcal{R}_l$ we will denote the restriction operator mapping from the finest level to level $l$.
For the active set, our approach starts by building a vector $a$ representing the active set $\mathcal{I}_\mathcal{A}$ by
$$ a := (a^i) := \begin{cases} 1 &i \in \mathcal{A} \\ 0 &\text{else} \end{cases} $$
for all dof indices $i$.
The active sets on the coarser levels $\mathcal{A}_l$ are then computed via the restrictions $a_l := \mathcal{R}_l a$ by $\mathcal{A}_l := \{ i : a_l^i = 1 \}.$
Note that the entries $a_l^i \in \{0, 1\}$, since we are using $Q_1$ elements on nested grids, i.e. each vertex (and hence dof) on level $l$ also exists on all finer levels.
The big advantage of this method is that it only uses the restriction operators $\mathcal{R}_l$, which are readily available in the multigrid context.
The natural way to evaluate the coarse level operators would include information about the linearization point and old solutions from the finest level, i.e. we would need to evaluate $G_l(U) \cdot v_l$ analogously to $G(U) \cdot v$ on the finest level as before.
However, this is very expensive, since for each quadrature point on the coarse levels we have to
\begin{enumerate}
\item find the fine-grid element this point belongs to,
\item evaluate all fine-level basis functions (or its gradients) on this element, and
\item compute the value/gradient using the current linearization point on the fine level.
\end{enumerate}
One could precompute parts of this information in order to save computational time; however, this would defeat the whole purpose of the matrix-free approach, as it introduces lots of memory overhead.
Furthermore, these values would have to be updated again every time the linearization point changes.
To overcome this, we approximate the evaluation of $G_l(U) \cdot v_l$ by $G_l(U_l) \cdot v_l$, with $U_l := \mathcal{R}_l U$ being the restriction to the coarser levels.
Thus, evaluating $G_l$ now depends only on information from level $l$.
This is way cheaper, since step (1) is no longer required and step (2) is required anyway for the matrix-free multiplication (but now on the coarse elements), whereas step (3) is cheap in any case.
The only additional cost comes from the computation and storage of $U_l = \mathcal{R}_l U$ for all levels.
Again, this computation needs to be redone every time $U$ changes.
\begin{remark}
For the sake of simplicity, we omitted the dependence of $G$ on the old solutions $U^{n-1}, U^{n-2}$ (required for the extrapolation).
These can be treated in the very same way as $U := U^n$.
\end{remark}
\section{Numerical Results}
\label{sec:results}
We proceed to show the capabilities of the presented multigrid method applied to some test cases frequently found in the literature.
In all numerical experiments, we mainly vary the level of refinement and consider fixed values for (most) parameters.
The linear solver consists of an outer GMRES solver, preconditioned by the previously presented monolithic multigrid method.
The stopping criterion for the Active-Set method is determined by an absolute tolerance $\varepsilon_{as} := 10^{-10}$.
The linear solver stops once the absolute residual is less than $10^{-10}$ or a reduction of $10^{-4}$ compared to the initial (linear) residual is reached, i.e. $\Vert r_k \Vert \leq \mbox{max}\{ 10^{-12}, 10^{-4} \Vert r_0 \Vert \}$.
Quantities of interest include loads and energies.
The crack energy is given by $$\int_{\Omega} \frac{G_c}{2} \left( \frac{1}{\varepsilon} (1 - \varphi)^2 + \varepsilon (\nabla \varphi, \nabla \varphi ) \right) dx.$$
For the bulk energy we have $$\int_{\Omega} \left( g_\kappa(\varphi) E_s^+(u) + E_s^-(u) + \varphi^2 p \ \mbox{div } u \right) dx.$$
The loading (in $y$-direction) is evaluated on parts of the boundary using $$L_y = \int_{\Gamma_L} (\sigma \cdot n)_y ds.$$
In the upcoming sections, we present several test scenarios.
We start with a setting showing multiple fractures joining and branching, taken from \cite{HeWhWi15,HeWi18}.
We proceed with a L-shaped panel test, which illustrates crack initiation without predefined fractures.
This test is frequently found in the literature, see e.g. \cite{AmGeDe15,BeMoCh12,FeHo06,UnEcKo07,Wi17,MeDu07} and was first reported in \cite{Wi01}.
We conclude with a simple $3d$ variation of the L-shaped test to show the applicability of our method also in the $3d$ setting.
\newcommand{\summary}[1]{
\pgfplotstabletypeset[
columns={lvl, dofs, cells, h, eps},
columns/lvl/.style={column name=$l$},
columns/dofs/.style={sci, zerofill, precision = 1},
columns/h/.style={column name=$h$, sci, precision = 1},
columns/eps/.style={column name=$\varepsilon$, sci, precision = 1},
columns/cells/.style={column name={elements}, sci, zerofill, precision = 1},
every head row/.style={before row=\toprule,after row=\midrule},
every last row/.style={after row=\bottomrule},
]{#1}
}
\subsection{Multiple Fractures}
\subsubsection{Problem Description}
This problem consists of two prescribed fractures inside the domain $\Omega = ( 0\mbox{ m} , 4\mbox{ m} ) ^2$ as shown in \cref{multiple:geometry}.
Further propagation of the fractures is driven by an increasing pressure inside the cracks, in particular $p(t) = 10^3 t \mbox{ Pa}$.
For the computation, a constant time increment of $dt := 10^{-2} s$ has been used.
The initial cracks are given at $ \{ 2.5 - h / 2 \leq x \leq 2.5 + h / 2 \text{ and } 0.8 \leq y \leq 1.5 \} $ (vertical fracture) and $ \{ 0.5 \leq x \leq 1.5 \text{ and } 3 - h / 2 \leq y \leq 3 + h / 2 \} $ (horizontal fracture), with the element diameter $h$.
Initial conditions are given by homogeneous Dirichlet conditions for $u$.
The phase-field regularization parameter is chosen as $\varepsilon :=
h$.
The full list of parameters is summarized in \cref{multiple:summary,multiple:parameters}.
\begin{table}[ht]
\renewcommand{\arraystretch}{1.3}
\centering
\summary{data/summary_multiple_refinement_heps/summary.csv}
\caption{Multiple Fractures.
Values and parameters for the different tests.}
\label{multiple:summary}
\end{table}
\begin{table}[ht]
\renewcommand{\arraystretch}{1.3}
\centering
\begin{tabular}{c|c|l}
Variable & Value & Unit \\ \hline
$\nu$ & $0.2$ & $[1]$ \\
$E$ & $10^4$ & $[Pa]$\\
$\mu$ & $ 4166.6 $ & $[N/m^2]$ \\
$\lambda$ & $ 2777.7 $ & $[N/m^2]$ \\
$\alpha$ & $0.0$ & $[1]$ \\
$p(t)$ & $10^3 t$& $[N/m^2]$ \\
$G_c$ & $1.0$ & $[N / m]$ \\
$\kappa$ & $10^{-10}$ & $[1]$ \\
$\varepsilon$ & $0.044$ & $[m]$ \\
$dt \ [s]$ & $10^{-2}$ & $[s]$ \\
\end{tabular}
\caption{Multiple Fractures.
Material parameters and configuration. Lam\'e parameters $\mu, \lambda$ are derived from the Poisson ratio $\nu$ and elastic modulus $E$.}
\label{multiple:parameters}
\end{table}
\begin{figure}
\caption{Multiple Fractures.
Geometry and position of initial cracks.}
\label{multiple:geometry}
\end{figure}
\subsubsection{Random Variations}
Following the original experiments presented in \cite{HeWhWi15}, we consider a randomly perturbed elastic modulus for the multiple fracture problem.
The elastic modulus is varied between $0.1$ and $1$-times its original value.
The degree of perturbation is given by a ``smooth'' random field $r(x,y)$, illustrated in \cref{multiple:pictures}.
Computation of $r(x,y)$ was done using the Simplex algorithm provided by the C++ library FastNoise \cite{Au18}.
The precise settings to reproduce the created random field are (without explanation): seed $= 2$, frequency $= 0.5$, simplex noise and linear interpolation.
Source code and an explanation of the different settings can be found in the online repository\footnote{\url{https://github.com/Auburns/FastNoise}, as of Dec. 1st, 2018}.
This provides a way of deterministic evaluation of the random field at given coordinates, i.e. equal coordinates always result in the same value.
Therefore, this information can be used consistently on all multigrid levels and possibly multiple cores without additional effort.
\begin{remark}
The main purpose of these random variations is to show that our solver is capable of handling perturbations.
We do not aim to provide a realistic representation of inhomogeneous material coefficients.
\end{remark}
\subsubsection{Numerical Results}
In \cref{multiple:iterations} (left), the number of iterations of the linear solver (GMRES) per Active-Set step is shown for different refinement levels.
We observe an overall increase in the number of iterations as the fracture grows, with larger spikes when the crack touches the boundary.
Up to this moment, the effect of $h$-refinement is negligible, showing the robustness of our method.
Afterwards, the behavior is more irregular, with the largest spike happening for $l=7$.
In the case of Miehe-splitting (\cref{multiple:iterations}, right), we require more iterations of the linear solver.
Furthermore, the irregular iteration counts become more prominent once the fracture gets close to the boundary ($\sim T = 0.2s$).
Similar observations are obtained for the Amor-type splitting.
The number of Active-Set iterations behaves quite similar. \Cref{multiple:activeset} shows these numbers over time for different refinement levels. Again, in the case without splitting, the iteration counts remain quite stable, with some peaks towards the end of the simulation. In the Miehe-case, these peaks become more severe.
\begin{figure}
\caption{Multiple fractures.
Iterations of the linear solver per active set step over time for different refinement levels $l$ with $\varepsilon = h$.
Left: no stress splitting, right: Miehe-type stress splitting.}
\label{multiple:iterations}
\end{figure}
\begin{figure}
\caption{Multiple fractures (Example 1).
Number of Active-Set steps over time for different
refinement levels $l$ with $\varepsilon = h$.
Left: no splitting. Right: Miehe-splitting.
}
\label{multiple:activeset}
\end{figure}
\Cref{multiple:pictures} shows the resulting fracture
using randomly perturbed values for the elastic modulus.
The random variations range from $0.1$ to $1$-times the original value, indicated by the image on the right.
There, black denotes large values of elastic modulus, whereas white denotes small values.
It can be observed that the resulting fracture avoids the high elastic modulus regions.
\begin{figure}
\caption{Multiple fractures.
Fracture pattern using random variations in the elastic modulus ranging from $0.1$ (white) to $1$ (black) times the original value as shown in the right figure.}
\label{multiple:pictures}
\end{figure}
Matrix-free implementations are particularly suitable for parallelization.
A first result into this direction is shown in \cref{multiple:scaling}.
There, the speed-up for a full simulation of the multiple fracture test with $0.8 m$ and $3.2 m$ dofs is shown.
The parallel (strong) scaling using distributed parallelization is close to perfect down to a local problem size of roughly $25 k$ dofs.
When using more cores, the performance drops due to the increased overhead required for communication for smaller local problems.
This allows us to reduce the computational time for the whole simulation from $400$ minutes down to $10$ minutes on $64$ cores for $l = 7$.
In the case of $l = 8$, the simulation time drops from $35$ hours down to $23$ minutes using $128$ cores.
\begin{figure}
\caption{Multiple fractures.
Speedup using distributed parallelization depending on the number of cores for a full simulation with $\varepsilon = h$ and roughly $0.8 m$ and $3.2 m$ dofs.
Remarkably, the total number of iterations (Active-Set, linear) matches perfectly for all numbers of cores.
}
\label{multiple:scaling}
\end{figure}
\subsection{L-Shaped Panel}
\subsubsection{Problem Description}
In this section, we consider the L-shaped panel test, which illustrates crack initiation and propagation without any predefined fractures.
The computational domain $\Omega_L$ is given by an L-shape $(0, 500)^2 \setminus \left( (250, 500) \times (0, 250) \right)$, as depicted in \cref{lshape:configuration} (measures given in $mm$).
Fracture propagation is driven by cyclic displacement boundary conditions $u_y(t)$ on $(500 - 30, 500) \times \{ 250 \}$, i.e. the lower boundary of the upper right part of the L-shape.
The specimen is fixed on the lower part, i.e. $u = 0 \text{ on } (0, 250) \times \{0\}$.
No additional pressure is enforced inside the crack, hence $p = 0$.
The time-dependent displacement (given in $mm$) is defined by
$$u_y(t) := \begin{cases} t & 0 s \leq t < 0.3 s, \\ 0.6 - t & 0.3 s \leq t < 0.8 s, \\ -1 + t & 0.8 s \leq t < 2 s, \end{cases}$$
leading to a final displacement of $1 mm$ after $2 s$.
A constant time-step of $dt := 10^{-3}$ was employed.
Again, we chose $\varepsilon := h$ for the fracture parameter.
The load is evaluated on the top boundary $\Gamma_{up} := (0,500) \times \{ 500 \}$.
The full list of parameters is summarized in \cref{lshape:summary,lshape:parameters}.
\Cref{lshape:pic} shows the resulting crack pattern at different points in time.
\begin{figure}
\caption{L-shaped panel test.
Geometry and cyclic displacement boundary conditions for the L-shaped panel test.}
\label{lshape:configuration}
\end{figure}
\begin{table}[ht]
\renewcommand{\arraystretch}{1.3}
\centering
\summary{data/summary_lshape_refinement_heps/summary.csv}
\caption{L-Shaped Panel.
Important values and parameters for different refinement levels $l$.}
\label{lshape:summary}
\end{table}
\begin{table}[ht]
\renewcommand{\arraystretch}{1.3}
\centering
\begin{tabular}{c|c|l}
Variable & Value & Unit \\ \hline
$\mu$ & $ 10.95 $ & $[kN/mm^2]$ \\
$\lambda$ & $ 6.16 $ & $[kN/mm^2]$ \\
$G_c$ & $8.9 \cdot 10^{-5}$ & $[kN / mm]$ \\
$\kappa$ & $10^{-10}$ & $[1]$ \\
$\varepsilon$ & $11$ & $[mm]$ \\
$dt$ & $10^{-3}$ & $[s]$ \\
\end{tabular}
\caption{L-Shaped Panel.
Parameters.}
\label{lshape:parameters}
\end{table}
\begin{figure}
\caption{L-Shaped Panel.
Resulting fracture pattern at times $t = 0.22s, t = 0.3s$ (top) and $t = 1.45s, t = 2s$ (bottom)}
\label{lshape:pic}
\end{figure}
\subsubsection{Numerical Results}
For this second test, the number of Active-Set iterations is shown for $\varepsilon = h$ and $\varepsilon = 22.0$. Random peaks happen in either case, but seem to cluster at different times. The iteration counts increase with higher levels towards the end of the simulation in case of constant $\varepsilon$. The $h$-dependent scenario is more robust in this regard.
The number of GMRES iterations presented in \cref{lshape:iterations} shows similar characteristics as before.
Again, the iteration counts increase once the fracture starts growing at $t \sim 0.3s$ and again at $t \sim 0.8s$.
In the first half of the simulation, our linear solver is robust with respect to $h$-refinement.
During the second half, the number of iterations increases mildly on refinement in the case of $\varepsilon = h$, but stays almost constant for $\varepsilon = const$.
On the finest grid ($l=8$), a huge increase can be observed in either case at $t \sim 0.8s$.
In the case of constant $\varepsilon$, such irregular behavior can also be observed in the first half of the simulation.
These results were obtained using the Miehe splitting, but similar findings were gathered for the Amor splitting.
The solver also works if no splitting is applied ($\sim 2-7$ iterations), however, no crack initiation occurred in this case.
\begin{figure}
\caption{L-shaped panel.
Number of Active-Set steps over time for different refinement levels $l$ with $\varepsilon = h$ and $\varepsilon = 22 mm$ (right).}
\label{lshape:activeset}
\end{figure}
\begin{figure}
\caption{L-shaped panel.
Iterations of the linear solver per active set step over time for different refinement levels $l$ with $\varepsilon = h$ (left) and $\varepsilon = 22 mm$ (right).}
\label{lshape:iterations}
\end{figure}
\Cref{lshape:load_displacement} shows the load-displacement curves for different refinement levels.
Our findings look similar to results found in the literature in the case of $\varepsilon = h$.
Since the phase-field parameter $\varepsilon$ changes during refinement, convergence of the loading curves is not necessarily expected.
Nonetheless, the overall shapes of the presented curves mostly agree with each other.
A major difference happens on the finest grid ($l = 8$), where the load drops to zero at the turning point $u=-0.2m$, which does not happen on the coarser levels.
The same effect is also observed in the right image of \cref{lshape:load_displacement} (constant $\varepsilon$).
In addition, the refinements $l=7$ and $l=8$ yield negative loading values during the second cycle, which is not as expected.
Furthermore, small irregularities are visible at turning points, i.e. step-like curves somewhere between displacements of $0.2-0.4 mm$.
Similar artifacts are also visible in the crack and bulk-energy, see \cref{lshape:crackenergy,lshape:bulkenergy}.
This test case seems to be very sensitive to the actual choice of
parameters, elements and meshes,
which is also reported in \cite{Wi17,MaWiWo19}.
\begin{figure}
\caption{L-shaped panel.
Load -- displacement curves for different refinement levels $l$.
The left image shows the results for $\varepsilon = h$, whereas on the right we have a fixed value $\varepsilon = 22$.}
\label{lshape:load_displacement}
\end{figure}
\begin{figure}
\caption{L-shaped panel.
Crack energy for different refinement levels $l$ with $\varepsilon = h$ (left) and $\varepsilon = 22$ (right).}
\label{lshape:crackenergy}
\end{figure}
\begin{figure}
\caption{L-shaped panel.
Bulk energy for different refinement levels $l$ with $\varepsilon = h$ (left) and $\varepsilon = 22$ (right).}
\label{lshape:bulkenergy}
\end{figure}
\subsection{$3d$ L-Shaped Panel}
\subsubsection{Problem Description}
In this section, we consider a $3d$ version of the previous L-shaped
panel test. A related numerical study is \cite{MeBoKh15}. A related
experimental test was previously proposed in \cite{Wi01}.
The computational domain is given by extruding $\Omega_L$ from before into z-direction.
Hence, $\Omega_L^{3d} := \Omega_L \times (0, 250)$, as depicted in \cref{lshape3d:configuration} (measures given in $mm$).
The same cyclic displacement boundary conditions are applied as before on the extruded area $(500 - 30, 500) \times \{ 250 \} \times (0, 250)$.
The specimen is fixed on the bottom, i.e. $u = 0 \text{ on } (0, 250) \times \{0\} \times (0, 250)$.
Again, the loading force is evaluated on the upper boundary $\Gamma_{up} := (0,500) \times \{ 500 \} \times (0, 250)$.
The full list of parameters is summarized in \cref{lshape3d:summary,lshape3d:parameters}.
\Cref{lshape3d:pic} illustrates the resulting fracture.
\begin{figure}
\caption{$3d$ L-shaped panel.
Geometry and loading for the L-shaped panel test.}
\label{lshape3d:configuration}
\end{figure}
\begin{table}[ht]
\renewcommand{\arraystretch}{1.3}
\centering
\summary{data/summary_lshape3d/summary.csv}
\caption{$3d$ L-Shaped Panel.
Important values and parameters for different refinement levels $l$.}
\label{lshape3d:summary}
\end{table}
\begin{table}[ht]
\renewcommand{\arraystretch}{1.3}
\centering
\begin{tabular}{c|c|l}
Variable & Value & Unit \\ \hline
$\mu$ & $ 10.95 $ & $[kN/mm^2]$ \\
$\lambda$ & $ 6.16 $ & $[kN/mm^2]$ \\
$G_c$ & $8.9 \cdot 10^{-5}$ & $[kN / mm]$ \\
$\kappa$ & $10^{-10}$ & $[1]$ \\
$\varepsilon$ & $11$ & $[mm]$ \\
$dt$ & $10^{-3}$ & $[s]$ \\
\end{tabular}
\caption{$3d$ L-Shaped Panel.
Parameters.}
\label{lshape3d:parameters}
\end{table}
\begin{figure}
\caption{$3d$ L-Shaped Panel.
Resulting fracture pattern at times $t = 0.22s, t = 0.3s$ (top) and $t = 1.45s, t = 2s$ (bottom). Only parts with $\varphi < 0.75$ are shown.}
\label{lshape3d:pic}
\end{figure}
\subsubsection{Numerical Results}
The results for the $3d$ extension of the L-shaped panel test are very similar to the previously shown $2d$ setup.
The number of Active-Set iterations is in the same range as before, but fewer peaks are visible in the $3d$ setting, see \Cref{lshape3d:iterations_active} (left).
\Cref{lshape3d:iterations_active} (right) shows that the number of GMRES iterations per Active-Set step is again very similar, i.e. roughly $4-10$ iterations.
We also observe a similarly increased iteration count at times when the fracture is growing.
Also, the shape of energy and loading curves (\cref{lshape3d:load_displacement,lshape3d:energy}) look very similar in the $2d$ and $3d$ case.
The exception being the coarsest $3d$ grid, which shows a very roundly shaped loading curve, which does not agree with the $2d$ setting.
However, the numbers do not even remotely agree, as seen in \cref{lshape3d:load_displacement,lshape3d:energy}.
\begin{figure}
\caption{$3d$ L-shaped panel.
Left: number of Active-Set iterations over time.
Right: number of iterations of the linear solver per active set step over time for different refinement levels $l$ with $\varepsilon = h$.}
\label{lshape3d:iterations_active}
\end{figure}
\begin{figure}
\caption{$3d$ L-shaped panel.
Load -- displacement curves for different refinement levels $l$ in the $3d$ case (left).
For comparison, we show the respective results for $2d$ case again (right).}
\label{lshape3d:load_displacement}
\end{figure}
\begin{figure}
\caption{$3d$ L-shaped panel.
Crack and bulk energy for different refinement levels $l$ with $\varepsilon = h$.}
\label{lshape3d:energy}
\end{figure}
\section{Conclusions and Future Work}
In this work, we developed a matrix-free geometric multigrid solver
for fracture propagation problems
using a phase-field description.
Our numerical experiments show that this solver can handle a variety of test cases within a reasonable number of iterations.
Although we have only shown results for the AT 2 model, it shall be noted that the solver also works for the simpler AT 1 model, with iteration counts usually below the presented values.
Limitations of this method appear primarily in the case of stress-splittings.
The improvement of the solver in these cases is part of our future work.
In order to deal with larger simulations (in particular in 3d), incorporating adaptivity and/or parallelization is necessary; both of which are part of our ongoing work.
Another point of concern is the Active-Set method.
In some simulations, convergence of this non-linear solver is observed
to be very slow as also indicated in fundamental studies on
primal-dual active set methods, e.g., \cite{Ha15,CuHaRo15}.
Hence, it may be beneficial to further investigate properties of the
active set method with the goal of further improvements.
Ultimately, one could also think of combining the non-linear and linear solver in terms of nested iterations or non-linear multigrid methods (see e.g. \cite{Kr06,Ha94}).
\section{Acknowledgments}
This work has been supported by the Austrian Science Fund (FWF) grant P29181 `Goal-Oriented Error Control for Phase-Field Fracture Coupled to Multiphysics Problems'.
\printbibliography
\end{document}
|
\begin{document}
\onehalfspace
\title{Constant Threshold Intersection Graphs\\ of Orthodox Paths in Trees}
\author{Claudson Ferreira Bornstein$^1$
\and
Jos\'{e} Wilson Coura Pinto$^2$
\and
Dieter Rautenbach$^3$
\and
Jayme Luiz Szwarcfiter$^2$
}
\date{}
\maketitle
\begin{center}
$^1$ DCC-IM, Federal University of Rio de Janeiro,
Rio de Janeiro, Brazil\\
\texttt{[email protected]}\\[3mm]
$^2$ PESC-COPPE, Federal University of Rio de Janeiro,
Rio de Janeiro, Brazil\\
\texttt{jwcoura,[email protected]}\\[3mm]
$^3$ Institute of Optimization and Operations Research,
Ulm University, Germany\\
\texttt{[email protected]}
\end{center}
\begin{abstract}
A graph $G$ belongs to the class ${\rm ORTH}[h,s,t]$ for integers $h$, $s$, and $t$
if there is a pair $(T,{\cal S})$,
where $T$ is a tree of maximum degree at most $h$,
and ${\cal S}$ is a collection $(S_u)_{u\in V(G)}$ of subtrees $S_u$ of maximum degree at most $s$ of $T$, one for each vertex $u$ of $G$,
such that,
for every vertex $u$ of $G$, all leaves of $S_u$ are also leaves of $T$, and,
for every two distinct vertices $u$ and $v$ of $G$,
the following three properties are equivalent:
\begin{enumerate}[(i)]
\item $u$ and $v$ are adjacent.
\item $S_u$ and $S_v$ have at least $t$ vertices in common.
\item $S_u$ and $S_v$ share a leaf of $T$.
\end{enumerate}
The class ${\rm ORTH}[h,s,t]$ was introduced by Jamison and Mulder.
Here we focus on the case $s=2$, which is closely related to the well-known VPT and EPT graphs.
We collect general properties of the graphs in ${\rm ORTH}[h,2,t]$,
and provide a characterization in terms of tree layouts.
Answering a question posed by Golumbic, Lipshteyn, and Stern,
we show that ${\rm ORTH}[h+1,2,t]\setminus {\rm ORTH}[h,2,t]$ is non-empty for every $h\geq 3$ and $t\geq 3$.
We derive decomposition properties,
which lead to efficient recognition algorithms for the graphs in ${\rm ORTH}[h,2,2]$ for every $h\geq 3$.
Finally, we give a complete description of the graphs in ${\rm ORTH}[3,2,2]$,
and show that the graphs in ${\rm ORTH}[3,2,3]$ are line graphs of planar graphs.
\end{abstract}
{\small
\begin{tabular}{lp{13cm}}
{\bf Keywords:} & Intersection graph; $(h,s,t)$-representation; orthodox $(h,s,t)$-representation; line graph; chordal graph
\end{tabular}
}
\section{Introduction}
Intersection graphs are a well studied topic \cite{mcmc,gotr}
and the intersection graphs of paths in trees have received special attention.
In the present paper
we study so-called orthodox representations
with bounds on the maximum degree of the host tree
as well as
on the size of the intersections corresponding to adjacencies.
Such representations were introduced by Jamison and Mulder \cite{jamu1,jamu2}.
Before we give precise definitions and explain our own as well as related results,
we collect some standard notation and terminology.
We consider finite, undirected, and simple graphs as well as finite and undirected multigraphs, which are allowed to contain parallel edges and loops.
A {\it clique} in $G$ is a complete subgraph of $G$.
For a tree $T$, let ${\cal L}(T)$ be the set of {\it leaves} of $T$,
which are the vertices of $T$ of degree at most $1$.
Let $L(H)$ be the {\it line graph} of some multigraph $H$,
whose vertex set $V(L(H))$ is the edge set $E(H)$ of $H$,
and in which two distinct vertices $u$ and $v$ of $L(H)$ are adjacent
if and only if the edges $u$ and $v$ of $H$ intersect.
Two distinct vertices $u$ and $v$ of a graph $G$ are {\it twins} in $G$ if $N_G[u]=N_G[v]$,
and, if $G$ has no twins, then it is {\it twin-free}.
The following notions were formalized by Jamison and Mulder \cite{jamu1,jamu2,jamu3}.
For positive integers $h$, $s$, and $t$,
an {\it $(h,s,t)$-representation} of a graph $G$ is a pair $(T,{\cal S})$,
where $T$ is a tree of maximum degree at most $h$,
and ${\cal S}$ is a collection $(S_u)_{u\in V(G)}$ of subtrees $S_u$ of maximum degree at most $s$ of $T$, one for each vertex $u$ of $G$,
such that two distinct vertices $u$ and $v$ of $G$ are adjacent
if and only if $S_u$ and $S_v$ have at least $t$ vertices in common.
An $(h,s,t)$-representation $(T,{\cal S})$ of $G$ with ${\cal S}=(S_u)_{u\in V(G)}$ is {\it orthodox} if,
for every vertex $u$ of $G$, all leaves of $S_u$ are also leaves of $T$, and,
for every two distinct vertices $u$ and $v$ of $G$,
the following three properties are equivalent:
\begin{enumerate}[(i)]
\item $u$ and $v$ are adjacent.
\item $S_u$ and $S_v$ have at least $t$ vertices in common.
\item $S_u$ and $S_v$ share a leaf of $T$.
\end{enumerate}
Let $[h,s,t]$ and ${\rm ORTH}[h,s,t]$ be the classes of graphs that have an $(h,s,t)$-representation
and an orthodox $(h,s,t)$-representation, respectively.
If no upper bound on the maximum degree of the host $T$ is imposed,
we replace $h$ with $\infty$.
Similarly, if no upper bound on the maximum degree of the subtrees in ${\cal S}$ is imposed,
we replace $s$ with $\infty$.
Note that the classes $[h,s,t]$ and ${\rm ORTH}[h,s,t]$ are hereditary,
that is, closed under taking induced subgraphs.
By iteratively removing irrelevant leaves of the host tree $T$ of some orthodox $(h,s,t)$-representation $(T,{\cal S})$,
one may assume that every leaf of $T$ is also a leaf of some tree in ${\cal S}$.
Using this terminology,
Gavril's famous result \cite{ga1} states that the class of chordal graphs coincides with $[\infty,\infty,1]$.
Jamison and Mulder \cite{jamu2,jamu3} attribute to McMorris and Scheinerman \cite{mcsc}
the insight that $[\infty,\infty,1]={\rm ORTH}[3,3,1]={\rm ORTH}[3,3,2]$.
In \cite{jamu2} they collect several properties of $[3,3,3]$ and ${\rm ORTH}[3,3,3]$.
The well studied
vertex and edge intersection graphs of paths in trees \cite{ga2,goja1,goja2},
also known as VPT-graphs and EPT-graphs,
coincide with $[\infty,2,1]$ and $[\infty,2,2]$, respectively.
Golumbic and Jamison \cite{goja1} have shown that deciding whether a given graph belongs to $[3,2,1]$ is NP-complete.
Alc\'on, Gutierrez, and Mazzoleni \cite{alguma1} strengthened this result and generalized it for every $h\geq 3$.
In \cite{alguma2} they study the forbidden induced subgraphs of $[h,2,1]$.
Golumbic, Lipshteyn, and Stern \cite{golist}
study the classes $[h,2,t]$ and ${\rm ORTH}[h,2,t]$ in detail.
In particular, they show that
${\rm ORTH}[\infty,2,1]={\rm ORTH}[3,2,1]={\rm ORTH}[3,2,2]$,
and that ${\rm ORTH}[\infty,2,1]$ is a proper subclass of ${\rm ORTH}[\infty,2,2]$.
Furthermore, they ask whether ${\rm ORTH}[\infty,2,t]$ and ${\rm ORTH}[3,2,t]$ coincide.
\noindent In the present paper we study the classes ${\rm ORTH}[h,2,t]$.
For $h\leq 2$, these classes are rather simple.
In fact, for every graph $G$ in ${\rm ORTH}[2,2,1]$,
the vertex set $V(G)$ of $G$ can be partitioned into three cliques $A$, $B$, and $C$,
such that $G$ contains all edges between $A$ and $B$, all edges between $B$ and $C$, but no edge between $A$ and $C$,
that is, the only connected twin-free graph in ${\rm ORTH}[2,2,1]$ is $P_3$.
Furthermore, if $t\geq 2$, then the graphs in ${\rm ORTH}[2,2,t]$ consist of one clique and some isolated vertices.
Hence, the smallest interesting value for $h$ is $3$.
In the second section we collect some general properties of the graphs in ${\rm ORTH}[h,2,t]$.
Our main result is a characterization in terms of tree layouts whose precise definition will be given later.
Using this characterization, we are able to answer the above-mentioned question by Golumbic, Lipshteyn, and Stern.
We also derive some decomposition properties,
which lead to efficient recognition algorithms for the graphs in ${\rm ORTH}[h,2,t]$ for every $h\geq 3$ and $t\in \{ 1,2\}$;
contrasting the above hardness results.
In the third section we consider the classes ${\rm ORTH}[3,2,2]$ and ${\rm ORTH}[3,2,3]$ in more detail,
and give a complete structural description of the first one.
We conclude with some open problems motivated by our research.
\section{General properties of ${\rm ORTH}[h,2,t]$}
In this section we collect more general properties of the classes ${\rm ORTH}[h,2,t]$,
and derive important structural consequences.
Our first result closely ties these classes to line graphs.
\begin{theorem}\label{theorem1}
Let $(T,{\cal S})$ be an orthodox $(h,2,t)$-representation of a graph $G$ with $h\geq 3$ and $t\geq 1$
such that, for every leaf $x$ of $T$,
there is some vertex $u$ of $G$
with $x\in V(S_u)$.
The graph $G$ is the line graph of a multigraph $H$ without loops,
and, if $G$ is twin-free, then $H$ is a graph.
Furthermore, if $G$ is a connected twin-free graph of order at least $4$,
and $H$ has no isolated vertices, then
\begin{itemize}
\item $H$ is unique up to isomorphism,
\item there is a bijection $\phi:V(H)\to {\cal L}(T)$, and
\item two distinct vertices $x$ and $y$ of $H$ are adjacent in $H$
if and only if
${\cal S}$ contains the path in $T$ between $\phi(x)$ and $\phi(y)$.
\end{itemize}
\end{theorem}
{\it Proof:} Let ${\cal S}=(S_u)_{u\in V(G)}$.
By definition,
for every vertex $u$ of $G$,
the subtree $S_u$ is a path between leaves of $T$.
Suppose that, for some vertex $u$ of $G$,
the path $S_u$ consists of only a single leaf, say $x$, of $T$.
Let $y$ be the neighbor of $x$ in $T$.
Let $T'$ arise from $T$ by adding the two new vertices $x'$ and $x''$
as well as the two new edges $xx'$ and $xx''$.
Now,
\begin{itemize}
\item replacing $S_v$ by the path $x'xx''$ for every vertex $v$ of $G$ for which $S_v$ consists only of $x$, and,
\item extending $S_v$ by the edge $xx'$ for every vertex $v$ of $G$ for which $S_v$ contains the edge $xy$ of $T$
\end{itemize}
yields an alternative orthodox $(h,2,t)$-representation of $G$ using $T'$ as host tree.
Possibly applying this transformation several times,
we may assume that every path in ${\cal S}$ has positive length.
By definition,
for every leaf $x$ of $T$,
the set
$C_x=\{ u\in V(G):x\in V(S_u)\}$
is a clique in $G$.
Since, for every vertex $u$ of $G$,
the subtree $S_u$ is a path between two distinct leaves of $T$,
every vertex of $G$ belongs to exactly two of the cliques in the collection $(C_x)_{x\in {\cal L}(T)}$.
Furthermore, for every edge $uv$ of $G$,
the two subtrees $S_u$ and $S_v$ share a leaf, say $x$, of $T$,
which implies that $u$ and $v$ both belong to $C_x$.
By results of Krausz \cite{kr} and of Bermond and Meyer \cite{beme},
this implies that $G$ is the line graph of some multigraph $H$ without loops.
Since parallel edges in $H$ correspond to twins in $G$,
if $G$ is twin-free, then $H$ is a graph.
Now, let $G$ be twin-free, connected, and of order at least $4$.
By a result of Whitney \cite{wh}, the graph $H$ is uniquely determined.
Let $H'$ be the graph with $V(H')={\cal L}(T)$
in which two distinct vertices $x$ and $y$ are adjacent
if and only if
${\cal S}$ contains the path in $T$ between $x$ and $y$.
Since every leaf of $T$ is also a leaf of some $S_u$,
the graph $H'$ has no isolated vertex.
By definition, the graph $G$ is isomorphic to $L(H')$,
and, by Whitney's result, the graphs $H'$ and $H$ are isomorphic.
$\Box$
\noindent If the graph $G'$ arises from a graph $G$
by identifying all pairs of twins in $G$,
then $G'$ is twin-free.
Furthermore, it follows easily from the definition that
$G$ is in ${\rm ORTH}[h,2,t]$
if and only if $G'$ is.
For $i\in [2]$, let $(T_i,{\cal S}_i)$ be an orthodox $(h,2,t)$-representation of a graph $G_i$ for some $h\geq 3$ and $t\geq 1$,
where $G_1$ and $G_2$ as well as $T_1$ and $T_2$ are disjoint.
Let $T$ be the tree that arises from the disjoint union of $T_1$ and $T_2$
by subdividing one edge of $T_i$ containing a leaf of $T_i$ with a new vertex $t_i$ for $i\in [2]$,
and adding the edge $t_1t_2$.
Since $h\geq 3$, applying the same subdivisions to the trees in ${\cal S}_1\cup {\cal S}_2$,
it follows that there is an orthodox $(h,2,t)$-representation of the disjoint union of $G_1$ and $G_2$
using $T$ as host tree.
In view of these observations and Theorem \ref{theorem1},
in order to understand the classes ${\rm ORTH}[h,2,t]$,
it suffices to consider connected twin-free line graphs $G$
of order at least $4$ as well as connected graphs $H$ with $L(H)=G$.
\noindent The next result is our central characterization of the graphs in ${\rm ORTH}[h,2,t]$.
\begin{theorem}\label{theorem2}
Let $G$ be a connected twin-free line graph of order at least $4$,
and let $H$ be a connected graph with $L(H)=G$.
The graph $G$ is in ${\rm ORTH}[h,2,t]$
for some $h\geq 3$ and $t\geq 1$
if and only if there is a tree $T$
whose internal vertices all have degree at most $h$
such that $V(H)={\cal L}(T)$, and,
for every two independent edges $xy$ and $x'y'$ of $H$,
the two paths in $T$
between $x$ and $y$ and between $x'$ and $y'$
share at most $t-1$ vertices.
\end{theorem}
{\it Proof:} Let $G$ be in ${\rm ORTH}[h,2,t]$.
As shown in the proof of Theorem \ref{theorem1},
there is an orthodox $(h,2,t)$-representation $(T,{\cal S})$ of $G$
such that every path in ${\cal S}=(S_u)_{u\in V(G)}$ has positive length.
By definition, all internal vertices of $T$ have degree at most $h$.
By Theorem \ref{theorem1}, we may assume that $V(H)={\cal L}(T)$,
and that two distinct vertices $x$ and $y$ of $H$ are adjacent in $H$
if and only if
${\cal S}$ contains the path in $T$ between $x$ and $y$.
If $xy$ and $x'y'$ are two independent edges of $H$,
then there are two vertices $u$ and $v$ in $G$ such that
$S_u$ is the path in $T$ between $x$ and $y$
and
$S_v$ is the path in $T$ between $x'$ and $y'$.
Since $S_u$ and $S_v$ share no leaf,
the two vertices $u$ and $v$ are not adjacent in $G$,
which implies that $S_u$ and $S_v$ share at most $t-1$ vertices.
Now, let $T$ be as in the statement.
Let $T'$ arise from $T$ by subdividing each edge incident with a leaf of $T$ exactly $t-2$ times.
Note that $T'$ still has maximum degree at most $h$,
and that,
for every two independent edges $xy$ and $x'y'$ of $H$,
the two paths in $T'$
between $x$ and $y$ and between $x'$ and $y'$
share at most $t-1$ vertices.
Let ${\cal S}=(S_{xy})_{xy\in E(H)}$,
where $S_{xy}$ is the path in $T'$ between the leaves $x$ and $y$ of $T'$.
Let $u$ and $v$ be two distinct vertices of $G$.
Let $u=xy$ and $v=x'y'$,
where $xy$ and $x'y'$ are the corresponding edges of $H$.
Now, $u$ and $v$ are adjacent in $G$
if and only if the edges $xy$ and $x'y'$ are not independent
if and only if $S_{xy}$ and $S_{x'y'}$ share a leaf of $T'$
if and only if $S_{xy}$ and $S_{x'y'}$ have at least $t$ vertices in common.
This implies that $(T',{\cal S})$ is an orthodox $(h,2,t)$-representation of $G$.
$\Box$
\noindent The relation between $H$ and $T$ in the previous theorem is crucial for all our considerations,
and we introduce some corresponding terminology.
If $H$ is a graph and $T$ is a tree such that
\begin{itemize}
\item the maximum degree of $T$ is at most $h$,
\item $V(H)={\cal L}(T)$, and,
\item for every two independent edges $xy$ and $x'y'$ of $H$,
the two paths in $T$
between $x$ and $y$ and between $x'$ and $y'$
share at most $t-1$ vertices
\end{itemize}
for some integers $h\geq 3$ and $t\geq 1$,
then $T$ is an {\it $(h,t)$-tree layout} of $H$.
As we see now, vertices of degree $2$ are not essential within tree layouts.
\begin{corollary}\label{corollary1}
Let $G$ be a connected twin-free line graph of order at least $4$,
and let $H$ be a connected graph with $L(H)=G$.
The graph $G$ is in ${\rm ORTH}[3,2,t]$ for some $t\geq 1$
if and only if there is a tree $T$
whose internal vertices all have degree exactly $3$
such that $V(H)={\cal L}(T)$, and,
for every two independent edges $xy$ and $x'y'$ of $H$,
the two paths in $T$
between $x$ and $y$ and between $x'$ and $y'$
share at most $t-1$ vertices.
\end{corollary}
{\it Proof:} In view of Theorem \ref{theorem2},
it suffices to argue that no internal vertex of $T$ needs degree $2$.
Therefore, let $T$ be an $(h,t)$-tree layout of $H$.
If some vertex $b$ of $T$
has exactly two neighbors $a$ and $c$ within $T$,
then it is easy to see that $T'=T-b+ac$ is still an $(h,t)$-tree layout of $H$.
Iterating this transformation, it is possible to eliminate all internal vertices of $T$ that are of degree $2$. $\Box$
\noindent Note that every tree with $n$ leaves whose internal vertices all have degree $3$, has order exactly $2n-2$.
Therefore, if $G$, $H$, and $T$ are as in the statement of Corollary \ref{corollary1}, then subdividing the edges incident with leaves of $T$ at most $t-2$ times, and defining ${\cal S}$ as in the proof of Theorem \ref{theorem2} yields an orthodox $(3,2,t)$-representation of $G$
whose underlying tree has order between $2n(H)-2$ and $tn(H)-2$.
Hence, by Theorem \ref{theorem2}, the minimum order of an underlying tree in any orthodox $(3,2,t)$-representation of $G$ lies between these two bounds.
Our next goal is to answer the question posed by Golumbic, Lipshteyn, and Stern \cite{golist}.
In fact, we show that line graphs of complete graphs of suitable orders distinguish the classes ${\rm ORTH}[h,2,t]$ for different values of $h$.
The next lemma is a simple exercise, and we include the proof for completeness.
\begin{lemma}\label{lemma3}
Let $h$ and $t$ be integers with $h\geq 3$ and $t\geq 3$.
If $T$ is a tree of maximum degree at most $h$ such that every two leaves of $T$ have distance at most $t$, then
$$
|{\cal L}(T)|\leq
\begin{cases}
2(h-1)^{\left(\frac{t-1}{2}\right)} & \mbox{, if $t$ is odd, and}\\
h(h-1)^{\left(\frac{t}{2}-1\right)} & \mbox{, if $t$ is even.}
\end{cases}
$$
Furthermore, these bounds are tight for all considered values of $h$ and $t$.
\end{lemma}
{\it Proof:} In view of the desired bound, we may assume that $T$ has two leaves at distance $t$.
Let $x(0)\ldots x(t)$ be a path between two such leaves.
First, let $t$ be odd.
Rooting the two components of $T-x\left(\frac{t-1}{2}\right)x\left(\frac{t-1}{2}+1\right)$
in the two vertices $x\left(\frac{t-1}{2}\right)$ and $x\left(\frac{t-1}{2}+1\right)$
yields rooted $(h-1)$-ary trees $T_1$ and $T_2$ of depth $\frac{t-1}{2}$.
Clearly,
$$|{\cal L}(T)|=|{\cal L}(T_1)|+|{\cal L}(T_2)|\leq (h-1)^{\left(\frac{t-1}{2}\right)}+(h-1)^{\left(\frac{t-1}{2}\right)}$$
with equality if and only if
$T_1$ and $T_2$ are full $(h-1)$-ary trees
of depth $\frac{t-1}{2}$.
Next, let $t$ be even.
Rooting the two components of $T-x\left(\frac{t}{2}\right)x\left(\frac{t}{2}+1\right)$
in the two vertices $x\left(\frac{t}{2}\right)$ and $x\left(\frac{t}{2}+1\right)$
yields rooted $(h-1)$-ary trees $T_1$ of depth $\frac{t}{2}$ and $T_2$ of depth $\frac{t}{2}-1$.
Clearly,
$$|{\cal L}(T)|=|{\cal L}(T_1)|+|{\cal L}(T_2)|\leq (h-1)^{\left(\frac{t}{2}\right)}+(h-1)^{\left(\frac{t}{2}-1\right)}=h(h-1)^{\left(\frac{t}{2}-1\right)}$$
with equality if and only if
$T_1$ and $T_2$ are full $(h-1)$-ary trees
of depths $\frac{t}{2}$ and $\frac{t}{2}-1$, respectively.
$\Box$
\noindent The following result answers the question posed by Golumbic, Lipshteyn, and Stern \cite{golist} mentioned in the introduction.
For two integers $p$ and $q$, let $[p,q]$ denote the set of integers at least $p$ and at most $q$.
\begin{theorem}\label{theorem4}
Let $h$ and $t$ be integers with $h\geq 3$ and $t\geq 3$.
If $t$ is odd, then
$$\Big\{ n\in \mathbb{N}: L(K_n)\in {\rm ORTH}[h+1,2,t]\setminus {\rm ORTH}[h,2,t]\Big\}
=\left[ 2(h-1)^{\left(\frac{t-1}{2}\right)}+1,2h^{\left(\frac{t-1}{2}\right)}\right],$$
and, if $t$ is even, then
$$\Big\{ n\in \mathbb{N}: L(K_n)\in {\rm ORTH}[h+1,2,t]\setminus {\rm ORTH}[h,2,t]\Big\}
=\left[ h(h-1)^{\left(\frac{t}{2}-1\right)}+1,(h+1)h^{\left(\frac{t}{2}-1\right)}\right].$$
\end{theorem}
{\it Proof:} We only give details for odd $t$,
because the proof for even $t$ is analogous.
Since the considered classes are hereditary,
it suffices to show that
\begin{enumerate}[(i)]
\item $L\left(K_{2h^{\left(\frac{t-1}{2}\right)}}\right)\in {\rm ORTH}[h+1,2,t]$, and
\item
$L\left(K_{2(h-1)^{\left(\frac{t-1}{2}\right)}+1}\right)\not\in {\rm ORTH}[h,2,t]$.
\end{enumerate}
By Lemma \ref{lemma3}, there is a tree of maximum degree at most $h+1$
with $2h^{\left(\frac{t-1}{2}\right)}$ leaves
such that every two leaves have distance at most $t$.
By Theorem \ref{theorem2}, this implies (i).
Now, suppose that (ii) does not hold.
Again by Theorem \ref{theorem2},
there is a tree $T$ of maximum degree at most $h$
with $2(h-1)^{\left(\frac{t-1}{2}\right)}+1$ leaves such that,
for every four distinct leaves $u_1$, $v_1$, $u_2$, and $v_2$,
the two paths in $T$ between $u_1$ and $v_1$
and between $u_2$ and $v_2$ share at most $t-1$ vertices.
Let $u_1$ and $v_1$ be two leaves of $T$ with maximum distance $\ell$.
We assume that $T$ is chosen such that $\ell$ is as small as possible.
Let $u_1'$ and $v_1'$ be the two neighbors of $u_1$ and $v_1$, respectively.
Clearly, the vertices $u_1'$ and $v_1'$ are distinct.
By the choice of $T$,
the vertex $u_1'$ is adjacent to a leaf $u_2$ distinct from $u_1$,
and the vertex $v_1'$ is adjacent to a leaf $v_2$ distinct from $v_1$.
Considering the two independent edges $u_1v_1$ and $u_2v_2$ implies that $\ell\leq t$.
By Lemma \ref{lemma3}, this implies the contradiction
that $T$ has at most $2(h-1)^{\left(\frac{t-1}{2}\right)}$ leaves. $\Box$
\noindent Next, we show that all subgraphs of the essential graphs $H$ with $L(H)\in {\rm ORTH}[h,2,t]$
have balanced separations of bounded order.
\begin{theorem}\label{theorem5}
Let $G$ be a connected twin-free line graph of order at least $4$,
and let $H$ be a connected graph with $L(H)=G$.
Let $G$ be in ${\rm ORTH}[h,2,t]$ for some $h\geq 3$ and $t\geq 1$.
If $H'$ is a subgraph of $H$ of order at least $2$,
then there is a set $X$ of vertices of $H'$, and
a partition of $V(H')$ into two sets $A$ and $B$
such that
\begin{enumerate}[(i)]
\item $\frac{1}{h}n(H')\leq |A|,|B|\leq \frac{h-1}{h}n(H')$,
\item $|X|\leq \max\left\{ 1,(h-1)^{(t-2)}\right\}$, and
\item $H'$ contains no edge between $A\setminus X$ and
$B\setminus X$.
\end{enumerate}
Furthermore, given $H'$, the sets $X$, $A$, and $B$ can be found in polynomial time.
\end{theorem}
{\it Proof:} Let $T$ be as in Theorem \ref{theorem2},
that is, the tree $T$ is an $(h,t)$-tree layout of $H$.
Clearly, iteratively removing leaves of $T$ that are not vertices of $H'$
yields an $(h,t)$-tree layout $T'$ of $H'$.
If $t=1$,
$u$ is some vertex of $T'$,
$Y$ is the set of leaves of some components of $T'-u$,
and
$Z$ is the set of leaves of the remaining components of $T'-u$,
then the properties of an $(h,1)$-tree layout
imply the existence of a vertex $x$ in $Y\cup Z$
such that all edges of $H'$ between $Y$ and $Z$
are incident with $x$,
because otherwise $H'$ contains two independent edges between $Y$ and $Z$
such that the corresponding paths in $T'$ share $u$.
Similarly, if $t\geq 2$, $P:u_1\ldots u_t$ is a path of order $t$ in $T'$,
$Y$ is the set of leaves of $T'$ that lie in the same component of $T'-E(P)$ as $u_1$, and
$Z$ is the set of leaves of $T'$ that lie in the same component of $T'-E(P)$ as $u_t$,
then there is a vertex $x$ in $Y\cup Z$
such that all edges of $H'$ between $Y$ and $Z$
are incident with $x$.
Let $r$ be any internal vertex of $T'$, and root $T'$ in $r$.
Let $a$ be a vertex of $T'$ of maximum distance from $r$
such that at least $\frac{1}{h}n(H')$ descendants of $a$ are leaves.
Since $T'$ has $n(H')$ leaves and $r$ has degree at most $h$,
the vertex $a$ is not $r$.
Since $a$ has at most $h-1$ children,
at most $\frac{h-1}{h}n(H')$ descendants of $a$ are leaves.
Let $b$ be the parent of $a$.
Let $A$ be the set of leaves of $T'$ that lie in the same component of $T'-ab$ as $a$, and
let $B$ be the set of leaves of $T'$ that lie in the same component of $T'-ab$ as $b$.
Clearly, (i) holds.
If $t=1$, then the above observation implies the existence of a single vertex $x$ such that all edges of $H'$ between $A$ and $B$ are incident with $x$, and, (ii) and (iii) follow.
Now, let $t\geq 2$.
Rooting the component $T'_b$ of $T'-ab$ that contains $b$ in the vertex $b$,
and considering all leaves of $T'_b$ at depth at most $t-2$
as well as all non-leaf vertices of $T'_b$ at depth exactly $t-2$,
it follows that $B$ can be partitioned into $k\leq (h-1)^{t-2}$ sets $B_1,\ldots,B_k$
such that, for every $i\in [k]$,
if $B_i$ contains not only one vertex,
then there is a path $P_i$ of order $t$
such that every path in $T'$ between a leaf in $A$
and a leaf in $B_i$ has $P_i$ as a subpath.
If $B_i$ contains only one vertex,
then trivially all edges of $H'$ between $A$ and $B_i$ are incident with only one vertex in $A\cup B_i$.
If $B_i$ contains not only one vertex,
then the above observation also implies that
all edges of $H'$ between $A$ and $B_i$ are incident with only one vertex in $A\cup B_i$.
Altogether, it follows that there is a set $X$ of at most $k$ vertices of $H'$
such that all edges between $A$ and
$B=B_1\cup \ldots \cup B_k$
are incident with a vertex in $X$, and, (ii) and (iii) follow.
It remains to explain, how to determine suitable sets $X$, $A$, and $B$ efficiently given $H'$.
Therefore, let $p=\max\left\{ 1,(h-1)^{(t-2)}\right\}$.
If $p\geq \frac{h-1}{h}n(H')$,
then choosing $A$ as any set of $\left\lceil\frac{1}{h}n(H')\right\rceil$
vertices of $H'$, and $B=X$ as $V(H')\setminus A$
satisfies (i), (ii), and (iii).
Now, let $p<\frac{h-1}{h}n(H')$.
For any specific set $X$ of $p$ vertices of $H'$,
we explain how to decide whether sets $A$ and $B$ with (i), (ii), and (iii) exist.
Therefore, let $X$ be such a set.
Let $K_1,\ldots,K_\ell$ be the components of $H'-X$.
If some component $K_i$ with $i\in [\ell]$ has order more than $\frac{h-1}{h}n(H')$,
then the desired sets $A$ and $B$ do not exist.
If some component $K_i$ with $i\in [\ell]$ has order between $\frac{1}{h}n(H')$ and $\frac{h-1}{h}n(H')$,
then choosing $A=V(K_i)$ and $B=V(H')\setminus A$ has the desired properties.
Finally,
if all components $K_i$ with $i\in [\ell]$ have order less than $\frac{1}{h}n(H')$,
then forming a union of suitably many of their vertex sets
yields a set $A$ of order between $\frac{1}{h}n(H')$ and $\frac{h-1}{h}n(H')$, and defining $B$ as above
leads to sets with the desired properties.
Altogether, considering the $O(n(H')^p)$ many choices for $X$,
suitable sets can be determined in polynomial time,
which completes the proof.
$\Box$
\noindent The following immediate consequence of Theorem \ref{theorem5}
might be useful in order to devise efficient recognition algorithms for the graphs in ${\rm ORTH}[h,2,t]$.
\begin{corollary}\label{corollary2}
If $G$ is a connected twin-free line graph of order at least $4$ in ${\rm ORTH}[h,2,t]$ for some $h\geq 3$ and $t\geq 1$, and $H$ is a connected graph with $L(H)=G$, then the treewidth of $H$ is bounded as a function of $h$ and $t$.
\end{corollary}
{\it Proof:} This follows easily from Theorem \ref{theorem5}
and a result of Dvo\v{r}\'ak and Norin \cite{dvno}. $\Box$
\noindent Our final goal in this section are efficient recognition algorithms for the graphs in ${\rm ORTH}[h,2,1]$ and ${\rm ORTH}[h,2,2]$
based on Theorem \ref{theorem5}.
\begin{lemma}\label{lemma4}
Let $H$ be a graph.
Let the sets $A$ and $B$ partition $V(H)$.
Let $a$ in $A$ and let $b$ in $B$ be such that
every edge of $H$ between $A$ and $B$ is incident with $a$,
and $b$ is a neighbor of $a$.
The graph $H$ has an $(h,t)$-tree layout
for some $h\geq 3$ and $t\geq 1$
if and only if
the two graphs $H_A=H[A\cup \{ b\}]$ and $H_B=H[B\cup \{ a\}]$ have $(h,t)$-tree layouts.
\end{lemma}
{\it Proof:} If $H$ has an $(h,t)$-tree layout,
then so does every induced subgraph, which implies the necessity.
For the sufficiency, assume that
$T_A$ and $T_B$ are $(h,t)$-tree layouts of $H_A$ and $H_B$,
respectively.
Note that $T_A$ and $T_B$ share exactly the two vertices $a$ and $b$.
If the tree $T$ arises from the disjoint union of $T_A$ and $T_B$,
where we distinguish the two copies of $a$ and $b$ within $T_A$ and $T_B$,
by adding an edge between the copy of $b$ in $T_A$ and the copy of $a$ in $T_B$,
then it follows easily that $T$ is an $(h,t)$-tree layout of $H$,
which completes the proof. $\Box$
\begin{corollary}\label{corollary3}
For every two integers $h\geq 3$ and $t\in \{ 1,2\}$,
the graphs in ${\rm ORTH}[h,2,t]$ can be recognized in polynomial time.
\end{corollary}
{\it Proof:} Let $G$ be a given graph for which we want to decide whether it belongs to ${\rm ORTH}[h,2,t]$.
As observed after Theorem \ref{theorem1},
we may assume that $G$ is a connected twin-free line graph of order at least $4$.
Using for instance the algorithms in \cite{desi},
we can efficiently determine the unique connected graph $H$
with $L(H)=G$.
Clearly, $n(H)\leq m(H)+1=n(G)+1$.
By Theorem \ref{theorem2},
we need to decide whether $H$ has an $(h,t)$-tree layout.
Note that, for $t\in \{ 1,2\}$,
the set $X$ in Theorem \ref{theorem5} contains at most one vertex.
Furthermore, sets $X$, $A$, and $B$ with (i), (ii), and (iii) can be found efficiently for every subgraph $H'$ of $H$ of order at least $2$.
Note that the graphs $H_A$ and $H_B$ considered in Lemma \ref{lemma4} have orders $|A|+1$ and $|B|+1$, respectively.
Let $n_0$ be such that $\frac{h-1}{h}n+1\leq \frac{h}{h+1}n$ for $n\geq n_0$.
Note that, if $H$ has order at least $n_0$,
then $H_A$ and $H_B$ both have orders at most $\frac{h}{h+1}n(H)$.
Therefore,
iteratively applying Theorem \ref{theorem5} yields
$k\leq 2^{\lceil \log_{(h+1)/h}(n/n_0)\rceil}$ many graphs $H_1,\ldots,H_k$,
each of order at most $n_0$,
such that
$H$ has an $(h,t)$-tree layout
if and only if $H_i$ has an $(h,t)$-tree layout for every $i\in [k]$.
Clearly, testing this property for these polynomially many graphs of bounded order can be done in polynomial time.
$\Box$
\section{${\rm ORTH}[3,2,2]$ and ${\rm ORTH}[3,2,3]$}
This section collects more specific properties of the classes ${\rm ORTH}[3,2,2]$ and ${\rm ORTH}[3,2,3]$.
As shown by Golumbic, Lipshteyn, and Stern \cite{golist},
we have ${\rm ORTH}[\infty,2,1]={\rm ORTH}[3,2,1]={\rm ORTH}[3,2,2]$,
which implies ${\rm ORTH}[h,2,1]={\rm ORTH}[3,2,2]$ for every $h\geq 3$.
\begin{theorem}\label{theorem6}
Let $G$ be a connected twin-free line graph of order at least $4$,
and let $H$ be a connected graph with $L(H)=G$.
The graph $G$ is in ${\rm ORTH}[3,2,2]$
if and only if
all blocks of $H$ are of order at most $3$.
\end{theorem}
{\it Proof:}
In order to show the necessity,
we assume that $G$ is in ${\rm ORTH}[3,2,1]={\rm ORTH}[3,2,2]$
but that some block of $H$ has order at least $4$.
This implies that $H$ has some cycle $C$ of length $\ell$ at least $4$ as a subgraph.
Hence, $G$ contains the induced cycle $L(C)$ of length $\ell$.
Nevertheless, by Gavril's result \cite{ga1}, the graphs in ${\rm ORTH}[3,2,1]$ are chordal,
which is a contradiction.
In order to show the sufficiency,
we assume that all blocks of $H$ are of order at most $3$.
Since $K_3$ has a $(3,1)$-tree layout, it follows easily by an inductive argument similar to the proof of Lemma \ref{lemma4}
that $H$ has a $(3,1)$-tree layout.
Suppose, for instance,
that $H$ arises from a smaller graph $H'$
containing a vertex $u$
by adding two vertices $v$ and $w$,
and three new edges $uv$, $uw$, and $vw$,
that is, $H$ arises from $H'$ by attaching one new $K_3$ block.
If $T'$ is a $(3,1)$-tree layout of $H'$,
then subdividing the edge of $T'$ incident with $u$
by a new vertex $x$,
adding three more vertices $y$, $v$, and $w$,
and adding three more edges $xy$, $vy$, and $wy$
yields a $(3,1)$-tree layout of $H$.
By Theorem \ref{theorem2}, this implies that $G$ is in ${\rm ORTH}[3,2,1]={\rm ORTH}[3,2,2]$.
$\Box$
\begin{lemma}\label{lemma1}
If $e$ and $f$ are two independent edges of $K_5$, $H$ is a subdivision of $K_5-\{ e,f\}$, and $G=L(H)$,
then $G$ is not in ${\rm ORTH}[3,2,3]$.
\end{lemma}
{\it Proof:} Let $H_0=K_5-\{ e,f\}$.
We denote the five vertices of $H_0$ by $u_1,\ldots,u_5$
without specifying which two edges are missing.
Note that $G$ is a connected twin-free line graph of order at least $4$.
For a contradiction, suppose that $G$ is in ${\rm ORTH}[3,2,3]$.
Let $T$ be as in Corollary \ref{corollary1} for $t=3$.
Let $r$ be any internal vertex of $T$, and root $T$ in $r$.
Let $s$ be a vertex of $T$ of maximum distance from $r$
such that at least two descendants, say $u_1$ and $u_2$,
of $s$ within $T$ are vertices of $H_0$.
Since every internal vertex of $T$ has degree $3$,
exactly two descendants of $s$ are vertices of $H_0$,
which implies that $s$ is not $r$.
Let $t$ be the parent of $s$.
Let $s'$ and $s''$ be the two neighbors of $t$ distinct from $s$.
Let $S$, $S'$, and $S''$ be the vertex sets of the three components of $T-t$
that contain $s$, $s'$, and $s''$, respectively.
By the pigeonhole principle and by symmetry,
we may assume that $u_4$ and $u_5$ lie in $S'$,
and that $u_1u_4$ and $u_2u_5$ are edges of $H_0$.
Note that every edge $uv$ of $H_0$ corresponds to a path $P(uv)$ in $H$
between $u$ and $v$ whose internal vertices are all of degree $2$,
and that the edges of this path correspond to leaf to leaf paths in $T$
such that, for independent edges, the corresponding paths share at most $2$ vertices.
Since the two paths $P(u_1u_4)$ and $P(u_2u_5)$ are between a vertex in $S$ and a vertex in $S'$,
each contains
\begin{itemize}
\item an edge between $S$ and $S'$, or
\item an edge between $S$ and $S''$ as well as an edge between $S'$ and $S''$.
\end{itemize}
Since any edge of $P(u_1u_4)$ is disjoint from any edge of $P(u_2u_5)$,
the structure of $T$ implies that we may assume that
$P(u_1u_4)$ contains an edge $e_1$ between $S$ and $S'$,
and that
$P(u_2u_5)$ contains an edge $e_2$ between $S$ and $S''$
as well as an edge $e_2'$ between $S'$ and $S''$.
If $u_1$ is adjacent to $u_5$ in $H_0$, then, in view of $e_2$, the path $P(u_1u_5)$ contains an edge between $S$ and $S'$,
and,
if $u_2$ is adjacent to $u_4$ in $H_0$, then, in view of $e'_2$, the path $P(u_2u_4)$ contains an edge between $S$ and $S'$.
Since every edge of $P(u_1u_5)$ is disjoint from every edge of $P(u_2u_4)$,
this implies that $u_1u_5$ or $u_2u_4$ is one of the two non-edges of $H_0$.
If $u_3$ is in $S''$, then, in view of $e_2$ and $e_2'$,
the vertex $u_3$ can not be adjacent to $u_1$ or $u_4$ in $H_0$,
which is a contradiction.
Hence, $u_3$ is in $S'$.
In view of $e_1$ and $e_2'$, the vertex $u_3$ is not adjacent to $u_2$ in $H_0$.
Together with our earlier observation,
this implies that the two missing edges of $H_0$ are exactly $u_1u_5$ and $u_2u_3$.
In view of $e_2$, the path $P(u_1u_3)$ contains an edge $e_3$ between $S$ and $S'$.
In view of $e_2'$, the path $P(u_2u_4)$ contains an edge $e_4$ between $S$ and $S'$.
Now, the two paths in $T$ between the endpoints of $e_3$
as well as
between the endpoints of $e_4$ share three vertices $s$, $t$, and $s'$,
which is a contradiction.
$\Box$
\noindent The previous lemma has a suitable generalization for larger values of $h$ than $3$.
A similar proof also shows that $L(K_{2,5})$ does not lie in ${\rm ORTH}[3,2,3]$.
\begin{lemma}\label{lemma2}
If $H$ is a subdivision of $K_{3,3}$ and $G=L(H)$,
then $G$ is not in ${\rm ORTH}[3,2,3]$.
\end{lemma}
{\it Proof:} Let the two partite sets of $K_{3,3}$ be $\{ u_1,u_2,u_3\}$ and $\{ u'_1,u'_2,u'_3\}$.
Note that $G$ is a connected twin-free line graph of order at least $4$.
For a contradiction, suppose that $G$ is in ${\rm ORTH}[3,2,3]$.
Let $T$ be as in Corollary \ref{corollary1} for $t=3$.
Let $r$ be any internal vertex of $T$, and root $T$ in $r$.
Let $s$ be a vertex of $T$ of maximum distance from $r$
such that at least two descendants of $s$ within $T$
are vertices of $K_{3,3}$.
Again, exactly two descendants of $s$ are vertices of $K_{3,3}$.
Let $t$, $s'$, $s''$, $S$, $S'$, and $S''$ be as in the proof of Lemma \ref{lemma1}.
We consider some cases concerning the two vertices of $K_{3,3}$ in $S$
as well as the distribution of the remaining four vertices of $K_{3,3}$ within $S'$ and $S''$.
\noindent {\bf Case 1} {\it $u_1,u_2\in S$.}
\noindent By symmetry, we may assume that $u_1',u_2'\in S'$.
Arguing as in the proof of Lemma \ref{lemma1},
we may assume, by symmetry,
that $P(u_1u_1')$ contains an edge $e_1$ between $S$ and $S''$
as well as an edge $e_1'$ between $S'$ and $S''$,
and that $P(u_2u_2')$ contains an edge $e_2$ between $S$ and $S'$.
In view of $e_1$, the path $P(u_2u_3')$ contains no edge between $S$ and $S''$.
In view of $e_1'$, the path $P(u_2u_3')$ contains no edge between $S'$ and $S''$.
This implies that $u_3'\in S'$.
In view of $e_2$, the path $P(u_1u_3')$ contains no edge between $S$ and $S'$.
In view of $e_1'$, the path $P(u_1u_3')$ contains no edge between $S'$ and $S''$.
This implies a contradiction.
In view of Case 1, we may assume that the two vertices of $K_{3,3}$ in $S$
belong to different partite sets, that is, by symmetry, $u_1,u_1'\in S$.
\noindent {\bf Case 2} {\it $u_1,u_1'\in S$, $u_2,u_2'\in S'$, and $u_3,u_3'\in S''$.}
\noindent By symmetry, we may assume that
$P(u_1u_2')$ contains an edge $e_1$ between $S$ and $S''$ as well as an edge $e_1'$ between $S'$ and $S''$, and that
$P(u'_1u_2)$ contains an edge $e_2$ between $S$ and $S'$.
In view of $e_1$, the path $P(u_2u_3')$ contains no edge between $S$ and $S''$.
In view of $e_1'$, the path $P(u_2u_3')$ contains no edge between $S'$ and $S''$.
This implies a contradiction.
\noindent {\bf Case 3} {\it $u_1,u_1'\in S$ and $u_2,u_2',u_3,u_3'\in S'$.}
\noindent By symmetry, we may assume that
$P(u_1u_2')$ contains an edge $e_1$ between $S$ and $S''$ as well as an edge $e_1'$ between $S'$ and $S''$, and that
$P(u'_1u_2)$ contains an edge $e_2$ between $S$ and $S'$.
In view of $e_2$, the path $P(u_1u_3')$ contains no edge between $S$ and $S'$.
In view of $e_1'$, the path $P(u_1u_3')$ contains no edge between $S'$ and $S''$.
This implies a contradiction.
In view of Cases 2 and 3,
we may assume, by symmetry, that $u_2\in S'$ and $u_2'\in S''$.
If $u_3\in S'$ and $u_3'\in S''$, then we can argue as in Case 1.
If $u_3'\in S'$ and $u_3\in S''$, then we can argue as in Case 2.
Hence, by symmetry, it suffices to consider the following final case.
\noindent {\bf Case 4} {\it $u_1,u_1'\in S$, $u_2,u_3,u_3'\in S'$, and $u_2'\in S''$.}
\noindent By symmetry, we may assume that
$P(u_1u_3')$ contains an edge $e_1$ between $S$ and $S''$ as well as an edge $e_1'$ between $S'$ and $S''$, and that
$P(u_1'u_3)$ contains an edge $e_2$ between $S$ and $S'$.
In view of $e_2$, the path $P(u_2u_2')$ contains no edge between $S$ and $S'$.
In view of $e_1'$, the path $P(u_2u_2')$ contains no edge between $S'$ and $S''$.
This implies a contradiction, and completes the proof. $\Box$
\begin{theorem}\label{theorem3}
If a connected twin-free graph $G$ of order at least $4$ is in ${\rm ORTH}[3,2,3]$, then $G$ is the line graph of a planar graph.
\end{theorem}
{\it Proof:} By Theorem \ref{theorem1},
there is a unique connected graph $H'$ with $G=L(H')$.
For a contradiction, suppose that $H'$ is not planar.
By a result of Kuratowski \cite{ku},
the graph $H'$ has a subgraph $H$
that is a subdivision of a graph $H_0$
such that $H_0$ is either $K_5$ or $K_{3,3}$.
Since $L(H)$ is an induced subgraph of $L(H')$,
the graph $L(H)$ is in ${\rm ORTH}[3,2,3]$.
Now, if $H_0$ is $K_5$ or $K_{3,3}$,
then Lemma \ref{lemma1} or Lemma \ref{lemma2}
implies the contradiction that $L(H)$ is not in ${\rm ORTH}[3,2,3]$,
respectively.
$\Box$
\noindent Since Lemma \ref{lemma1} actually concerns subdivisions of the planar graph $K_5-\{ e,f\}$, the containment in Theorem \ref{theorem3} is proper.
\section{Conclusion}
The most natural open problems concern the structure
of the graphs in the classes ${\rm ORTH}[3,2,3]$ and ${\rm ORTH}[h,2,2]$ for $h\geq 4$.
For ${\rm ORTH}[3,2,3]$, the complexity of the recognition is unknown.
In view of Corollary \ref{corollary2}, efficient recognition algorithms for all classes ${\rm ORTH}[h,2,t]$ seem possible.
Our results should have further algorithmic consequences.
If, for example, $G$ and $H$ are as in Theorem \ref{theorem2},
then the chromatic number of $G$ is either the maximum degree of $H$ or one more,
and, by Corollary \ref{corollary2}, these two cases can be distinguished efficiently.
\end{document}
|
\begin{document}
\markright{INTRODUCTION}
\begin{center}
{\small Group Representation Theory, eds. M. Geck, D. Testerman, J. Th\'evenaz,}
{\small EPFL Press, Lausanne 2007, pp. 405-450 }\vskip1cm
{\Large\bfseries Bounds for the orders of the finite subgroups of $G(k)$}
\vskip 0.7cm
Jean-Pierre SERRE
\end{center}
\vskip1cm
\specialsection*{\bf Introduction}
The present text reproduces - with a number of additions - a series of three two-hour lectures given at the Ecole Polytechnique F\'ed\'erale de Lausanne (E.P.F.L.) on May 25-26-27, 2005.
The starting point is a classical result of Minkowski, dating from 1887, which gives a multiplicative upper bound for the orders of the finite subgroups of $\operatorname{\mathbf{GL}}_n(\mathbf{Q})$. The method can easily be extended to other algebraic groups than $\operatorname{\mathbf{GL}}_n$, and the field $\mathbf{Q}$ can be replaced by any number field. What is less obvious is that:
a) one can work over an arbitrary ground field;
b) in most cases one may construct examples showing that the bound thus obtained is optimal.
This is what I explain in the lectures.
\vskip 0.3cm
Lecture I is historical: Minkowski (\S 1), Schur (\S 2), Blichfeldt and others (\S 3). The results it describes are mostly well-known, so that I did not feel compelled to give complete proofs.
Lecture II gives upper bounds for the order of a finite $\ell$-subgroup of $G(k)$, where $G$ is a reductive group over a field $k$, and $\ell$ is a prime number. These bounds depend on $G$ via its root system, and on $k$ via the size of the Galois group of its $\ell$-cyclotomic tower (\S 4). One of these bounds (called here the S-bound, cf. \S 5) is a bit crude but is easy to prove and to apply. The second one (called the M-bound) is the most interesting one (\S 6). Its proof follows Minkowski's method, combined with Chebotarev's density theorem (for schemes of any dimension, not merely dimension 1); it has a curious cohomological generalization, cf. \S 6.8. The last subsection (\S 6.9) mentions some related problems, not on semisimple groups, but on Cremona groups; for instance: does the field $\mathbf{Q}(X,Y,Z)$ have an automorphism of order 11 ?
Lecture III gives the construction of ``optimal" large subgroups. The case of the classical groups (\S 9) is not difficult. Exceptional groups such as $E_8$ are a different matter; to handle them, we shall use Galois twists, braid groups and Tits groups, cf. \S\S 10-12.
\eject
{\sl Acknowledgements.} A first draft of these notes, made by D. Testerman and R. Corran, has been very useful; and so has been the generous help of D. Testerman with the successive versions of the text. My thanks go to both of them, and to the E.P.F.L. staff for its hospitality. I also thank M. Brou\'e and J. Michel for several discussions on braid groups.
\vskip 0.5cm
\noindent J-P. Serre\hskip 2cm April 2006
\vskip2cm
\begin{center}
{\bf Table of Contents}
\end{center}
\vskip 1cm
${}$\hskip2cm Lecture I. History: Minkowski, Schur, ...
\begin{enumerate}
\item[1.] Minkowski
\item[2.] Schur
\item[3.] Blichfeldt and others
\end{enumerate}
${}$\hskip2cm Lecture II. Upper bounds
\begin{enumerate}
\item[4.] The invariants $t$ and $m$
\item[5.] The S-bound
\item[6.] The M-bound
\end{enumerate}
${}$\hskip2cm Lecture III. Construction of large subgroups
\begin{enumerate}
\item[7.] Statements
\item[8.] Arithmetic methods $(k = \mathbf{Q})$
\item[9.] Proof of theorem 9 for classical groups
\item[10.] Galois twists
\item[11.] A general construction
\item[12.] Proof of theorem 9 for exceptional groups
\item[13.] Proof of theorems 10 and 11
\item[14.] The case $m = \infty$
\end{enumerate}
${}$\hskip2cm References
\markright{LECTURE I: HISTORY: MINKOWSKI, SCHUR, ...}
\specialsection*{\bf I. History: Minkowski, Schur, ...}
\vskip 0.5cm
\begin{center}
{\bf {\S 1. Minkowski}}
\end{center}
\vskip 0.3cm
Reference: [Mi 87].
\setcounter{section}{1}
\subsection{Statements} We shall use the following notation:
$\ell$ is a fixed prime number; when we need other primes we usually denote
them by $p$;
the $\ell$-adic valuation of a rational number $x$ is denoted by $v_\ell (x)$; one has $v_{\ell}(\ell ) = 1$, and $v_\ell (x) = 0$ if $x$ is an integer with $(x, \ell ) = 1$;
the number of elements of a finite set $A$ is denoted by $|A|$; we write $v_\ell (A)$ instead of $v_\ell (|A|)$; if $A$ is a group, $\ell^{v_\ell (A)}$ is the order of an $\ell$-Sylow of $A$;
if $x$ is a real number, its integral part (``floor") is denoted by $[x]$.
\medskip
\noindent We may now state Minkowski's theorem ([Mi 87]):
\begin{mytheorem1}
\label{I.1}
Let $n$ be an integer $\ge 1$, and let $\ell$ be a prime number. Define{\rm :}
$$
M(n,\ell ) = \left[ \frac{n}{\ell-1} \right] + \left[ \frac{n}{\ell(\ell-1)} \right] + \left[ \frac{n}{\ell^2(\ell-1)} \right] + \cdots
$$
Then{\rm :}
\noindent {\rm (i)} If $A$ is a finite subgroup of $\operatorname{\mathbf{GL}}_n (\mathbf{Q} )$, we have $v_\ell (A) \le M(n,\ell ).$
\noindent {\rm (ii)} There exists a finite $\ell$-subgroup $A$ of $\operatorname{\mathbf{GL}}_n(\mathbf{Q} )$ with $v_\ell (A) = M(n,\ell )$.
\end{mytheorem1}
The proof will be given in \S 1.3 and \S 1.4.
\vskip 0.2cm
\noindent {\sl Remarks.}
1) Let us define an integer $M(n)$ by:
$$
M(n) = \prod_\ell\, \ell^{M(n,\ell)} .
$$
Part (i) of th.1 says that the order of any finite subgroup of $\operatorname{\mathbf{GL}}_n (\mathbf{Q})$ {\sl divides} $M(n)$, and part (ii) says that $M(n)$ is the smallest integer having this property. Hence $M(n)$ is a sharp multiplicative bound for $|A|$.
Here are the values of $M(n)$ for $n \le 8$:
\vskip 0.1cm
\noindent $M(1) = 2$
\noindent $M(2) = 2^3\!\cdot3=24$
\noindent $M(3) = 2^4\!\cdot3 = 48$
\noindent $M(4) = 2^7\!\cdot3^2\!\cdot5 = 5760$
\noindent $M(5) = 2^8\!\cdot3^2\!\cdot 5 = 11520$
\noindent $M(6) = 2^{10}\!\cdot3^4\!\cdot5\cdot 7 = 2903040 $
\noindent $M(7) = 2^{11}\!\cdot3^4\!\cdot5\cdot7 = 5806080$
\noindent $M(8) = 2^{15}\!\cdot3^5\!\cdot5^2\!\cdot 7 = 1393459200.$
\vskip 0.1cm
Note that
$$M(n)/M(n-1) =\left\{ \begin{array}{ll}
2 &{\hbox{\sl if }} n {\hbox{\sl { is odd }}}\\
&\\
{\hbox{\rm {denominator of }}} b_n/n& {\hbox{\sl { if }}} n {\hbox{\sl { is even}}},\end{array}\right.$$
\noindent where $b_n$ is the $n$-th Bernoulli number. (The occurrence of the Bernoulli numbers is natural in view of the mass formulae which Minkowski had proved a few years before.)
2) One may ask whether there is a finite subgroup $A$ of $\operatorname{\mathbf{GL}}_n(\mathbf{Q})$ of order $M(n)$. It is so for $n = 1$ and $n = 3$ and probably for no other value of $n$ (as Burnside already remarked on p.484 of [Bu 11]). Indeed, some incomplete arguments of Weisfeiler and Feit would imply that the upper bound of $|A|$ is $2^n\cdot n$! if $n > 10$, which is much smaller than $M(n)$. See the comments of Guralnick-Lorenz in [GL 06], \S 6.1.
\vskip 0.2cm
\noindent{\small{\sl Exercise.} Let $\left[\frac{n}{\ell-1}\right] = \sum a_i\ell^{i}, 0 \le a_i \le \ell -1$, be the $\ell$-adic expansion of $\left[\frac{n}{\ell-1}\right].$
\noindent Show that
$
M(n,\ell ) = \sum a_i \frac{\ell^{i+1}-1}{\ell -1} = \sum M(a_i\ell^{i} (\ell-1),\ell).
$}
\subsection{Minkowski's lemma.} Minkowski's paper starts with the following often quoted lemma:
\begin{lemma}
\label{lem1} If $m \ge 3$, the kernel of $\operatorname{\mathbf{GL}}_n(\mathbf{Z} ) \rightarrow \operatorname{\mathbf{GL}}_n (\mathbf{Z} / m\mathbf{Z})$
is torsion free.
\end{lemma}
\begin{proof}
Easy exercise ! One may deduce it from general results on formal groups over local rings, cf. Bourbaki [LIE III], \S7. Many variants exist. For instance:
\vskip 0.2cm
\noindent{\bf Lemma 1$^\prime$.} {\em Let $R$ be a local ring with maximal ideal $\mathfrak{m}$ and residue field $k = R/\mathfrak{m}.$ If $\ell$ is a prime number distinct from {\rm char}$(k)$, the kernel of the map $\operatorname{\mathbf{GL}}_n(R) \rightarrow \operatorname{\mathbf{GL}}_n(k)$ does not contain any element of order $\ell$. }
\vskip 0.2cm
\noindent{\sl Proof.} Suppose $x \in \operatorname{\mathbf{GL}}_n(R)$ has order $\ell$ and gives 1 in $\operatorname{\mathbf{GL}}_n(k)$. Write $x = 1 + y$; all the coefficients of the matrix $y$ belong to $\mathfrak{m}$. Since $x^\ell = 1$, we have
$$
\ell \cdot y + {\ell\choose2}\cdot y^2 + \dots + \ell\cdot y^{\ell -1} + y^\ell = 0,
$$
which we may write as $y\cdot u = 0$, with $u = \ell + {\ell\choose2} y + \dots + y^{\ell -1}$. The image of $u$ in $\operatorname{\mathbf{GL}}_n(k)$ is $\ell$, which is invertible. Hence $u$ is invertible, and since $y\cdot u$ is 0, this shows that $y = 0$.
\end{proof}
Several other variants can be found in [SZ 96].
\vskip 0.3cm
\noindent {\sl Remark.}
A nice consequence of lemma $1^\prime$ is the following result of Malcev and Selberg ([Bo 69], \S 17):
$(^*)$ {\sl Let} $\Gamma$ {\sl be a finitely generated subgroup of} $\operatorname{\mathbf{GL}}_n(K)$, {\sl where} $K$ {\sl is a field of characteristic} $0$. {\sl Then} $\Gamma$
{\sl has a torsion free subgroup of finite index}.
\noindent {\sl Sketch of proof} (for more details, see Borel, {\sl loc.cit.}). Let $S$ be a finite generating subset of $\Gamma$, and let $L$ be the ring generated by the coefficients of the elements of $S \cup S^{-1}$. We have $\Gamma \subset \operatorname{\mathbf{GL}}_n (L)$. Let $\mathfrak{m}$ be a maximal ideal of $L$; the residue field $k = L/\mathfrak{m}$ is finite ([AC V], p.68, cor.1 to th.3); let $p$ be its characteristic. The kernel $\Gamma_1$ of $\Gamma \rightarrow \operatorname{\mathbf{GL}}_n(k)$ has finite index in $\Gamma$; by lemma $1^\prime$ (applied to the local ring $R = L_{\mathfrak{m}}$), $\Gamma_1$ does not have any torsion except possibly $p$-torsion. By choosing another maximal ideal of $L$, with a different residue characteristic, one gets a torsion free subgroup of finite index of $\Gamma_1$, and hence of $\Gamma$.
$\Box$
\vskip 0.3cm
\noindent{\sl Remark.} When $K$ has characteristic $p > 0$ the same proof shows that $\Gamma$ has a subgroup of finite index which is ``$p^\prime$-torsion free", i.e. such that its elements of finite order have order a power of $p$.
\subsection{Proof of theorem 1 (i).} Let $A$ be a finite subgroup of $\operatorname{\mathbf{GL}}_n(\mathbf{Q})$; we have to show that $v_\ell(A) \leq M(n,\ell )$. Note first:
\subsubsection{The group $A$ is conjugate to a subgroup of $\operatorname{\mathbf{GL}}_n(\mathbf{Z})$.}${}$
This amounts to saying that there exists an $A$-stable lattice in $\mathbf{Q}^n$,
which is clear: just take the lattice generated by the $A$-transforms of the
standard lattice $\mathbf{Z}^n$.
\subsubsection{There is a positive definite quadratic form on ${\mathfrak m}athbf Q^n$, with
integral coefficients, which is invariant by $A$} ${}$
Same argument: take the sum of the $A$-transforms of $x_1^2 + \dots + x_n^2$,
and multiply it by a suitable non-zero integer, in order to cancel any denominator.
\noindent Let us now proceed with the proof of $v_\ell (A) \leq M(n,\ell )$. We do it in two steps:
\subsubsection{The case $\ell > 2$} ${}$
By 1.3.1, we may assume that $A$ is contained in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z)$. Let $p$ be a prime number $\not= 2$. By lemma 1, the map $A \rightarrow \operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)$ is injective. Hence
$$
v_\ell (A) \le a(p) = v_\ell \big(\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)\big).
$$
The order of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)$ is $p^{n(n-1)/2}(p-1)(p^2-1)\dots (p^n-1)$. Let us assume that $p \not= \ell$. Then we have
$$
a(p) = \sum^n_{i=1} v_\ell (p^{i}-1).
$$
We now choose $p$ in such a way that $a(p)$ is as small as possible. More precisely, we choose $p$ such that:
$(^*)$ \emph{The image of $p$ in $(\mathbf Z/\ell^2\mathbf Z)^*$ is a generator of that group.}
This is possible by Dirichlet's theorem on the existence of primes in arithmetic progressions (of course, one should also observe that $({\mathfrak m}athbf Z/\ell^2{\mathfrak m}athbf Z)^*$ is cyclic.)
Once $p$ is chosen in that way, then $p^{i}-1$ is divisible by $\ell$ only if $i$ is divisible by $\ell-1$; moreover, one has $v_\ell (p^{\ell-1}-1) = 1$ because of $(^*)$, and this implies that $v_\ell(p^{i}-1) = 1 + v_\ell (i)$ if $i$ is divisible by $\ell-1$. (This is where the hypothesis $\ell > 2$ is used.) One can then compute $a(p)$ by the formula above. The number of indices $i \le n$ which are divisible by $\ell-1$ is $\left[\frac{n}{\ell-1}\right]$. We thus get:
\begin{eqnarray*}
a(p) & = & \left[\frac{n}{\ell-1}\right] + \sum_{1 \le j \le \left[\frac{n}{\ell -1}\right]} v_\ell (j) = \left[\frac{n}{\ell-1}\right] + v_\ell \big(\left[\frac{n}{\ell-1}\right]!\big)\\
& = & \left[\frac{n}{\ell-1}\right] + \left[\frac{n}{\ell (\ell -1)}\right] + \dots = M(n,\ell ).
\end{eqnarray*}
\noindent This proves th.1 (i) in the case $\ell \not= 2$.
\subsubsection{The case $\ell = 2$.}${}$
When $\ell = 2$, the method above does not give the right bound as soon as $n > 1$. One needs to replace $\operatorname{{\mathfrak m}athbf{GL}}_n$ by an orthogonal group. Indeed, by 1.3.1 and 1.3.2, we may assume, not only that $A$ is contained in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z)$, but also that it is contained in the orthogonal group $\operatorname{{\mathfrak m}athbf{O}}_n(q)$, where $q$ is a non-degenerate quadratic form with integral coefficients. Let $D$ be the discriminant of $q$, and let us choose a prime number $p > 2$ which does not divide $D$. The image of $A$ in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)$ is contained in the orthogonal group $\operatorname{{\mathfrak m}athbf{O}}_n ({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)$ relative to the reduction of $q$ mod $p$. If we put $r = [n/2]$, the order of $\operatorname{{\mathfrak m}athbf{O}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)$ is known to be:
$$2\cdot p^{r^2} (p^2-1) (p^4-1) \dots (p^{2r}-1)\quad \mbox{if $n$ is odd,}
$$
\noindent and
$$2\cdot p^{r(r-1)}(p^2-1)(p^4-1)\dots (p^{2r}-1)/(p^r+\varepsilon)\quad \mbox{if $n$ is even,}
$$
with $\varepsilon = \pm 1$ equal to the Legendre symbol at $p$ of $(-1)^rD.$
If we choose $p \equiv \pm 3$ (mod 8), we have $v_2(p^{2i}-1) = 3 + v_2(i)$, and
$v_2(p^r+\varepsilon ) \ge 1$. If $n$ is odd, this gives
$$v_2(\operatorname{{\mathfrak m}athbf{O}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)) = 1 + 3r + v_2(r!) = n + r + \left[\frac{r}{2}\right] + \left[\frac{r}{4}\right] + \dots = M(n,2),$$ and, if $n$ is even:
$$v_2(\operatorname{{\mathfrak m}athbf{O}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)) \le 3r + v_2(r!) = M(n,2).$$
Hence $v_2(A)$ is at most equal to $M(n,2)$.
$\Box$
\vskip 0.3cm
\noindent {\sl Remark.} There are several ways of writing down this proof. For instance:
- There is no need to embed $A$ in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z)$. It sits in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z[1/N])$ for a suitable $N {{\mathfrak m}athfrak g}e 1$, and this allows us to reduce mod $p$ for all $p$'s not dividing $N$.
- Minkowski's lemma is not needed either: we could replace it by the trivial fact that a matrix which is different from 1 is not congruent to 1 {\mathfrak m}box{$\operatorname{mod} p$} for all large enough $p$'s.
- Even when $\ell > 2$, we could have worked in $\operatorname{{\mathfrak m}athbf{O}}_n$ instead of $\operatorname{{\mathfrak m}athbf{GL}}_n$; that is what Minkowski does.
- When $\ell = 2$ the case $n$ even can be reduced to the case $n$ odd by observing that, if $A \subset \operatorname{{\mathfrak m}athbf{GL}}_n ({\mathfrak m}athbf Q)$, then $A\times \{\pm 1\}$ embeds into $\operatorname{{\mathfrak m}athbf{GL}}_{n+1}({\mathfrak m}athbf Q)$, and $M(n+1,2)$ is equal to $1 + M(n,2)$.
\subsection{Proof of theorem 1 (ii).} The symmetric group $S_\ell$ has a faithful representation $S_\ell \rightarrow \operatorname{{\mathfrak m}athbf{GL}}(V_1)$ where $V_1$ is a ${\mathfrak m}athbf Q$-vector space of dimension $\ell-1$. Put $r = \left[\frac{n}{\ell-1}\right]$, and let $V = V_1 \oplus \dots \oplus V_r$ be the direct sum of $r$ copies of $V_1$. Let $S$ be the semi-direct product of $S_r$ with the product $(S_\ell )^r$ of $r$ copies of $S_\ell$ (``wreath product"). The group $S$ has a natural, and faithful, action on $V$. We may thus view $S$ as a subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_{r(\ell -1)}({\mathfrak m}athbf Q)$, hence also of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$, since $n {{\mathfrak m}athfrak g}e r(\ell-1)$. We have
$$ v_\ell (S) = r + v_\ell (r!) = \left[\frac{n}{\ell-1}\right] + \left[\frac{n}{\ell(\ell-1)}\right] + \dots = M(n,\ell).$$
An $\ell$-Sylow $A$ of $S$ satisfies the conditions of th.1 (ii).
$\Box$
\vskip 0.2cm
\noindent {\sl Example.}
When $\ell = 2$ the group $S$ defined above is the ``hyper-octahedral group", i.e. the group of automorphisms of an $n$-cube (= the Weyl group of a root system
of type $B_n$); in ATLAS notation, it may be written as $2^n\cdot S_n$.
\subsection{A conjugacy theorem.} The finite $\ell$-subgroups of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$ have the following Sylow-like property:
\begin{mytheorem1'} Let $A$ and $A^\prime$ be two finite $\ell$-subgroups of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$. Assume that $A$ has the maximal order allowed by th.$1$. Then $A^\prime$ is conjugate to a subgroup of $A$.
\end{mytheorem1'}
\begin{corollary}
If $|A| = |A^\prime| = \ell^{M(n,\ell)}$, then $A$ and $A^\prime$ are conjugate in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$.
\end{corollary}
\vskip 0.5cm
\noindent{\sl Proof of theorem $1^\prime$.} See Bourbaki, [LIE III], \S7, exerc.6 f) where only the case $\ell > 2$ is given, and Feit [Fe 97] who does the case $\ell = 2$. Let us sketch Bourbaki's method (which we shall use in \S6.6 in a more general setting):
We may assume that $A$ and $A^\prime$ are contained in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z)$. Choose a prime $p$ as in 1.3.3, and reduce mod $p$. The groups $A$ and $A^\prime$ then become {{\mathfrak m}box{$\ell$-subgroups}} of $G_p = \operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Z/p{\mathfrak m}athbf Z)$, and $A$ is an $\ell$-Sylow of $G_p$. By Sylow's theorem applied to $G_p$, one finds an injection $i : A^\prime \rightarrow A$ which is induced by an inner automorphism of $G_p$. The two linear representations of $A^\prime$:
$$
A^\prime \rightarrow \operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)\quad {{\mathfrak m}box {\rm and}}\quad A^\prime \stackrel{i}{\rightarrow} A \rightarrow \operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$$
become isomorphic after reduction mod $p$. Since $p \not= \ell$, a standard argument shows that they are isomorphic over ${\mathfrak m}athbf Q$, which proves th.$1^\prime$ in that case. The case $\ell = 2$ can be handled by a similar, but more complicated, argument: if $n$ is odd, one uses orthogonal groups as in 1.3.4, and one reduces the case $n$ even to the case $n$ odd by the trick mentioned at the end of \S 1.3.
$\Box$
\vskip 0.3cm
\noindent{\small{\sl Exercise.} Let $A(n)$ be a maximal 2-subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$. Show that the $A(n)$'s can be characterized by the following three properties:
\begin{eqnarray*}
A(1) & = & \{\pm 1\}.\\
A(2n) & = & \big(A(n)\times A(n)\big)\cdot\{\pm 1\} \,\, {{\mathfrak m}box{(wreath product) if $n$ is a power of $2$.}}\\
A(n) & = & A(2^{^{m_1}})\times \dots \times A(2^{^{m_k}})\,{{\mathfrak m}box{if $n$ = $2^{^{m_1}} \!\!+\dots + 2^{^{m_k}} $ with $m_1 < \dots < m_k$}}.
\end{eqnarray*}}
\vskip 0.5cm
\begin{center}
{\bf {\S 2. Schur}}
\vskip 0.3cm
\end{center}
\setcounter{section}{2}
\setcounter{subsection}{0}
Ten years after [Mi 87], Frobenius founded the theory of characters of finite
groups. It was then (and still is now) very tempting to use that theory to
give a different proof of Minkowski's results. The first people to do so were
Schur ([Sch 05]) and Burnside ([Bu 11], Note G). Schur's paper is especially
interesting. He works first over ${\mathfrak m}athbf Q$, as Minkowski did, and uses a very
original argument in character theory, see \S 2.1 below. He then attacks the
case of an arbitrary number field, where he gets a complete answer, see \S 2.2.
\subsection{Finite linear groups with rational trace.} What Schur proves in \S1 of [Sch 05] is:
\begin{mytheorem2} Let $A$ be a finite $\ell$-subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf C).$ Assume that the traces of the elements of $A$ lie in ${\mathfrak m}athbf Q$. Then $v_\ell(A) \le M(n,\ell )$, where $M(n,\ell)$ is as in th.$1$.
\end{mytheorem2}
The condition on the traces is obviously satisfied if $A$ is contained in $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$. Hence th.2 is a generalization of th.1. (As a matter of fact, it is a genuine generalization only when $\ell = 2$; indeed, when $\ell > 2$, it is known, cf. [Ro 58], that a finite $\ell$-subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf C)$ with rational trace is conjugate to a subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf Q)$.)
\begin{proof} We start from the following general fact, which is implicit in [Sch 05] (and is sometimes called ``Blichfeldt's lemma"):
\begin{proposition}
Let $G$ be a finite subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf C)$ and let $X$ be the subset of ${\mathfrak m}athbf C$ made up of the elements $\operatorname{Tr}(g)$ for $g \in G, g\not= 1.$ Let {\mathfrak m}box{$N = \prod (n-x)$} be the product of the $n-x$, for $x \in X$. Then $N$ is a non-zero integer which is divisible by $|G|.$
\end{proposition}
(Hence the knowledge of the set $X$ gives a multiplicative bound for the order of $G$.)
\vskip 0.3cm
\noindent{\sl Proof.} Let $m = |G|$, and let $z$ be a primitive $m$-th root of unity. The elements of $X$ are sums of powers of $z$; hence they belong to the ring of integers of the cyclotomic field $K = {\mathfrak m}athbf Q(z)$. This already shows that $N$ is an algebraic integer. If $s$ is an element of $\operatorname{Gal}(K/{\mathfrak m}athbf Q)$, one has $s(z) = z^a$ for some $a\in ({\mathfrak m}athbf Z/m{\mathfrak m}athbf Z)^*$. If $x = \operatorname{Tr}(g)$, with $g\in G$, then $s(x) = \operatorname{Tr}(g^a)$, hence $s(x)$ belongs to $X$. This shows that $X$ is stable under the action of $\operatorname{Gal}(K/{\mathfrak m}athbf Q)$; hence $N$ is fixed by $\operatorname{Gal}(K/{\mathfrak m}athbf Q)$; this proves that $N$ belongs to ${\mathfrak m}athbf Z$.
The factors of $N$ are $\not= 0$. Indeed, $\operatorname{Tr}(g)$ is equal to the sum of $n$ complex numbers $z_i$ with $|z_i| = 1$, hence can be equal to $n$ only if all the $z_i$ are equal to $1$, which is impossible since $g\not= 1$. This shows that $N\not= 0$ (one could also prove that $N$ is positive, but we shall not need it).
It remains to see that $N$ is divisible by $|G|$. It is well-known that, if $\chi$ is a generalized character of $G$, the sum $\sum_{g\in G} \chi(g)$ is divisible by $|G|$. Let us apply this to the function $g {\mathfrak m}apsto \chi(g) = \prod_{x\in X}\big(\operatorname{Tr}(g) -x\big)$, which is a ${\mathfrak m}athbf Z$-linear combination of the characters $g {\mathfrak m}apsto \operatorname{Tr}(g)^m, m {{\mathfrak m}athfrak g}e 0$. Since $\chi(g) = 0$ for $g \not= 1$ and $\chi(1) = N$, the sum of the $\chi(g)$ is equal to $N$. Hence $N$ is divisible by $|G|$.\end{proof}
The next lemma gives an information on the $\operatorname{Tr}(g)$'s:
\begin{lemma} Let $A$ be as in th.$2$. If $g\in A$, then $\operatorname{Tr}(g)$ may be written as $n - \ell y$ with $y \in{\mathfrak m}athbf Z$ and $0 \le y \le n/(\ell -1)$.
\end{lemma}
\begin{proof} Each eigenvalue of $g$ is of order $\ell^\alpha$ for some $\alpha {{\mathfrak m}athfrak g}e 0$, and all the eigenvalues with the same $\alpha$ have the same multiplicity. By splitting ${\mathfrak m}athbf C^n$ according to the $\alpha$'s, one is reduced to the following three cases:
\noindent (1) $g = 1$ and $n = 1$. Here $\operatorname{Tr}(g) = 1$ and we take $y = 0.$
\noindent (2) $g$ has order $\ell$ and $n = \ell -1$. Here $\operatorname{Tr}(g) = -1$, and $y = 1$.
\noindent (3) $g$ has order $\ell^\alpha$ with $\alpha > 1$ and $n = \ell^{\alpha -1} (\ell -1)$. Here $\operatorname{Tr}(g) = 0$ and $y = \ell^{\alpha -2} (\ell -1).$
In each case we have $0 \le y \le n/(\ell -1).$\end{proof}
\vskip 0.3cm
\noindent{\sl End of the proof of theorem 2.} We apply prop.1 to $G = A$. By lemma 2, each factor $n-x$ of $N$ can be written as $\ell y$ with $1 \le y \le d = [n/(\ell -1)]$. This shows that $N$ divides the product $\ell^d\cdot d!$ and we have
$$
v_\ell(N) \le d + v_\ell (d!) = [n/(\ell -1)] + [n/\ell (\ell -1)] + \dots = M(n,\ell ).
$$
Since $|G|$ divides $N$, this proves th.2.
$\Box$
\vskip 0.3cm
\noindent{\sl Remark.} One may ask whether th.2 can be complemented by a conjugacy theorem analogous to th.$1^\prime$ of \S 1.5. The answer is of course ``yes" if $\ell > 2$ (because of th.$1^\prime$), but it is ``no" for $\ell = 2$: the dihedral group $D_4$ and the quaternion group $Q_8$ are non-conjugate $2$-subgroups of $\operatorname{{\mathfrak m}athbf{GL}}_2({\mathfrak m}athbf C)$, with rational trace, which have the maximal order allowed by th.2, namely 8.
\subsection{Replacing ${\mathfrak m}athbf Q$ by an arbitrary number field.} This is what Schur does in \S\S 2-6 of [Sch 05]. Before stating his result, some notation is necessary:
- $k$ is a number field, viewed as a subfield of ${\mathfrak m}athbf C$.
- For each $a {{\mathfrak m}athfrak g}e 1$, $z_a$ denotes a primitive $a$-th root of unity.
- (assuming $\ell > 2$). We put $t = [k(z_\ell ):k]$ and we denote by $m$ the maximal $a$ such that $k(z_\ell )$ contains $z_{_{\ell^a}}$ (this notation coincides with Schur's, and it will be extended to arbitrary fields in \S 4 of Lect.II). We put
$$
M_k(n,\ell ) = m\cdot \left[\frac{n}{t}\right] + \left[\frac{n}{\ell t}\right] + \left[\frac{n}{\ell^2t}\right] + \dots
$$
- (assuming $\ell = 2$). We put $t = [k(i):k]$ and we define $m$ as explained in \S 4.2 (warning: $t$ and $m$ do not always coincide with Schur's $t_2$ and $m_2$). We put:
$$
M_k(n,2) = n + (m^\prime -1) \left[\frac{n}{t}\right] + \left[\frac{n}{2t}\right] + \left[\frac{n}{4t}\right] + \dots ,
$$
where $m^\prime$ is equal to $m+1$ in case (b) of \S4.2 and is equal to $m$ in the other cases.
The main result of [Sch 05] is:
\begin{mytheorem2'} Let $A$ be a finite $\ell$-subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n({\mathfrak m}athbf C)$ such that $\operatorname{Tr}(g)$ belongs to $k$ for every $g\in A$. Then $v_\ell (A) \le M_k(n,\ell )$.
\end{mytheorem2'}
\noindent Note that, when $k = {\mathfrak m}athbf Q$, the integer $M_k(n,\ell )$ is equal to Minkowski's $M(n,\ell )$; hence th.2$^\prime$ is a generalization of th.2.
\begin{proof} I shall not give all the details of Schur's proof, but just
explain its main steps. For more information, see [Sch 05] (and also [GL 06] for the case $\ell > 2$).
One of the inputs of the proof is the following result, which had just been proved by Blichfeldt ([Bl 04] - see also \S3 below):
\subsubsection{Every linear representation of $A$ is monomial}${}$
Hence one can decompose the vector space ${\mathfrak m}athbf C^n$ as a direct sum of $n$ lines $D_1,\dots ,D_n$ which are permuted by $A$. This gives a homomorphism $A \rightarrow S_n$; its kernel $A^\prime$ is a normal abelian subgroup of $A$. Hence:
\subsubsection{The group $A$ has a normal abelian subgroup $A^\prime$ such that $(A:A^\prime)$ divides $n!$}${}$
This led Schur to investigate the case where $A$ is abelian. He proved:
\subsubsection{If $A$ is as in th.$2^\prime$, and is abelian, then\ {\rm :}}
$$
v_\ell (A) \le \left\{ \begin{array}{ll}
m\cdot \left[\frac{n}{t}\right] &{{\mathfrak m}box{\sl if}}\,\, \ell > 2\\
&\\
(m^\prime - t)\cdot \left[\frac{n}{t}\right] + n & {{\mathfrak m}box{\sl if}}\,\, \ell = 2.\end{array}
\right.
$$
\noindent{\sl Sketch of proof.} Since $A$ is abelian, and the traces of its
elements belong to $k$, it is conjugate to a subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n(k)$. Let $R$
be the $k$-subalgebra of ${\bf M}_n(k)$ generated by $A$. We may write $R$ as a
product $\prod K_i$, where the $K_i$ are cyclotomic extensions of $k$, of the
form $k(z_{_{\ell^{a_i}}})$, with $a_i {{\mathfrak m}athfrak g}e 0$. Let $n_i = [K_i:k]$; then
$\sum n_i \le n$. The image of $A$ in $K^*_i$ is a cyclic group of order
$\ell^{^{a_i}}$. If $\ell > 2$, it is not difficult to see that
$a_i \le m\cdot\!\left[\frac{n_i}{t}\right]$ for every $i$. Adding up, we find
$\sum a_i \le m\cdot \left[\frac{n}{t}\right]$, and since
$v_\ell (A) \le \sum a_i$, we get the inequality (2.2.3). The case $\ell = 2$ is similar.
\end{proof}
Once this is done, the case $\ell = 2$ follows. Indeed (2.2.2) and (2.2.3) give $v_2(A) \le v_2(A^\prime) + v_2(n!) \le n + (m^\prime -t)\cdot \left[\frac{n}{t}\right] + v_2(n!)$, and this is equivalent to $v_2(A) \le M_k(n,2)$. The case $\ell > 2$ requires more work, cf. [Sch 05], \S 5.
$\Box$
\vskip 0.3cm
\noindent{\sl Remarks}
1) The bound $v_\ell(A) \le M_k(n,\ell )$ is {\sl optimal}; this is proved by the same explicit constructions as in \S 1.4, cf. [Sch 05], \S 6.
2) As we already pointed out in \S 2.1, the hypothesis $\operatorname{Tr}(A) \subset k$ implies, when $\ell > 2$, that $A$ is conjugate to a subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_n(k)$. One may then use Minkowski's method, as will be explained in \S 6 for semisimple algebraic groups (of course $\operatorname{{\mathfrak m}athbf{GL}}_n$ is not semisimple, but the method applies with almost no change -- the invariant degrees $d_i$ of \S 6 have to be replaced by $1,2,\dots ,n)$. The bound found in that way coincides with Schur's.
For $\ell = 2$, if one does not assume that $A$ can be embedded in $\operatorname{{\mathfrak m}athbf{GL}}_n(k)$, I do not see how to apply either Minkowski's method or the cohomological method of \S 6.8. This raises interesting questions. For instance, consider a finite subgroup $A$ of $E_8({\mathfrak m}athbf C)$, and suppose that the conjugacy classes of the elements of $A$ are ${\mathfrak m}athbf Q$-rational. Is it true that $v_2(A) \le 30$, $v_3(A) \le 13, \dots ,$ as would be the case if $A$ were contained in the rational points of a ${\mathfrak m}athbf Q$-form of $E_8$, cf. \S 6.3.2 ?
\vskip 0.5cm
\begin{center}
{\bf {\S 3. Blichfeldt and others}}
\vskip 0.3cm
\end{center}
\setcounter{section}{3}
\setcounter{subsection}{0}
Blichfeldt's theorem (\S 3.1 below) has already been used in \S 2.2. The results of \S 3.3 will be applied in \S 5.4, in order to prove what I call the ``S-bound".
\subsection{Blichfeldt's theorem.} Recall that a finite group $A$ is called {\sl supersolvable} if it has a composition series
$$
1 = A_0 \subset A_1 \subset \dots \subset A_m = A
$$
where the $A_i$ are normal in $A$ (and not merely in $A_{i+1})$ and the quotients $A_i/A_{i-1}$ are cyclic. One has
\begin{center}
nilpotent $\Rightarrow$ supersolvable $\Rightarrow$ solvable.
\end{center}
In particular, an $\ell$-group is supersolvable.
One proves easily:
$(^*)$ {\sl If $A$ is supersolvable and non abelian, there exists an abelian normal subgroup $A^\prime$ of $A$ which is not contained in the center of $A$.}
Recall also that a linear representation $V$ of a group $A$ is called {\sl monomial} if one can split $V$ as a direct sum of lines which are permuted by $A$. When $V$ is irreducible, this amounts to saying that $V$ is induced by a 1-dimensional representation of a subgroup of $A$.
We may now state Blichfeldt's theorem ([Bl 04], see also [Bu 11], \S 258):
\begin{mytheorem3} Every complex linear representation of a supersolvable finite group is monomial.
\end{mytheorem3}
\noindent (As a matter of fact, Blichfeldt was only interested in the case where $A$ is nilpotent.)
\begin{proof}
The argument is now standard. We may assume that the given representation $V$ is irreducible and faithful. If $A$ is abelian, we have $\dim V = 1$ and there is nothing to prove. If not, we choose
$A^\prime$ as in ${(^*)}$ above, and we split $V$ as $V = \oplus V_\chi$, where $\chi$ runs through the 1-dimensional characters of $A^\prime$, and $V_\chi$ is the corresponding eigenspace; let $V_\psi$ be a non-zero $V_\chi$; it is distinct from $V$ (otherwise, $A^\prime$ would be central), and every non-zero $V_\chi$ is an $A$-transform of $V_\psi$ (because $V$ is irreducible). Call $B$ the subgroup of $A$ stabilizing $V_\psi$. We have $A^\prime \subset B \subset A$, and $|B| < |A|$. Using induction on $|A|$, we may assume that th.3 is true for $B$; this gives a splitting of $V_\psi$ as a direct sum of lines which are stable under $B$. By transforming them by $A$, we get the desired splitting of $V$.
$\Box$
\subsection{Borel-Serre.} In [BS 53], Borel and I proved:
\begin{mytheorem3'}
Let $G$ be a compact real Lie group, and let $A$ be a finite supersolvable subgroup of $G$. There exists a maximal torus $T$ of $G$ which is normalized by $A$.
\end{mytheorem3'}
\vskip 0.3cm
\noindent{\sl Remark.} When one applies th.3$^\prime$ to $G = \operatorname{{\mathfrak m}athbf{U}}_n({\mathfrak m}athbf C)$, one recovers th.3. Hence th.3$^\prime$ may be viewed as a generalization of Blichfeldt's theorem.
\vskip 0.3cm
\noindent{\sl Proof of theorem 3 \!$^\prime$} ({\sl sketch}).
\begin{lemma}
Let ${\mathfrak m}athfrak{g}$ be a finite dimensional Lie algebra over a field of characteristic 0, and let $s$ be an automorphism of prime order of ${\mathfrak m}athfrak{g}$. If $s$ has no fixed point $\not= 0$, then ${\mathfrak m}athfrak{g}$ is nilpotent.
\end{lemma}
\noindent (Note the analogy with a -- much deeper -- theorem of Thompson [Th~60-64]: if a finite group $G$ has an automorphism of prime order with no non-trivial fixed point, then $G$ is nilpotent.)
\vskip 0.3cm
\noindent{\sl Proof of lemma 3.} By extending scalars, we may assume that the ground field is algebraically closed. Let $p$ be the order of $s$, and let $z$ be a primitive $p$-th root of unity. Let ${\mathfrak m}athfrak{g}_i$ be the kernel of $s - z^i$ in ${\mathfrak m}athfrak{g}$. We have
$$
{\mathfrak m}athfrak{g} = {\mathfrak m}athfrak{g}_0 \oplus {\mathfrak m}athfrak{g}_1 \oplus \dots \oplus {\mathfrak m}athfrak{g}_{p-1}\, ,
$$
and the hypothesis made on $s$ means that ${\mathfrak m}athfrak{g}_0 = 0$. One then shows that $\operatorname{ad}(x)^{p-1} = 0$ for every $x$ belonging to one of the ${\mathfrak m}athfrak{g}_i$'s. This implies that the Killing form of ${\mathfrak m}athfrak{g}$ is 0, hence that ${\mathfrak m}athfrak{g}$ is solvable (Cartan's criterion). The fact that ${\mathfrak m}athfrak{g}$ is nilpotent follows easily. (For more details, see \S 4 of [BS 54].)
$\Box$
\vskip0.2cm
Once this is done, th.3 \!\!$^\prime$ is proved by an induction argument similar to the one used in the proof of Blichfeldt's theorem, cf. [BS 53], \S 3.\end{proof}
\subsection{Steinberg and Springer-Steinberg.} We now come to the setting of
linear algebraic groups. Let $k$ be a field, and let $G$ be an algebraic
group over $k$. We shall assume in what follows that $G$ is linear and
smooth over $k$; the connected component of the identity of $G$ is denoted by
$G^\circ$. Recall that $G$ is said to be {\sl reductive} if it is connected
and if its unipotent radical (over an algebraic closure of $k$) is trivial, cf. [Bo 91], \S 11.21. If $k = {\mathfrak m}athbf C$,
such groups correspond (by a standard dictionary, cf. [Se 93], \S 5) to the
connected compact Lie groups. [In the literature, a group $G$ such that
$G^\circ$ is reductive is sometimes called ``reductive"; this is reasonable in
characteristic 0, but not otherwise. Here we prefer that ``reductive" implies ``connected".]
Theorem 3 \!\!$^\prime$ has the following analogue:
\begin{mytheorem3''} Let $A$ be a finite supersolvable group of order prime to $\operatorname{char}(k)$ and let $G$ be a reductive group over $k$ on which $A$ acts by $k$-automorphisms. Then there exists a maximal torus $T$ of $G$, defined over $k$, which is stable under the action of $A$.
\end{mytheorem3''}
\noindent (When $k = {\mathfrak m}athbf C$, this is equivalent to th.3 \!$^\prime$, thanks to the dictionary mentioned above.)
\begin{corollary}
If $A$ is a finite supersolvable subgroup of $G(k)$, of order prime to $\operatorname{char} (k)$, there is a maximal $k$-torus $T$ of $G$ whose normalizer $N$ is such that $A$ is contained in $N(k)$.
\end{corollary}
\noindent (Recall that, if $X$ is a $k$-variety, $X(k)$ is the set of $k$-points of $X$.)
\vskip 0.3cm
\noindent{\sl Proof of theorem 3 \!$^{\prime\prime}$.} When $k$ is algebraically closed, this is proved in [SS 68], I.5.16, with the help of several results from [St 68]. For an arbitrary field $k$, the same proof works with very little change. One starts with the following basic result of Steinberg ([St 68], th.7.2):
\begin{proposition}
Assume $k$ is algebraically closed. Let $s : G \rightarrow G$ be a surjective homomorphism. Then there exists a Borel subgroup $B$ of $G$ such that $s(B) = B$.
\end{proposition}
When $s$ has finite order prime to $\operatorname{char}(k)$, one can say much more:
\begin{proposition}
Let $s$ be an automorphism of $G$ of finite order prime to $\operatorname{char}(k)$, and let $G^s$ be the subgroup of $G$ fixed by $s$. Then\hbox{ \!{\rm :}}
{\rm a)} The connected component of $G^s$ is reductive.
{\rm b)} One has $\dim G^s > 0$ if $G$ is not a torus.
{\rm c)} If $k$ is algebraically closed, there exists a Borel subgroup $B$ of $G$ and a maximal torus $T$ of $B$ such that $s(B) = B$ and $s(T) = T$.
\end{proposition}
\vskip 0.3cm\noindent{\sl Proof} ({\sl sketch}). We may assume $k$ is algebraically closed, since assertions a) and b) are ``geometric". A proof of a) is given in [St 68], cor.9.4. A proof of c) is given in [SS 68], I.2.9, as an application of prop.2. Assertion b) follows from c) by the following method of Steinberg: one observes that a pair $(B,T)$ with $B\supset T$, determines {\sl canonically} a homomorphism $h : {\mathfrak m}athbf G_m \rightarrow T$ (indeed $B$ gives a basis of the root system of $(G,T)$, and one takes for $h$ twice the sum of the corresponding coroots). Moreover, $h$ is non-trivial if $G$ is not a torus. The canonicity of $h$ implies that it is fixed by $s$. Hence $G^s$ contains $\operatorname{Im}(h)$.
$\Box$
\vskip 0.3cm
\noindent{\sl End of the proof of th.3 \!$^{\prime\prime}$}. By induction on
$|A| + \dim G$. When $A = 1$, one takes for $T$ any maximal $k$-torus of
$G$; by a theorem of Grothendieck, there is such a torus
(cf. [Bo 91], th.18.2). We may thus assume $A\not= 1$. In that case $A$
contains a cyclic subgroup $<s>$, non-trivial, which is normal. We may also
assume that $G$ is semisimple and that $A$ acts faithfully. Let $G_1$ be the
connected component of $G^s$; we have $\dim G_1>0$, cf. prop.3 b). The group
$A/A^\prime$ acts on $G_1$. By the
induction assumption, there is a maximal torus $T_1$ of $G_1$, defined over
$k$, which is stable under the action of $A/A^\prime$, hence of $A$.
Let $G_2$ be the centralizer of $T_1$
in $G$. It is a reductive group of the same rank as $G$. We have
$\dim G_2 < \dim G$, since $T_1$ is not contained in the center of $G$.
Moreover, $G_2$ is stable under the action by $A$. By applying the
induction assumption to the pair $(G_2,A)$ we get a maximal $k$-torus $T$
of $G_2$ which is $A$-stable. Since $G_2$ and $G$ have the same rank, $T$
is a maximal torus of $G$.
$\Box$
\markright{LECTURE II: UPPER BOUNDS}
\specialsection*{\bf II. Upper bounds}
\setcounter{section}{4}
\setcounter{subsection}{-1}
\setcounter{theorem}{3}
Let $G$ be a reductive group over a field $k$, and let $\ell$ be a prime number, different from $\operatorname{char}(k)$. Let $A$ be a finite subgroup of $G(k)$. We want to give an upper bound for $v_\ell (A)$, in terms of invariants of $G$, $k$ and $\ell$. We give two such bounds. The first one (\S 5) is less precise, but very easy to apply; we call it the S-bound (S for Schur). The other bound (\S 6) is the M-bound (M for Minkowski). Both bounds involve some cyclotomic invariants of $k$, which are defined in \S 4 below.
\vskip 0.5cm
\begin{center}
{\bf {\S 4. The invariants $t$ and $m$}}
\end{center}
\subsection{Cyclotomic characters}
\label{notation}
Let $\bar{k}$ be an algebraic closure of $k$, and let $k_s$ be the separable closure of $k$ in $\bar{k}$. For each $n \ge 1$ prime to $\operatorname{char}(k)$, let $\mu_n \subset k^\ast_s$ be the group of $n$-th roots of unity and let $z_n$ be a generator of $\mu_n$.
The Galois group $\Gamma_k = \operatorname{Gal}(k_s/k)$ acts on $\langle z_n\rangle = \mu_n$. This action defines a continuous homomorphism
$$\chi_{_n}:\Gamma_k \rightarrow \operatorname{Aut}(\mu_n) = (\mathbf Z/n\mathbf Z)^*,$$
which is called the $n$-{\sl th cyclotomic character of} $k$.
This applies in particular to $n = \ell^d \, (d = 0,1,\dots )$; by taking inverse limits we get the $\ell^\infty$-cyclotomic character
$$\chi_{_{\ell^\infty}}:\Gamma_k \rightarrow \mathbf Z^*_\ell = \varprojlim\, (\mathbf Z /\ell^d\mathbf Z)^*,$$
where ${\mathfrak m}athbf Z_\ell$ is the ring of $\ell$-adic integers. What matters for us is the image $\operatorname{Im}$ $\chi_{_{\ell^\infty}}$, which is a closed subgroup of ${\mathfrak m}athbf Z^*_\ell$. To discuss its structure, it is convenient to separate the cases $\ell \not= 2$ and $\ell = 2$.
\subsection{The case $\ell \not= 2$}
We have
$$\mathbf Z^*_\ell = C_{\ell -1} \times \left\{ 1 + \ell\!\cdot\!\mathbf Z_\ell\right\}$$
where $C_{\ell -1}$ is cyclic of order $\ell - 1$ (i.e. $C_{\ell -1}$ is the group $\mu_{\ell -1}$ of the \linebreak$\ell$-adic field $\mathbf Q_\ell$; it is canonically isomorphic to $\mathbf F^*_\ell$). As for $1 + \ell\!\cdot \!\mathbf Z_\ell$, it is procyclic, generated by $1 + \ell$, and isomorphic to the additive group $\mathbf Z_\ell$; its closed subgroups are the groups $1 + \ell^d\!\cdot\! \mathbf Z_\ell = \, \langle 1 + \ell^d\rangle$, $d = 1,2,\dots , \infty$, with the convention $\ell^\infty = 0$.
Since $\ell - 1$ and $\ell$ are relatively prime, the subgroup $\operatorname{Im} \chi_{_{\ell^\infty}}$ of $\mathbf Z^*_\ell$ decomposes as a direct product:
$$\operatorname{Im} \chi_{_{\ell^\infty}} = C_t \times \left\{ 1 + \ell^m \!\cdot\! \mathbf Z_\ell\right\}\, ,$$
where $t$ is a divisor of $\ell-1$, $C_t$ is cyclic of order $t$ and $m = 1,2, \dots$ or $\infty$.
\vskip 0.1cm
\noindent {\sl Remark.}
An alternative definition of the invariants $t$ and $m$ is:
\begin{eqnarray*}
t & = & \left[ k(z_\ell ) : k\right] = k{\mbox{-degree of}}\,\, z_\ell\\
m & = & {\mbox{upper bound of the}} \,\, d\ge 1\,\, {\mbox{such that}} \,\, z_{_{\ell^d}} \,\, {\mbox{is contained in}} \,\, k(z_\ell ).
\end{eqnarray*}
\noindent {\sl Examples.} If $k = \mathbf Q$ or $\mathbf Q_\ell$, $\chi_{_{\ell^\infty}}$ is surjective and we have $t = \ell -1$, $m = 1$. If $k = k_s$, then $\chi_{_{\ell^\infty}}$ is trivial and $t = 1$, $m = \infty$. If $k$ is finite with $q$ elements, $\operatorname{Im} \chi_{_{\ell^\infty}}$ is the closed subgroup of $\mathbf Z^*_\ell$ generated by $q$ and we have:
\begin{eqnarray*}
t & = &{\mbox{order of}} \,\,\, q\,\,\, {\mbox{in}} \,\,\, \mathbf F^*_\ell\,\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\\
m & = & v_\ell (q^t - 1) = v_\ell\,\, (q^{\ell -1} - 1)\, .\quad\quad\quad\quad\quad\quad
\end{eqnarray*}
\subsection{The case $\ell = 2$} Here $\mathbf Z^*_2 = C_2 \times \{ 1 + 4\!\cdot \!\mathbf Z_2\}$, where $C_2 = \left\{ 1, -1\right\}$ and the multiplicative group $1 + 4
\!\cdot \!\mathbf Z_2$ is isomorphic to the additive group $\mathbf Z_2$. There are three possibilities for $\operatorname{Im} \chi_{_{2^\infty}}$:
\begin{enumerate}
\item[(a)] $\operatorname{Im} \chi_{_{2^\infty}} = 1+2^m \! \cdot\!\mathbf Z_2 = \langle 1 + 2^m\rangle$, with $m = 2, \dots ,\infty$.
We put $t=1$.
\item[(b)] $\operatorname{Im} \chi_{_{2^\infty}} = \langle -1+2^m \rangle $, with $m = 2,\dots ,\infty$.
We put $t=2$.
\item[(c)] $\operatorname{Im} \chi_{_{2^\infty}} = C_2 \times \{1 + 2^m\! \cdot\!\mathbf Z_2\} = \langle -1,1+2^m \rangle$, $m = 2,\dots ,\infty$.
We put $t=2$.
\end{enumerate}
If $m < \infty$, these types are distinct. If $m = \infty$, types (b) and (c) coincide; in that case $\operatorname{Im} \chi_{_{2^\infty}}$ is equal to $C_2$.
\vskip 0.5cm
\noindent{\sl Remark.} We have $t = [k (i) : k]$ with the usual notation $i = z_4$. Hence case (a) means that $-1$ is a square in $k$, and in that case $m$ is the largest $d \ge 2$ such that $z_{_{2^d}} \in k$.
If $t = 2$, case (c) is characterized by the fact that $-1$ belongs
to $\operatorname{Im} \chi_{_{2^\infty}}$. As for $m$, it is given by:
\begin{eqnarray*}
m & = & -1 + \mbox{ upper bound of the } d\geq 2 \mbox{ such that } z_{_{2^d}} \in k(i) \mbox{ in case (b)}
\\
m & = & \mbox{upper bound of the } d\geq 2 \mbox{ such that } z_{_{2^d}} \in k(i) \mbox{ in case (c)}.
\end{eqnarray*}
\noindent {\sl Examples.} If $k = \mathbf Q$ or $\mathbf Q_2$, we have type (c) with $t = 2, m = 2$. If $k = \mathbf R$, we have types (b) and (c) with $m = \infty$. If $k$ is separably closed, we have type (a) with $ t = 1$ and $m = \infty$.
When $\operatorname{char} (k) \not= 0$, type (c) is impossible unless $m = \infty$. If $k$ is finite with $q$ elements, we have type (a) with $m = v_2 (q-1)$ if $q \equiv 1$ (mod 4) and type (b) with $m = v_2 (q+1)$ if $q \equiv -1$ (mod 4).
\vskip 0.5cm
\subsection{The case of finitely generated fields.} Let $k_0$ be the prime subfield of $k$, i.e. $\mathbf Q$ if $\operatorname{char}(k) = 0$ or $\mathbf F_p$ if $\operatorname{char}(k) = p > 0$. Suppose that $k$ is {\sl finitely generated over} $k_0$. Then {\sl the invariant} $m$ {\sl is finite}, i.e. $\operatorname{Im} \chi_{_{\ell^\infty}}$ is infinite.
Indeed, if not, there would be a finite extension $k'$ of $k$ containing the group $\mu$ of all the $\ell^d$-th roots of unity $(d = 1,2, \dots ).$ Let $K = k_0 (\mu )$ be the extension of $k_0$ generated by $\mu$. Then:
\begin{enumerate}
\item[(a)] $K$ is algebraic over $k_0$
\item[(b)] $K$ is finitely generated over $k_0$ (because it is contained in $k'$, cf. \newline [A V], \S14, cor. 3 to prop. 17).
\end{enumerate}
Hence $K$ is either a finite field or a number field, which is absurd since such a field only contains finitely many roots of unity.
\vskip 1cm
\begin{center}
{\bf {\S 5. The S-bound}}
\end{center}
\vskip 0.5cm
\setcounter{section}{5}
\setcounter{subsection}{0}
We start with the case of tori:
\subsection{The S-bound for a torus: statements}
\begin{mytheorem4}
\label{thm4}
Let $T$ be a torus over $k$, and let $A$ be a finite subgroup of $T(k)$. Then
$$
v_\ell (A) \,\le\, m \left[ \frac{\dim T}{\varphi (t)}\right]\, ,
$$
where $m$ and $t$ are defined as in \S {\rm 4} above and $ \varphi$ is Euler's totient function.
\end{mytheorem4}
\noindent The bound given by th.4 is optimal. More precisely:
\begin{mytheorem4'}
\label{thm4'}
Assume $m < \infty$. For every $n \ge 1$ there exist a $k$-{\sl torus} $T$ of dimension $n$ and a finite subgroup $A$ of $T(k)$ such that $v_{\ell} (A) = m \cdot [n/\varphi (t)].$
\end{mytheorem4'}
\vskip 0.4cm
\noindent {\sl Example.} Take $k = \mathbf Q$ and $\ell = 2$, so that $t = m = 2$. Then th.4 says that any finite 2-subgroup of $T(\mathbf Q)$ has order $\le 4^{\dim\, T}$, and th.4$^\prime$ says that this bound can be attained.
\vskip 0.5cm
\subsection{Proof of theorem 4.}
\setcounter{lemma}{3}
\begin{lemma}
\label{lem4}
Let $u \in \mathbf{M}_n(\mathbf Z_\ell)$ be an $n \times n$ matrix with coefficients in $\mathbf Z_\ell$,
which we view as an endomorphism of $(\mathbf Q_\ell/\mathbf Z_\ell)^n$.
Then
$$
v_\ell \big ( \ker(u) \big ) = v_\ell \big ( \det(u) \big ).
$$
\end{lemma}
\begin{proof}
This is clear if $u$ is a diagonal matrix, and one reduces the general case
to the diagonal one by multiplying $u$ on the right and on the left by invertible matrices.
\end{proof}
Now let $n$ be the dimension of the torus $T$.
Let $Y(T) = \operatorname{Hom}_{k_s}(\mathbf G_m,T)$ be the group of cocharacters of $T$.
The action of $\Gamma_k$ on $Y(T)$ gives a homomorphism
$\rho:\Gamma_k \rightarrow \operatorname{Aut} \big ( Y(T) \big ) \cong \operatorname{\mathbf{GL}}_n(\mathbf Z)$.
If we identify $T$ with $\mathbf G_m \times \dots \times \mathbf G_m$ (over $k_s$) by choosing a basis of $Y(T)$,
the $\ell^\infty$-division points of $T(k_s)$ form a group isomorphic to $(\mathbf Q_\ell/\mathbf Z_\ell)^n$
and the action of $g \in \Gamma_k$ on that group is by
$\rho(g) \chi(g)$, where $\chi = \chi_{_{\ell^\infty}}$.
\begin{lemma}
\label{lem5}
{\sl Let} $A$ {\sl be a finite subgroup of} $T(k)$. {\sl For every} $g \in \Gamma_k$ {\sl we have}
$$
v_\ell (A) \,\, \le\,\, v_\ell \big(\det (\rho (g)\, \chi (g) - 1)\big) = v_\ell \big(\det (\rho (g^{-1}) - \chi (g))\big).$$
\end{lemma}
\begin{proof}
By replacing $A$ by its $\ell$-Sylow subgroup, we may assume that $A$ is an $\ell$-group, hence is contained in the group of $\ell^\infty$-division points of $T(k_s)$. Since the points of $A$ are rational over $k$, they are fixed by $g$, i.e. they belong to the kernel of $g\!-\!1$. The inequality then follows from lemma 4, applied to $u = \rho (g) \,\chi (g) - 1.$
\end{proof}
We now choose $g \in \Gamma_k$ such that the inequality of lemma 5
gives that of th.4.
Here is the choice:
$$
\quad \chi(g) = z_tu, \quad \mbox{ where } z_t\in \mathbf Z_{\ell}^* \mbox{ has order } t, \mbox{ and } v_\ell (1\!-\!u) = m.
$$
(This works for $\ell=2$ as well as for $\ell \neq 2$, thanks to the definition of $t$ in \S4.1 and \S4.2.
Note that in all cases but $\ell=2$, type (c), $\chi(g)$ is a topological generator of $\operatorname{Im}\chi$.)
We have $\rho(g) \in \operatorname{\mathbf{GL}}_n(\mathbf Z)$, and $\rho(g)$ is of finite order
(because the image of $\rho:\Gamma_k \rightarrow \operatorname{\mathbf{GL}}_n(\mathbf Z)$ is finite).
Hence the characteristic polynomial $F$ of $\rho(g^{-1})$ is a product of cyclotomic polynomials:
\numberwithin{equation}{section}
\numberwithin{equation}{subsection}
\setcounter{equation}{0}
\setcounter{subsection}{2}
\setcounter{subsubsection}{0}
\begin{equation}
\label{eq521}
F = \prod \Phi_{d_j}, \quad \mbox{ with } \sum \varphi(d_j) = n.
\end{equation}
The inequality of lemma~\ref{lem5} gives
$$
\label{eq10}
v_\ell (A) \leq \sum v_\ell \big(\Phi_{d_j} (z_tu) \big).
$$
We thus need to compute $v_\ell \big(\Phi_d (z_tu)\big)$ for every $d \ge 1$. The result is:
\begin{lemma}
\label{lem6}
We have
$$
v_\ell \big( \Phi_d (z_tu ) \big) = \left\{
\begin{array}{ll}
m &{\mbox{if }} \,d=t \\
1 & {\mbox{if }} \,d = t\cdot \ell^\alpha, \ \alpha \geq 1 \ \mbox{ or }
\ \alpha = -1 \ (\mbox{if } t=2=\ell)\\
0 & {\mbox{otherwise.}}
\end{array}
\right.
\end{lemma}
\begin{proof}
(We restrict ourselves to the case $\ell \neq 2$.
The case $\ell=2$ is analogous but slightly different.)
We have $\Phi_d (z_tu) = \prod (z_tu-z)$ where $z$ runs through the primitive $d$-th roots of unity in $\overline{\mathbf Q}_\ell$. Write $d$ as $d = \delta\!\cdot\!\ell^\alpha$ with $(\delta , \ell) = 1$ and $ \alpha \ge 0$. The images of the $z$'s in the residue field $\overline{\mathbf F}_\ell$ of $\overline{\mathbf Q}_\ell$ are primitive $\delta$-th roots of unity. If $\delta \not= t$, none of them is equal to the image of $z_tu$, which has order $t$. In that case, all the $z_tu -z$ are units in $\overline{\mathbf Q}_\ell$ hence have valuation 0 and we have $v_\ell \big(\Phi_d (z_tu)\big) = 0.$ If $\delta = t$, i.e. $d = t\!\cdot\!\ell^\alpha$ with $\alpha \ge 0$, there are two cases:
(a) $\alpha = 0$, i.e. $d = t$. In that case, one of the $z$'s is equal to $z_t$ and we have $v_\ell (z_tu -z) = v_\ell (u-1) =m$; the other $z$'s contribute $0$.
(b) $\alpha \geq 1$. Here $z$ can be written as $z'\!\cdot\!z''$
where $z'$ runs through the \mbox{$t$-th} primitive roots of $1$,
and $z''$ through the $\ell^\alpha$-th primitive roots of $1$.
The valuation of $z-z_t u$ is $0$ unless $z' = z_t$,
in which case $v_\ell(z-z_t u) = v_\ell(z'' - u)$.
It is well-known that $v_\ell (z''-1) = \frac{1}{(\ell-1)\ell^{\alpha-1}}$.
Since $v_\ell(u-1) = m$, which is strictly larger, we have
$$v_\ell(z''-u) = v_\ell\big ( (z''-1) - (u-1) \big )
= \frac{1}{(\ell-1)\ell^{\alpha-1}}= \frac{1}{\varphi(\ell^\alpha)}.$$
Since the number of the $z''$ is $\varphi(\ell^\alpha)$,
we thus get $v_\ell\big ( \Phi_d(z_{t}u) \big ) =1$, as claimed.
\end{proof}
We can now prove theorem 4:
With the notation of~(\ref{eq521}), denote by $r_1$ the number of $j$'s with $d_j=t$,
and by $r_2$ the number of $j$'s with $d_j = t\!\cdot\!\ell^{\alpha_j}$, $\alpha_j \geq 1$,
or $\alpha_j=-1$ in case $\ell=2,t=2$.
Using lemmas 5 and 6 we get
$$
v_\ell(A) \leq r_1m +r_2
$$
\mbox{ and of course }
$$
r_1 \varphi(t) + \sum \varphi(t\!\cdot\!\ell^{\alpha_j}) \leq n = \dim T.
$$
Since $ \varphi(t\!\cdot\!\ell^{\alpha_j}) \geq \varphi(t)(\ell-1)$ this shows that
$r_1 \varphi(t) + r_2 \varphi(t) (\ell-1) \leq n$.
\noindent Hence $r_1 + r_2 (\ell -1) \le [n/\varphi (t)]$, and we have:
$$
v_{\ell} (A) \le r_1m + r_2\, \le \,r_1m + r_2 (\ell -1)m\, \le\,m [n/\varphi (t)]\, ,$$
which concludes the proof.
$\Box$
\vskip 0.2cm
\noindent {\sl Remark.}
Since $(\ell -1)m >0$ in all cases (even if $\ell = 2)$, the above proof shows that $v_\ell (A)$ can be equal to $m[n/\varphi (t)]$ only when $r_2 = 0$. In other words:
\vskip 0.2cm
\noindent{\bf Complement to theorem 4.} {\sl Assume} $v_\ell(A) = m[n/\varphi (t)]$, {\sl where}\linebreak
$n = \dim T$. {\sl If} $g \in \Gamma_k$ {\sl is such that} $\chi (g) = z_{t}u$, {\sl with} $v_\ell (u-1) = m$ {\sl as above, the characteristic polynomial of} $\rho (g)$ {\sl is divisible by} $(\Phi_t)^N$, {\sl with} $N = [n/\varphi (t)]$.
\noindent(In other words, the primitive $t$-th roots of unity are eigenvalues of $\rho (g)$ with multiplicity $N$.)
\vskip 0.2cm
When $t = 1$ or 2 (i.e. when $\varphi (t) = 1$), this can be used to determine the structure of an ``optimal'' $T$:
\begin{corollary} Assume $t = 1$ or $2$, and $v_\ell (A) = mn.$ Then{\rm { :}}
{\rm (i)} \,\,If $t = 1$, the torus $T$ is split {\rm (i.e. isomorphic to the
product of $n$ copies of }$\mathbf G_m${\rm )}.
{\rm (ii)} If $t = 2$, $T$ is isomorphic to the product of $n$ non-split tori of dimension $1$ which are split by the quadratic extension $k(z_\ell )/k$ if $\ell \not= 2$ and by $k(i)/k$ if $\ell = 2$.
\end{corollary}
\begin{proof}
We give the proof for $t = 2$ and $\ell > 2$: the case $t =1$ is easier and the case $t = 2 = \ell$ requires similar, but more detailed, arguments.
Let $\gamma \in \Gamma_k$. We may write $\chi (\gamma )$ as $e_\gamma\!\cdot\!u_\gamma$, with $e_\gamma \in \{1,-1\}$ and
\mbox{$u_\gamma \in \{1 + \ell^m\mathbf Z_\ell\}$}. There are three cases:
\begin{enumerate}
\item[(a)] $e_\gamma = -1$ and $v_\ell (u_\gamma -1) = m$
\item[(b)] $e_\gamma = -1$ and $v_\ell (u_\gamma - 1) > m$
\item[(c)] $e_\gamma = 1.$
\end{enumerate}
In case (a), the ``complement" above shows that $\rho (\gamma )$ has $-1$ for eigenvalue with multiplicity $n$, hence $\rho (\gamma ) = -1$ in $\operatorname{Aut} (T) \simeq \mathbf{GL}_n (\mathbf Z)$.
In case (b), choose $g \in \Gamma_k$ of type (a); this is possible by the very definition of $t$ and $m$. The element $g^2\gamma$ is of type (a) (this uses the fact that $\ell$ is odd); hence we have $\rho (g^2\gamma) = -1$ and since $\rho (g) = -1$ this shows that $\rho (\gamma ) = -1$.
If $\gamma$ is of type (c), then $g\gamma$ is of type (a) or (b) and we have $\rho (g\gamma) = -1$ hence $\rho(\gamma) = 1.$
In all cases, we have $\rho (\gamma ) \in \{1,-1\}$, and more precisely $\rho (\gamma ) = e_\gamma$.
The corollary follows.\end{proof}
It would be interesting to have a similar classification for $t > 2$.
\subsection{Proof of theorem 4$^\prime$: construction of tori with large $A$'s}
To prove th.4$^\prime$ it is enough to construct a $k$-torus $T$, of dimension $n = \varphi (t)$, such that $T(k)$ contains a cyclic subgroup of order $\ell^m$. Here is the construction:
Let $K$ be the field $k(z_\ell)$ if $\ell \neq 2$ and the field $k(i)$ if $\ell =2$.
It is a cyclic extension of $k$ of degree $t$ with Galois group $C_t$.
Let $T_1 = R_{K/k} \mathbf G_m$ be the torus: ``multiplicative group of $K$";
we have $T_1(k) = K^*$, and $T_1(k)$ contains the group $\langle z_{_{\ell^m}}\rangle$, cf.
\S 4.
If $\sigma$ is a generator of $C_t$, $\sigma$ acts on $T_1$,
and we have $\sigma^t -1 = 0$ in the ring $\operatorname{End}(T_1)$.
Let us write the polynomial $X^t-1$ as $\Phi_t(X)\!\cdot\!\Psi(X)$,
where $\Phi_t$ is the $t$-th cyclotomic polynomial.
We have $\Phi_t(\sigma) \Psi(\sigma)= 0$ in $\operatorname{End}(T_1)$.
Let $T = \operatorname{Im} \Psi(\sigma)$ be the image of
$$\Psi(\sigma):T_1 \rightarrow T_1.$$
\noindent One checks that
(a) $\dim T = \varphi(t)$
(b) $T(k)$ contains $z_{_{\ell^m}}$.
\noindent
(For $\ell \neq 2$, (b) follows from the fact that the restriction of $\Psi(\sigma)$
to $\langle z_{_{\ell^m}}\rangle$ is an automorphism.
For $\ell=2$, use the fact that $T$ is the kernel of $\Phi_t(\sigma)$.)
Hence $T$ has the required properties.
$\Box$
\vskip 0.5cm
{\sl Alternate description of} $T$. It is enough to describe its character group $T^* = \operatorname{Hom}_{_{k_s}}(T,\mathbf G_m)$, together with the action of $\Gamma_k$ on $T^*$:\smallbreak
- $T^* = \mathbf Z [X]/\Phi_t(X)$ = algebraic integers of the cyclotomic field $\mathbf Q (\mu_t)$\vskip.1cm
- $\Gamma_k$ acts on $T^*$ by $\Gamma_k \rightarrow \operatorname{Im} \chi_{_{\ell^\infty}} \rightarrow C_t \,\,\tilde{\rightarrow}\,\, \operatorname{Aut} \big(\mathbf Q (\mu_t)\big).$\vskip.1cm
\noindent(It does not matter which isomorphism of $C_t$ onto $\operatorname{Aut} \big(\mathbf Q (\mu_t)\big)$ one chooses; they all give isomorphic tori.)
\subsection{The S-bound for reductive groups}
Recall, cf. \S 3.3, that ``reductive" $\Rightarrow$ ``connected".
\begin{mytheorem5}
{\sl Let} $G$ {\sl be a reductive group over} $k$, {\sl of rank} $r$, {\sl with Weyl group} $W$. {\sl If} $A$ {\sl is a finite subgroup of} $G(k)$, {\sl one has}
$$
v_\ell (A) \le m \left[ \frac{r}{\varphi (t)}\right] + v_\ell (W).
$$
\end{mytheorem5}
\begin{proof}
As usual, we may assume that $A$ is an $\ell$-group. In that case it is nilpotent, and by the corollary to th.3$^{\prime\prime}$ of \S 3.3 there exists a maximal $k$-torus $T$ of $G$ whose normalizer $N = N_G(T)$ contains $A$. Put $W_T = N/T$; this is a finite $k$-group such that $W_T(k_s)\simeq W$. If $A_T$ denotes the intersection of $A$ with $T(k)$, we have an exact sequence
$$
1 \rightarrow A_T \rightarrow A\rightarrow W_T(k)\, .
$$
Hence $v_\ell (A) \le v_\ell (A_T) + v_\ell \big(W_T(k)\big).$ By th.4, we have $v_\ell (A_T) \le m\cdot [r/\varphi (t)]$; on the other hand $W_T(k)$ is isomorphic to a subgroup of $W$, hence $v_\ell \big(W_T(k)\big)
\break \le v_\ell (W)$. The theorem follows. \end{proof}
\begin{corollary}
If $r<\varphi(t)$, then $G(k)$ is $\ell$-torsion free {\rm (i.e. does not contain any elements of order} $\ell${\rm )}.
\end{corollary}
\begin{proof}
We have $\left[{\frac{r}{\varphi(t)}}\right]=0$. Hence by th.5 it is enough
to show that \linebreak
$v_{\ell}(W)=0$, but this follows from th.1 of \S 1.1 since $W$
is isomorphic to a subgroup of ${\bf GL}_r({\bf Z})$ and
$r<\varphi(t)\leq t\leq\ell-1$.
\end{proof}
\noindent {\sl Remark.} The ``S-bound" given by th.5 looks {\sl a priori} rather coarse:
(a) The torus $T$ is not an arbitrary torus of dimension $r$; the fact that it is a subtorus of $G$ puts non-trivial conditions on it; for instance the action of $\Gamma_k$ on $T^* = \operatorname{Hom}_{k_s} (T, \mathbf G_m)$ stabilizes the set of roots.
(b) The group $W_T(k)$ is in general smaller than $W$ itself, and the image of $N(k) \rightarrow W_T(k)$ may be even smaller.
It is therefore surprising how often the S-bound is close to being optimal. As an example, take $k = \mathbf Q$ and $G$ of type $E_8$. We have $m = 1$ and $t = \ell -1$ (except when $\ell = 2$ in which case $m = t = 2$), $r = 8$, $|W| = 2^{14}3^{5}5^{2}7$. The S-bound tells us that, if $A$ is a finite subgroup of $G(\mathbf Q )$, its order divides the number
$$
M_{S} = 2^{30}\!\cdot\!3^{13}\!\cdot\!5^6\!\cdot\!7^5\!\cdot\!13^2\cdot\!17
\!\cdot\!19\!\cdot 31\, .$$
We shall see later (cf. \S6.3.2 and \S7) that the best bound is $M = M_{S}/5\!
\cdot\!7\!\cdot\!17\,.$ In particular, the $\ell$-factors of $M_{S}$ are optimal for all $\ell$'s except $\ell = 5, 7$ and $17$.
\vskip 1cm
\begin{center}
{\bf {\S 6. The M-bound}}
\end{center}
\vskip 0.5cm
\setcounter{section}{6}
\setcounter{subsection}{0}
\subsection{Notation} From now on, $G$ is a semisimple \footnote{We could also
accept inner forms of reductive groups, for instance
${\bf GL}_n$ or more generally ${\bf GL}_D$, where $D$ is a central
simple $k$-algebra with $[D:k]=n^2$. In that case,
one has $r=n$, the $d_i$'s are the integers $1,2,\dots,n$ and th.6 is valid, with the
same proof.} group over $k$. We denote by $R$ its root system (over $k_s$), by $W$ its Weyl group, and by $r$ its rank. The group $W$ has a natural linear representation of degree $r$. The invariants of $W$ acting on ${\mathfrak m}athbf Q[x_1,\dots ,x_r]$ make up a graded polynomial algebra of the form ${\mathfrak m}athbf Q [P_1, \dots ,P_r]$, where the $P_i$ are homogeneous of degrees $d_i$, with \linebreak$d_1 \le d_2 \le \dots \le d_r$, (Shephard-Todd theorem, cf. e.g. [LIE V], \S5, th.4 or [Se 00], p.95). The $d_i$'s are called the {\sl invariant degrees} of $W$ (or of $G$). One has
$$
\prod d_i = |W| \quad {{\mathfrak m}box{and}} \quad \sum (2d_i-1) = \dim G\, .
$$
When $G$ is quasi-simple (i.e. when $R$ is irreducible) $d_r$ is equal to the Coxeter number $h = (\dim G)/r-1$, and one has the symmetry formula
$$
d_i + d_{r+1-i} = h +2\, .
$$
Moreover, if $j < h$ is prime to $h$, then $j+1$ is one of the $d_i$'s. These properties make $d_1,\dots , d_r$ very easy to compute (see e.g. the tables of [LIE VI]).
For instance, for $G$ of type $E_8$, the $d_i$'s are: $2$, $8$, $12$, $14$, $18$, $20$, $24$, $30$.
Let Dyn$(R)$ be the Dynkin diagram of $R$. There is a natural action of the Galois group $\Gamma_k$ on Dyn$(R)$: this follows from the fact that Dyn$(R)$ can be defined intrinsically from $G_{/k_s} $ (cf. [LIE VIII], \S 4, no 4, Scholie, or [SGA 3], expos\'{e} XXIV, \S3, p.344). In what follows (with the only exception of \S 6.7) we make the assumption that {\sl the action of } $\Gamma_k$ {\sl on} Dyn$(R)$ {\sl is trivial}: one then says that $G$ is {\sl of inner type} (it can be obtained from a split group $G_0$ by a Galois twist coming from the adjoint group of $G_0$).
\vskip 0.3cm
\noindent {\sl Examples of groups of inner type{\rm{ :}}}
\noindent - ${\bf SL}_n$, or more generally, ${\bf SL}_D$, where $D$ is a central simple algebra over \nolinebreak$k$.
\noindent - \!Any group $G$ whose root system has no non-trivial automorphism, e.g. any group of type $A_1, B_r, C_r, G_2, F_4, E_7, E_8$.
\subsection{Statement of the theorem} We fix $\ell, k$, and the root system $R$ of $G$. Recall that $\operatorname{Im} \chi_{_{\ell^\infty}}$ is a closed subgroup of $\mathbf Z^*_\ell$. Define:
$$
M(\ell , k,R) = \inf_{x\, \in\, \operatorname{Im}\, \chi_{_{\ell^\infty}}} \sum v_\ell (x^{^{d_i}}-1) = \inf_{g \,\in\, \Gamma_k} \sum v_\ell (\chi_{_{\ell^\infty}} (g)^{^{d_i}} - 1)\, .
$$
This is either an integer $\ge 0$ or $\infty$ (it is $\infty$ if and only if the invariants $m,t$ of $k$ defined in \S 4 are such that $m = \infty$ and $t$ divides one of the $d_i$'s, see prop.4 below).
\begin{mytheorem6} Let $A$ be a finite subgroup of $G(k)$. Then $v_\ell (A) \le M(\ell , k,R)$. {\rm (Recall that} $G$ {\rm is semisimple of inner type, cf. \S 6.1.)}\end{mytheorem6} \vskip0.2cm
This is what we call the ``M-bound" for $v_\ell(A)$. It will be proved in \S 6.5 below by a method similar to Minkowski's. We shall see in Lect. III that it is ``optimal" except possibly in the case $\ell = 2$, type (c) of \S 4.2.
For computations, it is useful to write $M(\ell ,k,R)$ explicitly in terms of the invariants $t$ and $m$ of \S 4:
\setcounter{proposition}{3}
\begin{proposition}
\rm{
(1) {\sl If} $\ell \not= 2$ {\sl or if} $\ell = 2, t = 1$ (case (a)), {\sl one has}
$$
M(\ell ,k,R) = \mathop{\sum_i}_{d_i \equiv \,\,0 \,\,( {\rm mod} \,t)} \big(m + v_{\ell}(d_i)\big)
$$
(2) {\sl If} $\ell = 2$ {\sl and} $t = 2$ (cases (b) and (c)), {\sl one has}
$$ M (2,k,R) = r_1 + mr_0 + v_2 (W)\, ,
$$
{\sl where} $r_0$ (resp. $r_1$) {\sl is the number of indices} $i$ {\sl such that} $d_i$ {\sl is even} (resp. $d_i$ {\sl is odd}).
}
\end{proposition}
\begin{proof}
Let us begin with the case $\ell \not= 2$. One shows first that, if $t|d$, one has $v_\ell (x^d - 1) \ge m + v_\ell (d)$ for every $x \in \operatorname{Im}\chi_{_{\ell^\infty}}$. (This is easy, since $x$ can be written as $zu$ with $z^t = 1$ and $v_\ell (u-1) \ge m$, hence $x^d-1 = u^d-1.)$
This already shows that $M(\ell ,k,R) \ge \sum_{t|d_i} \big(m + v_\ell (d_i)\big)$. To prove the opposite inequality, one chooses $x \in \operatorname{Im} \chi_{_{\ell^\infty}}$ of the form $zu$ with $z$ of order $t$ and $v_\ell(u\!-\!1) = m$. One gets (1).
The same argument works if $\ell = 2$ and $t = 1$. If $\ell = 2$ and $t = 2$, one has
\begin{eqnarray*}
v_2 (x^d -1 ) & \ge & m + v_2 (d) \,\,\, {\mbox{ if $d$ is even}}\\
v_2(x^d -1) & \ge &1\,\,\, {\mbox {if $d$ is odd}}\, ,
\end{eqnarray*}
for every $x \in \operatorname{Im}\chi_{_{2^\infty}}$. This gives:
$$
M(2,k,R) \geq \sum_{d_i\,\,{\rm odd}} 1 + \sum_{d_i\,\,{\rm even}} \big(m + v_2 (d_i)\big) = r_1 + mr_0 + v_2 (W)\, .
$$
To get the opposite inequality, observe that $x = -1 + 2^m$ belongs to $\operatorname{Im} \chi_{_{2^\infty}}$ and check that $\sum v_2 (x^{^{d_i}} -1)$ is equal to $r_1 + mr_0 + v_2(W)\, .$
\end{proof}
\begin{corollary}{\rm{
{\sl Let} $a(t)$ {\sl be the number of indices $i$ such that} $d_i \equiv 0$ (mod $t$). {\sl If} $a(t) = 0,$ {\sl then} $G(k)$ {\sl is} $\ell$-{\sl torsion free}.
\vskip0.1cm
Indeed, if $a(t) = 0$, the sum occurring in prop.4 is an empty sum, hence $M(\ell ,k,R) = 0$ and one applies th.6.
$\Box$
}}\end{corollary}
\subsection{Two examples: $A_1$ and $E_8$} We take $k = \mathbf Q$, so that $t = \ell-1$ and $m=1$ if $\ell > 2$ and $t = m = 2$ if $\ell = 2$.
\subsubsection{Type $A_1$} There is only one $d_i$, namely $d_1 = 2$, and prop.4 gives:
$$
M\big(\ell ,\mathbf Q,A_1\big) = \left\{
\begin{array}{lll}
3&{\mbox{if}}&\ell = 2\\
1&{\mbox{if}}& \ell = 3\\
0&{\mbox{if}}& \ell > 3\, .
\end{array}
\right.
$$
In other words, every finite subgroup of $G(\mathbf Q)$ has an order which divides $2^3\!\cdot \!3$. This bound is optimal in the following sense:
(a) The split adjoint group $\mathbf{PGL}_2 (\mathbf Q)$ contains both a subgroup of order 3 and a dihedral subgroup of order $8$ (but no subgroup of order 24).
(b) The simply connected group ${\bf SL}_{\mathbf H} (\mathbf Q)$, where $\mathbf H$ is the standard quaternion division algebra, contains a subgroup of order $24$ which
is isomorphic to ${\bf SL}_2 (\mathbf F_3)$. However the split group ${\bf SL}_2 (\mathbf Q)$
does not contain any subgroup of order 8 (but it does contain cyclic subgroups of order 3 and 4).
\subsubsection{Type $E_8$} If we define $M(\mathbf Q,E_8)$ as $\prod_\ell \ell^{^{M(\ell , \mathbf Q,E_8)}}$, prop.4 gives:
$$
M(\mathbf Q ,E_8) = 2^{30}\!\cdot \!3^{13}\!\cdot \!5^5\!\cdot\! 7^4\!\cdot\! 11^2\!
\cdot\! 13^2\!\cdot \!19\!\cdot\! 31,\,\, {\mbox{see e.g. [Se 79], \S 3.3}}.
$$
By th.6, the order of every finite subgroup of $G(\mathbf Q)$ divides $M(\mathbf Q,E_8)$. As we shall see in the next lecture, this multiplicative bound is optimal.
\subsection{A Chebotarev-style result} We need such a result in order to generalize Minkowski's method of \S 1.
Let $L$ be a normal domain which is finitely generated over $\mathbf Z$ as a ring, and let $k$ be its field of fractions. If $d = \dim (L)$ denotes the Krull dimension of $L$ ([AC VIII], \S 1), one has ({\sl{loc.cit.}}, \S 2):
$$
\begin{array}{rclcrcl}
d&= &1 + {\rm tr.deg} (k/\mathbf Q)&{\mbox{if}}&\operatorname{char}(k)& = & 0\\
d &=&{\rm tr.deg} (k/\mathbf F_p)&{\mbox{if}}&\operatorname{char}(k)&= &p > 0\,.
\end{array}
$$
Let Specmax$(L)$ be the set of the maximal ideals of $L$ (= set of closed points of Spec$(L)$). If $x \in$ Specmax$(L)$, the residue field $\kappa (x) = L/x$ is finite (see e.g. [AC V], p. 68, cor. 1). We put $Nx = |\kappa(x)|$; it is the {\sl norm} of $x$.
When $d = 0$, $L$ is a finite field, and Specmax$(L)$ has only one element. If $d > 0$ (e.g. when $\operatorname{char} (k) = 0$), then Specmax$(L)$ is infinite. More precisely, the Dirichlet series $z(s) = \sum_x 1/(Nx)^s$ converges for Re$(s) >d$, and one has
\begin{equation}
z(s) \sim \log \big(1/(s-d)\big) \quad {\mbox{when}}\quad s \rightarrow d\quad ({\mbox{with}}\,\, s >d)\, .
\end{equation}
See [Se 65], \S 2.7, which only contains a sketch of proof; complete details (for a slightly weaker statement) can be found in [Pi 97], App. B
\footnote{When $\operatorname{char}(k) = 0$\,\, one can give a stronger statement, in the spirit of the Prime Number Theorem:
For every $X \ge 2$, call $\pi_L(X)$ the number of $x \in$ Specmax$(L)$ such that $Nx \le X$. Then:
$$
\pi_L(X) = (1/d) \,X^d\!/\log X + O (X^d\!/\log^2 X)\quad {\mbox{when}}\quad X \rightarrow \infty \, .$$
The general Chebotarev density theorem can also be stated (and proved) in terms of such ``natural" density (standard method: use Weil-Deligne estimates to reduce everything to the known case $d = 1$). }; see \linebreak
also [FW 84], pp.206-207.
\vskip0.2cm
Let now $n$ be an integer $\ge 1$ which is invertible in $L$ (and hence in $k$). Let $\chi_n : \Gamma_k \rightarrow (\mathbf Z/n\mathbf Z)^*$ denote the $n$-th cyclotomic character of $k$, cf. \S 4.0. As in \S 4, we shall be interested in $\operatorname{Im} \chi_n
\subset (\mathbf Z/n\mathbf Z)^*$.
{\begin{mytheorem7}
\label{thm7} Let $c$ be an element of $(\mathbf Z/n\mathbf Z)^*$, and let $X_c$ be the set of all $x \in$ {\rm Specmax}$(L)$ such that $Nx \equiv c$ {\rm (mod $n$)}. Then {\rm{:}}
{\rm a)} If $c\notin \operatorname{Im} \chi_n$, then $X_c = \varnothing\,.$
{\rm b)} If $c\in \operatorname{Im} \chi_n$ and $d > 0$, then $X_c$ is Zariski-dense in {\rm Specmax}$(L)$ {\rm (or in Spec}$(L)$, {\rm this amounts to the same).} In particular, $X_c$ is infinite.\vskip0.2cm
{\rm A more concrete formulation of b) is that, for every non-zero $f \in L$, there exists an $x$ with $f \not\in x$ and $Nx \equiv c$ (mod $n$).}
\end{mytheorem7}}
\noindent {\sl Example.} Take $L = \mathbf Z [1/n]$. Then Specmax$(L)$ is the set of all prime numbers which do not divide $n$, and th.7 translates into Dirichlet's theorem on the existence of primes in arithmetic progressions.
\subsection*{\sl Proof of theorem 7.} The group $C = \operatorname{Im} \chi_n$ is the Galois group of the cyclotomic extension $k(z_n)/k$. Let $L_n$ be the integral closure of $L$ in $k(z_n)$. One checks by standard arguments that the ring extension $L_n/L$ is finite and \'{e}tale. In geometric terms, Spec$(L_n) \rightarrow$ Spec$(L)$ is a finite \'{e}tale covering. The group $C$ acts freely on Spec$(L_n)$, with quotient Spec$(L)$. For every closed point $x$ of Spec$(L)$, the Frobenius element $\sigma_x$ of $x$ is a well-defined conjugacy class of $C$ (hence an element of $C$ since $C$ is commutative). Moreover, if we view $C$ as a subgroup of $(\mathbf Z/n\mathbf Z)^*$, $\sigma_x$ is the image of $Nx$ in $\mathbf Z/n\mathbf Z$. This proves a).
Suppose now that $d > 0$ and that $c$ belongs to $C = \operatorname{Im} \chi_n$. Let $z_c(s)$ be the Dirichlet series $\sum 1/(Nx)^s$, where the sum is over the elements $x$ of $X_c$. The general Chebotarev density theorem ([Se 65], [Pi 97]) gives:
\begin{equation}\label{eq642}
z_c (s) \sim \frac{1}{|C|} \log (1/(s-d))\quad {{\mathfrak m}box{when}}\,\,\, s \rightarrow d\quad {{\mathfrak m}box{with}} \quad s > d\, .
\end{equation}
In particular, we have $z_c(d) = + \infty$. If the Zariski closure $\overline{X}_c$ of $X_c$ were of dimension $< d$, we would have $z_c(d) < \infty$, as one sees by splitting $\overline{X}_c$ into irreducible components, and applying \eqref{eq642}. Hence b).
$\Box$
\subsection{\bf Proof of theorem 6} Let $A \subset G(k)$ be as in th.6. We want to prove that
$$
v_\ell (A) \le M(\ell ,k,R)\, .$$
We do it in three steps:
\subsubsection{\bf The case where $k$ is finite} Put $q = |k|$. It is well-known that
$$
|G(k)| = q^N \prod (q^{^{d_i}} - 1),\quad\quad {{\mathfrak m}box{where}}\quad N = |R|/2 = \sum (d_i - 1).
$$
This shows that $v_{\ell}(A) \le \sum v_\ell (q^{^{d_i}}-1)$. The integer $q$, viewed as an element of ${\mathfrak m}athbf Z^*_\ell$, is a topological generator of $\operatorname{Im} \chi_{_{\ell^\infty}}$. Hence every element $u$ of $\operatorname{Im} \chi_{_{\ell^\infty}}$ is an $\ell$-adic limit of powers of $q$ and this implies that $v_\ell (u^d - 1) {{\mathfrak m}athfrak g}e v_\ell (q^d - 1)$ for every $d {{\mathfrak m}athfrak g}e 1$. Hence the lower bound which defines $M(\ell ,k,R)$ is equal to $\sum v_\ell (q^{^{d_i}}-1)$; this proves th.6 in the
case where $k$ is finite.
\subsubsection{\bf The case where $k$ is finitely generated over its prime subfield} By 6.5.1, we may assume that $k$ is infinite. We need a subring $L$ of $k$, with field of fractions $k$, which has the following properties:
(a) $L$ is normal, finitely generated over ${\mathfrak m}athbf Z$ and contains $1/\ell$.
(b) $G$ comes by base change from a semisimple group scheme $\underline{G}$ over $L$, in the sense of [SGA 3], XIX. 2.7.
(c) $A$ is contained in the group $\underline{G}(L)$ of the $L$-points of $\underline{G}$.
\begin{lemma} There exists such an $L$.
{\rm This is standard, see e.g. [EGA IV], \S 8.1.}
$\Box$
\end{lemma}
Let us now choose $(L,\underline{G})$ with properties (a), (b) and (c). For every \linebreak$x \in $ Specmax$(L)$, the fiber $\underline{G}_x$ of $\underline{G}$ at $x$ is a semisimple group over $\kappa (x)$, of type $R$. Moreover, the Dynkin diagram of $\underline{G}$ is finite \'{e}tale over Spec$(L)$, cf. [SGA 3], XXIV.3.2; since it is ``constant" for the generic fiber (i.e. over $k$) it is constant everywhere; this shows that the $\underline{G}_x$ are of inner type. The inclusion map $i: A \rightarrow \underline{G}(L)$ gives for every $x$ a homomorphism\linebreak
$i_x : A \rightarrow \underline{G}\big(\kappa (x)\big)$. Since $i$ is injective, there is an open dense subset $X_0$ of Specmax$(L)$ such that $i_x$ is injective for all $x \in X_0$. We thus get:
$$
v_\ell (A) \,\, \le\,\, v_\ell \big(\underline{G} (\kappa (x))\big) = \sum v_\ell \big((Nx)^{^{d_i}}-1\big)\quad {{\mathfrak m}box{for all}} \quad x \in X_0,$$
cf. 6.5.1. Let $u$ be any element of $\operatorname{Im} \chi_{_{\ell^\infty}}$. By applying th.7 to the image of $u$ in $({\mathfrak m}athbf Z/\ell^j{\mathfrak m}athbf Z)^*$ with $j = 1,2,\dots ,$ we find a sequence of points $x_j$ of $X_0$ such that $\lim Nx_j = u$ in ${\mathfrak m}athbf Z^*_\ell$. We have:
$$
v_\ell (u^{^{d_i}} -1) = \lim_{j \rightarrow \infty} \sum v_\ell \big((Nx_j)^{^{d_i}}-1\big)\, ,
$$
and applying the formula above to each of the $x_j$'s we obtain
$$
v_\ell(A) \le \sum v_\ell (u^{^{d_i}} - 1)\quad\quad {{\mathfrak m}box{for every}} \quad u \in \operatorname{Im} \chi_{_{\ell^\infty}}\, .
$$
This proves th.6 in the case 6.5.2.
\vskip 0.2cm
[{\small Variant: One reduces the general case to the case where $\dim(L) = 1$ by using Hilbert's irreducibility theorem, as explained in [Se 81], p.2; in the case $\dim(L) = 1$, one can apply the standard Chebotarev theorem instead of the general one.}]
\subsubsection{\bf The general case} The same argument as for lemma 7 shows that $G$ comes by base change from a semisimple group $G'$ over a subfield $k'$ of $k$ which is finitely generated over the prime subfield of $k$ (i.e. ${\mathfrak m}athbf F_p$ or ${\mathfrak m}athbf Q$). Moreover, one may assume (after enlarging $k'$ if necessary) that $A$ is contained in $G'(k')$. The Galois group ${\mathfrak m}athbf Gamma_{k'}$ acts on the Dynkin diagram Dyn$(R)$ of $G'$ (which is the same as the one of $G$). Let $k''$ be the Galois extension of $k'$ corresponding to the kernel of ${\mathfrak m}athbf Gamma_{k'} \rightarrow \operatorname{Aut}$ Dyn$(R)$. Since $G$ is of inner type over $k$, the field $k''$ is contained in $k$. By base change to $k''$, $G'$ gives a semisimple group $G''$ which is of inner type and we may apply 6.5.2 to $(G'',A)$. We get $v_\ell (A) \le M(\ell ,k'',R)$. Since $k''$ is contained in $k$, we have $M(\ell ,k'',R) \le M(\ell ,k,R)$ : the group $\operatorname{Im} \chi_{_{\ell^\infty}}$ can only decrease by field extensions. Hence $v_\ell (A) \le M(\ell ,k,R)$.
$\Box$
\subsubsection{\bf Remark} Surprisingly, the proof above does not really use the hypothesis that $A$ is a subgroup of $G(k)$. It uses only that $A$ {\sl acts freely on} $G$, viewed merely as a $k$-variety (and not as a homogeneous space); this is indeed enough to ensure that $v_\ell (A) \le v_\ell (G(k))$ when $k$ is finite. Here is an example: take $G = {\bf SL}_2$, $\ell = 2$, $k = {\mathfrak m}athbf Q$; the M-bound is 3, which means that any finite 2-subgroup of ${\bf SL}_2 ({\mathfrak m}athbf Q)$ has order $\le 8$. As was said in \S 6.3.1, there is in fact no subgroup of order 8 in ${\bf SL}_2({\mathfrak m}athbf Q)$. But one can make a cyclic group of order 8 act freely on the variety ${\bf SL}_2$: take for instance the group generated by the automorphism:
$$
\begin{pmatrix}a&b\cr c&d\cr\end{pmatrix} {\mathfrak m}apsto \begin{pmatrix}d-c&-c-d\cr
{\frac{(a-b)}{2}}&{\frac{(a+b)}{2}}\cr\end{pmatrix}=\begin{pmatrix} 0&-1\cr
{\frac{1}{2}}&0\cr\end{pmatrix}\begin{pmatrix} a&b\cr c&d\cr\end{pmatrix}\begin{pmatrix} 1&1\cr -1&1\cr\end{pmatrix}.$$
Hence, even in this bad-looking case, the M-bound can claim to be ``optimal''.
\subsection{An analogue of Sylow's theorem}
\begin{mytheorem8} Let $A$ and $A'$ be two finite $\ell$-subgroups of $G(k)$. Assume that $v_\ell (A)$ is equal to the {\rm M-}bound $M(\ell ,k,R)$. Then there exists $y \in G(\bar{k})$ such that $yA'y^{-1} \subset A.$
\end{mytheorem8}
\begin{corollary}
If both $A$ and $A'$ attain the {\rm M-}bound, then they are geometrically conjugate {\rm (i.e. conjugate in $G(\bar{k}))$.} In particular, they are isomorphic.
\end{corollary}
\begin{proof}
We may assume that $k$ is finitely generated over its prime subfield. If it is finite, th.8 is just a special case of Sylow's theorem. Let us assume that $k$ is infinite, and choose $L, \underline{G}$ as in \S 6.5.2 with $A,A' \subset \underline{G}(L)$. Let $Y$ be the subscheme of $\underline{G}$ made up of the points $y$ with $yA'y^{-1} \subset A$. Let $X$ be the set of all $x \in$ Specmax$(L)$ such that $Nx$, viewed as an element of ${\mathfrak m}athbf Z^*_\ell$, is of the form $z_tu$ with $z_t$ of order $t$ and $v_\ell (u\!-\!1) = m$ (note that $m$ is finite, cf. \S 4.3). It follows from th.7, applied to $n = \ell^{m+1}$, that $X$ is Zariski-dense in Spec$(L)$. If $x \in$ Specmax$(L)$, the groups $A$ and $A'$ inject into $\underline{G} (\kappa (x))$ (this is an easy consequence of the hypothesis that $\ell$ is invertible in $L)$. If moreover $x$ belongs to $X$, then the same computation as in \S 5.2 shows that $v_\ell \big(\underline{G}(\kappa (x))\big)$ is equal to the M-bound, hence $A$ is an $\ell$-Sylow of $\underline{G}(\kappa (x))$. By Sylow's theorem, this shows that $A'$ is conjugate in $\underline{G}(\kappa (x))$ to a subgroup of $A$. In particular, the fiber at $x$ of $Y \rightarrow$ Spec$(L)$ is non-empty. Since $X$ is Zariski-dense, this implies that the generic fiber $Y_{/k}$ of $Y \rightarrow$ Spec$(L)$ is non-empty, i.e. that $Y\!(\bar{k})$ is non-empty.
\end{proof}
\noindent{\sl Remark.} One can show that $Y$ is smooth over $L$, and hence that $Y(k_s) \not= \varnothing$ which is slightly more precise than $Y\!(\bar{k}) \not= \varnothing$.
\vskip 0.3cm
\noindent{\small{\sl Exercise.} Show that a family of polynomial equations with coefficients in ${\mathfrak m}athbf Z$ has a solution in ${\mathfrak m}athbf C$ if and only if it has a solution in ${\mathfrak m}athbf Z/p{\mathfrak m}athbf Z$ for infinitely many $p$'s.}
\subsection{Arbitrary semisimple algebraic groups} In the previous sections, we have assumed that $G$ is of inner type, i.e. that the natural homomorphism
$$
\varepsilon : {\mathfrak m}athbf Gamma_k \rightarrow \operatorname{Aut} {\rm Dyn}(R)
$$
is trivial. Let us now look briefly at the general case, where no hypotheses on $\varepsilon$ are made. In order to state the result which replaces th.6 we need to introduce the linear representations $\varepsilon_d$ of ${\mathfrak m}athbf Gamma_k$ defined as follows:
Let $S = {\mathfrak m}athbf Q[P_1,\dots,P_r]$ be the ${\mathfrak m}athbf Q$-algebra of $W$-invariant polynomials, cf. \S 6.1. Let $I = (P_1, \dots , P_r)$ be the augmentation ideal of $S$; put $V = I/I^2$. The vector space $V$ is of dimension $r$, and is graded; the dimension of its $d$-th component $V_d$ is equal to the number of indices $i$ with $d_i = d$. The group $\operatorname{Aut} {\rm Dyn}(R)$ acts on $S$, $V$ and the $V_d$'s; by composing this action with $\varepsilon$, we get for each $d > 0$ a linear representation
$$
\varepsilon_d : {\mathfrak m}athbf Gamma_k \rightarrow \operatorname{Aut} (V_d)\, .
$$
\vskip 0.3cm
\noindent{\bf Theorem 6$^\prime$.} {\sl Let $A$ be a finite subgroup of $G(k)$. Then}:
$$
v_\ell (A) \le \inf_{g\,\in\, {\mathfrak m}athbf Gamma_k} \sum_{d} v_\ell \big(\det (\chi_{_{\ell^\infty}} (g)^d - \varepsilon_d(g))\big)$$
(The determinant is relative to the vector space $V_d \otimes {\mathfrak m}athbf Q_\ell\, .$)
\vskip 0.5cm
\noindent {\sl Proof (sketch)}. The method is the same as the one used for th.6. There are three steps:
(1) Reduction to the case where $k$ is finitely generated over its prime subfield; this is easy.
(2) Reduction to the case where $k$ is finite, via the general Chebotarev density theorem instead of th.7.
(3) The case where $k$ is finite. In that case, if $q = |k|$, and if $\sigma$ is the Frobenius generator of ${\mathfrak m}athbf Gamma_k$, one has (cf. e.g. [St 68] th. 11.16)
$$
v_\ell \big(G(k)\big) = \sum_d v_\ell \big(\det (q^d - \varepsilon_d(\sigma))\big) = \sum_d v_\ell \big(\det (\chi_{_{\ell^\infty}} (\sigma )^d - \varepsilon_d(\sigma ))\big)$$
hence the desired formula:
$$
(\ast )\quad\quad v_\ell (A) \le \sum_d v_\ell \big(\det (\chi_{_{\ell^\infty}}(g)^d - \varepsilon_d(g))\big)
$$
in the special case $g = \sigma$. By applying this to the finite extensions of $k$, one sees that the inequality $(\ast )$ is valid for all $\sigma^n , n = 1,2,\dots ,$ and hence for all $g \,\in \,{\mathfrak m}athbf Gamma_k$, since the $\sigma^n$ are dense in ${\mathfrak m}athbf Gamma_k$.
$\Box$
\vskip 0.2cm
\noindent{\sl Remark.} One may also prove th.6$^\prime$ using $\ell$-adic cohomology, cf. \S 6.8.
\vskip 0.2cm
\noindent{\sl Example.} Take $R$ of type $A_2$, so that $\operatorname{Aut}$ Dyn$(R) = \{1,-1\}$ and $\varepsilon$ may be viewed as a quadratic character of ${\mathfrak m}athbf Gamma_k$. The $V_d$'s are of dimension $1$ for $d = 2,3$ and are 0 otherwise. The action of $\operatorname{Aut}$ Dyn$(R)$ on $V_d$ is trivial for all $d$, except $d = 3$. Hence $\varepsilon_2 = 1$, $\varepsilon_3 = \varepsilon$, and th.6$^\prime$ can be rewritten as:
$$
v_\ell (A) \,\le\, \inf_{g\in{\mathfrak m}athbf Gamma_k} \left\{ v_\ell (\chi_{_{\ell^\infty}} (g)^2 - 1) + v_\ell \big(\chi_{_{\ell^\infty}}(g)^3 - \varepsilon (g)\big)\right\}\, .
$$
A similar result holds for the types $A_r$ ($r > 2$), $D_r$ ($r$ odd) and $E_6$, with 2 (resp. 3) replaced by the even $d_i$'s (resp. the odd $d_i$'s).
\subsection{The cohomological method} Let us consider first the general situation suggested in \S 6.5.4 where a finite group $A$ acts freely on a quasi-projective $k$-variety $X$. As explained in [Il 05], \S 7, one can then give an upper bound for $v_\ell (A)$ in terms of the action of ${\mathfrak m}athbf Gamma_k$ on the \'{e}tale cohomology of $X$. More precisely, let $H^i_c(X)$ denote the $i$-th \'{e}tale cohomology group of $X_{/k_s}$, with proper support and coefficients ${\mathfrak m}athbf Q_\ell$; it is a finite dimensional ${\mathfrak m}athbf Q_\ell$-vector space which is 0 for $i > 2\!\cdot
\!\dim(X)$. There is a natural action of ${\mathfrak m}athbf Gamma_k$ on $H^i_c(X)$, and, for each $g \in {\mathfrak m}athbf Gamma_k$, one can define the ``Lefschetz number" ${\mathfrak m}athbf Lambda_X(g)$ by the usual formula:
$$
{\mathfrak m}athbf Lambda_X(g) = \sum_i (-1)^i {\rm Tr}\big(g |H^i_c(X)\big)\, .
$$
One has ${\mathfrak m}athbf Lambda_X (g) \in {\mathfrak m}athbf Z_\ell$. Moreover:
\vskip 0.5cm
\noindent{\bf Theorem 6$^{\prime\prime}$.} $v_\ell (A) \le \inf_{g\,\in\,{\mathfrak m}athbf Gamma_k} v_\ell \big({\mathfrak m}athbf Lambda_X(g)\big)\, .$
\begin{proof}
See [Il 05], \S 7, especially cor.7.5. The proof follows the same pattern as the other proofs of the present \S: one uses Chebotarev to reduce to the case where $k$ is finite, in which case the result follows from the fact, due to Grothendieck, that, if $\sigma$ is the (geometric) Frobenius generator of ${\mathfrak m}athbf Gamma_k$, then ${\mathfrak m}athbf Lambda_X(\sigma )$ is equal to $|X(k)|$, hence is divisible by $|A|$ since the action of $A$ is free. (As in the proof of th.6$^\prime$, one applies this, not only to $\sigma$ but also to its powers $\sigma^n$, $n > 0$, and one uses the fact that the $\sigma^n$ are dense in ${\mathfrak m}athbf Gamma_k$\,.)
\end{proof}
If one applies th.6$^{\prime\prime}$ to $A \subset G(k)$, with $A$ acting by left translations on $X = G$, one recovers th.6 and th.6$^\prime$, thanks to the known structure of the cohomology of $G$, cf. e.g. [SGA 4$\frac{1}{2}$], p. 230.
\subsection{The Cremona group: open problems} Recall that the {\sl Cremona group} {\bf Cr}$_r(k)$ is the group of $k$-automorphisms of the field $k(X_1,\dots,X_r)$, i.e. the group of birational automorphisms (or ``pseudo-automorphisms", cf. [De 70]) of the projective $r$-space over $k$. For $r = 1$, one has ${\bf Cr}_1(k) = {\mathfrak m}athbf P{\mathfrak m}athbf G{\mathfrak m}athbf L_2(k)$. Let us assume that $r {{\mathfrak m}athfrak g}e 2$. As explained in [De 70], ${\bf Cr}_r$ is not an algebraic group, but looks like a kind of very large semisimple group of rank $r$ (very large indeed: its ``Weyl group" is the infinite group ${\mathfrak m}athbf G{\mathfrak m}athbf L_r({\mathfrak m}athbf Z)$). Not much is known about the finite subgroups of ${\bf Cr}_r(k)$ beyond the classical case $r = 2$ and $k$ algebraically closed. Here is a question suggested by \S 5.1:
- Is it true that ${\bf Cr}_r(k)$ has no $\ell$-torsion if $\varphi (t) > r$?
\noindent In the special case $k = {\mathfrak m}athbf Q$, $r = 2$ or $3$, this amounts to:
- Is it true that the fields ${\mathfrak m}athbf Q(X_1,X_2)$ and ${\mathfrak m}athbf Q(X_1, X_2, X_3)$ have no automorphism of prime order ${{\mathfrak m}athfrak g}e 11$? (Automorphisms of order $2$, $3$, $5$ and $7$ do exist.)
It would be very interesting to attack these questions using cohomology, but I do not see how to do this. It is not even clear how to define cohomological invariants of ${\bf Cr}_r({\mathfrak m}athbf C)$, since there is no natural topology
on that group. Still, one would like to give a meaning to a sentence such as
$$``{\bf Cr}_r({\mathfrak m}athbf C) {\hbox{\rm { is connected for }}} r {{\mathfrak m}athfrak g}e 1 {\hbox{\rm { and simply-connected for }}} r {{\mathfrak m}athfrak g}e 2."$$
{\mathfrak m}arkright{LECTURE III: CONSTRUCTION OF LARGE SUBGROUPS}
\specialsection*{\bf III. Construction of large subgroups}
\setcounter{section}{8}
\numberwithin{equation}{section}
\numberwithin{equation}{subsection}
\setcounter{subsubsection}{0}
\setcounter{footnote}{2}
\vskip 0.5cm
\begin{center}
{\bf {\S 7. Statements}}
\end{center}
\label{sec1}
We keep the notation of Lecture II: $k$, $\ell$, $\chi_{_{\ell^\infty}}$, $t$, $m$, \ldots.
We consider only semisimple groups over $k$ with a root system $R$ which is {\em irreducible.}
The M-bound of \S 6.2 will be denoted by $M(\ell,k,R)$;
it only depends on the pair $(\ell,k)$ via the invariants $t$ and $m$, and on $R$ via the degrees $d_1, \ldots, d_r$ of $W$.
We limit ourselves to the case $m < \infty$; see \S 14 for the case $m=\infty$.
A pair $(G,A)$, where $G$ is of inner type with root system $R$, and
$A \subset G(k)$ is a finite group, will be called {\em optimal} if $v_\ell(A)$ is equal to the M-bound $M(\ell,k,R)$.
(We could assume that $A$ is an $\ell$-group, but this would not be convenient for the constructions which follow.)
Our goal is to prove:
\begin{mytheorem9}
\label{thm3.1}
If $\ell \neq 2$, an optimal pair $(G,A)$ exists {\rm (}for any $k$, $R${\rm )}.
\end{mytheorem9}
\begin{mytheorem10}
\label{thm3.2}
If $\ell = 2$, an optimal pair $(G,A)$ exists if $\operatorname{Im} \chi_{_{2^\infty}}$ is
of type {\rm (a)} or {\rm (b)} in the sense of {\rm \S 4.2}
\emph{
(i.e. if $\operatorname{Im} \chi_{_{2^\infty}}$ can be topologically generated by one element).}
\end{mytheorem10}
\begin{mytheorem11}
\label{thm3.3}
In the case $\ell=2$ and type {\rm (c)}, there exists $(G,A)$ with
$$v_2(A) = r_0 m + v_2(W)$$
where $r_0$ is the number of indices $i$ such that $d_i$ is even.
\end{mytheorem11}
Note that here the M-bound is $M(2,k,R)=r_1+r_0m+v_2(W)$ with $r_1=r-r_0$, cf. \S6.2, prop.4. Hence $v_2(A)$
differs from $M(2,k,R)$ only by $r_1$. In particular, $A$ is optimal if $r_1=0$. Hence:
\begin{corollary}
If all the $d_i$'s are even {\rm (i.e. if }$-1 \in W$), then an optimal pair $(G,A)$ exists for $\ell = 2$ {\rm (and hence for all $\ell$'s, thanks to th.9).
\vskip.1cm
\noindent This applies in particular to the exceptional types $G_2$, $F_4$, $E_7$ and $E_8.$}
\end{corollary}
\noindent {\sl Remarks}. (1) The simplest case where the M-bound is not attained is $k = {\mathfrak m}athbf Q$, $\ell =2$, $R$ of type $A_2$,
where $m=2$, $r_0=1$, $r=2$, the M-bound is 4, and it follows from [Sch 05]
that $v_2(A) \leq 3$ for every finite subgroup $A$ of $G({\mathfrak m}athbf Q)$.
(2) In Theorems 9, 10 and 11, no claim is made on the structure of $G$ except that it is of inner type and that its root system is of type $R$. However, if one looks closely at the proofs given in the next sections, one sees that $G$ can be chosen to have the following properties:
\noindent - it is simply connected;
\noindent - it splits over the cyclotomic field $k(z_\ell )$ if $\ell >2$, and over $k(i)$ if $\ell = 2.$
Simple examples (such as $k={\mathfrak m}athbf Q$, $\ell =3$, $G$ of type $G_2)$ show that it is not always possible to have $G$ split over $k$.
(3) If $G$ is not chosen carefully, the group $G(k)$ may not contain
any large $\ell$-subgroup, even if $k$ contains
all the roots of unity. For instance, when $R$ is of type $A_1$ (resp. of type $E_8$) it is easy (resp. it is possible) to construct a pair $(G,k)$ such that
the only torsion elements of $G(k)$ have order $1$ or $2$ (resp. $G(k)$ is torsion free).
(4) The three theorems above are almost obvious if the characteristic is $p \neq 0$
(especially Theorem 11 since type (c) never occurs!):
one takes a finite field $k_0$ contained in $k$ which has the same invariants $t$ and $m$
(this is easily seen to be possible -- if $k$ is finitely generated over ${\mathfrak m}athbf F_p$, one chooses
the maximal finite subfield of $k$),
and one takes for $G$ the group deduced by base change from a split group $G_0$ over $k_0$
with root system $R$.
If we choose for $A$ the finite group $G_0(k_0)$, it is clear from the way we got the
M-bound that $v_\ell(A) = M(\ell,k_0,R) = M(\ell,k,R)$, so that $(G,A)$ is optimal.
\vskip0.1cm
In what follows, we shall assume that $\operatorname{char}(k) =0$.
Note also that we could replace $k$ by any subfield having the same invariants $t$ and $m$,
for instance the intersection of $k$ with the field of $\ell^\infty$-roots of unity.
We could thus assume that {\em $k$ is a cyclotomic number field}, if needed.\vskip.1cm
The proof of Theorem 9 will be given first for classical groups (\S 9),
by explicit elementary constructions similar to those of Schur.
The more interesting case of exceptional groups (\S 12) will use different methods, based on
Galois twists (\S 10), Tits groups and braid groups (\S 11).
The case of $\ell=2$ will be given in \S 13. The last section (\S 14) is about $m = \infty$.
\vskip 0.5cm
\begin{center}
{\bf {\S 8. Arithmetic methods}} $(k = {\mathfrak m}athbf Q)$
\end{center}
\vskip 0.5cm
These methods are not strong enough to prove the statements of \S 7,
but they give very interesting special cases.
\subsection{\bf Euler characteristics}
Here, the ground field is ${\mathfrak m}athbf Q$.
One starts from a split simply connected group scheme $G$ over ${\mathfrak m}athbf Z$ (this makes sense, cf.~\cite{SGA3}).
One may thus speak of the group ${\mathfrak m}athbf Gamma = G({\mathfrak m}athbf Z)$ of the {\em integral points} of $G$.
It is a discrete subgroup of $G({\mathfrak m}athbf R)$.
Its Euler characteristic $\chi ({\mathfrak m}athbf Gamma)$ (``caract\'eristique d'Euler-Poincar\'e'' in French) is well-defined
(see [Se 71] and [Se 79]); it is a rational number.
Moreover it is proved in [Ha 71] that
\begin{equation}
\label{eq3.1}
\chi({\mathfrak m}athbf Gamma) = c \prod_{i=1}^r \frac{1}{2} \zeta(1-d_i)
= c \prod_{i=1}^r \frac{b_{d_i}}{2d_i},
\end{equation}
where $b_d$ is the $d$-th Bernoulli number,
$\zeta$ is the zeta function and\linebreak
{\mathfrak m}box{$c = |W|/|W_K|$} where
$W_K$ is the Weyl group of a maximal compact subgroup $K$ of $G({\mathfrak m}athbf R)$.
Assume that all $d_i$'s are {\em even} (if not, all the terms in (\ref{eq3.1}) are zero).
Using standard properties of Bernoulli numbers, one can check that
{\em the {\rm M}-bound relative to $\ell$ is
$M=\sum_i v_\ell \big ( \operatorname{den} \big ( \frac{1}{2} \zeta(1-d_i) \big) \big)$}, where ``den'' means denominator. Hence, if $\ell$ does not divide $c$, and does not divide the numerator of any $\frac{1}{2} \zeta(1-d_i)$
(which is the case if $\ell$ is a so-called regular prime),
one sees that {\em the denominator of
$\chi({\mathfrak m}athbf Gamma)$ is divisible by $\ell^M$.}
But a theorem of K.~Brown~\cite{Br74} shows that this is only possible if ${\mathfrak m}athbf Gamma$ contains a
finite subgroup of order $\ell^M$.
Hence we get an optimal pair (provided $(c,\ell)=1$, and $\ell$ is regular, say).
\noindent{\sl Example}. Take $G$ of type $E_8$; here $c=3^3\!\cdot\!5$, and the numerators of the $\frac{1}{2} \zeta(1-d_i)$
do not cancel any denominator.
Hence one obtains that a split $E_8$ contains an optimal $A$ for all $\ell \neq 3,5$, with
the extra information that $A$ can be found inside the group ${\mathfrak m}athbf Gamma = G({\mathfrak m}athbf Z)$ --
but no information on what it looks like!
\subsection{\bf Mass formulae}
In \cite{Gr96}, B. Gross considers ${\mathfrak m}athbf Q$-forms of $G$ such that $G({\mathfrak m}athbf R)$ is {\em compact};
he also requires another condition which guarantees that $G$ has a {\em smooth model over ${\mathfrak m}athbf Z$}.
This condition is fulfilled for types $B$, $D$, $G_2$, $F_4$ and $E_8$.
He then proves a {\em mass formula} \`a la Minkowski (\cite{Gr96}, prop.2.2):
$$
\sum \frac{1}{|A_\sigma|} = \prod_{i=1}^r \frac{1}{2} \zeta(1-d_i)
$$
where the $A_\sigma$ are the ${\mathfrak m}athbf Z$-points of the smooth models of $G$ over ${\mathfrak m}athbf Z$
(taken up to conjugation).
Each $A_\sigma$ is finite.
It is then clear that, if $\ell^N$ is the $\ell$-th part of the denominator of $\prod_{i=1}^r \frac{1}{2} \zeta(1-d_i)$,
the $\ell$-Sylow subgroup of one of the $A_\sigma$ has order ${{\mathfrak m}athfrak g}eq \ell^N$.
If $N$ is equal to the Minkowski bound $M$
(which happens if $\ell$ does not divide the numerator of any of the $\frac{1}{2} \zeta(1-d_i)$),
then such a Sylow has order $\ell^M$, and we get an optimal pair.
Note that there is no extra factor ``$c$'' as in (\ref{eq3.1}).
This works very well for $G_2$, $F_4$, $E_8$
(and some classical groups too, cf. \cite{Gr96}):
\noindent $G_2$ -
Here the mass is $\frac{1}{4} \, \zeta(-1)\zeta(-5) = \displaystyle \frac{1}{2^6 \cdot 3^3 \cdot 7}$,
and it is obtained with just one $A_\sigma$, which turns out to be isomorphic to $G_2({\mathfrak m}athbf F_2)$.
\vskip3mm
\noindent $F_4$ -
There are two $A_\sigma$'s and the mass formula is
$$\begin{array}{rcl} \displaystyle
\frac{1}{2^{15} \cdot 3^6 \cdot 5^2 \cdot 7} + \frac{1}{2^{12} \cdot 3^5 \cdot 7^2 \cdot 13}
&= &\frac{1}{16} \, \zeta(-1) \zeta(-5) \zeta(-7) \zeta(-11) \\
&= & \displaystyle \frac{691}{2^{15} \cdot 3^6 \cdot 5^2 \cdot 7^2 \cdot 13}.
\end{array}$$
\vskip2mm
\noindent $E_8$ -
Here the numerator is very large, but the denominator is exactly what is needed
for the M-bound, namely:
$$2^{30} \cdot 3^{13} \cdot 5^5 \cdot 7^4 \cdot 11^2 \cdot 13^2 \cdot 19 \cdot 31.$$
\vskip 0.5cm
\begin{center}
{\bf {\S 9. Proof of theorem 9 for classical groups}}
\end{center}
\vskip 0.5cm
\setcounter{section}{9}
\setcounter{subsection}{0}
Here $\ell \neq 2$.
Recall that $\operatorname{Im} \chi_{_{\ell^\infty}} = C_t \times \{1 + \ell^m {\mathfrak m}athbf Z_\ell\}$,
where $m {{\mathfrak m}athfrak g}eq 1$ and $t$ divides $\ell-1$.
The M-bound is
$$
M = {\mathfrak m}athop{\sum_i}_{d_i \equiv 0 (\operatorname{mod} t)} \big ( m + v_\ell(d_i) \big ).
$$
We denote by $K$ the field $k(z_\ell)$ generated by a root of unity of order $\ell$.
It is a cyclic extension of $k$, of degree $t$, with Galois group $C_t$.
It contains $z_{_{\ell^m}}$ but not $z_{_{\ell^{m+1}}}$, cf. \S 4.1.
\subsection{\bf The groups $A_N$ and $A_N^{\,1}$}
\label{subsec3.1}
If $N$ is an integer ${{\mathfrak m}athfrak g}eq 1$, we denote by $A_N$ the subgroup of $\operatorname{{\mathfrak m}athbf{GL}}_N(K)$
(where $K=k(z_\ell)$ as above) generated by the symmetric group $S_N$ and
the diagonal matrices whose entries are $\ell^m$-th roots of unity
(wreath product of $S_N$ with a cyclic group of order $\ell^m$).
We have
\begin{equation}
v_\ell(A_N)= mN + v_\ell(N!).
\end{equation}
The image of $\det_K : A_N \rightarrow K^*$ is $\{\pm 1\} \times \langle z_{_{\ell^m}}\rangle$.
Hence the kernel $A_N^{\,1}$
is such that
\begin{equation}
\label{eq3.5}
v_\ell(A_N^{\,1})= m(N-1) + v_\ell(N!).
\end{equation}
We are going to use $A_N$, and sometimes $A_N^{\,1}$, in order to construct
optimal subgroups for the classical groups $\operatorname{{\mathfrak m}athbf{SL}}_n$, $\operatorname{{\mathfrak m}athbf{SO}}_n$ and $\operatorname{{\mathfrak m}athbf{Sp}}_n$; this is what Schur did in \cite{schur}, \S 6, for the case of ${\mathfrak m}athbf G{\mathfrak m}athbf L_n$.
\subsection{\bf The case of $\operatorname{{\mathfrak m}athbf{SL}}_n$}
The $d_i$'s are $2$, $3$,$\ldots$, $n$.
If we put $N = \left[ \frac{n}{t} \right ]$, we have
\begin{equation}
\label{eq3.8}
\qquad M = mN + v_\ell(N!) \qquad {\mathfrak m}box{ if } t {{\mathfrak m}athfrak g}eq 2, \qquad \qquad \qquad \qquad
\end{equation}
\begin{equation}
\label{eq3.9}
\qquad M = m(N-1) + v_\ell(N!) \qquad {\mathfrak m}box{ if } t = 1, {\mathfrak m}box{ in which case } N=n.
\end{equation}
In the case $t {{\mathfrak m}athfrak g}eq 2$, we take $A_N \subset \operatorname{{\mathfrak m}athbf{GL}}_N(K) \subset \operatorname{{\mathfrak m}athbf{GL}}_{Nt}(k)$, and
observe that $\det_k(A_N)$ is equal to $\pm1$
(indeed, if $g \in A_N$, then
{\mathfrak m}box{$\det_k(g) = N_{K/k}\big (\det_K(g) \big )$}
and one checks that
$ N_{K/k}\big ( z_{_{\ell^m}} \big ) = 1$).
This shows that an $\ell$-Sylow of $A_N$ is contained in $\operatorname{{\mathfrak m}athbf{SL}}_{Nt}(k)$ and
hence in $\operatorname{{\mathfrak m}athbf{SL}}_n(k)$.
By~(\ref{eq3.8}) we get an optimal pair.
In the case $t=1$, we use the same construction with $A_N^{\,1}$ instead of $A_N$.
The comparison of~(\ref{eq3.5}) and~(\ref{eq3.9}) shows that we get an optimal pair.
\subsection{\bf The case of the orthogonal and symplectic groups, $t$ odd}
Let us consider the case of $\operatorname{{\mathfrak m}athbf{Sp}}_{2n}$. The $d_i$'s are equal to $2,4,\dots ,2n$. Hence,
if we put $N = \left[ \frac{n}{t} \right ]$, the M-bound is $mN + v_\ell(N!)$.
There is a natural embedding:
$$\operatorname{{\mathfrak m}athbf{GL}}_N\rightarrow \operatorname{{\mathfrak m}athbf{Sp}}_{2N} \rightarrow \operatorname{{\mathfrak m}athbf{Sp}}_{2n}$$
defined by $x {\mathfrak m}apsto \begin{pmatrix} x & 0 \\ 0 & \,^tx^{-1} \end{pmatrix}$.
The image of $A_N$ by that embedding is optimal.
The same construction works for $\operatorname{{\mathfrak m}athbf{SO}}_{2n}$ and $\operatorname{{\mathfrak m}athbf{SO}}_{2n+1}$.
(Note that, in all these cases, we get the {\em split} forms of the groups of type $B_n$, $C_n$, $D_n$.
This is no longer true
in the case $t$ is even -- nor in the cases of \S 12.)
\subsection{\bf The case of the orthogonal and symplectic groups, $t$ even}
Since $t$ is even, the group $C_t = \operatorname{Gal}(K/k)$ contains an element $\sigma$
of order 2; its image in ${\mathfrak m}athbf Z_\ell^*$ is $-1$.
Let $K_0$ be the subfield of $K$ fixed by $\sigma$;
we have $[K\!:\!K_0] = 2$, $[K_0\!:\!k] = t_0$ with $t_0 = t/2$.
Moreover $\sigma(z_{_{\ell^m}})$ is equal to $ (z_{_{\ell^m}})^{-1}$;
i.e. $\sigma$ acts on $ z_{_{\ell^m}}$ just as complex conjugation does.
Let us define an {\em hermitian form} $h$ on $K^N$
(where $N$ is a given integer ${{\mathfrak m}athfrak g}e 1$) by the standard formula
$$
\quad
h(x,y) = \sum_{i=1}^N x_i\!\cdot\!\sigma(y_i), \qquad
{\mathfrak m}box{ if } x = (x_1, \ldots, x_N),\, y = (y_1, \ldots, y_N).
$$
If $\operatorname{{\mathfrak m}athbf{U}}_N$ denotes the {\em unitary group} associated with $h$, it is clear that
{\em the group $A_N$ defined in {\rm \S 9.1} is contained in $\operatorname{{\mathfrak m}athbf{U}}_N(K)$.} [We use here the traditional notation $\operatorname{{\mathfrak m}athbf{U}}_N (K)$ for the unitary group; this is a bit misleading, since $\operatorname{{\mathfrak m}athbf{U}}_N$ is an algebraic group over $K_0$, and we are taking its $K_0$-points.]
Let $\delta \in K^*$ be such that $\sigma(\delta) = - \delta$, e.g. $\delta = z_{\ell} - z_{\ell}^{-1}$.
We have $K = K_0 \oplus \delta\!\cdot\!K_0$, and $h(x,y)$ can be decomposed as
$$
\quad
h(x,y) = q_0(x,y) + \delta\cdot b_0(x,y), \,\,{{\mathfrak m}box{with}}\,\, q_0(x,y) \in K_0, \quad b_0(x,y) \in K_0.
$$
Then $q_0$ (resp. $b_0$) is a non-degenerate symmetric (resp. alternating)
$K_0$-bilinear form of rank $2N$.
\noindent Its trace $q = \operatorname{Tr}_{K_0/k} q_0(x,y)$ (resp. {\mathfrak m}box{$b = \operatorname{Tr}_{K_0/k} b_0(x,y)$)} is of rank
$2Nt_0 = Nt$ over $k$.
We thus get embeddings:
\begin{equation}
A_N \rightarrow \operatorname{{\mathfrak m}athbf{U}}_N(K) \rightarrow \operatorname{{\mathfrak m}athbf{SO}}_{2N}(K_0) \rightarrow \operatorname{{\mathfrak m}athbf{SO}}_{Nt}(k)
\end{equation}
\begin{equation}
\label{eq:ANtoSp}
A_N \rightarrow \operatorname{{\mathfrak m}athbf{U}}_N(K) \rightarrow \operatorname{{\mathfrak m}athbf{Sp}}_{2N}(K_0) \rightarrow \operatorname{{\mathfrak m}athbf{Sp}}_{Nt}(k).
\end{equation}
Now, for a given $n$, let us define $N$ by
$N = \left[ \frac{2n}{t} \right ]= \left[ \frac{n}{t_0} \right ]$.
By \eqref{eq:ANtoSp}, we get an embedding
$$
A_N \rightarrow \operatorname{{\mathfrak m}athbf{Sp}}_{Nt}(k) \rightarrow \operatorname{{\mathfrak m}athbf{Sp}}_{2n}(k),
$$
and one checks that it is optimal.
The same method gives an embedding of $A_N$ into $\operatorname{\mathbf{SO}}_{Nt}(k)$, hence into $\operatorname{\mathbf{SO}}_{2n+1}(k)$, and this embedding is also optimal.
As for $\operatorname{\mathbf{SO}}_{2n}(k)$, one has to be more careful. The method does give an embedding of $A_N$ into the $\operatorname{\mathbf{SO}}_{2n}$ group relative to some quadratic form $Q$, but we have to ensure that such an $\operatorname{\mathbf{SO}}_{2n}$ group is {\em of inner type}, i.e. that $\operatorname{disc}(Q) = (-1)^n$ in $k^*/{k^*}^2$. There are three cases:
a) If $2n >Nt$ (i.e. if $t$ does not divide $2n$), we choose
$Q = q \oplus q_1$, where $q_1$ has rank $2n-Nt$, and is such that
$\operatorname{disc}(q) \cdot \operatorname{disc}(q_1) = (-1)^n$. We then have
$ A_N \subset \operatorname{\mathbf{SO}}_{2n,Q}(k)$ and this is optimal.
b) If $2n =Nt$ and $N$ is even, we have $\operatorname{disc}(q) = d^N$, where
$d = \operatorname{disc}(K_0/k)$, hence $\operatorname{disc}(q)$= $1$ in $k^*/{k^*}^2$, which is the same as $(-1)^n$
since $n$ is even.
c) If $2n=Nt$ and $N$ is odd, we use an optimal subgroup $A$ of $ \operatorname{{\mathfrak m}athbf{SO}}_{2n-1}(k)$ relative to a quadratic form $q_0$ of rank $2n-1$. By adding to $q_0$ a suitable quadratic form of rank 1, we get a quadratic form of rank $2n$ and discriminant $(-1)^n$, as wanted. The corresponding embedding
$$A \rightarrow \operatorname{\mathbf{SO}}_{2n-1}(k) \rightarrow \operatorname{\mathbf{SO}}_{2n}(k)$$ is optimal. (Note that the $d_i$'s for type $D_n$ are $2,4,\dots, 2n-2$, and $n$.
Hence, if $t\!\not| \, n$, the M-bound for $D_n$ is the same as the M-bound for $B_{n-1}.)$
\vskip 0.5cm
\begin{center}
{\bf {\S 10. Galois twists}}
\end{center}
\vskip 0.5cm
To handle exceptional groups, we have to use {\em twisted} inner forms
instead of split ones.
We shall only need the most elementary case of twisting, namely the one coming from a
homomorphism $\varphi:\Gamma_k \rightarrow \operatorname{Aut}(G)$.
Let us recall what this means (cf. for example \cite{Se64}, chapter~III):
Let $K/k$ be a finite Galois extension.
Let $X$ be an algebraic variety over $k$, assumed to be quasi-projective
(the case where $X$ is affine would be enough). Choose a homomorphism
$$\varphi: \operatorname{Gal}(K/k) \rightarrow \operatorname{Aut}_k X.$$
The {\em twist} $X_\varphi$ of $X$ by $\varphi$ is a variety over $k$ which can be
characterized as follows:
There is a $K$-isomorphism $\theta\!:\!X_{/K}\rightarrow {X_\varphi}_{/K}$ such that $\gamma(\theta) = \theta \circ \varphi(\gamma)$
for every $\gamma \in \operatorname{Gal}(K/k)$.
(Here $X_{/K}$ denotes the $K$-variety deduced from $X$ by the base change
\mbox{$k \rightarrow K$,}
and $\varphi(\gamma) \in \operatorname{Aut}_k X$ is viewed as belonging to $\operatorname{Aut}_{K}X_{/K}$.)
One shows (as a special case of Galois descent) that such a pair $(X_\varphi,\theta)$ exists,
and is unique, up to isomorphism.
It is sometimes convenient to identify the $K$-points of $X$ and $X_\varphi$
via the isomorphism $\theta$.
But one should note that this is not compatible with the natural action of $\operatorname{Gal}(K/k)$
on $X(K)$ and $X_\varphi(K)$; one has
$$
\qquad
\gamma \big( \theta(x) \big) = \varphi(\gamma) \big ( \gamma(x) \big )
\qquad \mbox{ if } \gamma \in \operatorname{Gal}(K/k), x \in X(K).
$$
In other words, if we identify $X_\varphi(K)$ with $X(K)$, an element $\gamma$ of $\operatorname{Gal}(K/k)$
acts on $X_\varphi(K)$ by the {\em twisted action{\rm { :}}}
$$
x \mapsto \varphi(\gamma) \big ( \gamma(x) \big )
$$
In particular, the {\em $k$-rational points of $X_\varphi$} correspond (via $\theta^{-1}$)
to the points $x \in X(K)$ such that
$\gamma(x) = \varphi(\gamma^{-1}) x$ for every $\gamma \in \operatorname{Gal}(K/k)$.
In what follows we apply the $\varphi$-twist to $X=$ split form of $G$,
with $\varphi(\gamma)$ being a $k$-automorphism of $G$ for every $\gamma \in \operatorname{Gal}(K/k)$.
In that case, $G_\varphi$ is a $k$-form of $G$; this form is inner if all $\varphi(\gamma)$
belong to $G^{\operatorname{ad}}(K)$ where $G^{\operatorname{ad}}$ is the adjoint group of $G$.
The effect of the twist is to make $k$-rational some elements of $G$ which were not.
In order to define $\varphi$, we shall have to use the $k$-automorphisms of $G$ provided by
the Tits group $W^*$, see next section.
\vskip 0.5cm
\begin{center}
{\bf {\S 11. A general construction}}
\end{center}
\vskip 0.5cm
\setcounter{section}{11}
\setcounter{subsection}{0}
Here, $G$ is a split simply connected group over $k$,
and $T$ is a maximal split torus of $G$.
We put $N=N_G(T)$ and $W = N/T$ is the Weyl group.
\subsection{\bf The Tits group}
The exact sequence $1 \rightarrow T \rightarrow N \rightarrow W \rightarrow 1$
does not split in general.
However Tits (\cite{Ti66a}, \cite{Ti66b}) has shown how to construct a
subgroup\footnote{The construction of $W^*$ depends on more than $(G,T)$:
one needs a \emph{pinning} (``\'epinglage'') of $(G,T)$ in the sense
of~\cite{SGA3}, XXIII.1.1.
} $W^*$ of $N(k)$ having the following properties:
(1) The map $W^* \rightarrow W$ is surjective.
(2) The group $W^* \cap T$ is equal to the subgroup $T_2$ of $T$
made up of the points $x$ of $T$ with $x^2 = 1$.
We thus have a commutative diagram, where the vertical maps are inclusions:
$$
\begin{array}{ccccccccc}
1 & \rightarrow & T_2 & \rightarrow & W^* & \rightarrow & W & \rightarrow & 1 \\
\downarrow & & \downarrow & & \downarrow \, & & \downarrow & & \\
1 & \rightarrow & T & \rightarrow & N \, & \rightarrow & W & \rightarrow & 1 \\
\end{array}
$$
We refer to Tits (\emph{loc. cit.}) and to Bourbaki\footnote{Bourbaki works in the context of compact
real Lie groups; his results can easily be translated to the algebraic setting we use here.}
([LIE X], pp. 115--116, exerc. 12, 13) for the construction and the properties of $W^*$.
For instance:
If $G$ comes from a split group scheme $\underline{G}$ over $\mathbf Z$,
then $W^*$ is equal to $\underline{N}(\mathbf Z)$,
the group of \emph{integral points} of the group scheme $\underline{N}$.
In the case of $\operatorname{\mathbf{SL}}_n$, this means that one can choose for $W^*$ the group of monomial matrices
with non-zero entries $\pm 1$ and determinant $1$.
For $n=2$, $W^*$ is the cyclic group of order 4 generated by $\begin{pmatrix} 0&1\\-1&0 \end{pmatrix}$.
Note also that $W^*$ is a quotient of the {\emph{braid group}} ${\bf B}_W$ associated to $W$.
(For the definition of the braid group of a Coxeter group, see e.g.~\cite{BM97}.)
\subsection{\bf Special elements of $W$}
We now go back to our general notation $\ell ,m,t,\ldots$ of Lecture II.
Recall that the M-bound $M =M(\ell,k,R)$ is given by
\begin{equation}
\label{11.2.1}
M = \sum_{t | d_i} \left( m + v_\ell(d_i) \right), \qquad \mbox{ cf. \S 6.2}.
\end{equation}
Let $a(t)$ be the number of indices $i$ such that $d_i \equiv 0 \, (\operatorname{mod} t)$.
We may rewrite (\ref{11.2.1}) as
\begin{equation}
M = m a(t) + \sum_{t|d_i} v_\ell(d_i).
\end{equation}
Note that, if no $d_i$ is divisible by $t$, we have $M=0$
and the trivial group $A=1$ is optimal.
Hence \emph{we shall assume in what follows that $a(t) \geq 1$.}
\vskip0.1cm
Let now $w$ be an element of $W$.
We shall say that $w$ is {\emph {special}} (with respect to $t$ and $\ell$)
if it has the following four properties:
\noindent (1) $w$ has order $t$ in $W$.
\noindent (2) $w$ is the image of an element $w^*$ of $W^*$ such that
$(w^*)^t \in T_2 \cap C(G)$, where $C(G)$ is the center of $G$.
\noindent (3) The characteristic polynomial of $w$
(in the natural $r$-dimensional representation of $W$)
is divisible by $(\Phi_t)^{a(t)}$, where $\Phi_t$ is the $t$-th cyclotomic polynomial.
\newline
(Equivalently: if $z_t$ denotes a primitive $t$-th root of unity,
then $z_t$ is an eigenvalue of $w$ of multiplicity at least $a(t)$.)
\noindent (4) Let $C_W(w)$ be the centralizer of $w$ in $W$.
Then:
$$ v_\ell\left (C_W(w) \right) \geq \sum_{t|d_i} v_\ell(d_i).$$
\noindent{\sl Remark}. The reader may wonder whether special elements exist for
a given pair $(t,\ell)$ (with $a(t)>0$ and $\ell\equiv 1\ (\operatorname{mod} t)$, of course). The
answer is ``no'' in general: if $R$ is of type $C_3$ and $t=4$, no element
of $W^*$ has both properties (1) and (2). Fortunately, the answer is ``yes'' for the exceptional types $G_2,\dots,E_8$, cf. \S 12.\medbreak
\noindent {\sl Example{\rm{ :}} the regular case}.
Suppose that $w \in W$ is \emph{regular of order} $t$ in the sense
of Springer\footnote{With a slight difference: Springer requires $t > 1$ and we don't; it is convenient to view $w = 1$ as a regular element of $W$.
Note that, if $t$ is given, there is a very simple criterion ensuring the existence of a regular element of $W$ of order $t$: the number of indices $i$ such that $d_i \equiv 0$ (mod $t$) should be equal to the number of $i$'s such that $d_i \equiv 2$ (mod $t$), cf. Lehrer-Springer \cite{LS99}, cor.5.5.}
(\cite{Sp74}, bottom of p. 170 - see also \cite{BM97}, \S 3). This means that $w$ has an eigenvector $v$, with eigenvalue $z_t$, such that $v$ does not belong to any reflecting hyperplane. {\sl Then} $w$ {\sl is special} (for any $\ell$ with $\ell \equiv 1$ (mod $t$)). Indeed:
(1) is obvious.
(2) follows from the fact, proved in \cite{BM97}, \S 3, that $w$ has a lifting ${\mathbf w}$ in the braid group ${\mathbf B}_W$ with
${\mathbf w}^t = \mbox{\boldmath$\pi$}$, where {\boldmath$\pi$} has an image $\pi$ in $W$ which belongs to $T_2 \cap C(G)$. In Bourbaki's notation ([LIE X], p.116) $\pi$ is the canonical element $z_G$ of the center of $G$.
(3) is proved in \cite{Sp74}, th. 4.2.
(4) is proved in \cite{Sp74}, th. 4.2, in the stronger form $|C_W(w)|= \prod_{t|d_i} d_i$.
\vskip 0.3cm
\noindent{\sl Special cases}
$t = 1$. Here $w = 1$ and $w^* = \pi$ (one could also take $w^* = 1$).
$t =\!2$. Here $w = w_0 =$ longest element of $W$. When $-1$ belongs to $W$, one has $w_0 = -1$ and $w^*_0$ is {\sl central} in $W^*$ (because ${\mathbf w}_0$ is central in ${\mathbf B}_W$, cf. \cite{BM97}, 1.2 and 3.4). In that case the inner automorphism of $G$ defined by $w^*_0$ is a ``Weyl-Chevalley involution'': it acts on $T$ by $t \mapsto t^{-1}$.
\setcounter{lemma}{6}
\subsection{An auxiliary result}
\begin{lemma}
\label{lem7}
Suppose $w \in W$ is special of order $t$.
Then it is possible to choose a lifting $w^*$ of $w$ in $W^*$
which satisfies\vskip0.1cm
{\em (2*)} $(w^*)^t \in T_2 \cap C(G)$
\noindent and
{\em (4*)} $v_\ell \left ( C_{W^*}(w^*) \right ) \geq \sum_{t|d_i} v_\ell(d_i)$.
\end{lemma}
\begin{proof}
Let $P$ be an $\ell$-Sylow of $C_W(w)$;
the groups $P$ and $\langle w \rangle$ commute,
and $P \cap \langle w \rangle=1$ since $w$ has order $t$
and $\ell$ is prime to $t$ (since $\ell\equiv 1 \, \operatorname{mod} t$).
Hence the group $P_w$ generated by $w$ and $P$
is the direct product $P \times \langle w \rangle$.
Since $\ell \neq 2$, its 2-Sylow subgroup is contained in $\langle w \rangle$.
Put $C_2 = T_2 \cap C(G)$.
We have an exact sequence:
$$
\label{eq48}
1 \rightarrow T_2/C_2 \rightarrow W^*/C_2 \rightarrow W \rightarrow 1.
$$
By property (2) of $w$, this exact sequence splits over $\langle w \rangle$,
hence over the 2-Sylow of $P_w$;
since the order of $T_2/C_2$ is a power of 2, this implies that it splits over $P_w$.
We thus get an element $w'$ of $W^*/C_2$, of order $t$, which lifts $w$,
and centralizes a subgroup $P'$ of $W^*/C_2$ isomorphic to $P$.
We then choose for $w^*$ a representative of $w'$ in $W^*$; it has
property (2$^*$), moreover
its centralizer contains the inverse image of $P'$,
which is canonically isomorphic to $C_2 \times P'$.
By property (4) we have
$$v_\ell(P') = v_\ell(P) \geq \sum_{t|d_i} v_\ell(d_i).$$
This shows that $w^*$ has property (4*).
\end{proof}
\noindent{\sl Remark}.
In the case where $w$ is regular, one can do without lemma~\ref{lem7}.
Indeed the braid group construction of~\cite{BM97}
gives a lifting $w^*$ of $w$ having property (2$^*$) and such that the map
$C_{W^*}(w^*) \rightarrow C_W(w)$ is surjective.
\subsection{The main result}
\setcounter{proposition}{4}
\begin{proposition}
Suppose $W$ contains an element $w$ which is special
with respect to $t$ and $\ell$.
Then there exist an inner twist $G_\varphi$ of $G$ {\rm(cf. \S 10)}
and a finite $\ell$-subgroup $A$ of $G_\varphi(k)$ such that the pair
$(G_\varphi, A)$ is optimal in the sense of \S $7$.
{\em(In particular, th.9 is true for $(k,\ell,R)$.)}
\end{proposition}
\begin{proof}
As in \S 9, we put $K = k(z_\ell)$, where $z_\ell$ is a root of unity
of order $\ell$.
Let $C_t = \operatorname{Gal}(K/k)$; it is a cyclic group of order $t$.
Choose $w^* \in W^*$ with the properties of lemma~\ref{lem7} and
let $\sigma$ be the inner automorphism of $G$ defined by $w^*$.
Since $\sigma$ has order $t$, there exists an injective homomorphism:
$$\varphi: C_t \rightarrow G^{\operatorname{ad}}(k) \subset \operatorname{Aut}_k(G)$$
which maps $C_t$ onto the subgroup $\langle \sigma \rangle$ of $\operatorname{Aut}_k(G)$
generated by $\sigma$.
As explained in \S 10, we may then define the \emph{$\varphi$-twist}
$G_\varphi$ of $G$, relatively to the Galois extension $K/k$.
The group $G_\varphi$ is an inner form of $G$;
it has the same root system $R$.
It remains to construct a finite $\ell$-subgroup $A$ of $G_\varphi(k)$
such that $(G_\varphi,A)$ is optimal,
i.e. $v_\ell(A) = m a(t) + \sum_{t|d_i} v_\ell(d_i)$, cf. (11.2.2).
We take for $A$ the semi-direct product $E_m\cdot P$, with
$E_m \subset T_{\varphi}(k)$ and $P \subset N_\varphi(k)$, where $E_m$ and $P$
are defined as follows:\vskip.1cm
(1) $P$ is an $\ell$-Sylow of $C_{W^*}(w^*)$.
By lemma~\ref{lem7} we have $v_\ell(P) \geq \sum_{t|d_i} v_\ell(d_i)$.
\newline
Note that the points of $P$ are fixed by $\sigma$.
Hence these points are rational over $k$ not only in the group $G$
but also in the group $G_\varphi$.
(2) $E_m$ is the subgroup of $T_\varphi(k)$ made up of the elements $x$
such that $x^{\ell^m}=1$.
\noindent It is clear that $P$ normalizes $E_m$, and that $P \cap E_m = 1$.
\begin{lemma}
\label{lem8}
The group $E_m$ contains a product of $a(t)$ copies of the group $\mathbf Z/\ell^m\mathbf Z$.
\end{lemma}
This implies that $v_\ell(E_m) \geq m a(t)$ and hence
$$v_\ell(A) = v_\ell(E_m)+v_\ell(P) \geq m a(t) +\sum_{t|d_i} v_\ell(d_i).$$
We thus get $v_\ell(A)\geq M$ and since $M$ is an upper bound for $v_\ell(A)$
we have $v_\ell(A)=M$.
\end{proof}
\noindent{\sl Proof of lemma 8}.
Consider first the subgroup $T_{\ell^m}$ of $T(k_s)$ made up of the elements $x$
with $x^{\ell^m}=1$.
Since $T$ is $k$-split, and $K = k(z_\ell) = k(z_{\ell^m})$
(cf. \S 4 and \S 9),
the points of $T_{\ell^m}$ are rational over $K$.
If we write $T_{\ell^m}(K)$ additively, it becomes a free $\mathbf Z/\ell^m\mathbf Z$-module of rank $r$
and the action of a generator $s$ of $C_t$ is by $x \mapsto sx$,
where $s$ is identified with an element of order $t$ in $\mathbf Z_\ell^*$
(i.e. $s = $``$z_t$'' with our usual notation for roots of unity).
As for the action of $w^*$ (i.e. of $w$) on $T_{\ell^m}(K)$,
it can be put in diagonal form
since $w$ is of order $t$ and $t$ divides $\ell-1$;
its diagonal elements are $r$ elements $y_1, \ldots, y_r$ of $\mathbf Z/\ell^m\mathbf Z$,
with $y_i^t=1$.
Let $c$ be the largest integer such that $(\Phi_t)^c$ divides the
characteristic polynomial of $w$.
By property (3) of 11.2, we have $c \geq a(t)$
(in fact, $c=a(t)$, by \cite{Sp74}, th.~3.4).
This implies that the family of the $y_i$'s contains $c$ times each primitive
$t$-th root of unity (viewed as element of $(\mathbf Z/\ell^m\mathbf Z)^*$).
In particular, there is a $\mathbf Z/\ell^m\mathbf Z$-submodule $X$ of $T_{\ell^m}(K)$
which is free of rank $c$
and on which $w$ acts by $x \mapsto z_t^{-1} x$.
If we twist $G$, $T$, $T_{\ell^m}$ by $\varphi$,
the new action of $C_t = \operatorname{Gal}(K/k)$ on $X$ is trivial
(cf. end of \S 10).
This means that $X$ is contained in $T_\varphi(k)$, hence in $E_m$,
which proves the lemma.
$\Box$
\vskip 0.3cm
Note the following consequence of proposition 4:
\begin{corollary}
If $W$ contains a $t$-regular element in the sense of \cite{Sp74},
then theorem 9 is true for $k,\ell,R$.
\end{corollary}
In the case $t=1$, no twist is necessary
(one takes $w=1$, $w^*=1$, cf. \S 11.2).
\vskip 0.5cm
\begin{center}
{\bf {\S 12. Proof of theorem~9 for exceptional groups}}
\end{center}
\setcounter{section}{12}
\setcounter{subsection}{0}
In each case we will show that the Weyl group contains an element $w$
which is special with respect to $t$ and $\ell$, so that we may apply
prop.5.
\subsection{The case of $G_2$}
The degrees $d_i$ are $d_1=2, d_2=6$.
Since $t$ divides one of them, $t$ is a divisor of $6$,
hence is regular (\cite{Sp74}, no. 5.4).
We may then apply prop.5.
\qed
Explicit description of $w,w^*$: if $c$ is a Coxeter element of $W$,
$c$ is of order $6$, and every lifting $c^*$ of $c$ in $W^*$ has order 6.
Hence, for any divisor $t$ of 6, we may take $w=c^{6/t}$ and $w^* = (c^*)^{6/t}$.
\subsection{The case of $F_4$}
The $d_i$'s are: 2, 6, 8, 12.
All their divisors are regular (Springer, \emph{loc. cit.}).
One concludes as for $G_2$.
\qed
\subsection{The case of $E_6$}
The $d_i$'s are: 2, 5, 6, 8, 9, 12.
All their divisors are regular, except $t=5$.
In that case, choose any element $w \in W$ of order 5.
Since the kernel of $W^* \rightarrow W$ is a 2-group,
$w$ can be lifted to an element $w^*$ of $W^*$ of order 5.
Conditions (1) and (2) of \S11.2 are obviously satisfied.
The same is true for condition (3), since $a(5)=1$
(only one of the $d_i$'s is divisible by 5),
and $w$ has at least one eigenvalue of order 5.
As for condition (4), it is trivial, since $\ell\equiv 1 \, (\operatorname{mod} 5)$ implies $\ell\geq 11$,
and $\ell$ does not divide any of the $d_i$'s, so that
$\sum_{t|d_i} v_\ell(d_i)$ is 0.
Hence $w$ is special with respect to $(5,\ell)$.
\qed
\subsection{The case of $E_7$}
The $d_i$'s are: 2, 6, 8, 10, 12, 14, 18.
By~\cite{Sp74}, {\it loc.cit.} all their divisors are regular except 4, 5, 8, 10, 12.
If $t=4, 5, 8$ or 12, $t$ already occurs for $E_6$,
with the same values of $a(t)$, namely 2, 1, 1 and 1.
Hence, we have $E_6$-special elements $w_4, w_5, w_8$ and $w_{12}$ in $W(E_6)$.
One then takes their images in $W(E_7)$ by the injective map $W(E_6) \rightarrow W(E_7)$,
and one checks that they are $E_7$-special (here again condition (4) is trivial since
$v_\ell(d_i)=0$ for all the $\ell$'s with $\ell \equiv 1 \, (\operatorname{mod} t)$).
As for $t=10$, one takes $w = -w_5$, which makes sense since $-1\in W$.
The element $-1$ (usually denoted by $w_0$) can be lifted to
a central element $\varepsilon$ of $W^*$ with $\varepsilon^2 \in T_2 \cap C(G)$;
this is a general property of the case $-1\in W$ (which reflects the fact that $-1$ is $2$-regular, see end of \S 11.2).
Hence, if $w_5^*$ is a lifting of $w_5$ of order 5, $\varepsilon w_5^*$ is a lifting of
$w$ of order 10, and this shows that $w$ is special with respect to 10 and $\ell$.
\qed
\subsection{The case of $E_8$}
The $d_i$'s are: 2, 8, 12, 14, 18, 20, 24, 30.
By~\cite{Sp74}, {\it loc.cit.}, all their divisors are regular except 7, 9, 14, 18.
If $t=7$ (resp. 9), one chooses $w_7 \in W$ of order 7 (resp. $w_9 \in W$ of order 9).
Since 7 and 9 are odd, condition (2) of \S 11.2 is satisfied.
The same is true for condition (3) because $a(t)=1$, and for condition (4) because $v_{\ell}(d_i)=0$ for all $i$.
If $t=14$ (resp. 18), one takes $w=-w_7$ (resp. $w=-w_9$),
as we did for $E_7$.
\qed
\vskip 0.5cm
\begin{center}
{\bf {\S 13. Proof of theorems 10 and 11}}
\end{center}
\vskip 0.5cm
\setcounter{section}{13}
\setcounter{subsection}{0}
Here $\ell$ = 2. There are three cases (cf. \S 4.2):
(a) $\operatorname{Im}\chi_{_{2^\infty}} = 1 + 2^m {\mathbf Z}_2 $ with $m \ge 2$.
In that case the M-bound is\linebreak $rm + v_2 (W)$, and th.10 asserts that an optimal pair $(G,A)$ exists for every type $R$.
(b) $\operatorname{Im}\chi_{_{2^\infty}} =\langle-1 + 2^m\rangle$, with $m \ge 2$. The M-bound is $r_0m + r_1 + v_2(W)$, where $r_0$ (resp. $r_1$) is the number of $i$'s such that $d_i$ is odd (resp. even). Here, too, th.10 asserts that an optimal pair exists.
(c) $\operatorname{Im} \chi_{_{2^\infty}} = \langle-1,1+2^m\rangle$, with $m \ge 2$.
The M-bound is the same as in case (b), but th.11 does not claim that it can be met (i.e. that an optimal pair exists); it merely says that there is a pair $(G,A)$ with $v_2(A) = r_0m + v_2(W)$; such a pair is optimal only when $r_1 = 0$, i.e. when $-1$ belongs to the Weyl group.
\subsection{Proof of theorem 10 in case (a).} We take $G$ split and simply connected, and we choose a maximal split torus $T$. We use the notation $(N,W,W^*)$ of \S 11. Let $E$ be the $2$-torsion subgroup of $T(k)$. Since $T$ is isomorphic to the product of $r$ copies of $\mathbf G_m$, $E$ is isomorphic to a product of $r$ copies of $\mathbf Z/2^m\mathbf Z$, cf. \S 4.2. Hence $v_2(E) = rm$. The group $E$ is normalized by the Tits group $W^*$; we define $A$ as $A = E\!\cdot\!W^*$. The exact sequence
$$
1 \rightarrow E \rightarrow A \rightarrow W \rightarrow 1
$$
shows that $v_2(A) = rm + v_2(W)$. Hence $(G,A)$ is optimal.
\subsection{Cases (b) and (c).} As in \S11.4, we start with a split $G$, with a split maximal torus $T$. We define $N, W, W^*$ as usual. After choosing an order on the root system $R$, we may view $W$ as a Coxeter group; let $w_0$ be its longest element. It has order 2, and it is regular in the sense of Springer \cite{Sp74}. As explained in \S 11.2, this implies that there is a lifting $w^*_0$ of $w_0$ in $W^*$ which has the following two properties:
(i) its square belongs to the center of $G$;
(ii) the natural map $C_{W^*}\big(w^*_0\big) \rightarrow C_W(w_0)$ is surjective.
\noindent Let $\sigma$ be the inner automorphism of $G$ defined by $w^*_0$. By (i), we have $\sigma^2 = 1$. Let $K = k(i)$ and let $\varphi$ be the homomorphism of Gal$(K/k)$ into Aut$_k(G)$ whose image is $\{1,\sigma\}$. Let us define $G_\varphi$ as the $\varphi$-{\sl twist} of $G$, in the sense defined in \S 10. Denote by $T_\varphi ,N_\varphi$ and $W^*_\varphi$ the $\varphi$-twists of $T,N$ and $W^*$. We have an exact sequence
$$
1 \rightarrow T_\varphi \rightarrow N_\varphi \rightarrow W_\varphi \rightarrow 1,$$
where $W_\varphi$ is the $\varphi$-twist of $W$. Note that $W_\varphi (k)$ is equal to the centralizer $C_W(w_0)$ of $w_0$ in $W$, and similarly $W^*_\varphi (k)$ is equal to $C_{_{W^*}}(w^*_0)$.
As in \S 13.1, let $E$ be the $2$-torsion subgroup of $T_\varphi (k)$. It is normalized by $C_{_{W^*}}(w^*_0)$. Define $A \subset G_\varphi (k)$ to be the group $A = E\cdot C_{_{W^*}}(w^*_0)$. By (ii), we have an exact sequence:
$$
1 \rightarrow E \rightarrow A \rightarrow C_W(w_0) \rightarrow 1\, ,
$$
which shows that $v_2(A) = v_2(E) + v_2\big(C_W(w_0)\big)$. The fact that $w_0$ is regular of order $2$ implies that
$$|C_W(w_0)| = \prod_{2|d_i} d_i, $$hence $v_2\big(C_W(w_0)\big) = \sum v_2(d_i) = v_2 (W)$. This gives:
\begin{equation}
\label{13.2.1} v_2 (A) = v_2(E) + v_2(W).
\end{equation}
\begin{proposition}We have{\rm{ :}}
$$
\begin{array}{lll}
v_2(E) = r_1 + r_0m & {\mbox{\sl in case}} \,\,{\rm (b)}\\
v_2(E) = r_0m &{\mbox{\sl in case}} \,\, {\rm (c)}.
\end{array}\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad
$$
\end{proposition}
In case (b), this shows that $(G_\varphi , A)$ is optimal, which proves th.10. Similarly, the fact that $v_2(A) = r_0m + v_2(W)$ proves th.11 in case (c).
\subsection{Proof of proposition 6.} We need to describe explicitly the torus $T_\varphi$. To do so, let us first define the following two tori:
$\mathbf G^\sigma_m = 1$-dimensional torus deduced from $\mathbf G_m$ by Galois twist relatively to $K/k$.
Its group of $k$-points is $K^*_1 = {\rm Ker} \, N_{K/k} : K^* \rightarrow k^*$.
$R_{K/k}\mathbf G_m = 2$-dimensional torus deduced from $\mathbf G_m$ by Weil's restriction of scalars relatively to $K/k$. Its group of $k$-points is $K^*$.
\begin{lemma} The torus $T_\varphi$ is isomorphic to the product of $r_1$ copies of \linebreak$R_{K/k}\mathbf G_m$ and $r_0-r_1$ copies of $\mathbf G^\sigma_m$.
\end{lemma}
\noindent {\sl Proof.} The character group $X = $ Hom$(T,\mathbf G_m)$ is free of rank $r$, with basis the fundamental weights $\omega_1, \dots ,\omega_r$. This gives a decomposition of $T$ as
$$T = T_1 \times T_2 \times \dots \times T_r\, ,$$
where each $T_i$ is canonically isomorphic to $\mathbf G_m$. Let $\tau = -w_0$ be the opposition involution of the root system $R$; it permutes $\omega_1, \dots ,\omega_r$ with $r_1$ orbits of order 2, and $r_0-r_1$ orbits of order 1. (This follows from the fact that $-1$ is an eigenvalue of $w_0$ of multiplicity $r_0$.) The involution $\tau$ permutes the tori $T_j$. If an index $j$ is fixed by $\tau$, then $w_0$ acts on $T_j$ by $t \mapsto t^{-1}$ and the twisted torus $(T_j)_\varphi$ is isomorphic to $\mathbf G_m^\sigma$; similarly, if $\tau$ permutes $j$ and $j^\prime$, the torus $(T_j\times T_{j^\prime})_\varphi$ is isomorphic to $R_{K/k}\mathbf G_m$. This proves lemma 9.
$\Box$
\vskip 0.3cm
\noindent{\sl End of the proof of prop.6}. The $2$-torsion subgroup of
${\bf G}_m^\sigma(k) = K^*_1$ is cyclic of order $2^m$; the 2-torsion subgroup of $R_{K/k}\mathbf G_m(k) = K^*$ is cyclic of order $2^{m+1}$ in case (b) and of order $2^m$ in case (c). We get what we wanted, namely:
case (b): $v_2(E) = r_1 (m+1) + (r_0-r_1)m = r_0m+r_1$
case (c): $v_2(E) = r_1m + (r_0-r_1)m = r_0m.$
\noindent This completes the proof of prop.6, and hence of th.10 and th.11.
$\Box$
\subsection{Remarks on the non simply connected case.} The proof above could have been given without assuming that the split group $G$ is simply connected. The main difference is in lemma 9: in the general case, the torus $T_\varphi$ is a product of three factors (instead of two):
$$
T_\varphi = (\mathbf G_m)^\alpha \times (\mathbf G_m^\sigma)^\beta \times (R_{K/k}\mathbf G_m)^\gamma\, ,
$$
where $\alpha , \beta , \gamma$ are integers, with $\beta + \gamma = r_0$ and $\alpha + \gamma = r_1$. This gives the following formulae for $v_2(E):$
case (b) : $v_2(E) = \alpha + \beta m + \gamma (m+1) = r_1 + r_0m$
case (c) : $v_2(E) = \alpha + \beta m + \gamma m = \alpha + r_0m\, .$
\noindent In case (b) one finds the same value for $v_2(A)$, namely the
M-bound. \linebreak In case (c) one finds a result which is intermediate between the M-bound $r_1 + r_0m + v_2(W)$ and the value $r_0m + v_2(W)$ given by th.11.\medskip
\noindent{\sl Examples} (assuming we are in case (c)).
- {\sl Type} $A_r$, $r$ {\sl even}. One finds that $\alpha$ is always 0, so that one does not gain anything by choosing non simply connected groups. Indeed, in that case, it is possible to prove, by a variant of Schur's method, that the value of $v_2(A)$ given by th.11 is best possible.
- {\sl Type} $A_r$, $r$ {\sl odd} $\ge 3$. Here $r_1 = (r-1)/2$. One finds that $\alpha = 0$ if $r \equiv 1$ (mod 4), but that $\alpha$ can be equal to $1$ if $r \equiv 3$ (mod 4). When $r = 3$, we thus get $\alpha = r_1$; this shows that the M-bound is best possible for type $A_3$.
- {\sl Type} $D_r$, $r$ {\sl odd}. Here $r_1 = 1$, and if one chooses $G$ neither simply connected nor adjoint, one has $\alpha = 1$. This means that the orthogonal group $\operatorname{\mathbf{SO}}_{2r}$ has an inner $k$-form which contains an optimal $A$. (Note the case $r = 3$, where $D_3 = A_3$.)
- {\sl Type} $E_6$. Here $r_1 = 2$, and one has $\alpha = 0$ both for the simply connected group and for the adjoint group (indeed, $\alpha$ is 0 for every adjoint group).\linebreak I do not know whether the bound of th.11 is best possible in this case.
\vskip 0.5cm
\begin{center}
{\bf {\S 14. The case $m = \infty$}}
\end{center}
\label{sec14}
\setcounter{section}{14}
\setcounter{subsection}{0}
\subsection{Statements.} We keep the notation $(G,R,W,d_i,\ell,t,m)$ of \S 4 and \S 6; as before, we assume that $G$ is of inner type.
We consider the case $m = \infty$, i.e. the case where {\sl the image of}
$\chi_{_{\ell^\infty}}$ {\sl is finite}; that image is then cyclic of order $t$, cf. \S 4.
Let $a(t)$ be the number of $i$'s such that $d_i \equiv 0$ (mod $t$). If $a(t) = 0$, then $G(k)$ is $\ell$-torsion free, cf. \S 6.2, cor.to prop. 4. In what follows, we shall thus assume that $a(t) \ge 1$. In that case, $G(k)$ may contain infinite $\ell$-subgroups (we say that a group is an $\ell$-{\sl group} if every element of that group has order a power of $\ell$). The following two theorems show that $a(t)$ controls the size of such a subgroup:
\begin{mytheorem12} Let $A$ be an $\ell$-subgroup of $G(k)$. Then $A$ contains a subgroup of finite index isomorphic to the $\ell$-group $(\mathbf Q_\ell/\mathbf Z_\ell)^a$, with $a \le a(t).$
\end{mytheorem12}
(Note that $\mathbf Q_\ell/\mathbf Z_\ell$ is the union of an increasing sequence of cyclic groups of order $\ell, \ell^2$, \dots ; it is the analogue of $\mathbf Z/\ell^m\mathbf Z$ for $m = \infty$.)\vskip.1cm
The bound $a \le a(t)$ of th.12. is optimal. More precisely:
\begin{mytheorem13} There exist a semisimple group $G$ of inner type, with root system $R$, and an $\ell$-subgroup $A$ of $G(k)$, such that $A$ is isomorphic to the product of $a(t)$ copies of $\mathbf Q_\ell/\mathbf Z_\ell$.
\end{mytheorem13}
\subsection{Proof of theorem 12} We need a few lemmas:
\begin{lemma} Any finitely generated $\ell$-subgroup of $G(k)$ is finite.
\end{lemma}
\noindent{\sl Proof.} Let $B$ be a finitely generated $\ell$-subgroup of $G(k)$. We may embed $B$ in $\operatorname{\mathbf{GL}}_n(k)$ for $n$ large enough. By a known result (see \S 1.2) there exists a subgroup $B^\prime$ of $B$, of finite index, which is torsion-free if $\operatorname{char}(k)=0$, and has only $p$-torsion if char$(k)$ = $p$. Since $B^\prime$ is an $\ell$-group, this means that $B^\prime = 1$, hence $B$ is finite.
$\Box$
\begin{lemma}
There exists a maximal $k$-torus of $G$ which is normalized by $A$. {\rm (Recall that $A$ is an $\ell$-subgroup of $G(k).)$}
\end{lemma}
\noindent {\sl Proof.} Let $F$ be the set of all finite subgroups of $A$, ordered by inclusion. Lemma 10 implies that, if $B_1$ and $B_2$ belong to $F$, so does $\langle B_1, B_2\rangle$. Let $X$ be the $k$-variety parametrizing the maximal tori of $G$; it is a homogeneous space of $G$. If $B \in F$, let $X^B$ be the subvariety of $X$ fixed by $B$; a point of $X^B$ corresponds to a maximal torus of $G$ normalized by $B$. By the noetherian property of the scheme $X$, one may choose $B_0 \in F$ such that $X^{^{B_0}}$ is minimal among the $X^B$\ \!'s. If $B \in F$, then $X^{\langle B_0,B\rangle}$ is contained in $X^{B_0}$, hence equal to $X^{B_0}$. This shows that $X^{B_0}$ is contained in all the \linebreak $X^B$\ 's, i.e. that every maximal torus which is normalized by $B_0$ is normalized by all the $B$'s, hence by $A$. By the corollary to th.3$^{\prime\prime}$ of \S 3.3 (applied to the finite $\ell$-group $B_0$) there exists such a torus which is defined over $k$.
$\Box$
\begin{lemma}
Let $u\in \mathbf M_r(\mathbf Z_\ell)$ be an $r \times r$ matrix with coefficients in $\mathbf Z_\ell$, which we view as an endomorphism of $(\mathbf Q_\ell/\mathbf Z_\ell)^r$. Then {\rm Ker}$(u)$ has a subgroup of finite index isomorphic to the product of $r- {\rm rank}(u)$ copies of $\mathbf Q_\ell/\mathbf Z_\ell$.
\end{lemma}
In other words, the ``corank'' of Ker$(u)$ is equal to $r - {\rm rank}(u)$.
\vskip 0.3cm
\noindent{\sl Proof}. Same as that of lemma 4 of \S 5.2: by reduction to the case where $u$ is a diagonal matrix.
$\Box$
\begin{lemma}
Let $z_t$ be a primitive $t$-th root of unity, and let $w$ be an element of $W$. The multiplicity of $z_t$ as an eigenvalue of $w$ is $\le a(t)$.
\end{lemma}
\noindent{\sl Proof.} See \cite{Sp74}, th.3.4(i) where it is deduced from the fact that the polynomial $\det(t-w)$ divides $\prod_i (t^{d_i}-1).$
$\Box$
\begin{lemma}
Let $T$ be a maximal $k$-torus of $G$, and let $T(k)_\ell$ be the $\ell$-torsion subgroup of $T(k)$. We have {\rm corank} $T(k)_\ell \le a(t)$.
\end{lemma}
As above, the ``corank'' of a commutative $\ell$-group is the largest $n$ such that the group contains the product of $n$ copies of $\mathbf Q_\ell/\mathbf Z_\ell$.
\vskip 0.3cm
\noindent{\sl Proof.} As in \S 5.2, let $Y(T) = \operatorname{Hom}_{k_s} (\mathbf G_m,T)$ be the group of cocharacters of $T$. The action of the Galois group $\Gamma_k$ on $Y(T)$ gives a homomorphism
$$
\rho : \Gamma_k \rightarrow \operatorname{Aut} Y(T) \simeq \operatorname{\mathbf{GL}}_r (\mathbf Z)
$$
and the image of $\rho$ is contained in the Weyl group $W$ (this is still another way of saying that $G$ is of inner type). The group $\Gamma_k$ acts on $T(k_s)_\ell \simeq (\mathbf Q_\ell/\mathbf Z_\ell)^r$ by $\rho \otimes \chi$, where $\chi = \chi_{_{\ell^\infty}}$. Let us now choose $g \in \Gamma_k$ such that $\chi(g) = z_t^{-1}$, where $z_t$ is an element of order $t$ of $\mathbf Z^*_\ell$, and let $w = \rho (g)$. The element $g$ acts on $T(k_s)_\ell$ by $wz^{-1}_t$. Let $T_g$ be the kernel of $g-1$ on $T(k_s)_\ell$. By lemma 12, we have corank $(T_g) = r -$ rank$(g-1)$, which is equal to the multiplicity of $z_t$ as an eigenvalue of $w$; using lemma 13, we get corank$(T_g) \le a(t)$, and since $T(k)_\ell$ is contained in $T_g$, we have corank$(T(k)_\ell) \le a(t)$.
$\Box$
\vskip 0.3cm
\noindent{\sl End of the proof of th.12}. By lemma 11, there is a maximal $k$-torus $T$ of $G$ which is normalized by $A$. Let $A^\circ = A \cap T(k)$. Then $A^\circ$ is an abelian subgroup of $A$ of finite index. Since $A^\circ$ is contained in $T(k)_\ell$, lemma 14 shows that $A^\circ$ is isomorphic to the product of a finite group with a product of at most $a(t)$ copies of $\mathbf Q_\ell/\mathbf Z_\ell$.
$\Box$
\subsection{Proof of theorem 13.} We follow the same strategy as for theorem 9, 10 and 11. There are three cases:
\subsubsection{\rm{\bf Classical groups} $(\ell \not= 2)$} We change slightly the definitions of \S 9.1: we define $A_N$ as the subgroup of $\operatorname{\mathbf{GL}}_N(K)$, with $K = k(z_\ell)$, made up of the diagonal matrices of order a power of $\ell$; it is isomorphic to $(\mathbf Q_\ell/\mathbf Z_\ell)^N$.
For any given $n \ge 2$, we put $N = [n/t]$ and we get embeddings
$$
A_N \rightarrow \operatorname{\mathbf{GL}}_N(K) \rightarrow \operatorname{\mathbf{GL}}_{Nt}(k) \rightarrow \operatorname{\mathbf{GL}}_n (k).
$$
If $t>1$, one checks that the $k$-determinant of every element of $A_N$ is $1$; we thus get an embedding $A_N \rightarrow \operatorname{\mathbf{SL}}_n(k)$ which has the required properties since $N = a(t)$ in that case. When $t = 1$, we replace $A_N$ by the subgroup of its elements of $k$-determinant 1, and we also get what we want. This solves the case of type $A_r$. Types $B_r$, $C_r$ and $D_r$ are then treated by the methods of \S 9.3 and \S 9.4.
\subsubsection{{\rm\bf Exceptional groups} $(\ell \not= 2)$} One replaces prop.5 of \S 11.4 by a statement giving the existence of $A \subset G_\varphi(k)$ with $A\simeq(\mathbf Q_\ell/\mathbf Z_\ell)^{a(t)}$. The proof is the same. One then proceeds as in \S 12.
\subsubsection{{\rm\bf The case} $\ell = 2$} Same method as in \S 13.
$\Box$
\markright{ }
\noindent
{\small \noindent J.-P. Serre\\
\noindent Coll\`{e}ge de France\\
\noindent 3, rue d'Ulm\\
\noindent F-75005 PARIS.}
\end{document}
|
\begin{document}
\title{
Parallel machine scheduling with precedence constraints and setup times
}
\author{Bernat Gacias $^{1,2}$, Christian Artigues $^{1,2}$ and Pierre Lopez $^{1,2}$\\
~\\
\begin{footnotesize}$^{1}$~CNRS; LAAS; 7 avenue du Colonel Roche, F-31077 Toulouse, France\end{footnotesize}\\
\begin{footnotesize}$^{2}$~Université de Toulouse; UPS, INSA, INP, ISAE; LAAS; F-31077 Toulouse, France\end{footnotesize} \\
\begin{small}\{bgacias,artigues,lopez\}@laas.fr\end{small}}
\date{}
\maketitle
\begin{abstract}
This paper presents different methods for solving parallel machine scheduling problems with precedence constraints and setup times between the jobs. Limited discrepancy search methods mixed with local search principles, dominance conditions and specific lower bounds are proposed. The proposed methods are evaluated on a set of randomly generated instances and compared with previous results from the literature and those obtained with an efficient commercial solver. We conclude that our propositions are quite competitive and our results even outperform other approaches in most cases.
\end{abstract}
~\\
\begin{footnotesize}\textbf{Keywords}: Parallel machine scheduling, setup times, precedence constraints, limited discrepancy search, local search.\end{footnotesize}
\thispagestyle{empty}
\section{Introduction}
~~~This paper deals with parallel machine scheduling with precedence constraints and setup times between the execution of jobs. We consider the optimization of two different criteria: the minimization of the sum of completion times and the minimization of maximum lateness. These two criteria are of great interest in production scheduling. The sum of completion times is a criterion that maximizes the production flow and minimizes the work-in-process inventories. Due dates of jobs can be associated to the delivery dates of products. Therefore, the minimization of maximum lateness is a goal of due date satisfaction in order to disturb as little as possible the customer who is delivered with the longest delay. These problems are strongly \emph{NP-hard}~\citep{bib-NP}.
The parallel machine scheduling problem has been widely studied~\citep{bib-parallel}, especially because it appears as a relaxation of more complex problems like the hybrid flow shop scheduling problem or the RCPSP (Resource-Constrained Project Scheduling Problem). Several methods have been proposed to solve this problem. In~\cite{bib-GenColonnes}, a column generation strategy is proposed. \cite{bib-LinearProgram} propose a linear program and an efficient heuristic for large-size instances for the resolution of priority constraints and family setup times problem.~\cite{bib-lbCtotal} solve the problem with a tree search method. More recently,~\cite{bib-TreeSearch} compare two different branching schemes and several tree search strategies for the problem with release dates and tails for the makespan minimization case.
However, the literature on parallel machine scheduling with precedence constraints and setup times is quite limited.~\cite{bib-PrecSommeCi} and~\cite{bib-PrecLmax} deal with the problem with precedence constraints for the minimization of the sum of completion times and maximum lateness respectively. The setup times case is considered in~\cite{bib-SetupLmax} and in ~\cite{bib-SetupLmax2} for the minimization of maximum lateness.~\cite{bib-Lmax} deal with the same criterion on a single machine with family-dependent setup times. Finally,~\cite{bib-lbCi} propose a lower bound and a branch-and-bound method for the minimization of the sum of completion times.
Problems that have either precedence constraints or setup times, but not both, can be solved by list scheduling algorithms. It means there exists a total ordering of the jobs (i.e., a list) that, when a given machine assignment rule is applied, reaches the optimal solution~\citep{bib-ListScheduling}. For a regular criterion, this rule is called Earliest Completion Time (ECT). It consists in allocating every job to the machine that allows it to be completed at the earliest. This reasoning unfortunately does not work when precedence constraints and setup times are considered together, as shown in~\cite{bib-contraexemple}. We have then to modify the way to solve the problem and consider both scheduling and resource allocation decisions.
In Section 2, we define formally the parallel machine scheduling problem with setup times and precedence constraints between jobs. In Section 3 we present a branch-and-bound method and its components: tree structure, lower bounds, and dominance rules. Discrepancy-based tree search methods are described in Section 4. In Section 5 we present the hybrid tree-local search methods used to solve large-size instances. Section 6 is dedicated to computational experiments.
\section{Problem definition}\label{ProblemDefinition}
~~~We consider a set $J$ of $n$ jobs to be processed on $m$ parallel machines. The precedence relations between the jobs and the setup times, considered when different jobs are sequenced on the same machine, must be satisfied. The preemption is not allowed, so each job is continually processed during $p_{i}$ time units on the same machine. The machine can process no more than one job at a time. The decision variables of the problem are the start times of every job $i=1..n$, $S_{i}$, and let us define $C_{i}$ as the completion time of job $i$, where $C_{i}=S_{i}+p_{i}$. Let $r_{i}$ and $d_{i}$ be the release date and the due date of job $i$, respectively. Due dates are only considered for job lateness computation. We denote by $E$ the set of precedence constraints between jobs. The relation $(i,j)\in E$, with $i, j \in J$, means that job $i$ is performed before job $j$ ($i \prec j$) such that job $j$ can start only after the end of job $i$ $(S_{j}\geq C_{i})$. Finally, we define $s_{ij}$ as the setup time needed when job $j$ is processed immediately after job $i$ on the same machine. Thus, for two jobs $i$ and $j$ processed successively on the same machine, we have either $S_{j}\geq C_{i}+s_{ij}$ if $i$ precedes $j$, or $S_{i}\geq C_{j}+s_{ji}$ if $j$ precedes $i$. Using the notation of~\cite{bib-NP}, the problems under consideration are denoted: $Pm|prec,s_{ij},r_i|\sum C_{i}$ for the minimization of the sum of completion times and $Pm|prec,s_{ij},r_i|L_{\max}$ for the minimization of the maximum lateness.
\subsection*{Example}
~~~A set of $5$ jobs $(n=5)$ must be executed on $2$ parallel machines $(m=2)$. For every job $i$, we give $p_{i}$, $r_{i}$, $d_{i}$, and $s_{ij}$ (see Table~\ref{tab-donnes}). Besides, for that example we have the precedence constraints: $1\prec 4$ and $2\prec 5$.
\begin {table}[h]
\begin{center}
\subtable[]{
\begin {tabular}{cccc}
\hline
\bf{$n$} & \bf{$p_{i}$} & \bf{$r_{i}$} & \bf{$d_{i}$}\\
\hline
\bf{1} & 4 & 1 & 7\\
\bf{2} & 3 & 0 & 5\\
\bf{3} & 4 & 3 & 8\\
\bf{4} & 3 & 3 & 10\\
\bf{5} & 2 & 1 & 5\\
\hline
\end {tabular}}
\hspace{1.5 cm}
\subtable[]{
\begin {tabular}{cccccc}
\hline
\bf{$s_{ij}$}&\bf{1}&\bf{2}&\bf{3}&\bf{4}&\bf{5}\\
\hline
\bf{1} & 0 & 2 & 3 & 4 & 5\\
\bf{2} & 7 & 0 & 6 & 1 & 3\\
\bf{3} & 2 & 4 & 0 & 7 & 1\\
\bf{4} & 4 & 4 & 8 & 0 & 1\\
\bf{5} & 3 & 4 & 8 & 5 & 0\\
\hline
\end {tabular}}
\caption {Example 1 data}
\label {tab-donnes}
\end {center}
\end {table}
\FloatBarrier
Figure~\ref{fig-exemple} displays a feasible solution for this problem. The set of precedence constraints is satisfied: $S_{5}=13\geq 3=C_{2}$ and $S_{4}=5\geq 5=C_{1}$. We stress that job $4$ must postpone its start time on $M_{2}$ by one time unit because of the precedence constraint. On the other hand, we have to check that, for every job $i$, $r_{i}\leq S_{i}$ and that setup times between two sequenced jobs on the same machine are also respected. For the evaluation of the solution, we observe that for the minimization of the sum of completion times the value of the function is $z=\sum C_{i}=43$ and for the minimization of maximum lateness $z=L_{\max}=L_{5}=10$.
\begin {figure}[h]
\begin {center}
\scalebox{0.4}{
\includegraphics[]{./problema.eps}}
\caption{Feasible schedule}
\label{fig-exemple}
\end {center}
\end {figure}
\FloatBarrier
\section{Branch-and-Bound components for
$Pm|prec,s_{ij},r_i|\sum{C_i}$ and $Pm|prec,s_{ij},r_i|L_{\max}$}\label{sec-branchandbound}
~~~~A tree structure with two levels of decisions (scheduling and resource allocation) is defined in Section~\ref{subsec-structure}. Lower bounds, constraint propagation mechanisms and dominance rules are introduced in Sections~\ref{subsec-evaluation} and~\ref{subsec-dominance}.
\subsection{Tree structure}\label{subsec-structure}
~~~~Precedence constraints and setup times scheduling problems may not be efficiently solved by a list algorithm as conjectured by~\cite{bib-contraexemple}. It means that there possibly does not exist a job allocation rule that reaches an optimal solution where all the possible lists of jobs are enumerated. Let us consider the minimization of the sum of completion times for 4 jobs scheduled on 2 parallel machines. The data of the problem are displayed in Table 2.
\begin {table}[h]
\begin{center}
\subtable[]{
\begin {tabular}{cccc}
\hline
\bf{$n$} & \bf{$p_{i}$} & \bf{$r_{i}$}\\
\hline
\bf{1} & 1 & 0\\
\bf{2} & 1 & 0\\
\bf{3} & 1 & 2\\
\bf{4} & 1 & 2\\
\hline
\end {tabular}}
\hspace{1.5 cm}
\subtable[]{
\begin {tabular}{cccccc}
\hline
\bf{$s_{ij}$}&\bf{1}&\bf{2}&\bf{3}&\bf{4}\\
\hline
\bf{1} & 0 & 10 & 2 & 10\\
\bf{2} & 10 & 0 & 1 & 1\\
\bf{3} & 10 & 10 & 0 & 10\\
\bf{4} & 10 & 10 & 10 & 0\\
\hline
\end {tabular}}
\caption {Example 2 data}
\label {tab-donnes2}
\end {center}
\end {table}
\FloatBarrier
If we consider the problem without precedence constraints, we find two optimal solutions ($\sum C_i=9$) when we allocate the jobs following the Earliest Completion Time rule for the lists $\{1,2,4,3\}$ and $\{2,1,4,3\}$ (see Figure~\ref{fig-contraexemple}a). Now, let us consider the same problem with the precedence constraint $3\prec4$. In that case, there does not exist any allocation rule that reaches an optimal solution for any list of jobs that respects the precedence constraint. The optimal solution ($\sum C_i=11$) is reached when we consider the list $\{1,2,3,4\}$ and job 3 is not allocated on the machine that allows it to finish first (see Figure~\ref{fig-contraexemple}b). Thus, in our problems we have not only to find the best list of jobs but also to specify the best resource allocation.
\begin{figure}
\begin{center}
\subfigure[Optimal schedule without the precedence constraint]{
\scalebox{0.4}{
\includegraphics[]{./ContraExemple.eps}
}
}
\subfigure[Optimal schedule with the precedence constraint]{
\scalebox{0.4}{
\includegraphics[]{./ContraExemple2.eps}
}
}
\caption{Example of job allocation}
\label{fig-contraexemple}
\end{center}
\end {figure}
\FloatBarrier
The optimal solution can be reached by a two decision-level tree search. We define a node as a partial schedule $\sigma(p)$ of $p$ jobs. Every node entails at most $m\times(n-p)$ child nodes. The term $n-p$ corresponds to the choice of the next job to be scheduled (job scheduling problem). Only the jobs with all the previous jobs already executed are candidates to be scheduled. Once the next job to be scheduled is selected we have to consider the $m$ possible machine allocations (machine allocation problem). For practical purposes, we have mixed both levels of decision: one branch is associated with the choice of the next job to schedule and also with the choice of the machine. A solution is reached when the node represents a complete schedule, that means when $p=n$.
\subsection{Node evaluation}\label{subsec-evaluation}
~~~~Node evaluation differs depending on the studied criterion. First, we propose to compute a simple lower bound. For every node (partial schedule), we update the earliest start times of the unscheduled jobs taking account of the branching decisions through precedence constraints and we calculate the minimum completion time (for $\min \sum C_{i}$ criterion) and the minimum lateness (for $\min L_{\max}$ criterion) for every not yet-scheduled job. Then we update the criterion and we compare the lower bound with the best current solution.
We propose to compute an upper bound. The upper bound is computed by a simple list scheduling heuristic selecting, among the not yet-scheduled jobs, the job and machine combination with the shortest start time.
For criterion $\min \sum C_{i}$, we also propose to compute the lower bound presented in~\cite{bib-lbCi} for the parallel machine scheduling problem, with sequence-dependent setup times and release dates ($Pm|s_{ij},r_i|\sum C_{i}$). This problem is a relaxation of the problem with precedence constraints, so the lower bound is still valid for our problem. In this paper, we just present the lower bound for the problem, that is based on job preemption relaxation, and we refer to~\cite{bib-lbCi} for the proof.
Let $S_*$ be the schedule obtained with the SRPT (Shortest Remaining Processing Time) rule for the relaxed problem $1|r_i,(\frac{p_i}{m}+s^*_i),pmtn|\sum{\max(C^*_i-s^*_i,r_i+p_i)}$, where $s_i=\min_{j\neq i} s_{ij}$ and $s^*_i=\frac{s_i}{m}$. Let $C^*_{[i]}(S_{*})$ be the modified completion time of job $i$ with the processing time $p_i+s^*_i$ for each job $i$. Let $a_i=p_i+r_i+s^*_i$ and let ($a_{[1]},a_{[2]},\dots,a_{[n]}$) be the series obtained by sorting ($a_1,a_2,\dots,a_n$) in non-decreasing order. Then $LB=\sum{\max[C^*_{[i]}(S_{*}),a_{[i]}]}-\sum{s^*_i}$ is a lower bound for $Pm|prec,s_{ij},r_i|\sum C_{i}$. The complexity of the lower bound is $O(n\log n)$, the same complexity as SRPT.
For $\min L_{\max}$, the evaluation consists in triggering a satisfiability test based on constraint propagation involving energetic reasoning~\citep{bib-energetique2}. The energy is produced by the resources and it is consumed by the jobs. We apply this feasibility test to verify whether the best solution reached from the current node will be at least as good as the best current solution. We determine the minimum energy consumed by the jobs ($E_{consumed}$) over a time interval $\Delta=[t_{1},t_{2}]$ and we compare it with the available energy ($E_{produced}=m\times(t_{2}-t_{1})$). In our problem we also have to consider the energy consumed by the setup times ($E_{setup}$). If $E_{consumed}+E_{setup}>E_{produced}$ we can prune the node.
For an interval $\Delta$ where there is a set $F$ of $k$ jobs that may consume energy, we can easily show that the minimum quantity of setups which occurs is $\alpha=\max(0,k-m)$. So, we have to take the $\alpha$ shortest setup times of the set ${\{s_{ij}\},i,j\in F}$, into account.
The energy consumed in an interval $\Delta$ is $E_{consumed}=\sum_{i}\max(0,\min(p_{i},t_{2}-t_{1},r'_{i}+p_{i}-t_{1},t_{2}-d'_{i}+p_{i}))+\sum_{l}^{\alpha}s_{[l]}$ where $s_{[l]}$ are the setup times of the set ${\{s_{ij}\},i,j\in F}$, sorted in non-decreasing order, and a time window $[r'_{i},d'_{i}]$ for every not yet-scheduled job $i$ is issued from precedence constraint propagation:
\begin{center}
$r'_{i}=\max \{r_{i},r_j+p_j;~\forall~j\in\Gamma_{i}^{-}\}$ and $d'_{i}=\min \{Z_{best}+d_{i},d'_j-p_j;~\forall~j\in\Gamma_{i}^{+}\}$,
\end{center}
where $\Gamma_{i}^{-}$ and $\Gamma_{i}^{+}$ are respectively the set of previous and successor jobs for job $i$ and $Z_{best}$ is the minimum current value for $L_{\max}$.
In Figure~\ref{fig-energie} we illustrate how to compute the energy consumed by the not yet-scheduled jobs (1 to 5 in the example) for a 3-machine problem. For every job, we determine a time window and the minimum energy consumed (in grey) over the selected interval $\Delta=[t_{1},t_{2}]$. For $E_{setup}$ we have to take the $\alpha$ shortest setup times, in the example $k=4$ (there is no consumption for job 1) and $m=3$, so we have to sum only the shortest setup time between the consuming jobs, in our case we add 2 energy units (value of $s_{35}$).
\begin {figure}[h]
\begin {center}
\scalebox{0.3}{
\includegraphics[]{./energetique.eps}
}
\caption{Minimum energy consumed in a partial schedule}
\label{fig-energie}
\end {center}
\end {figure}
\FloatBarrier
The time interval $\Delta=[t_{1},t_{2}]$ considered to compute the energy consumed is $t_{1}=\min{r'_i},\forall i \in F$ and $t_{2}=d'_j$, where $j$ is the job with the shortest time window $\min{(d'_j-r'_j)},\forall j \in F$. The complexity of the energetic test is $O(n^2)$.
\subsection{Dominance rules}\label{subsec-dominance}
~~~We also propose dominance rules to restrict the search space. They consist in trying to find whether there exists a dominant node allowing us to prune the evaluated node. All proposed rules are based on the dominance properties of the set of active schedules. A schedule $S$ is active if no feasible schedule can be obtained from $S$ by left-shifting a single activity. Let us define the \emph{front} of a partial schedule as the set of the last jobs executed on the machines (the ones with the largest start times).
We first present a global dominance rule based on a max flow computation over a resource-flow model previously used for the resource-constrained project scheduling problem with setup times~(\cite{bib-flotmax2}, Section 2.13). The idea is to verify that there exists a partial schedule $\sigma'(p)$ with the start times \linebreak $S'=\{S'_{1},S'_{2},\dots,S'_{i},\dots,S'_{p}\}$ different from $\sigma(p)$ with start times \linebreak $S=\{S_{1},S_{2},\dots,S_{i},\dots,S_{p}\}$ that allows us to move forward the start time of job $k$ without modifying other start times ($S'_{i}=S_{i},\forall i\neq k$ and $S'_{k}\leq S_{k}-1$). This is a necessary but not a sufficient condition for the dominance. Besides, the schedule $\sigma'(p)$ has to keep the same front as $\sigma(p)$ except for the case where job $k$ does not belong to the front of $\sigma'(p)$ (the dominant partial schedule). For example in Figure~\ref{fig-ExempleFlot}, job 5 ($S_5=18$) may be scheduled after job 4 or between job 2 and job 4 with a shortest start time ($S'_5=17$). In the first case the new schedule $\sigma'(p)$ is not dominant because of setup times but in the second case it is, so the front can be modified only if job $k$ is not part of it in $\sigma'(p)$.
We represent $\sigma'(p)$ by a graph and we turn the dominance rule in a max flow computation. Two vertices are considered for every job, the first one represents the start time $i_{t}$ and the second one the completion time $i_{s}$ of the job. One unit capacity arcs are defined between the vertices $i_{s}$-$j_{t}$ by the partial schedule $\sigma'(p)$ and they represent the transfer of resource units between the jobs. Finally, we need four dummy vertices. Two vertices ($0_{s}$, $0_{t}$), the source node $S$ and the sink node $T$, flow origin and flow destination, respectively. Arcs $S$-$0_{s}$ and $0_{t}$-$T$ have $m$-unit capacity and represent the resource constraint. 1-unit capacity arcs between $S$-$i_{s}$ and $i_{t}$-$T$ ensure the job execution.
\begin {figure}[h]
\begin {center}
\scalebox{0.4}{
\includegraphics[]{./ExempleFlot.eps}}
\caption{Partial schedule of the evaluated node}
\label{fig-ExempleFlot}
\end {center}
\end {figure}
\FloatBarrier
Figure~\ref{fig-flotmax} shows the flow network for the schedule depicted in Figure~\ref{fig-ExempleFlot} (data of Table~\ref{tab-donnes}). For each node we try to find a schedule that allows us to move forward the start time of the last scheduled job by one unit (job 5 in the example, $S'_{5}=17$) and to keep the same start times for the other jobs. We create a direct arc $i_{s}$-$j_{t}$ if $S'_{j}>S'_{i}+p_{i}+s_{ij}$, that means if job $j$ can be executed on the same machine as job $i$. In order to respect the second condition for the dominance, we do not create the arcs between the jobs belonging to the front in the evaluated node (job 4 and job 5). We observe that a max flow of $m+p$ units is necessary to ensure all job executions and to satisfy the resource constraints. In that case, $\sigma'(p)$ is a feasible schedule and we can prune the node.
\begin {figure}[h]
\begin {center}
\scalebox{0.45}{
\includegraphics[]{./Flotmax.eps}
}
\caption{Network to compute the max flow dominance rule}
\label{fig-flotmax}
\end {center}
\end {figure}
\FloatBarrier
We propose a second dominance rule based on the position of the front jobs in the priority list. For a given schedule, the dominance rule searches for a new list of jobs in order to obtain the dominant partial schedule. We modify the list of scheduled jobs taking into account the precedence constraints. We can prune the evaluated node when the dominant partial schedule keeps the same front as the evaluated node (jobs 1, 2, and 3), one of the jobs starts earlier ($S'_{1}<S_{1}$) and for the rest of jobs belonging to the front the start times are not delayed ($S_{2}=S'_{2}$ and $S_{3}=S'_{3}$), as we see in Figure~\ref{fig-dominance}.
\begin{figure}[h]
\begin{center}
\subfigure[Evaluated node]{
\scalebox{0.3}{
\includegraphics[]{./Dominance}
}
}
\subfigure[Dominant partial schedule]{
\scalebox{0.3}{
\includegraphics[]{./Dominance1}
}
}
\caption{Example of dominant partial schedule}
\label{fig-dominance}
\end{center}
\end {figure}
\FloatBarrier
We propose to permute the order of the $m$ front jobs in order to find the dominant schedule. For example, in Figure~\ref{fig-dominance} if the order of scheduled front jobs is $1-2-3$ we test all the possible permutations satisfying precedence constraints. If one of such permutations yields a dominant partial schedule, we can prune the evaluated node. This rule can be computed with time complexity $O(m!)$. As shown in Section~\ref{Evaluation}, despite its exponential worst-case complexity, this dominance rule has interesting properties when used in conjunction with discrepancy-based tree search and remains efficient for a small number of machines. A partial enumeration remains valid if $m$ becomes very large.
Note that similar dominance rules have already been used for the RCPSP (which can be defined as an extension of the parallel machine scheduling problem with precedence constraints, but without setup times) under the name ``cutset dominance rules''~\citep{bib-DominanceRules}. However, in~\cite{bib-DominanceRules}, all the cutsets are kept in memory yielding important memory requirements.
\section{Discrepancy-based tree search methods}
\subsection{Limited discrepancy search}\label{Section-LDS}
~~~~To tackle the combinatorial explosion of the standard branch-and-bound methods for large problem instances, we use a method based on the discrepancies regarding a reference branching heuristic. Such a method is based on the assumed good performance of this reference heuristic, thus making an ordered local search around the solution given by the heuristic. First, it explores the solutions with few discrepancies from the heuristic solution and then it moves away from this solution until it has covered the whole search space. In this context, the principle of \emph{LDS (Limited Discrepancy Search)}~\citep{bib-lds} is to explore first the solutions with discrepancies on top of the tree, since it assumes that the early mistakes, where very few decisions have been taken, are the most important.
Figure~\ref{fig-LDS} shows \emph{LDS} behavior for a binary tree search with the number of discrepancies for every node. Let us consider the left branch as the reference heuristic decision. At iteration 0 we explore the heuristic solution, then at iteration 1 we explore all the solutions that differ at most once from the heuristic solution, and we continue until all the leaves have been explored.
\begin {figure}[h]
\begin{center}
\includegraphics[width=13cm,height=6cm]{./lds.eps}
\caption{\emph{Limited Discrepancy Search} for a binary tree}
\label{fig-LDS}
\end{center}
\end {figure}
LDS can be used as an exact method, for small-size instances, when the maximum number of discrepancies is authorized. We can also use it as an approximate method if we limit the number of authorized discrepancies.
Several methods based on LDS have been proposed to improve its efficiency. \emph{ILDS (Improved LDS)}~\citep{bib-ilds} has been devised to avoid the redundancy (observed in Figure~\ref{fig-LDS}) where the solutions with no discrepancies are also visited at iteration 1. \emph{DDS (Depth-bounded Discrepancy Search)}~\citep{bib-dbds} or \emph{DBDFS (Discrepancy-Bounded Depth First Search)}~\citep{bib-dbdfs} propose to change the order of the search. DDS limits the depth where the discrepancies are considered, in the sense that at the $k^{\rm th}$ iteration we only authorize the discrepancies at the first $k$ levels of the tree. It stresses the principle that the early mistakes are the most important. DBDFS consists in a classical \emph{DFS} where the nodes explored are limited by the discrepancies. Recently, in the \emph{YIELDS} method~\citep{bib-Yields}, learning process notions are integrated. In what follows, we propose several versions of LDS adapted to the considered parallel machine scheduling context.
\FloatBarrier
\subsection{Exploration strategy}
~~~~As a branching heuristic, we use the same heuristic to compute the lower bound presented in Section~\ref{subsec-evaluation}:~\emph{EST (Earliest Start Time)} rule for the selection of the next job to schedule and the resource to execute it. We take criterion EST because it is intuitively compatible with the minimization of setup times which has globally a positive impact for minimization of other regular criteria~\citep{bib-EST}. In case of tie between two jobs, we apply \emph{SPT (Smallest Processing Time)} rule for $\min \sum C_{i}$ and \emph{EDD (Earliest Due Date)} for $\min L_{\max}$.
Because of the existence of two types of decisions, we consider here two types of discrepancies: discrepancy on job selection and discrepancy on resource allocation. In the case of non-binary search trees, we have two different ways to count the discrepancies (see Figure~\ref{fig-modes}). In the first mode (\emph{binary}), we consider that choosing the heuristic decision corresponds to 0 discrepancy, while any other value corresponds to 1 discrepancy. The other mode (\emph{non-binary}) consists in considering that the further we are from the heuristic choice the more discrepancies we have to count. We suggest to evaluate experimentally both modes for the heuristic for job selection. On the other hand, for the choice of the machine, we use the non-binary mode since we assume that the allocation heuristic only makes a few errors. As we will see in Section~\ref{Evaluation}, selecting the machine which allows the earliest completion of the job is a high performance heuristic.
\begin {figure}[h]
\begin{center}
\subfigure[binary]{
\includegraphics[scale=0.25]{./div0.eps}
}
\subfigure[non-binary]{
\includegraphics[scale=0.25]{./div1.eps}
}
\caption{Example of discrepancies counting modes on job selection}
\label{fig-modes}
\end{center}
\end {figure}
\FloatBarrier
We propose to test three different branching schemes. The first one, called DBDFS~\citep{bib-dbdfs}, is a classical depth-first search where the solutions obtained are limited by the allowed discrepancies (see Section~\ref{Section-LDS}). We propose two other strategies, \emph{LDS-top} and \emph{LDS-low}, which consider the number of discrepancies for the order in which the solutions are reached. The node to explore is the node with the smallest number of discrepancies, and with the smallest depth for the strategy called LDS-top, and with the largest depth for the strategy called LDS-low. As Figure~\ref{fig-OrdreRecherche} shows (case of $2$ authorized discrepancies) all three methods explore the same solutions but in different orders.
\begin {figure}[h]
\begin {center}
\scalebox{0.4}{
\includegraphics[]{./OrdreRecherche2div.eps}}
\caption{Order of explored leaves for different branching rules}
\label{fig-OrdreRecherche}
\end {center}
\end {figure}
\FloatBarrier
\subsection{Large neighborhood search based on LDS}
We have presented \emph{LDS} as an exact or a truncated tree search method. In this section, we propose to use it as part of local search. In a local search method, we define a solution neighborhood $N_{k} (x)$ ($k$ defines the acceptable variations of solution $x$). If we find a solution $x'$ better than $x$ in $N_{k}(x)$ then we explore the neighborhood $N_k(x')$ of this new best solution. In the case of large-scale neighborhoods problems, the neighborhood becomes so huge that we can consider the search for the best solution in $N_{k} (x)$ as an optimization sub-problem~\citep{bib-LNS}. In that context, we consider a neighborhood defined by an LDS search tree.
\emph{CDS (Climbing Discrepancy Search)}~\citep{bib-cds} is the first large neighborhood search method based on \emph{LDS} (see Algorithm~\ref{algo:CDS}). At each iteration it carries out a $k$-discrepancy search around the best current solution. If a better solution is found, then \emph{CDS} explores its neighborhood. If no better solution is found, then $k$ is increased by one.
\begin{algorithm}[H]
\SetVline
\Begin{
$k \leftarrow 1$\;
$k_{max}\leftarrow n$\;
$Sol_{ref} \leftarrow InitialHeuristic()$\;
\While {$k\leq k_{max}$}{
\texttt{\footnotesize/* Generate the set of solutions $N$ of $k$ discrepancies from $Sol_{ref}$~*/}\\
$N=LDS(Sol_{ref},k)$\;
$s' \leftarrow BestOf(N)$\;
\eIf{$z(s')<z(Sol_{ref})$}{
$Sol_{ref}\leftarrow s'$\;
$k \leftarrow 1$\;
}{
$k \leftarrow k+1$\;
}
}
}
\caption{\emph{Climbing Discrepancy Search}}
\label{algo:CDS}
\end{algorithm}
\FloatBarrier
The drawback of CDS is that for large-size instances the neighborhood quickly explodes. \citet{bib-cdds} propose \emph{CDDS (Climbing Depth-bounded Discrepancy Search)} that mixes principles of CDS and of DDS. The neighborhood of the best solution is limited not only by the number of discrepancies but also by the depth in the tree. In that case, the neighborhood explosion is avoided and the idea that the most important heuristic mistakes are early ones is stressed.
In this work, we propose two variants of CDS and CDDS for the problems at hand. They are closely related with \emph{VNS (Variable Neighborhood Search)}~\citep{bib-VNS} concept, since we modify the size and the structure of the neighborhood explored.~\emph{HD-CDDS (Hybrid Discrepancy CDDS)} (see Algorithm~\ref{algo:HDCDDS}) consists in a mix of CDS and CDDS. We start with a CDS search, but if for a defined number of discrepancies $k_{limit}$ we cannot find a better solution, then we authorize a bigger number of discrepancies only between some levels ([$d_{min}$,$d_{max}$]). Once we have finished the search for $k_{limit}+1$, we propose either to increase the number of authorized discrepancies and to keep the same number of levels where the discrepancies are authorized ($x=d_{max}-d_{min}$), which is the case in Algorithm~\ref{algo:HDCDDS}, or to increase the number of levels and to keep the number of discrepancies. This method solves the problem of neighborhood explosion and offers more jobs mobility than CDDS (which is particularly interesting for setup times problems) but we need to parametrize the values of the search ($k_{limit}$, $x$).
\begin {algorithm}[H]
\SetVline
\Begin{
$k \leftarrow 1$\;
$d_{min}\leftarrow 0$\;
$d_{max}\leftarrow n$\;
$Sol_{ref} \leftarrow InitialHeuristic()$\;
\While{termination conditions not met}{
\texttt{\footnotesize/* Generate the set of solutions $N$ of $k$ discrepancies from $Sol_{ref}$~*/}\\
$N=GenSol(Sol_{ref},k,d_{min},d_{max})$\;
$s' \leftarrow BestOf(N)$\;
\eIf{$z(s')<z(Sol_{ref})$}{
$Sol_{ref}\leftarrow s'$\;
$k \leftarrow 1$\;
$d_{min}\leftarrow 0$\;
$d_{max}\leftarrow n$\;
}{
\eIf{$k<k_{limit}$}{
$k \leftarrow k+1$\;
}{
\eIf{$d_{max}-d_{min}=n$}{
$d_{min}\leftarrow 0$\;
$d_{max}\leftarrow x$\;
}{
$d_{min}\leftarrow d_{max}$\;
$d_{max}\leftarrow d_{min}+x$\;
\If{$d_{min}>n$}{
$k\leftarrow k+1$\;
$d_{min}\leftarrow 0$\;
$d_{max}\leftarrow x$\;}
}
}
}
}
}
\caption{Algorithm \emph{HD-CDDS}}
\label{algo:HDCDDS}
\end{algorithm}
\FloatBarrier
The second proposed variant, \emph{MC-CDS (Mix Counting CDS)}, is an application of \emph{CDS} but with a modification in the way to count the discrepancies for the job selection rule only. We consider a binary counting for the discrepancies at the top level of the tree and a non-binary counting way for the rest of levels. This variant accepts discrepancies for all depth levels because the non-binary counting restricts the explored neighborhood.
\subsection{Discrepancy-adapted dominance rules}
In this section we propose to adapt the second dominance rule presented in Section~\ref{subsec-dominance} to the principle of local search. We argue that it can be very inefficient to use the dominance rule as presented in Section~\ref{subsec-dominance} with the proposed local search methods. Indeed, the best solutions of the neighborhood might not be explored because a dominant partial schedule has been found that allows us to prune them. Even if it is true that there exists a solution better than the evaluated node, it may not belong to the explored neighborhood.
For that reason, we propose discrepancy-adapted dominance rules. Once we know the criterion that defines the neighborhood (for example, $k$ authorized discrepancies from the job list $L$), we only have to verify that the new list of jobs $L'$ that reaches the dominant partial schedule is part of the explored nodes in the local search ($L'\in G$, where $G$ is the set of $k$-discrepancies lists from $L$).
We can see that the max flow computation rule presented in Section~\ref{subsec-dominance} is not discrepancy adaptable. It is not possible to verify that the dominant partial schedule $\sigma'(p)$ is part of the explored space because the rule indicates the existence of $\sigma'(p)$ but not the corresponding schedule. On the other hand, the second dominance rule introduced in Section~\ref{subsec-dominance} consists in a local modification of the evaluated schedule in order to explicitly obtain the dominant schedule. That way, we have the list of jobs, $L'$, available to compare with the best current solution list of jobs, $L$, and to verify that the dominant schedule is part of the explored nodes. Hence, when the encountered dominant schedule is not part of the explored neighborhood the current node is not pruned.
\section{Computational experiments}\label{Evaluation}
In this section we present the main results obtained from the implementation of our work. In the literature we have not found instances for parallel machines including both setup times and precedence constraints. Therefore, we propose to test the methods on a set of randomly generated instances. The algorithms are implemented in C++ and were run on a 2 GHz personal computer with 2 GB of RAM under the Linux Fedora 8 operating system.
We generate a set of 120 (60 for each criterion) small-size instances ($n=10$, $m=3$, and $n=15$, $m=2$) for the evaluation of the dominance rules and for the \emph{ECT rule} efficiency. Then, we test on a set of 120 middle-size instances ($n=40$, $m\in[2,4]$) the different branching rules (\emph{LDS-top}, \emph{LDS-low}, and \emph{DBDFS}), the different ways to count the discrepancies (\emph{binary} and \emph{non-binary}) to determine the best methods for being included inside the \emph{LDS} structure of the local search methods. The efficiency of the lower bounds, the dominance rules and the energetic reasoning proposed in Section~\ref{sec-branchandbound} are tested on middle and large-size instances ($n=100$, $m\in[2,4]$). We also compare the \emph{CDS} and the \emph{HD-CDDS} methods with the results obtained in \cite{bib-TreeSearch} for the hard instances of the $Pm|r_{i},q_{i}|C_{\max}$ problem (without precedence constraints and setup times). And finally, we evaluate and compare the proposed methods on a set of 120 large-size instances with the results obtained with ILOG OPL 6.0.
We use the RanGen software~\citep{bib-rangen} in order to generate the precedence graph between the jobs. Setup times and time windows $[r_i, d_i]$ cannot be generated by RanGen. Setup times are generated from the uniform distributions $U[1,10]$ and $U[20,40]$. Moreover, they must respect the weak triangle inequality: $s_{ij}\leq s_{ik}+p_{k}+s_{kj},\forall \thinspace i,j,k$. The values of $p_i$ are generated from the uniform distribution $U[1,5]$. Time windows are generated in a classical way found in the literature~\citep{bib-Sourd}. The values of $d_{i}$ are generated from the uniform distribution $U[\max(0,P\times(1-\tau-\rho/2)),P\times(1-\tau+\rho/2)]$, where $P=\sum(p_{i}+\min_{j}(s_{ij}))$, $\tau\in[0,1]$, $\rho\in[0,1]$. The $r_{i}$ are generated from $d_i$, $r_{i}=d_{i}-(p_{i}\times(2+\alpha))$ where $\alpha \in [-0.5,+1.5]$.
We solve to optimality the small-size instances and we compare the results (\emph{Optimal}) with the results obtained when we apply the ECT rule (\emph{ECT}) for each possible list of jobs (jobs are only allocated to the machine which allows to finish it first), with the results using the dominance rule based on the permutation of front jobs (\emph{Front Rule}), and with the results using the dominance rule based on max flow computation (\emph{Max Flow}).
\begin {table}[hbt]
\begin{center}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & &\\
\multicolumn{1}{l}{\footnotesize{$n=10,m=3$}}
&\emph{NbBest} & \emph{AvgNodes} & \emph{AvgTCPU}\\
\hline
\emph{Optimal} & 60 (100.0\,\%) & 484925 & 10.6 \\
\emph{Front Rule} & 60 (100.0\,\%) & 480444 & 12.3\\
\emph{Max Flow} & 60 (100.0\,\%) & 339541 & 27.7 \\
\emph{ECT} & 53 (88.3\,\%) & 61684 & 0.07 \\
\end {tabular}
}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & &\\
\multicolumn{1}{l}{\footnotesize{$n=15,m=2$}}
&\emph{NbBest} & \emph{AvgNodes} & \emph{AvgTCPU}\\
\hline
\emph{Optimal} & 60 (100.0\,\%) & 10126793 & 641.9 \\
\emph{Front Rule} & 60 (100.0\,\%) & 9480313 & 626.4\\
\emph{Max Flow} & 60 (100.0\,\%) & 7530154 & 454.6 \\
\emph{ECT} & 54 (90.0\,\%) & 1747416 & 2.5 \\
\end {tabular}
}
\caption{Results of ECT and dominance rules efficiency for $\min \sum{C_i}$ problem}
\label{fig-TestECT}
\end {center}
\end {table}
\begin {table}[hbt]
\begin{center}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & & \\
\multicolumn{1}{l}{\footnotesize{$n=10,m=3$}}
&\emph{NbBest} & \emph{AvgNodes} & \emph{AvgTCPU}\\
\hline
\emph{Optimal} & 60 (100.0\,\%) & 281896 & 5.6 \\
\emph{Front Rule} & 60 (100.0\,\%) & 263474 & 7.9\\
\emph{Max Flow} & 60 (100.0\,\%) & 219557 & 19.7 \\
\emph{ECT} & 52 (86.7\,\%) & 69141 & 0.07\\
\end {tabular}
}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & &\\
\multicolumn{1}{l}{\footnotesize{$n=15,m=2$}}
&\emph{NbBest} & \emph{AvgNodes} & \emph{AvgTCPU}\\
\hline
\emph{Optimal} & 60 (100.0\,\%) & 11936385 & 884.8 \\
\emph{Front Rule} & 60 (100.0\,\%) & 10503767 & 778.7\\
\emph{Max Flow} & 60 (100.0\,\%) & 8945948 & 628.4 \\
\emph{ECT} & 54 (90.0\,\%) & 4681104 & 7.27 \\
\end {tabular}
}
\caption{Results of ECT and dominance rules efficiency for $\min{L_{\max}}$ problem}
\label{fig-TestECTb}
\end {center}
\end {table}
\FloatBarrier
First, note that we found some hard instances that we could not solve to optimality within 15000 seconds. We observe in Tables~\ref{fig-TestECT} and~\ref{fig-TestECTb} that the ECT rule is very efficient for both problems. The optimal solution is reached for almost 90\,\% of the instances and the average CPU time (\emph{AvgTCPU}) is clearly reduced when we use the ECT rule. These results let us consider, for local search methods, only the job permutation, allocating the jobs to the machines following the ECT rule. The front dominance rule is also effective: the average number of explored nodes (\emph{AvgNodes}) and the average CPU time usually decrease when we use it. We observe that the \emph{Max Flow} rule largely reduces the number of explored nodes and the CPU time, except for the very small-size instances. We deduce that it is a very efficient rule to solve to optimality instances with a larger number of jobs.
In the comparison between the two different ways to count the discrepancies, \emph{binary} and \emph{non-binary} (only for job selection rules), we have evaluated on the middle-size instances the number of times each mode has found the best solution (\emph{NbBest}). The CPU time is limited to $100$ seconds.
Table~\ref{fig-TestModes} shows that the binary mode has a higher performance than the non-binary one. Out of a set of 120 instances, the binary mode has found the best solution for over 75\,\% of the instances, independently of the branching rule. We find very similar results for both criteria. In the following, the binary counting is kept for the LDS structure of the local search.
\begin {table}[hbt]
\begin{center}
\begin {tabular}{lcc}
\multicolumn{1}{l}{120 Instances}&
\multicolumn{2}{c}{\emph{NbBest}}\\
\multicolumn{1}{l}{\footnotesize{$n=40,m\in[2,4]$}}&
\emph{binary} mode & \emph{non-binary} mode\\
\hline
\emph{DBDFS} & 90 (75.0\,\%) & 48 (40.0\,\%)\\
\emph{LDS-top} & 93 (77.5\,\%) & 49 (40.8\,\%)\\
\emph{LDS-low} & 98 (81.7\,\%) & 31 (25.8\,\%)\\
\end {tabular}
\caption{Results of the comparison between discrepancies counting modes}
\label{fig-TestModes}
\end {center}
\end {table}
\FloatBarrier
In Table~\ref{fig-TestBranchingRule}, we can see the results for the comparison between the exploration strategies. In addition to previous notations, we introduce the average mean deviation from the best solution (\emph{AvgDev}). The CPU time is limited to $100$ seconds.
\begin {table}[hbt]
\begin{center}
\begin {tabular}{lcccc}
\multicolumn{1}{l}{Binary mode}&
\multicolumn{2}{c}{$\min \sum{C_i}$~(60 instances)}&
\multicolumn{2}{c}{$\min{L_{\max}}$~(60 instances)}\\
\multicolumn{1}{c}{\footnotesize{$n=40,m\in[2,4]$}}& \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}}& \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}}\\
\hline
\emph{DBDFS} & 43 (71.7\,\%) & 0.91\,\%& 47 (78.3\,\%) & 1.86\,\% \\
\emph{LDS-top} & 29 (48.3\,\%) & 0.43\,\%& 17 (28.3\,\%) & 2.33\,\% \\
\emph{LDS-low} & 50 (83.3\,\%) & 0.71\,\%& 59 (98.3\,\%) & 0.75\,\% \\
\end {tabular}
\caption{Results for the comparison of different branching strategies}
\label{fig-TestBranchingRule}
\end {center}
\end {table}
\FloatBarrier
We find that LDS-low is the most efficient strategy, since it reaches the best solution for a larger number of instances and it presents the smallest average deviation when the best solution is found by another strategy. LDS-low finds the best solution for all instances except one for the maximum lateness minimization and for 50 out of 60 instances for the completion times sum minimization. We use this strategy for the remaining computational experiments.
The lower bounds, the energetic reasoning, and the discrepancy-adapted dominance rule are compared in Tables~\ref{fig-Regles} and ~\ref{fig-Reglesb}. We run a 30 seconds LDS search for the middle and large-size instances for different versions of the node evaluation. First, we only consider the lower bound computed using precedence constraint propagation (\emph{LBCP}), then we add the lower bound ($LB_{NCY}$) proposed in~\cite{bib-lbCi} for $\min \sum{C_i}$ problem and the energetic reasoning (\emph{ENERGY}) for $\min L_{\max}$ problem; finally we add the discrepancy-adapted dominance rule (\emph{DaDR}). We compare the number of times each version finds the best solution (\emph{NbBest}), the explored nodes average (\emph{AvgNodes}), and the average CPU time needed to reach the best solution (\emph{TBest}), only for the cases that all versions have found it.
\begin {table}[hbt]
\begin{center}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & &\\
\multicolumn{1}{l}{\footnotesize{$n=40,m\in[2,4]$}}
&\emph{NbBest} & \emph{AvgNodes} & \emph{TBest}\\
\hline
\emph{LBCP} & 36 (60.0\,\%) & 62007 &4.52 \\
$LB_{NCY}$ & 38 (63.3\,\%) & 61742 &4.47\\
\emph{DaDR} & 35 (58.3\,\%) & 53373 &1.69\\
\end {tabular}
}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & &\\
\multicolumn{1}{l}{\footnotesize{$n=100,m\in[2,4]$}}
&\emph{NbBest} & \emph{AvgNodes}& \emph{TBest}\\
\hline
\emph{LBCP} & 26 (43.3\,\%) & 9259 & 17.55 \\
$LB_{NCY}$ & 34 (56.7\,\%) & 7813 &15.63\\
\emph{DaDR} & 38 (63.3\,\%) & 7606 & 8.71\\
\end {tabular}
}
\caption{Results of lower bounds and dominance rule efficiency for $\min \sum{C_i}$ problem}
\label{fig-Regles}
\end {center}
\end {table}
\FloatBarrier
Tables \ref{fig-Regles} and \ref{fig-Reglesb} show the efficiency of the specific lower bound $LB_{NCY}$ and of the energetic reasoning with the computation of setup times consumption. Moreover, we find that the discrepancy-adapted dominance rule is very efficient for large-size instances but not especially interesting for the middle-size instances. However, the time consumed to reach the best solution is reduced when we use the dominance rule in most cases.
\begin {table}[hbt]
\begin{center}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & &\\
\multicolumn{1}{l}{\footnotesize{$n=40,m\in[2,4]$}}
&\emph{NbBest} & \emph{AvgNodes}& \emph{TBest}\\
\hline
\emph{LBCP} & 47 (78.3\,\%) & 93737& 4.81 \\
\emph{ENERGY} & 48 (80.0\,\%) & 99856& 4.24 \\
\emph{DaDR} & 44 (73.3\,\%) & 71737 & 4.59 \\
\end {tabular}
}
\subtable{
\begin {tabular}{lccc}
\multicolumn{1}{l}{60 Instances}& & &\\
\multicolumn{1}{l}{\footnotesize{$n=100,m\in[2,4]$}}
&\emph{NbBest} & \emph{AvgNodes}& \emph{TBest}\\
\hline
\emph{LBCP} & 44 (73.3\,\%) & 11474& 4.29 \\
\emph{ENERGY} & 48 (80.0\,\%) & 12961& 3.58 \\
\emph{DaDR} & 55 (91.7\,\%) & 9462& 3.17 \\
\end {tabular}}
\caption{Results of lower bound, energetic reasoning and dominance rule efficiency for $\min{L_{\max}}$ problem}
\label{fig-Reglesb}
\end {center}
\end {table}
\FloatBarrier
We compare \emph{CDS} and \emph{HD-CDDS} methods against other tree search methods presented in~\cite{bib-TreeSearch}. In~\cite{bib-TreeSearch}, the authors test two different branching schemes, time windows (\emph{tw}) and chronological (\emph{chr}), and several incomplete tree search techniques (truncated branch-and-bound, LDS, Beam Search and Branch-and-Greed) for the $Pm|r_{i},q_{i}|C_{\max}$ problem. We adapt the proposed methods for this problem and we use the heuristic for the initial solution and the upper bounds proposed in their paper. In Table~\ref{fig-ComparaisonNeron}, we compare LDS ($z$ is the number of authorized discrepancies) and Beam Search (\emph{BS}, $\omega$ is the number of explored child nodes) results, the method with the best results in their work, against the proposed methods CDS and HD-CDDS. We have evaluated the number of times the method has found the best solution (\emph{NbBest}) and for how many of them the method is the only one to reach the best solution (\emph{NbBestStrict}) for a set of 50 hard instances ($n=100$ and $m=10$). The CPU time is limited to $30$ seconds as in~\cite{bib-TreeSearch}.
\begin {table}[hbt]
\begin{center}
\begin{tabular}{l c c}
50 instances & \emph{NbBest} & \emph{NbBestStrict}\\
\hline
\small{$LDS^{tw}_{z=1}$} & 1 (2.0\,\%) & 0 \\
\small{$LDS^{chr}_{z=2}$} & 7 (14.0\,\%) & 0 \\
\small{$BS^{tw}_{\omega =3}$}& 25 (50.0\,\%) & 3 \\
\small{$BS^{chr}_{\omega=4}$} & 22 (44.0\,\%) & 0\\
\hline
\emph{CDS} & 35 (70.0\,\%) & 6 \\
\emph{HD-CDDS}& 38 (76.0\,\%) & 9\\
\end{tabular}
\caption{Results for the comparison with other truncated tree search techniques}
\label{fig-ComparaisonNeron}
\end{center}
\end{table}
\FloatBarrier
Although precedence constraints and setup times are not considered in the problem, we can observe that our propositions are strictly better. Out of a set of 50 instances, CDS and HD-CDDS find the best solution for most of the cases and they find a new best solution for 6 and 9 instances respectively. Rather than contradicting the statement of relative LDS inefficiency for parallel machine problems experienced by~\cite{bib-TreeSearch}, this demonstrates, at least for this problem, the efficiency of large neighborhood search based on LDS.
Finally, we compare the local search methods with the results obtained by ILOG OPL 6.0. The four variants of the hybrid tree local search methods (\emph{CDS}, \emph{CDDS}, \emph{HD-CDDS}, \emph{MC-CDS}) are implemented with \emph{LDS-low}, discrepancy-adapted dominance rule and binary counting (except for \emph{MC-CDS} which supposes a mix counting). We solve the large-size instances ($n=100,m\in[2,4]$) for two different CPU time limits, 30 and 300 seconds, then we compare the number of times when the best solution has been found by the method and the average deviation from the best solution.
\begin {table}[hbt]
\begin{center}
\subtable{
\begin {tabular}{lcccc}
\multicolumn{1}{l}{30 instances}&
\multicolumn{2}{c}{\footnotesize{$TCPU=30s$}}&\multicolumn{2}{c}{\footnotesize{$TCPU=300s$}}\\
\multicolumn{1}{c}{\scriptsize{$p\sim U[1,5], s_{ij}\sim U[1,10]$}}& \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}} & \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}}\\
\hline
\emph{CDS} & 17 (56.6\,\%) & 0.64\,\% & 7 (23.3\,\%) & 0.51\,\% \\
\emph{CDDS} & 7 (23.3\,\%) & 0.75\,\% & 7 (23.3\,\%) & 0.82\,\% \\
\emph{HD-CDDS} & 16 (53.3\,\%) & 0.60\,\% & 14 (46.7\,\%) & 0.43\,\% \\
\emph{MC-CDS} & 17 (56.6\,\%) & 0.64\,\% & 10 (33.3\,\%) & 0.45\,\% \\
\emph{ILOG OPL} & 4 (13.3\,\%) & 1.51\,\% & 2 (6.7\,\%) & 1.47\,\% \\
\end {tabular}
}
\subtable{
\begin {tabular}{lcccc}
\multicolumn{1}{l}{30 instances}&
\multicolumn{2}{c}{\footnotesize{$TCPU=30s$}}&\multicolumn{2}{c}{\footnotesize{$TCPU=300s$}}\\
\multicolumn{1}{c}{\scriptsize{$p\sim U[1,5], s_{ij}\sim U[20,40]$}}& \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}} & \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}}\\
\hline
\emph{CDS} & 9 (30.0\,\%) & 0.23\,\% & 6 (20.0\,\%) & 0.18\,\% \\
\emph{CDDS} & 7 (23.3\,\%) & 0.35\,\% & 6 (20.0\,\%) & 0.38\,\% \\
\emph{HD-CDDS} & 12 (40.0\,\%) & 0.26\,\% & 11 (36.6\,\%) & 0.17\,\% \\
\emph{MC-CDS} & 11 (36.7\,\%) & 0.25\,\% & 13 (43.3\,\%) & 0.26\,\% \\
\emph{ILOG OPL} & 10 (33.3\,\%) & 0.70\,\% & 5 (16.6\,\%) & 0.63\,\% \\
\end {tabular}
}
\caption{Results for the comparison of different variants of hybrid tree local search methods for $\min \sum{C_i}$ problem}
\label{fig-TestLocalSearch}
\end {center}
\end {table}
\FloatBarrier
In Table~\ref{fig-TestLocalSearch}, we observe that the hybrid local search methods improve the best solutions found by ILOG OPL. All methods, except CDDS, find the best solution for a large number of instances and the mean deviation from the best solution is smaller than for the ILOG OPL solutions. We observe that computing an upper bound highly increases the efficiency of the truncated search.
\begin {table}[hbt]
\begin{center}
\subtable{
\begin {tabular}{lcccc}
\multicolumn{1}{l}{30 instances}&
\multicolumn{2}{c}{\footnotesize{$TCPU=30s$}}&\multicolumn{2}{c}{\footnotesize{$TCPU=300s$}}\\
\multicolumn{1}{c}{\scriptsize{$p\sim U[1,5], s_{ij}\sim U[1,10]$}}& \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}} & \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}}\\
\hline
\emph{CDS} & 10 (33.3\,\%) & 2.75\,\% & 7 (23.3\,\%) & 3.06\,\% \\
\emph{CDDS} & 9 (30.0\,\%) & 2.65\,\% & 8 (26.7\,\%) & 3.28\,\% \\
\emph{HD-CDDS} & 13 (43.3\,\%) & 1.92\,\% & 10 (33.3\,\%) & 2.56\,\% \\
\emph{MC-CDS} & 13 (43.3\,\%) & 1.75\,\% & 11 (30.0\,\%) & 2.29\,\% \\
\emph{ILOG OPL} & 15 (50.0\,\%) & 2.07\,\% & 18 (60.0\,\%) & 1.55\,\% \\
\end {tabular}
}
\subtable{
\begin {tabular}{lcccc}
\multicolumn{1}{l}{30 instances}&
\multicolumn{2}{c}{\footnotesize{$TCPU=30s$}}&\multicolumn{2}{c}{\footnotesize{$TCPU=300s$}}\\
\multicolumn{1}{c}{\scriptsize{$p\sim U[1,5], s_{ij}\sim U[20,40]$}}& \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}} & \multicolumn{1}{c}{\emph{NbBest}} &\multicolumn{1}{c}{\emph{AvgDev}}\\
\hline
\emph{CDS} & 3 (10.0\,\%) & 2.76\,\% & 2 (6.0\,\%) & 2.89\,\% \\
\emph{CDDS} & 3 (10.0\,\%) & 2.71\,\% & 2 (6.0\,\%) & 2.88\,\% \\
\emph{HD-CDDS} & 13 (43.3\,\%) & 2.12\,\% & 7 (23.3\,\%) & 1.55\,\% \\
\emph{MC-CDS} & 12 (40.0\,\%) & 2.08\,\% & 8 (26.7\,\%) & 1.83\,\% \\
\emph{ILOG OPL} & 15 (50.0\,\%) & 0.91\,\% & 19 (63.3\,\%) & 0.90\,\% \\
\end {tabular}
}
\caption{Results for the comparison of different variants of hybrid tree local search methods for $\min L_{\max}$ problem}
\label{fig-TestLocalSearchb}
\end {center}
\end {table}
\FloatBarrier
Table~\ref{fig-TestLocalSearchb} shows the results for the minimization of the maximum lateness. For this case, we observe that ILOG OPL improves on our results, but the proposed methods are still competitive: the mean deviation is acceptable and they find the best solution for 50\,\% and 37\,\% of the instances, for 30 and 300 seconds respectively.
\section{Conclusion}
In this paper we have studied limited discrepancy-based search methods. We have compared and tested some of the existing options for different LDS components, such as discrepancy counting modes and branching structures, to solve the parallel machine scheduling problem with precedence constraints and setup times.
New local search methods based on LDS have been proposed and compared with similar existing methods. The computational experiments show that these methods are efficient to solve parallel machine scheduling problems in general and demonstrate the interest, at least for the studied problem, of incorporating LDS into a large neighborhood search scheme as first suggested by~\cite{bib-cds}.
We have suggested an energetic reasoning scheme integrating setup times and we have proposed new global and local dominance rules adapted to discrepancies. As the results show, these evaluation techniques allow to reduce the number of explored nodes and the time of the search.
As a direction for further research, the proposed methods could be extended to solve more complex problems involving setup times, like the hybrid flow shop or the RCPSP.
\end{document}
|
\begin{document}
\begin{abstract}
Highly regular graphs for which not all regularities are explainable by symmetries are fascinating creatures. Some of them like, e.g., the line graph of W.~Kantor's non-classical $\operatorname{GQ}(5^2,5)$, are stumbling stones for existing implementations of graph isomorphism tests. They appear to be extremely rare and even once constructed it is difficult to prove their high regularity. Yet some of them, like the McLaughlin graph on 275 vertices and Ivanov's graph on 256 vertices are of profound beauty. This alone makes it an attractive goal to strive for their complete classification or, failing this, at least to get a deep understanding of them. Recently, one of the authors discovered new methods for proving high regularity of graphs. Using these techniques, in this paper we study a classical family of strongly regular graphs, originally discovered by A.E.~Brouwer, A.V.~Ivanov, and M.H.~Klin in the late 80s. We analyze their symmetries and show that they are $(3,5)$-regular but not $2$-homogeneous. Thus we promote these graphs to the distinguished club of highly regular graphs with few symmetries.
\end{abstract}
\title{On a family of highly regular graphs by Brouwer, Ivanov, and Klin}
\section{Introduction}
Recall that a simple graph $\Gamma$ is called \emph{regular} if there exists a number $k$, such that each vertex of $\Gamma$ has exactly $k$ neighbors. The concept of regularity can be extended naturally. Roughly speaking for a given configuration of vertices in $\Gamma$ we may count extensions of this configuration to a bigger, given, type of configuration. An example is given by the $k$-isoregular graphs. A regular graph is called $k$-isoregular graph if for every induced subgraph $\Delta\le\Gamma$ the number of joint neighbors of $V(\Delta)$ in $\Gamma$ depends only on the isomorphism type of $\Delta$. When we talk about high regularity, we have in mind a much more general set of regularity conditions:
\begin{definition}
% NOTE(review): the garbled macro "\bar{y}pe{T}" was reconstructed as $\mathfrak{T}$
% (the graph-type symbol used throughout this definition); confirm against the original source.
A \emph{graph type} $\mathfrak{T}$ of order $(m,n)$ is a triple $(\Delta,\iota,\Theta)$, where $\Delta$ and $\Theta$ are graphs of order $m$ and $n$, respectively, and where $\iota\colon\Delta\hookrightarrow\Theta$ is an embedding. A graph $\Gamma$ is called \emph{$\mathfrak{T}$-regular} if either $\Delta$ does not embed into $\Gamma$ or if for all $\kappa\colon\Delta\hookrightarrow\Gamma$ the number $\#(\Gamma,\mathfrak{T},\kappa)$ of embeddings $\hat\kappa\colon\Theta\hookrightarrow\Gamma$ with $\kappa=\hat\kappa\circ\iota$ does not depend on $\kappa$ (i.e., it is equal to a constant $\#(\Gamma,\mathfrak{T})$).
In the case that $\Delta$ does not embed into $\Gamma$ we define $\#(\Gamma,\mathfrak{T})$ to be equal to $0$.
\end{definition}
We are usually not so much interested into regularities for particular graph types but rather for whole classes.
\begin{definition}
A graph is called
\begin{itemize}
\item \emph{$(\lseq m, \lseq n)$-regular}, if it is $\mathfrak{T}$-regular for each graph type $\mathfrak{T}$ of order $(m,n)$,
\item \emph{$(\lseq m, n)$-regular}, if it is $(\lseq m, \lseq l)$-regular for all $m\le l\le n$,
\item \emph{$( m, n)$-regular}, if it is $(\lseq k, n)$-regular for all $k\le m$.
\end{itemize}
\end{definition}
The notion of $(m,n)$-regularity generalizes several classical regularity-concepts for graphs. E.g., the $( 2, 3)$-regular graphs coincide with the \emph{strongly regular graphs} (in the sense of Bose \cite{Bos63}), the $(2,t)$-regular graphs correspond to the graphs that satisfy the \emph{$t$-vertex condition} (in the sense of Higman \cite{Hig71}, cf.~also \cite{HesHig71}). Finally, the $(k,k+1)$-regular graphs coincide with the \emph{$k$-regular graphs} (in the sense of Gol'fand and Klin \cite{GolKli78}) and with the \emph{$k$-tuple regular graphs} (in the sense of Buczak \cite{Buc80}). Nowadays, in order to avoid conflicts with existing graph-theoretical terminology, these graphs are called \emph{$k$-isoregular} (cf.~\cite{KliPoeRos88}).
\begin{definition}
We call a graph $\Gamma$ \emph{highly regular} if there is some $m\ge 2$ and some $n\ge 4$, such that $\Gamma$ is $( m, n)$-regular.
\end{definition}
Note that our definition of high regularity excludes the strongly regular graphs that do not satisfy the $4$-vertex condition. The reason for this is that we are ultimately interested in a classification of highly regular graphs. However, such a classification for strongly regular graphs in general seems hopeless as for certain orders there are so-called prolific constructions (cf.~\cite{Wal71,FDF02,Muz07}).
Most naturally, regularity is induced by symmetry. E.g., if a graph is vertex transitive, then it is also regular. Recall that a graph is called \emph{symmetric} if its automorphism group acts transitively on vertices and arcs (cf. \cite{GodRoy01}). When we talk about highly symmetric graphs, we think about even stronger conditions:
\begin{definition}
Let $\Gamma$ and $\Delta$ be graphs. Then $\Gamma$ is called \emph{$\Delta$-homogeneous} if for all $\iota_1,\iota_2\colon\Delta\hookrightarrow\Gamma$ there exists $\alpha\in\operatorname{Aut}(\Gamma)$ such that $\iota_2=\alpha\circ\iota_1$.
It is called \emph{weakly $\Delta$-homogeneous} if for all $\iota_1,\iota_2\colon\Delta\hookrightarrow\Gamma$ there exist $\alpha\in\operatorname{Aut}(\Gamma)$ and $\beta\in\operatorname{Aut}(\Delta)$, such that $\alpha\circ\iota_1 = \iota_2\circ\beta$.
\end{definition}
Note that many of the common symmetry-conditions naturally translate into special cases of this definition. For instance, vertex transitivity is $K_1$-homogeneity, arc-transitivity is $K_2$-homogeneity, edge-transitivity is weak $K_2$-homogeneity\dots.
\begin{definition}
A graph $\Gamma$ is called \emph{$k$-homogeneous} if it is $\Delta$-homogeneous for all graphs $\Delta$ of order $\le k$. It is called \emph{homogeneous} if it is $k$-homogeneous, for every $k>0$.
\end{definition}
In general, we call a graph \emph{highly symmetric} if it is $k$-homogeneous, for some $k\ge 2$. High symmetry implies high regularity: It is easy to see that every $k$-homogeneous graph is $(k,l)$-regular, for every $l\ge k$.
Note that the highly symmetric graphs are completely classified up to isomorphism. The homogeneous finite graphs were classified by Gardiner, Gol'fand and Klin \cite{Gar76,GolKli78}. It was shown by Cameron \cite{Cam80} that every $5$-homogeneous graph is homogeneous. The finite $4$-homogeneous graphs were characterized by Buczak \cite{Buc80}. It turns out that there is up to isomorphism and up to complement just one $4$-homogeneous graph that is not homogeneous, the Schl\"afli graph. The $3$-homogeneous graphs were classified by Cameron and Macpherson \cite{CamMac85}. Finally, the $2$-homogeneous graphs are implicitly known by the classification of rank-3-groups that was carried out by Bannai, Kantor, Liebler, Liebeck, and Saxl (\cite{Ban72,KanLie82,Lie87,LieSax86}). It is noteworthy that the classification of the $k$-homogeneous graphs for $2\le k\le 4$ relies on the classification of finite simple groups.
We are mostly interested in highly regular graphs for which not all regularities are explainable by symmetries. Apart from the sheer intellectual challenge to classify these combinatorial objects, we are interested in such graphs since they play a role in the research about the complexity of the graph isomorphism problem. For existing implementations of graph isomorphism tests (like, e.g., the widely used package \emph{nauty} by B.~McKay \cite{McKPip14}) highly regular graphs with few symmetries form a performance bottleneck. For instance, in its standard settings it takes hours of cpu-time for nauty to compute a canonical labeling of the line graph of the $\operatorname{GQ}(5^2,5)$ constructed by Kantor in \cite{Kan80} (cf.~also \cite{Pay92,PayTha09}). Here the notion $\operatorname{GQ}(s,t)$ refers to \emph{generalized quadrangles} of order $(s,t)$ in the sense of Tits \cite{Tit59}.
Interestingly, there exist highly symmetric graphs for which not all regularities are explainable by symmetries. E.g., the McLaughlin graph on $275$ vertices is $( 4, 5)$-regular but is not $4$-homogeneous. So in particular it is highly regular. On the other hand it is $3$-homogeneous and thus, according to our definition, it is highly symmetric.
While we know almost everything about highly symmetric graphs, our knowledge about highly regular graphs is still very modest. This is so, even though a considerable amount of research went into their classification during the last few decades. It is generally not so hard to construct a graph with given regularities, but it is much harder to construct one with few symmetries. The following timeline shows roughly the development of the research since the early seventies:
\begin{description}
\item[1970] Hestenes and Higman introduce the notion of $(2,t)$-regularity and show that point graphs of generalized quadrangles are $(2,4)$-regular (cf.~\cite{Hig71,HesHig71}).
\item[1984] Farad\v{z}ev, A.A.Ivanov, and Klin construct a $(2,3)$-regular graph on 280 vertices with $\operatorname{Aut}(J_2)$ as automorphism group, that is not $2$-homogeneous (cf.~\cite{IvaKliFar84,FarKliMuz94}).
\item[1989] A.V.Ivanov finds a $(2,5)$-regular graph on 256 vertices, whose subconstituents, both, are $(2,4)$-regular (cf.~\cite{Iva89}).
\item[1989] Brouwer, Ivanov, and Klin describe a family $\Gamma^{(m)}$ of $(3,4)$-regular graphs that contains Ivanov's graph as $\Gamma^{(4)}$, and show that their first subconstituents are $(2,4)$-regular but not $2$-homogeneous (cf.~\cite{BroIvaKli89}).
\item[1994] A.V.Ivanov discovers another infinite family $\widehat{\Gamma}^{(m)}$ of $(2,4)$-regular graphs (cf.~\cite{Iva94}).
\item[2000] Reichard shows that both, $\Gamma^{(m)}$ and $\widehat{\Gamma}^{(m)}$ are $(2,5)$-regular, for all $m\ge 4$. Moreover, he shows that the graph discovered in 1984 is $(2,4)$-regular (cf.~\cite{Reich00}).
\item[2003] Reichard shows that point graphs of $\operatorname{GQ}(s,t)$ are $(2,5)$-regular, and that the point graphs of $\operatorname{GQ}(q,q^2)$ are $(2,6)$-regular (cf.~\cite{Reich03}).
\item[2003] Klin, Meszka, Reichard, and Rosa identify the smallest $(2,4)$-regular graph that is not $2$-homogeneous. It has parameters $(v,k,\lambda,\mu)=(36,14,4,6)$ (cf.~\cite{KliMesReiRos05}).
\item[2004] CP shows that the point graphs of $\operatorname{PQ}(s,t,\mu)$ are $(2,5)$-regular. Here $\operatorname{PQ}(s,t,\mu)$ refers to the \emph{partial quadrangles} of order $(s,t,\mu)$ in the sense of Cameron \cite{Cam75}.
\item[2005] Reichard shows that the point graphs of $\operatorname{GQ}(q,q^2)$ are $(2,7)$-regular (cf.~\cite{Rei15}).
\item[2007] CP shows that the point graphs of $\operatorname{PQ}(q-1,q^2,q^2-q)$ are $(2,6)$-regular.
\item[2007] Klin and CP find two self-complementary $(2,4)$-regular graphs that are not $2$-homoge\-neous.
\item[2014] CP shows that the point graphs of $\operatorname{GQ}(q,q^2)$ are $(3,7)$-regular (cf.~\cite{Pec14}).
\end{description}
As can be read off this timeline, much work had to be put into proving high regularity for graphs that were already known. The reason for the difficulties is that with growing $m$ and $n$ we experience a combinatorial explosion of the number of graph types of order $(m,n)$. For instance, there are 20,364 pairwise non-isomorphic graph types of order $(3,7)$.
Interestingly, sometimes high regularity implies high symmetry. It was shown independently by Buczak, Gol'fand, and Cameron, that every $(5,6)$-regular graph is already homogeneous. Thus, when classifying highly regular graphs, we may restrict our attention to $(m,n)$-regular graphs for which $m<n$ and for which $m<5$. Indeed, as was mentioned above, only one $(4,5)$-regular graph is known that is not $4$-homogeneous---the McLaughlin graph. Non-$3$-homogeneous, $(3,t)$-regular graphs for $t\ge 4$ appear to be extremely rare. In this paper we are going to uncover another infinite family of $(3,5)$-regular graphs that are not $2$-homogeneous. We will do so by giving a complete analysis of the family $\Gamma^{(m)}$ originally discovered by Brouwer, Ivanov, and Klin.
\section{Constructions and results}
In \cite{Iva89} A.V.~Ivanov constructed a $( 2, 5)$-regular graph $\Gamma^{(4)}$ with $256$ vertices and valency $120$. The automorphism group of this graph acts transitively on vertices and arcs but not on non-arcs. In particular, $\Gamma^{(4)}$ is not $2$-homogeneous.
Ivanov showed further that the first and second subconstituents of $\Gamma^{(4)}$ are $( 2, 4)$-regular. Here the \emph{first (the second) subconstituent} of a graph $\Gamma$ with respect to a vertex $v\in V(\Gamma)$ is the subgraph of $\Gamma$ induced by all the neighbors (all the non-neighbors) of $v$ in $\Gamma$. The first and the second subconstituent of $\Gamma$ with respect to $v$ are denoted by $\Gamma_1(v)$ and by $\Gamma_2(v)$, respectively. Clearly, if $\operatorname{Aut}(\Gamma)$ acts transitively on vertices, then all first subconstituents (all second subconstituents) are mutually isomorphic. In this case, if the vertex with respect to which we take the subconstituent is not important, then instead of $\Gamma_i(v)$ we write just $\Gamma_i$ ($i\in\{1,2\}$).
It is well known that a strongly regular graph $\Gamma$ is $( 3, 4)$-regular if and only if its subconstituents $\Gamma_i(v)$ are strongly regular with parameters independent from $v\in V(\Gamma)$ (for a proof see, e.g., \cite[Proposition 4]{Rei15}). Thus, Ivanov's graph $\Gamma^{(4)}$ is $( 3, 4)$-regular.
In \cite{BroIvaKli89} a wide class of strongly regular graphs is described of which Ivanov's graph is a special case. We are not going to repeat the construction in full generality but only as far as it touches our interests. In particular, only one series of strongly regular graphs from \cite{BroIvaKli89} consists of $( 3, 4)$-regular graphs. This is the one that we consider in the sequel. A first construction goes as follows:
\begin{construction}[{\cite{BroIvaKli89}}]\label{const1}
Consider the vector space $\mathbb{F}_2^{2m}$. Let $q\colon \mathbb{F}_2^{2m}\to\mathbb{F}_2$ be a non-degenerate quadratic form over $\mathbb{F}_2^{2m}$ of maximal Witt index. Let $Q\subseteq\mathbb{F}_2^{2m}$ be the quadric defined by $q$, and let $S\le \mathbb{F}_2^{2m}$ be a maximal singular subspace for $q$. Now define $\Gamma^{(m)}\coloneqq (V^{(m)},E^{(m)})$ according to
\begin{equation*}
V^{(m)} \coloneqq \mathbb{F}_2^{2m},\quad E^{(m)} \coloneqq \{(\bar{v},\bar{w})\mid \bar{w}-\bar{v}\in Q\setminus S\}.
\end{equation*}
\end{construction}
A first analysis of these graphs was given in \cite{BroIvaKli89}. Further steps were taken in \cite{Iva94} and \cite{Reich00}. In the following we collect what is known about the graphs $\Gamma^{(m)}$ and their subconstituents and what is relevant for this paper:
\begin{itemize}
\item $\Gamma^{(m)}$ is strongly regular (\cite[Section 2]{BroIvaKli89}),
\item $\Gamma^{(m)}$ is symmetric (\cite[Section 3]{Iva94}),
\item the first subconstituent $\Gamma_1^{(m)}$ is $( 2, 4)$-regular (\cite[Theorem 1]{BroIvaKli89}),
\item the second subconstituent $\Gamma_2^{(m)}$ is strongly regular (\cite[Section 4]{BroIvaKli89}),
\item if $m\ge 4$ then $\operatorname{Aut}(\Gamma_1^{(m)})$ has rank $4$ (\cite[Theorem 1]{BroIvaKli89}),
\item $\Gamma^{(m)}$ is $( 3, 4)$-regular (this is a direct consequence of the previous items; cf.\ also \cite[Proposition 4]{Rei15}),
\item $\Gamma^{(m)}$ is $( 2, 5)$-regular (\cite[Theorem 7]{Reich00}).
\end{itemize}
The parameters of $\Gamma^{(m)}$ and its subconstituents are given in the following table. Here and below, in order to save space and to improve readability, we denote the number $2^{m-3}$ by $\theta_m$.
\[
\resizebox{\textwidth}{!}{$\displaystyle
\begin{array}{l|cccccccc}
& \multicolumn{1}{c}{v} & \multicolumn{1}{c}{k} & \multicolumn{1}{c}{\lambda} & \multicolumn{1}{c}{\mu} & \multicolumn{1}{c}{r} & \multicolumn{1}{c}{s} & \multicolumn{1}{c}{f} & \multicolumn{1}{c}{g} \\\hline
\Gamma^{(m)} & 64\theta_m^2 & 4\theta_m(8\theta_m-1) & 4\theta_m(4\theta_m-1) & 4\theta_m(4\theta_m-1) & 4\theta_m & -4\theta_m & 4\theta_m(8\theta_m-1) & (4\theta_m+1)(8\theta_m-1) \\
\Gamma_1^{(m)} & 4\theta_m(8\theta_m-1) & 4\theta_m(4\theta_m-1) & 2\theta_m(4\theta_m-1) & 4\theta_m(2\theta_m-1) & 4\theta_m & -2\theta_m & \dfrac{(4\theta_m-1)(8\theta_m-1)}{3} & \dfrac{4(4\theta_m-1)(4\theta_m+1)}{3}\\
\Gamma_2^{(m)} & (4\theta_m+1)(8\theta_m-1) & 16\theta_m^2 & 2\theta_m(4\theta_m-1) & 8\theta_m^2 & 2\theta_m & -4\theta_m & \dfrac{4(4\theta_m+1)(4\theta_m-1)}{3} & \dfrac{2(2\theta_m+1)(8\theta_m-1)}{3}
\end{array}$}
\]
Let us make Construction~\ref{const1} more concrete:
\begin{construction}
Construction~\ref{const1} requires a non-degenerate quadratic form on $\mathbb{F}_2^{2m}$ of maximal Witt index. Up to equivalence, there is exactly one such quadratic form and its Witt index is $m$. Moreover, it does not matter which quadratic form from this equivalence class we choose as two equivalent forms will lead to isomorphic graphs. For the rest of the paper we will consider
\[
q^{(m)}(x_1,\dots,x_m,y_1,\dots,y_m)\coloneqq \sum_{i=1}^m x_iy_i.
\]
It is convenient to identify $\mathbb{F}_2^{2m}$ with the isomorphic vector space $(\mathbb{F}_2^m)^2$ whose elements are of the shape $\bar{v}=\icol{\bar{v}_1\\\bar{v}_2}$, where $\bar{v}_1$ and $\bar{v}_2$ are binary vectors of length $m$. Note that with this identification we have
\[
q^{(m)}(\bar{v})= q^{(m)}\icol{\bar{v}_1\\\bar{v}_2}= \bar{v}_1^{T}\bar{v}_2.
\]
The quadric $Q_m$ induced by $q^{(m)}$ consists of all vectors $\bar{v}$ such that $\bar{v}_1^T\bar{v}_2=0$. A maximal singular subspace $S_m$ is given by the set of all $\bar{v}\in\mathbb{F}_2^{2m}$ for which $\bar{v}_2=\bar{0}$ (here and below, by $\bar{0}$ we denote the zero vector; in each case the length of $\bar{0}$ will be clear from the context). Now we can repeat the construction of $\Gamma^{(m)}=(V^{(m)},E^{(m)})$ in more concrete terms:
\[
V^{(m)}=\mathbb{F}_2^{2m},\quad E^{(m)}=\{(\bar{v},\bar{w})\mid (\bar{v}_1+\bar{w}_1)^T(\bar{v}_2+\bar{w}_2)=0, \bar{v}_2\neq\bar{w}_2\}.
\]
From now on, whenever we talk about the graphs $\Gamma^{(m)}$, we have in mind this model.
\end{construction}
Our main result is:
\begin{theorem}\label{mainthm}
Let $m\ge 4$ be a natural number. Then
\begin{enumerate}
\item $\Gamma^{(m)}$ is not $2$-homogeneous; the orbitals of $\operatorname{Aut}(\Gamma^{(m)})$ are given by the following binary relations on $\mathbb{F}_2^{2m}$:
\begin{align*}
\varrho_1^{(m)} &= \{(\bar{v},\bar{w})\mid \bar{v}=\bar{w}\}, & \varrho_2^{(m)} &= \{(\bar{v},\bar{w})\mid \bar{v}+\bar{w}\in S_m\setminus\{\bar{0}\}\,\},\\
\varrho_3^{(m)} &= \{ (\bar{v},\bar{w})\mid \bar{v}+\bar{w}\in Q_m\setminus S_m\}, & \varrho_4^{(m)} &= \{(\bar{v},\bar{w})\mid \bar{v}+\bar{w}\notin Q_m\};
\end{align*}
\item the relational structure $\mathcal{C}^{(m)}\coloneqq (\mathbb{F}_2^{2m};\,\varrho_1^{(m)},\varrho_2^{(m)},\varrho_3^{(m)},\varrho_4^{(m)})$ is $3$-homogeneous, i.e., every isomorphism between relational substructures of at most three elements extends to an automorphism,
\item $\Gamma^{(m)}$ is $( 3, 5)$-regular,
\item $\Gamma_1^{(m)}$ is $(2,4)$-regular but not $2$-homogeneous (already known from \cite{BroIvaKli89}),
\item $\Gamma_2^{(m)}$ is $(2,4)$-regular but not $1$-homogeneous.
\end{enumerate}
\end{theorem}
The rest of the paper is devoted to the proof of this result.
\section{Symmetries of the graphs $\Gamma^{(m)}$}
Let us have a look at the automorphisms of $\Gamma^{(m)}$. By $\operatorname{GL}(n,2)$ we denote the group of regular $n\times n$-matrices over $\mathbb{F}_2$. Clearly, $\Gamma^{(m)}$ is invariant under all affine transformations $\varphi_{A,\bar{w}}\colon\bar{v}\mapsto A\bar{v}+\bar{w}$ for which $A\in\operatorname{GL}(2m,2)$ preserves $Q_m$ and $S_m$ setwise. Let us denote this group by $G_m$ and the stabilizer of $\bar{0}$ in $G_m$ by $H_m$. Then we have
\begin{lemma}\label{HM}
Let $A\in \operatorname{GL}(m,2)$, and let $S$ be a symmetric $m\times m$-matrix over $\mathbb{F}_2$ with $0$ diagonal. Then
\[
\begin{pmatrix}
A & AS\\
O & (A^T)^{-1}
\end{pmatrix}
\]
is an element of $H_m$ (here and below $O$ denotes the zero-matrix). Moreover, every element of $H_m$ is obtained in this way.
\end{lemma}
\begin{proof}
Let $M\in H_m$. Then $M$ can be decomposed into $m\times m$-blocks like
\[
M=\begin{pmatrix}
A & B \\
D & C
\end{pmatrix}.
\]
As $M$ preserves $S_m$, we have $D=O$. Let $\bar{v}\in V(\Gamma^{(m)})$. Then
\[
M\bar{v} = \begin{pmatrix}
A & B \\
O & C
\end{pmatrix}
\begin{pmatrix}
\bar{v}_1\\
\bar{v}_2
\end{pmatrix}= \begin{pmatrix}
A\bar{v}_1+B\bar{v}_2\\ C\bar{v}_2
\end{pmatrix}.
\]
Since $M$ preserves $Q_m$, we have for all $\bar{v}\in Q_m$ that $(A\bar{v}_1+B\bar{v}_2)^T C\bar{v}_2=0$. That means
\[
0 = (A\bar{v}_1)^TC\bar{v}_2 + (B\bar{v}_2)^TC\bar{v}_2 = \bar{v}_1^T A^T C\bar{v}_2 + \bar{v}_2^T B^T C\bar{v}_2.
\]
If we consider the special case that $\bar{v}_1=\bar{0}$, then we obtain $\bar{v}_2^T B^T C \bar{v}_2= 0$, for all $\bar{v}_2\in \mathbb{F}_2^m$. From this it follows that $S\coloneqq B^T C$ is a symmetric matrix with $0$-diagonal. Moreover, it follows that $\bar{v}_1^T A^T C\bar{v}_2=0$, for all $\bar{v}\in Q_m$. However, from this it follows that $A^T C=I$. Indeed, if we consider all vectors of the shape $\bar{v}=(\bar{e}_i,\bar{e}_j)^T$ (here and below, by $\bar{e}_i$ we denote the vector whose $i$-th entry is equal to $1$ and whose remaining entries are equal to $0$; in each case the length of $\bar{e}_i$ will be clear from the context) for $i\neq j$, then we obtain that all the off-diagonal entries of $A^T C$ are equal to $0$. Since both, $A$ and $C$ are regular, the claim follows. Now we may conclude that $C= (A^T)^{-1}$ and $S= B^T(A^T)^{-1}= (A^{-1}B)^T = A^{-1}B$. It follows that $B=AS$. Thus we showed that every element of $H_m$ is of the desired shape. It is not hard to see that every matrix of this shape preserves $Q_m$ and $S_m$ setwise.
\end{proof}
\begin{proposition}\label{notrankthree}
For all $m\ge 4$ the graph $\Gamma^{(m)}$ is not $2$-homogeneous, and $\Gamma_2^{(m)}$ has an intransitive automorphism group.
\end{proposition}
\begin{proof}
Consider the vertices $\bar{z}=\icol{\bar{0}\\\bar{0}}$, $\bar{a}=\icol{\bar{e}_1\\\bar{0}}$, $\bar{b}=\icol{\bar{e}_1\\\bar{e}_1}$. Clearly, $(\bar{z},\bar{a})$ and $(\bar{z},\bar{b})$ are non-arcs in $\Gamma^{(m)}$. Our goal is to show that no automorphism of $\Gamma^{(m)}$ maps $(\bar{z},\bar{a})$ to $(\bar{z},\bar{b})$: To this end we introduce two auxiliary graphs $\Upsilon_{\bar{a}}$ and $\Upsilon_{\bar{b}}$. The vertices of $\Upsilon_{\bar{a}}$ and $\Upsilon_{\bar{b}}$ shall be all neighbours of $\bar{z}$ in $\Gamma^{(m)}$ that are non-neighbours of $\bar{a}$ and $\bar{b}$, respectively. If we can show that $\Upsilon_{\bar{a}}$ and $\Upsilon_{\bar{b}}$ are non-isomorphic, then we are done.
It is not hard to see that we have
\begin{align*}
V(\Upsilon_{\bar{a}}) &= \{\icol{\bar{v}_1\\\bar{v}_2}\mid \bar{v}_1^T\bar{v}_2=0, \bar{v}_2(1)=1\},\\
V(\Upsilon_{\bar{b}}) &= W_1\mathrel{\dot\cup} W_2,\text{ where} \\
W_1 &= \{\icol{\bar{v}_1\\\bar{v}_2}\mid \bar{v}_2=\bar{e}_1,\bar{v}_1(1)=0\},\\
W_2 &= \{\icol{\bar{v}_1\\\bar{v}_2}\mid \bar{v}_2\neq\bar{0}, \bar{v}_1^T\bar{v}_2=0,\bar{v}_1(1)=\bar{v}_2(1)\},
\end{align*}
where for a vector $\bar{v}$, by $\bar{v}(i)$ we denote the $i$-th entry of $\bar{v}$.
In order to understand the structure of the graphs $\Upsilon_{\bar{a}}$ and $\Upsilon_{\bar{b}}$, consider the projection $\Pi\colon\mathbb{F}_2^{2m}\twoheadrightarrow\mathbb{F}_2^{2m-2}$ given by
\[
\Pi\colon \mathbb{F}_2^{2m}\twoheadrightarrow\mathbb{F}_2^{2m-2}: \icol{x_1\\\vdots\\x_m\\y_1\\\vdots\\y_m}\mapsto \icol{x_2\\\vdots\\x_m\\y_2\\\vdots\\y_m}.
\]
Note that the restrictions of $\Pi$ to $V(\Upsilon_{\bar{a}})$ and to $V(\Upsilon_{\bar{b}})$ both are bijections with $\mathbb{F}_2^{2m-2}$. Moreover, if we define
\begin{align*}
A_1 &\coloneqq \left\{\icol{0\\\bar{0}\\1\\\bar{0}}\right\}, & B_1 &\coloneqq \left\{\icol{0\\\bar{0}\\1\\\bar{0}}\right\},\\
A_2 &\coloneqq \left\{\icol{0\\\bar{x}\\1\\\bar{0}}\mid \bar{x}\in\mathbb{F}_2^{m-1}\setminus\{\bar{0}\}\right\}, & B_2 &\coloneqq \left\{\icol{0\\\bar{x}\\1\\\bar{0}}\mid \bar{x}\in\mathbb{F}_2^{m-1}\setminus\{\bar{0}\}\right\},\\
A_3 &\coloneqq \left\{\icol{0\\\bar{x}\\1\\\bar{y}}\mid \bar{x},\bar{y}\in\mathbb{F}_2^{m-1},\bar{x}^T\bar{y}=0,\bar{y}\neq\bar{0}\right\}, & B_3 &\coloneqq \left\{\icol{0\\\bar{x}\\0\\\bar{y}}\mid \bar{x},\bar{y}\in\mathbb{F}_2^{m-1},\bar{x}^T\bar{y}=0,\bar{y}\neq\bar{0}\right\},\\
A_4 &\coloneqq \left\{\icol{1\\\bar{x}\\1\\\bar{y}}\mid \bar{x},\bar{y}\in\mathbb{F}_2^{m-1},\bar{x}^T\bar{y}=1\right\}, & B_4 &\coloneqq \left\{\icol{1\\\bar{x}\\1\\\bar{y}}\mid \bar{x},\bar{y}\in\mathbb{F}_2^{m-1},\bar{x}^T\bar{y}=1\right\}.
\end{align*}
Then $V(\Upsilon_{\bar{a}})=A_1\mathrel{\dot\cup} A_2\mathrel{\dot\cup} A_3\mathrel{\dot\cup} A_4$, $V(\Upsilon_{\bar{b}})=B_1\mathrel{\dot\cup} B_2\mathrel{\dot\cup} B_3\mathrel{\dot\cup} B_4$, and
\begin{align*}
\Pi(A_1) &=\Pi(B_1) = \{\bar{0}\}, &
\Pi(A_2) &=\Pi(B_2) = S_{m-1}\setminus\{\bar{0}\},\\
\Pi(A_3) &=\Pi(B_3) = Q_{m-1}\setminus S_{m-1}, &
\Pi(A_4) &=\Pi(B_4) = \mathbb{F}_2^{2m-2}\setminus Q_{m-1}.
\end{align*}
The edges of $\Upsilon_{\bar{a}}$ and $\Upsilon_{\bar{b}}$ may be read off the following diagrams:
\[\scalebox{0.9}{
\begin{tikzpicture}[node distance=20mm, terminal/.style={rounded rectangle,minimum size=6mm,draw},tight/.style={inner sep=-2pt}]
\node (A3) [terminal,anchor=center] {$\Pi(A_3)=Q_{m-1}\setminus S_{m-1}$};
\node (A2) [terminal, above right= of A3,anchor=center] {$\Pi(A_2)=S_{m-1}\setminus \{\bar{0}\}$};
\node (A4) [terminal, below right= of A3,anchor=center] {$\Pi(A_4)=\mathbb{F}_2^{2m-2}\setminus Q_{m-1}$};
\node (A1) [terminal, left=30mm of A3,anchor=center] {$\Pi(A_1)=\{\bar{0}\}$};
\path[thick, draw] (A1) edge ["$\varrho_3^{(m-1)}$" inner sep=2pt] (A3);
\path[thick, draw] (A3) edge ["$\varrho_3^{(m-1)}$" inner sep=0pt,swap](A4);
\path[thick, draw] (A3) edge ["$\varrho_3^{(m-1)}$" tight](A2);
\path[thick, draw] (A2) edge ["$\varrho_3^{(m-1)}$" inner sep=2pt] (A4);
\path[thick, draw] (A3) edge [loop above, min distance=20mm,in=65,out=115,"$\varrho_3^{(m-1)}$"] (A3);
\path[thick, draw] (A4) edge [loop below, min distance=20mm,in=-65,out=-115,swap,"$\varrho_3^{(m-1)}$"] (A4);
\node at (-3,-2){\ensuremath{\Pi(\Upsilon_{\bar{a}})}};
\end{tikzpicture}}
\]
\[\scalebox{0.9}{
\begin{tikzpicture}[node distance=20mm, terminal/.style={rounded rectangle,minimum size=6mm,draw},tight/.style={inner sep=-2pt}]
\node (B3) [terminal,anchor=center] {$\Pi(B_3)=Q_{m-1}\setminus S_{m-1}$};
\node (B2) [terminal, above right= of B3,anchor=center] {$\Pi(B_2)=S_{m-1}\setminus \{\bar{0}\}$};
\node (B4) [terminal, below right= of B3,anchor=center] {$\Pi(B_4)=\mathbb{F}_2^{2m-2}\setminus Q_{m-1}$};
\node (B1) [terminal, left=30mm of B3,anchor=center] {$\Pi(B_1)=\{\bar{0}\}$};
\path[thick, draw] (B1) edge ["$\varrho_3^{(m-1)}$" inner sep=2pt] (B3);
\path[thick, draw] (B3) edge ["$\varrho_4^{(m-1)}$" inner sep=0pt,swap](B4);
\path[thick, draw] (B3) edge ["$\varrho_3^{(m-1)}$" tight](B2);
\path[thick, draw] (B2) edge ["$\varrho_3^{(m-1)}$" inner sep=2pt] (B4);
\path[thick, draw] (B3) edge [loop above, min distance=20mm,in=65,out=115,"$\varrho_3^{(m-1)}$"] (B3);
\path[thick, draw] (B4) edge [loop below, min distance=20mm,in=-65,out=-115,swap,"$\varrho_3^{(m-1)}$"] (B4);
\node at (-3,-2){\ensuremath{\Pi(\Upsilon_{\bar{b}})}};
\end{tikzpicture}}
\]
If, e.g., in the first figure there is an edge between, say, $\Pi(A_i)$ and $\Pi(A_j)$ labelled with $\varrho_k^{(m-1)}$, then this means that $(\bar{u},\bar{v})\in E(\Upsilon_{\bar{a}})$
if and only if $(\Pi(\bar{u}),\Pi(\bar{v}))\in\varrho_k^{(m-1)}$. Also we can read off the diagrams that $\Upsilon_{\bar{a}}$ is isomorphic to $\Gamma^{(m-1)}$ and that a graph isomorphic to $\Upsilon_{\bar{b}}$ can be obtained from $\Gamma^{(m-1)}$ by switching all edges between $Q_{m-1}\setminus S_{m-1}$ and $\mathbb{F}_2^{2m-2}\setminus Q_{m-1}$ for edges given by $\varrho_4^{(m-1)}$.
Now we are ready to show that $\Upsilon_{\bar{b}}$ is not isomorphic to $\Upsilon_{\bar{a}}$. First of all we note that $\Upsilon_{\bar{a}}$, being isomorphic to $\Gamma^{(m-1)}$, is $(3,4)$-regular. Let us count the number of common neighbours of a triangle in $\Gamma^{(m-1)}$. Clearly, this is the number of common neighbours of an edge in the first subconstituent $\Gamma_1^{(m-1)}$. In other words, it is equal to $\theta_m(2\theta_m-1)$. Let us now consider the triangle of $\Upsilon_{\bar{b}}$ induced by the following three vertices:
\begin{align*}
\bar{c}_1&=\icol{0\\\bar{0}\\0\\\bar{e}_1}, & \bar{c}_2&=\icol{0\\\bar{0}\\0\\\bar{e}_2}, & \bar{c}_3&=\icol{0\\\bar{0}\\0\\\bar{e}_3}.
\end{align*}
Note that we are able to choose the vertices in this way, since $m\ge 4$, and that $\{\bar{c}_1,\bar{c}_2,\bar{c}_3\}\subseteq B_3$. Thus, the image under $\Pi$ is in $Q_{m-1}\setminus S_{m-1}$. From the diagram of $\Pi(\Upsilon_{\bar{b}})$ we may read that the number of joint neighbours of $\bar{c}_1$, $\bar{c}_2$, and $\bar{c}_3$ is equal to
\[
|\{\bar{v}\in Q_{m-1}\mid \forall i:(\Pi(\bar{c}_i),\bar{v})\in\varrho_3^{(m-1)}\}| + |\{\bar{v}\in \mathbb{F}_2^{2m-2}\setminus Q_{m-1}\mid \forall i:(\Pi(\bar{c}_i),\bar{v})\in\varrho_4^{(m-1)}\}|,
\]
which can be shown to be equal to $\theta_m(2\theta_m-3/2)$. This shows that $\Upsilon_{\bar{b}}$ is not isomorphic to $\Upsilon_{\bar{a}}$, for all $m\ge 4$. This completes the proof that $\Gamma^{(m)}$ is not $2$-homogeneous.
It remains to show that $\Gamma_2^{(m)}$ has an intransitive automorphism group. For this we can make use of our computations above. First we use the fact that $\Gamma_2^{(m)}(\bar{a})$ is isomorphic to $\Gamma_2^{(m)}(\bar{b})$. Second we argue that $\bar{z}$ is a vertex of both graphs. Third we note that $\Upsilon_{\bar{a}}$ is the first subconstituent of $\Gamma_2^{(m)}(\bar{a})$ with respect to $\bar{z}$ and that $\Upsilon_{\bar{b}}$ is the first subconstituent of $\Gamma_2^{(m)}(\bar{b})$ with respect to $\bar{z}$. Now, the fact that $\Upsilon_{\bar{a}}$ and $\Upsilon_{\bar{b}}$ are non-isomorphic shows that $\Gamma_2^{(m)}$ contains two different kinds of vertices. In other words, the automorphism group of $\Gamma_2^{(m)}$ has at least two orbits on vertices.
\end{proof}
\section{The Schurian closure of $\Gamma^{(m)}$}
We define the \emph{Schurian closure} of a graph $\Gamma$ to be the relational structure on $V(\Gamma)$ whose basic relations are the orbitals of $\operatorname{Aut}(\Gamma)$. The Schurian closure of a graph gives rise to a so-called coherent configuration. Recall that a \emph{coherent configuration} $\mathcal{C}$ is a finite relational structure $(V, (\varrho_i)_{i\in I})$, such that
\begin{itemize}
\item the set $\{\varrho_i\mid i\in I\}$ forms a partition of $V\times V$,
\item every $\varrho_i$ is either contained in the diagonal relation $\Delta_V=\{(x,x)\mid x\in V\}$, or it is irreflexive,
\item every $\varrho_i$ is either symmetric or asymmetric,
\item for all $i,j,k\in I$ there exists an integer $p_{i,j}^k$, such that for all $(x,y)\in\varrho_k$ we have
\[
|\{ z\mid (x,z)\in\varrho_i\land (z,y)\in\varrho_j\}|=p_{i,j}^k.
\]
\end{itemize}
The $(p_{i,j}^k)_{i,j,k\in I}$ are called the \emph{structure constants} of $\mathcal{C}$. A coherent configuration $\mathcal{C}$ is called \emph{Schurian} if its relations coincide with the orbitals of its automorphism group (here the automorphism group of $\mathcal{C}$ consists of all permutations of $V$ that preserve each relation $\varrho_i$ where $i\in I$). Note that this is the same as to say that $\mathcal{C}$, considered as a relational structure, is $2$-homogeneous (i.e., every isomorphism between substructures of cardinality at most $2$ extends to an automorphism).
If $\mathcal{C}=(V(\Gamma),(\varrho_i)_{i=1,\dots,k})$ is the Schurian closure of $\Gamma$, then it is not hard to see that $\mathcal{C}$ is a Schurian coherent configuration.
The knowledge of the Schurian closure of $\Gamma^{(m)}$ and, in particular, the knowledge of its structure constants is going to be essential in proving the $(3,5)$-regularity of $\Gamma^{(m)}$. Our considerations from the previous section suggest that $\operatorname{Aut}(\Gamma^{(m)})$ has at least $4$ orbitals. In the following we show that the orbitals of $\operatorname{Aut}(\Gamma^{(m)})$ are exactly the relations $\varrho_1^{(m)},\dots,\varrho_4^{(m)}$ that were defined in Theorem~\ref{mainthm}:
\begin{proposition}\label{rankthree}
The orbitals of $\operatorname{Aut}(\Gamma^{(m)})$ are given by the following binary relations on $\mathbb{F}_2^{2m}$:
\begin{align*}
\varrho_1^{(m)} &= \{(\bar{v},\bar{w})\mid \bar{v}=\bar{w}\}, & \varrho_2^{(m)} &= \{(\bar{v},\bar{w})\mid \bar{v}+\bar{w}\in S_m\setminus\{\bar{0}\}\,\},\\
\varrho_3^{(m)} &= \{ (\bar{v},\bar{w})\mid \bar{v}+\bar{w}\in Q_m\setminus S_m\}, & \varrho_4^{(m)} &= \{(\bar{v},\bar{w})\mid \bar{v}+\bar{w}\notin Q_m\}.
\end{align*}
\end{proposition}
Before we come to the proof of this Proposition, we need a few auxiliary results:
\begin{lemma}\label{symcond}
Let $\bar{u},\bar{v}\in\mathbb{F}_2^m$. Then a symmetric $m\times m$-matrix $S$ over $\mathbb{F}_2$ with zero-diagonal exists such that $S\bar{u}=\bar{v}$ if and only if either $\bar{u}=\bar{v}=\bar{0}$ or $\bar{u}\neq\bar{0}$ and $\bar{u}^T\bar{v}=0$.
\end{lemma}
\begin{proof}
A symmetric $m\times m$-matrix $S$ with zero-diagonal may be considered as the adjacency matrix of a simple graph $\Gamma$ with vertex set $\{1,\dots,m\}$ and with $i$ connected to $j$ if and only if $S(i,j)=1$.
Let
\begin{align*}
I_0& \coloneqq\{i\in\{1,\dots,m\}\mid \bar{u}(i)=0\}, & I_1&\coloneqq \{i\in\{1,\dots,m\}\mid \bar{u}(i)=1\},\\
J_0& \coloneqq \{j\in\{1,\dots,m\}\mid \bar{v}(j)=0\}, & J_1&\coloneqq \{j\in\{1,\dots,m\}\mid \bar{v}(j)=1\}.
\end{align*}
Then $S\bar{u}=\bar{v}$ if and only if in $\Gamma$ every element of $J_1$ has an odd number of neighbours in $I_1$ and every element of $J_0$ has an even number of neighbours in $I_1$.
In more detail, if we define
\begin{align*}
I_{00} &\coloneqq I_0\cap J_0, & I_{01} &\coloneqq I_0\cap J_1,& I_{10} &\coloneqq I_1\cap J_0, & I_{11} &\coloneqq I_1\cap J_1,
\end{align*}
then $S\bar{u}=\bar{v}$ if and only if the parity of the valencies of the vertices from the $I_{ij}$ to $I_1$ is as depicted in the following diagram:
\begin{equation}\label{paritydiag}
\begin{tikzcd}[column sep=small]
I_{00} \arrow[to=I1,"\text{even}" sloped,swap] & & I_{01}\arrow[to=I1,"\text{odd}" sloped,swap] & & I_{10}\arrow[to=I1,"\text{even}" sloped,swap,near start] & &I_{11}\arrow[to=I1,"\text{odd}" sloped,swap]\\[2ex]
& & & |[alias=I1]|I_1.
\end{tikzcd}
\end{equation}
``$\Rightarrow$''
We need to show that $\bar{u}^T\bar{v}=0$. This means that $|I_{11}|$ is even. Suppose on the contrary that $|I_{11}|$ is odd. Let us count the number of arcs in the subgraph of $\Gamma$ induced by $I_1$. Since $|I_{11}|$ is odd, there is an odd number of arcs from $I_{11}$ to $I_{1}$. As the number of arcs from $I_{11}$ to $I_{11}$ must be even (by the first theorem of graph theory), the number of arcs from $I_{11}$ to $I_{10}$ must be odd. By symmetry, there is an odd number of arcs from $I_{10}$ to $I_{11}$. As the number of arcs from $I_{10}$ to $I_1$ must be even, we conclude that the number of arcs from $I_{10}$ to $I_{10}$ must be odd, a contradiction with the first theorem of graph theory. Hence, the cardinality of $I_{11}$ must be even and thus $\bar{u}^T\bar{v}=0$.
``$\Leftarrow$'' If $\bar{u}=\bar{v}=\bar{0}$, then we may choose $S=O$. So suppose that $\bar{u}\neq\bar{0}$ and that $\bar{u}^T\bar{v}=0$. Then $|I_{11}|$ is even. We define a graph $\Gamma$ with vertex set $\{1,\dots,m\}$: The subgraph of $\Gamma$ induced by $I_{11}$ shall be a complete graph. The induced subgraphs $\Gamma(I_{10})$, $\Gamma(I_{01})$, and $\Gamma(I_{00})$ shall have no edge at all. Finally, every vertex from $I_{01}$ shall be connected with exactly one vertex from $I_1$. Clearly, the valencies of the vertices of $\Gamma$ satisfy the parity-conditions from diagram~\eqref{paritydiag}. Thus, if we let $S$ be the adjacency matrix of $\Gamma$, then $S\bar{u}=\bar{v}$.
\end{proof}
\begin{lemma}\label{Scond}
Let $A\in\operatorname{GL}(m,2)$, let $S$ be any square matrix of order $m$, and let $\bar{u},\bar{v}\in\mathbb{F}_2^{2m}$. Then
\[
\begin{pmatrix}
(A^T)^{-1} & (A^T)^{-1}S\\
O & A
\end{pmatrix}
\begin{pmatrix}
\bar{u}_1\\
\bar{u}_2
\end{pmatrix}=
\begin{pmatrix}
\bar{v}_1\\
\bar{v}_2
\end{pmatrix} \iff A\bar{u}_2=\bar{v}_2 \text{ and } \bar{u}_1+S\bar{u}_2 = A^T\bar{v}_1.
\]
\end{lemma}
\begin{proof}
Clear.
\end{proof}
\begin{lemma}\label{suborbs}
The group $H_m$ has orbits $\{\bar{0}\}$, $S_m\setminus\{\bar{0}\}$, $Q_m\setminus S_m$, and $\mathbb{F}_2^{2m}\setminus Q_m$.
\end{lemma}
\begin{proof}
Let $\bar{v},\bar{w}\in S_m\setminus\{\bar{0}\}$. Then $\bar{v}=\icol{\bar{v}_1\\\bar{0}}$ and $\bar{w}=\icol{\bar{w}_1\\\bar{0}}$. Let $A\in\operatorname{GL}(m,2)$, such that $A\bar{v}_1=\bar{w}_1$. Then
\[
\begin{pmatrix}
A & O \\
O & (A^T)^{-1}
\end{pmatrix}
\begin{pmatrix}
\bar{v}_1\\\bar{0}
\end{pmatrix}
= \begin{pmatrix}
\bar{w}_1\\\bar{0}
\end{pmatrix}.
\]
Thus, $\bar{v}$ and $\bar{w}$ are in the same orbit under $H_m$.
Let $\bar{v}, \bar{w}\in Q_m\setminus S_m$. That is, $\bar{v}=\icol{\bar{v}_1\\\bar{v}_2}$, $\bar{v}_2\neq\bar{0}$, $\bar{v}_1^T\bar{v}_2=0$, and $\bar{w}=\icol{\bar{w}_1\\\bar{w}_2}$, $\bar{w}_2\neq\bar{0}$, $\bar{w}_1^T\bar{w}_2=0$. Let $A\in\operatorname{GL}(m,2)$, such that $A\bar{v}_2=\bar{w}_2$. Consider $\bar{a}\coloneqq A^T\bar{w}_1-\bar{v}_1$. We claim that there is a symmetric $m\times m$-matrix $S$ with zero-diagonal, such that $S\bar{v}_2=\bar{a}$.
By Lemma~\ref{symcond} we need to show that $\bar{v}_2^T\bar{a}=0$. We compute:
\begin{equation}\label{v2a}
\bar{v}_2^TA^T\bar{w}_1 = (A^{-1}\bar{w}_2)^TA^T\bar{w}_1 = \bar{w}_2^T(A^T)^{-1}A^T\bar{w}_1 = \bar{w}_2^T\bar{w}_1=0.
\end{equation}
Together with the fact that $\bar{v}_2^T\bar{v}_1=0$, this proves that $\bar{v}_2^T\bar{a}=0$. Let $S$ be a symmetric matrix with zero-diagonal, such that $S\bar{v}_2=\bar{a}$. Then, by Lemma~\ref{Scond}, we have that
\[
\begin{pmatrix}
(A^T)^{-1} & (A^T)^{-1}S\\
O & A
\end{pmatrix}
\begin{pmatrix}
\bar{v}_1\\
\bar{v}_2
\end{pmatrix}=
\begin{pmatrix}
\bar{w}_1\\
\bar{w}_2
\end{pmatrix}.
\]
The case $\bar{v},\bar{w}\in\mathbb{F}_2^{2m}\setminus Q_m$ is handled in the same way as the previous case. Only the final result in \eqref{v2a} is $1$ and $\bar{v}_2^T\bar{v}_1=1$, thus also in this case $\bar{v}_2^T\bar{a}=0$.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{rankthree}]
The group $G_m$ acts transitively on $V(\Gamma^{(m)})$. Thus, by Lemma~\ref{suborbs}, $\varrho_1^{(m)},\dots,\varrho_4^{(m)}$ are the orbitals of $G_m$. Since $G_m\le\operatorname{Aut}(\Gamma^{(m)})$, and since by Proposition~\ref{notrankthree} $\operatorname{Aut}(\Gamma^{(m)})$ has at least $4$ orbitals, we conclude that the $\varrho_i^{(m)}$ ($i=1,\dots,4$) are precisely the orbitals of $\operatorname{Aut}(\Gamma^{(m)})$.
\end{proof}
Let us denote the Schurian closure $(\mathbb{F}_2^{2m};\,\varrho_1^{(m)},\varrho_2^{(m)},\varrho_3^{(m)},\varrho_4^{(m)})$ of $\Gamma^{(m)}$ by $\mathcal{C}^{(m)}$.
This coherent configuration appeared for the first time in \cite{Iva94}, where also its structure constants $(p_{i,j}^k(m))_{i,j,k\in\{1,2,3,4\}}$ were computed. Here we give this table once more, using our notations: \\
\begin{minipage}{\linewidth-12pt}
\begin{center}\scalebox{1}{$\displaystyle\renewcommand{\arraystretch}{1.3}
\begin{array}{cccccc}
& & j=1 & j=2 & j=3 & j=4\\\hline
& k=1 & 1 & 0 & 0 & 0\\
& k=2 & 0 & 1 & 0 & 0\\
\raisebox{3ex}[0pt][0pt]{i=1} & k=3 & 0 & 0 & 1 & 0\\
& k=4 & 0 & 0 & 0 & 1\\\hline
& k=1 & 0 & 8\theta_m-1 & 0 & 0\\
& k=2 & 1 & 8\theta_m-2 & 0 & 0\\
\raisebox{3ex}[0pt][0pt]{i=2} & k=3 & 0 & 0 & 4\theta_m-1 & 4\theta_m\\
& k=4 & 0 & 0 & 4\theta_m & 4\theta_m-1\\\hline
& k=1 & 0 & 0 & 4\theta_m(8\theta_m-1) & 0\\
& k=2 & 0 & 0 & 4\theta_m(4\theta_m-1) & 16\theta_m^2\\
\raisebox{3ex}[0pt][0pt]{i=3} & k=3 & 1 & 4\theta_m-1 & 4\theta_m(4\theta_m-1) & 4\theta_m(4\theta_m-1)\\
& k=4 & 0 & 4\theta_m & 4\theta_m(4\theta_m-1) & 4\theta_m(4\theta_m-1)\\\hline
& k=1 & 0 & 0 & 0 & 4\theta_m(8\theta_m-1)\\
& k=2 & 0 & 0 & 16\theta_m^2 & 4\theta_m(4\theta_m-1)\\
\raisebox{3ex}[0pt][0pt]{i=4} & k=3 & 0 & 4\theta_m & 4\theta_m(4\theta_m-1) & 4\theta_m(4\theta_m-1)\\
& k=4 & 1 & 4\theta_m-1 & 4\theta_m(4\theta_m-1) & 4\theta_m(4\theta_m-1)\\\hline
\end{array}
$}\end{center}
\captionof{table}{The structure constants $p_{ij}^k(m)$ of $\mathcal{C}^{(m)}$}
\end{minipage}
Next we show that the coherent configuration $\mathcal{C}^{(m)}$, considered merely as a relational structure in the model-theoretic sense, has another remarkable property:
\begin{proposition}
$\mathcal{C}^{(m)}$, considered as relational structure, is $3$-homogeneous. That is, every isomorphism between substructures of cardinality at most $3$ extends to an automorphism of $\mathcal{C}^{(m)}$.
\end{proposition}
\begin{proof}
We already know that $\mathcal{C}^{(m)}$ is $1$-homogeneous and $2$-homogeneous. Following is a list of isomorphism types of substructures on $3$ elements in $\mathcal{C}^{(m)}$:
\[
\begin{matrix}
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_2$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_2$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_2$}](a)(c)
\end{tikzpicture} &
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_3$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_3$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_3$}](a)(c)
\end{tikzpicture} &
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_4$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_4$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_4$}](a)(c)
\end{tikzpicture} &
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_3$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_3$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_2$}](a)(c)
\end{tikzpicture}\\[6.5ex]
\mathcal{T}_1 & \mathcal{T}_2 & \mathcal{T}_3 & \mathcal{T}_4\\[2ex]
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_4$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_4$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_2$}](a)(c)
\end{tikzpicture} &
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_4$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_4$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_3$}](a)(c)
\end{tikzpicture} &
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_3$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_3$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_4$}](a)(c)
\end{tikzpicture} &
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,NoLabel]{a}
\Vertex[a=1*120-90,d=1,NoLabel]{b}
\Vertex[a=2*120-90,d=1,NoLabel]{c}
\Edge[labelstyle={MyLabel}, label=$\varrho_3$](a)(b)
\Edge[labelstyle={MyLabel}, label={$\varrho_4$}](b)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_2$}](a)(c)
\end{tikzpicture}\\[6.5ex]
\mathcal{T}_5 & \mathcal{T}_6 & \mathcal{T}_7 & \mathcal{T}_8
\end{matrix}
\]
In the following, for each type $\mathcal{T}_i$ we consider triples $(\bar{a},\bar{b},\bar{c})$ and $(\bar{u},\bar{v},\bar{w})$ of vertices of $\mathcal{C}^{(m)}$ that induce substructures isomorphic to $\mathcal{T}_i$, such that the mapping $\varphi\colon \bar{a}\mapsto\bar{u},\, \bar{b}\mapsto\bar{v},\,\bar{c}\mapsto\bar{w}$ is an isomorphism. Since $\mathcal{C}^{(m)}$ is $2$-homogeneous, in each case, without loss of generality, we may assume that $\bar{a}=\bar{u}=\bar{0}$ and $\bar{b}=\bar{v}$. Throughout the proof we fix the notation
\begin{align*}
\bar{c}_1 &= \begin{pmatrix}
\bar{c}_1(1)\\\tilde{c}_1
\end{pmatrix}, & \bar{c}_2 &=\begin{pmatrix}
\bar{c}_2(1)\\\tilde{c}_2
\end{pmatrix}, & \bar{w}_1 &=\begin{pmatrix}
\bar{w}_1(1)\\\tilde{w}_1
\end{pmatrix}, & \bar{w}_2 &=\begin{pmatrix}
\bar{w}_2(1)\\\tilde{w}_2
\end{pmatrix},
\end{align*}
for certain $\tilde{c}_i, \tilde{w}_i\in\mathbb{F}_2^{m-1}$ ($i=1,2$). In each case we will find some $A\in\operatorname{GL}(m,2)$ and some symmetric square matrix $S$ of order $m$ with zero-diagonal, such that
\begin{align}\label{goal}
\begin{pmatrix}
(A^T)^{-1} & (A^T)^{-1}S\\
O & A
\end{pmatrix}
\begin{pmatrix}
\bar{b}_1\\
\bar{b}_2
\end{pmatrix}&=
\begin{pmatrix}
\bar{v}_1\\
\bar{v}_2
\end{pmatrix}, &
\begin{pmatrix}
(A^T)^{-1} & (A^T)^{-1}S\\
O & A
\end{pmatrix}
\begin{pmatrix}
\bar{c}_1\\
\bar{c}_2
\end{pmatrix}&=
\begin{pmatrix}
\bar{w}_1\\
\bar{w}_2
\end{pmatrix}.
\end{align}
``about $\mathcal{T}_1$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{e}_1\\\bar{0}}$. Moreover, $\bar{c}_2=\bar{w}_2=\bar{0}$. Choose an $\hat{A}\in\operatorname{GL}(m,2)$ that fixes $\bar{e}_1$ and that maps $\bar{c}_1$ to $\bar{w}_1$ (such an $\hat{A}$ exists, since $\bar{c}_1\neq\bar{0}$ and because $\operatorname{GL}(m,2)$ acts $2$-transitively on non-zero vectors). Then with $A\coloneqq (\hat{A}^T)^{-1}$, and $S\coloneqq O$ we have that \eqref{goal} is satisfied.
``about $\mathcal{T}_2$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{0}\\\bar{e}_1}$, $\bar{c}_1(1)=\bar{w}_1(1)=0$, $\bar{c}_2,\bar{w}_2\notin\{\bar{0},\bar{e}_1\}$, and $\bar{c}_1^T\bar{c}_2=\bar{w}_1^T\bar{w}_2=0$. Note that
then $\icol{\tilde{c}_1\\\tilde{c}_2},\icol{\tilde{w}_1\\\tilde{w}_2}\in Q_{m-1}\setminus S_{m-1}$. By Lemma~\ref{suborbs} there exists an element of $H_{m-1}$ that maps $\icol{\tilde{c}_1\\\tilde{c}_2}$ to $\icol{\tilde{w}_1\\\tilde{w}_2}$. Thus, by Lemma~\ref{HM}, there exist $\tilde{A}\in\operatorname{GL}(m-1,2)$ and a symmetric square matrix $\tilde{S}$ of order $m-1$ with zero-diagonal, such that
\[
\begin{pmatrix}
(\tilde{A}^T)^{-1} & (\tilde{A}^T)^{-1}\tilde{S}\\
O & \tilde{A}
\end{pmatrix}
\begin{pmatrix}
\tilde{c}_1\\
\tilde{c}_2
\end{pmatrix}=
\begin{pmatrix}
\tilde{w}_1\\
\tilde{w}_2
\end{pmatrix}.
\]
Let $\tilde{x}\in\mathbb{F}_2^{m-1}$, such that $\tilde{x}^T\tilde{c}_2=\bar{c}_2(1)+\bar{w}_2(1)$ (such an $\tilde{x}$ exists because $\tilde{c}_2\neq\bar{0}$). Define
\begin{align*}
A&\coloneqq
\begin{pmatrix}
1 & \tilde{x}^T\\
\bar{0} & \tilde{A}
\end{pmatrix}, & S &\coloneqq
\begin{pmatrix}
0 & \bar{0}^T\\
\bar{0} & \tilde{S}
\end{pmatrix}.
\end{align*}
Then, using Lemma~\ref{Scond}, it can be checked that \eqref{goal} is satisfied.
``about $\mathcal{T}_3$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{e}_1\\\bar{e}_1}$, $\bar{c}_1(1)\neq\bar{c}_2(1)$, $\bar{w}_1(1)\neq\bar{w}_2(1)$, and $\bar{c}_1^T\bar{c}_2=\bar{w}_1^T\bar{w}_2=1$.
Observe that $\tilde{c}_1^T\tilde{c}_2=\tilde{w}_1^T\tilde{w}_2=1$.
Let $\tilde{x}\in\mathbb{F}_2^{m-1}$, such that $\tilde{x}^T\tilde{c}_2=\bar{c}_1(1)+\bar{w}_1(1)$ (such an $\tilde{x}$ exists because $\tilde{c}_2\neq\bar{0}$).
Note that
\[
(1+\bar{c}_1(1)+\bar{w}_1(1))\tilde{x}^T\tilde{c}_2 = (1+\tilde{x}^T\tilde{c}_2)\tilde{x}^T\tilde{c}_2 = 0.
\]
Thus, by Lemma~\ref{suborbs} together with Lemma~\ref{HM}, there exists $\tilde{A}\in\operatorname{GL}(m-1,2)$ and a symmetric square matrix $\tilde{S}$ of order $m-1$ with zero-diagonal, such that
\[
\begin{pmatrix}
(\tilde{A}^T)^{-1} & (\tilde{A}^T)^{-1}\tilde{S}\\
O & \tilde{A}
\end{pmatrix}
\begin{pmatrix}
\tilde{c}_1+(1+\bar{c}_1(1)+\bar{w}_1(1))\tilde{x}\\
\tilde{c}_2
\end{pmatrix}=
\begin{pmatrix}
\tilde{w}_1\\
\tilde{w}_2
\end{pmatrix}.
\]
Define
\begin{align*}
A&\coloneqq
\begin{pmatrix}
1 & \tilde{x}^T\\
\bar{0} & \tilde{A}
\end{pmatrix}, & S &\coloneqq
\begin{pmatrix}
0 & \tilde{x}^T\\
\tilde{x} & \tilde{S}
\end{pmatrix}.
\end{align*}
Then, using Lemma~\ref{Scond}, it can be verified that \eqref{goal} is satisfied.
``about $\mathcal{T}_4$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{0}\\\bar{e}_1}$, $\bar{c}_1(1)=\bar{w}_1(1)=0$, $\bar{c}_1\neq\bar{0}$, $\bar{w}_1\neq\bar{0}$, and $\bar{c}_2=\bar{w}_2=\bar{0}$.
Let $\tilde{A}\in\operatorname{GL}(m-1,2)$ such that $\tilde{A}\tilde{c}_1=\tilde{w}_1$. Then, with
\[
A\coloneqq
\begin{pmatrix}
1 & \bar{0}^T\\
\bar{0} & (\tilde{A}^T)^{-1}
\end{pmatrix}
\]
and with $S\coloneqq O$, it can be checked that \eqref{goal} is satisfied.
``about $\mathcal{T}_5$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{e}_1\\\bar{e}_1}$, $\bar{c}_1(1)=\bar{w}_1(1)=0$, $\bar{c}_1\neq\bar{0}$, $\bar{w}_1\neq\bar{0}$, and $\bar{c}_2=\bar{w}_2=\bar{0}$.
Let $\tilde{A}\in\operatorname{GL}(m-1,2)$ such that $\tilde{A}\tilde{c}_1=\tilde{w}_1$. Then, with
\[
A\coloneqq
\begin{pmatrix}
1 & \bar{0}^T\\
\bar{0} & (\tilde{A}^T)^{-1}
\end{pmatrix}
\]
and with $S\coloneqq O$ it can be checked that \eqref{goal} is satisfied.
``about $\mathcal{T}_6$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{0}\\\bar{e}_1}$, $\bar{c}_1(1)=\bar{w}_1(1)=0$, and $\bar{c}_1^T\bar{c}_2=\bar{w}_1^T\bar{w}_2=1$. Observe that $\icol{\tilde{c}_1\\\tilde{c}_2},\icol{\tilde{w}_1\\\tilde{w}_2}\in \mathbb{F}_2^{2m-2}\setminus Q_{m-1}$. By Lemma~\ref{suborbs} in conjunction with Lemma~\ref{HM} there exists $\tilde{A}\in\operatorname{GL}(m-1,2)$ and a symmetric square matrix $\tilde{S}$ of order $m-1$ with zero-diagonal, such that
\[
\begin{pmatrix}
(\tilde{A}^T)^{-1} & (\tilde{A}^T)^{-1}\tilde{S}\\
O & \tilde{A}
\end{pmatrix}
\begin{pmatrix}
\tilde{c}_1\\
\tilde{c}_2
\end{pmatrix}=
\begin{pmatrix}
\tilde{w}_1\\
\tilde{w}_2
\end{pmatrix}.
\]
Let $\tilde{x}\in\mathbb{F}_2^{m-1}$, such that $\tilde{x}^T\tilde{c}_2=\bar{c}_2(1)+\bar{w}_2(1)$ (such an $\tilde{x}$ exists because $\tilde{c}_2\neq\bar{0}$). Define
\begin{align*}
A&\coloneqq
\begin{pmatrix}
1 & \tilde{x}^T\\
\bar{0} & \tilde{A}
\end{pmatrix}, & S &\coloneqq
\begin{pmatrix}
0 & \bar{0}^T\\
\bar{0} & \tilde{S}
\end{pmatrix}.
\end{align*}
Then, using Lemma~\ref{Scond}, it can be checked that \eqref{goal} is satisfied.
``about $\mathcal{T}_7$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{0}\\\bar{e}_1}$, $\bar{c}_1(1)=\bar{w}_1(1)=1$, and $\bar{c}_1^T\bar{c}_2=\bar{w}_1^T\bar{w}_2=1$, $\bar{c}_2\neq\bar{e}_1$, $\bar{w}_2\neq\bar{e}_1$.
Then $\tilde{c}_2\neq\bar{0}$, $\tilde{w}_2\neq\bar{0}$. Let $\tilde{x}\in\mathbb{F}_2^{m-1}\setminus\{\tilde{c}_2\}$, such that $\tilde{x}^T\tilde{c}_2=\bar{c}_2(1)+\bar{w}_2(1)$. Then $(\tilde{c}_1+\tilde{x})^T\tilde{c}_2=\tilde{w}_1^T\tilde{w}_2$. Thus, $\icol{\tilde{c}_1+\tilde{x}\\\tilde{c}_2}$ and $\icol{\tilde{w}_1\\\tilde{w}_2}$ are either both in $\mathbb{F}_2^{2m-2}\setminus Q_{m-1}$ or both in $Q_{m-1}\setminus S_{m-1}$. By Lemma~\ref{suborbs} together with Lemma~\ref{HM} there exists $\tilde{A}\in\operatorname{GL}(m-1,2)$ and a symmetric square matrix $\tilde{S}$ of order $m-1$ with zero-diagonal, such that
\[
\begin{pmatrix}
(\tilde{A}^T)^{-1} & (\tilde{A}^T)^{-1}\tilde{S}\\
O & \tilde{A}
\end{pmatrix}
\begin{pmatrix}
\tilde{c}_1+\tilde{x}\\
\tilde{c}_2
\end{pmatrix}=
\begin{pmatrix}
\tilde{w}_1\\
\tilde{w}_2
\end{pmatrix}.
\]
With \begin{align*}
A&\coloneqq \begin{pmatrix}
1 & \tilde{x}^T\\
\bar{0} & \tilde{A}
\end{pmatrix}, &
S\coloneqq \begin{pmatrix}
0 & \bar{0}^T\\
\bar{0} & \tilde{S}
\end{pmatrix},
\end{align*}
using Lemma~\ref{Scond}, it can be checked that \eqref{goal} is satisfied.
``about $\mathcal{T}_8$:'' Without loss of generality we may assume that $\bar{b}=\bar{v}=\icol{\bar{0}\\\bar{e}_1}$, $\bar{c}_1(1)=\bar{w}_1(1)=1$, and $\bar{c}_2=\bar{w}_2=\bar{0}$.
Let $\tilde{A}\in\operatorname{GL}(m-1,2)$, such that $\tilde{A}\tilde{c}_1=\tilde{w}_1$. Then with
\[
A\coloneqq \begin{pmatrix}
1 & \bar{0}^T\\
\bar{0} & (\tilde{A}^T)^{-1}
\end{pmatrix}
\]
and $S\coloneqq O$ it can be checked that \eqref{goal} is satisfied.
\end{proof}
\section{$(3,5)$-regularity of the graphs $\Gamma^{(m)}$}
The proof of the $(3,5)$-regularity of $\Gamma^{(m)}$ hinges on a recent result reducing the number of graph types to be checked for regularity. Let us repeat the relevant details:
\begin{definition}
Let $\mathbb{T}=(\Delta,\iota,\Theta)$ be a graph type. Suppose $\Theta=(T,E)$. Let $M\subseteq T$ be the image of $\iota$. Then we define the \emph{closure} $\operatorname{Cl}(\mathbb{T})$ to be the graph with vertex set $T$ and with arc set $E\cup \{(u,v)\mid u,v\in M,\, u\neq v\}$.
\end{definition}
\begin{theorem}[{\cite[Corollary~3.41]{Pec14}}]
A graph $\Gamma$ is $(m,n+1)$-regular if and only if it is $(m,n)$-regular and $\mathbb{T}$-regular for all graph types $\mathbb{T}$ of order $(m,n+1)$ for which $\operatorname{Cl}(\mathbb{T})$ is $(m+1)$-connected.
\end{theorem}
We know that $\Gamma^{(m)}$ is $(3,4)$-regular. Next we enumerate all graph types of order $(3,5)$ whose closure is $4$-connected. The only $4$-connected graph of order $5$ is $K_5$. Thus the graph types of order $(3,5)$ with $4$-connected closure are:
\begin{align*}
\mathbb{T}_1\colon\, &
\begin{tikzpicture}[rotate=-162,scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\Vertices[unit=3,NoLabel]{circle}{A,B,C,D,E}
\Edges(A,E,B,D,C,E,D,A)
\AddVertexColor{black}{A,B,C}
\Edges(A,B,C,A)
\end{tikzpicture} & \mathbb{T}_2\colon\, &
\begin{tikzpicture}[rotate=-162,scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\Vertices[unit=3,NoLabel]{circle}{A,B,C,D,E}
\Edges(A,E,B,D,C,E,D,A)
\AddVertexColor{black}{A,B,C}
\Edges(A,B,C)
\end{tikzpicture} & \mathbb{T}_3\colon\, &
\begin{tikzpicture}[rotate=-162,scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\Vertices[unit=3,NoLabel]{circle}{A,B,C,D,E}
\Edges(A,E,B,D,C,E,D,A)
\AddVertexColor{black}{A,B,C}
\Edges(A,B)
\end{tikzpicture} & \mathbb{T}_4\colon\, &
\begin{tikzpicture}[rotate=-162,scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\Vertices[unit=3,NoLabel]{circle}{A,B,C,D,E}
\Edges(A,E,B,D,C,E,D,A)
\AddVertexColor{black}{A,B,C}
\end{tikzpicture}
\end{align*}
In other words, from the (up to isomorphism) $148$ graph types of order $(3,5)$ only $4$ have to be checked in order to prove that $\Gamma^{(m)}$ is $(3,5)$-regular.
In the course of the proof of $(3,5)$-regularity of $\Gamma^{(m)}$ the following classical graph theoretical concept will play a crucial role:
\begin{definition}
Let $\Gamma$ be a graph. An \emph{equitable partition} of $\Gamma$ is an ordered partition $(M_1,\dots,M_n)$ of $V(\Gamma)$, such that for all $i,j\in\{1,\dots,n\}$ there exists a non-negative integer $a_{ij}$, such that for all $v\in M_i$ the number of neighbours of $v$ in $M_j$ is equal to $a_{ij}$. The matrix $(a_{ij})_{i,j=1}^n$ is called the \emph{partition matrix} of the equitable partition.
\end{definition}
\subsubsection*{Proof of $\mathbb{T}_1$-regularity}
To fix notation, let $\mathbb{T}_1=(\Delta,\iota,\Theta)$, where $V(\Delta)=\{a,b,c\}$ and $E(\Delta)=\{(a,b),(a,c),(b,a),(b,c),(c,a),(c,b)\}$. Let $\kappa_1,\kappa_2\colon\Delta\hookrightarrow\Gamma^{(m)}$ be two embeddings. In particular, suppose $\kappa_1\colon a\mapsto \bar{u},\,b\mapsto \bar{v},\,c\mapsto \bar{w}$ and $\kappa_2\colon a\mapsto\bar{x},\, b\mapsto\bar{y},\, c\mapsto\bar{z}$. Then, since $\mathcal{C}^{(m)}$ is $3$-homogeneous, there exists an automorphism $\varphi$ of $\mathcal{C}^{(m)}$, such that $\varphi\colon \bar{u}\mapsto\bar{x},\,\bar{v}\mapsto\bar{y},\,\bar{w}\mapsto\bar{z}$. In other words, $\kappa_2=\varphi\circ\kappa_1$. In particular, since $\operatorname{Aut}(\mathcal{C}^{(m)})=\operatorname{Aut}(\Gamma^{(m)})$, we obtain that $\#(\Gamma^{(m)},\mathbb{T}_1,\kappa_1)=\#(\Gamma^{(m)},\mathbb{T}_1,\kappa_2)$. Since $\kappa_1$ and $\kappa_2$ were chosen arbitrarily, we conclude that $\Gamma^{(m)}$ is $\mathbb{T}_1$-regular.
\subsubsection*{Proof of $\mathbb{T}_2$-regularity}
Let the graph type $\mathbb{T}_2=(\Delta,\iota,\Theta)$ be given by the following labeled diagram:
\[
\mathbb{T}_2\colon\quad\begin{tikzpicture}[scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\SetVertexLabelOut
\Vertex[a=0*72-90,d=3,Lpos=0*72-90,Ldist=0,L=b]{B}
\Vertex[a=1*72-90,d=3,Lpos=1*72-90,Ldist=0,L=c]{C}
\Vertex[a=2*72-90,d=3,Lpos=2*72-90,Ldist=0,L=d]{D}
\Vertex[a=3*72-90,d=3,Lpos=3*72-90,Ldist=0,L=e]{E}
\Vertex[a=4*72-90,d=3,Lpos=4*72-90,Ldist=0,L=a]{A}
\Edges(A,E,B,D,C,E,D,A)
\AddVertexColor{black}{A,B,C}
\Edges(A,B,C)
\end{tikzpicture}
\]
That is, $V(\Delta)=\{a,b,c\}$, $V(\Theta)=\{a,b,c,d,e\}$, and $\iota\colon\Delta\hookrightarrow\Theta$ is the identical embedding. Let $\kappa\colon\Delta\hookrightarrow\Gamma^{(m)}$. Let $\mathbb{T}$ be the graph type obtained from $\mathbb{T}_2$ by removing the vertex $b$. That is, $\mathbb{T}$ has the following diagram:
\[
\mathbb{T}\colon\quad \begin{tikzpicture}[scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\SetVertexLabelOut
\Vertex[a=1*72-90,d=3,Lpos=1*72-90,Ldist=0,L=c]{C}
\Vertex[a=2*72-90,d=3,Lpos=2*72-90,Ldist=0,L=d]{D}
\Vertex[a=3*72-90,d=3,Lpos=3*72-90,Ldist=0,L=e]{E}
\Vertex[a=4*72-90,d=3,Lpos=4*72-90,Ldist=0,L=a]{A}
\Edges(A,E,C,D,A)
\Edge(E)(D)
\AddVertexColor{black}{A,C}
\end{tikzpicture}
\]
Let $\bar{v}\coloneqq \kappa(b)$. Observe that $\#(\Gamma^{(m)},\mathbb{T}_2,\kappa)=\#(\Gamma_1^{(m)}(\bar{v}),\mathbb{T},\kappa\mathord{\upharpoonright}_{\{a,c\}})$. Since $\Gamma_1^{(m)}(\bar{v})$ is $(2,4)$-regular, it is in particular $\mathbb{T}$-regular. Thus, since $\operatorname{Aut}(\Gamma^{(m)})$ is transitive, we obtain that $\Gamma^{(m)}$ is $\mathbb{T}_2$-regular.
\subsubsection*{Proof of $\mathbb{T}_3$-regularity}
Let the graph type $\mathbb{T}_3=(\Delta,\iota,\Theta)$ be given by the following labelled diagram:
\[
\mathbb{T}_3\colon\quad\begin{tikzpicture}[scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\SetVertexLabelOut
\Vertex[a=0*72-90,d=3,Lpos=0*72-90,Ldist=0,L=b]{B}
\Vertex[a=1*72-90,d=3,Lpos=1*72-90,Ldist=0,L=c]{C}
\Vertex[a=2*72-90,d=3,Lpos=2*72-90,Ldist=0,L=d]{D}
\Vertex[a=3*72-90,d=3,Lpos=3*72-90,Ldist=0,L=e]{E}
\Vertex[a=4*72-90,d=3,Lpos=4*72-90,Ldist=0,L=a]{A}
\Edges(A,E,B,D,C,E,D,A)
\AddVertexColor{black}{A,B,C}
\Edges(A,C)
\end{tikzpicture}
\]
That is, $V(\Delta)=\{a,b,c\}$, $V(\Theta)=\{a,b,c,d,e\}$, and $\iota\colon\Delta\hookrightarrow\Theta$ is the identical embedding.
Up to symmetries of $\Gamma^{(m)}$ and of $\mathbb{T}_3$ there are two kinds of embeddings of $\Delta$
into $\Gamma^{(m)}$. They are distinguished by their induced image in $\mathcal{C}^{(m)}$: Let $\kappa\colon\Delta\hookrightarrow\Gamma^{(m)}$. Let us denote $\kappa(a)=:\bar{u}$, $\kappa(b)=:\bar{v}$ and $\kappa(c)=:\bar{w}$. Then we have that $\#(\Gamma^{(m)},\mathbb{T}_3,\kappa)$ is equal to the number of arcs in the subgraph of $\Gamma^{(m)}$ induced by the set of joint neighbours of $\bar{u}$, $\bar{v}$, and $\bar{w}$.
Note that $\{\bar{u},\bar{v},\bar{w}\}$ induces one of the following two subcolorgraphs in $\mathcal{C}^{(m)}$:
\begin{align*}
(1) &:
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,Lpos=0*120-90,Ldist=0,L=\bar{v}]{c}
\Vertex[a=1*120-90,d=1,Lpos=1*120-90,Ldist=0,L=\bar{w}]{a}
\Vertex[a=2*120-90,d=1,Lpos=2*120-90,Ldist=0,L=\bar{u}]{z}
\Edge[lw=2pt,labelstyle={MyLabel,swap}, label=$\varrho_3^{(m)}$](z)(a)
\Edge[labelstyle={MyLabel}, label={$\varrho_4^{(m)}$}](z)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_4^{(m)}$}](a)(c)
\end{tikzpicture}
& (2) &:
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,Lpos=0*120-90,Ldist=0,L=\bar{v}]{c}
\Vertex[a=1*120-90,d=1,Lpos=1*120-90,Ldist=0,L=\bar{w}]{a}
\Vertex[a=2*120-90,d=1,Lpos=2*120-90,Ldist=0,L=\bar{u}]{z}
\Edge[lw=2pt,labelstyle={MyLabel,swap}, label=$\varrho_3^{(m)}$](z)(a)
\Edge[labelstyle={MyLabel}, label={$\varrho_2^{(m)}$}](z)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_4^{(m)}$}](a)(c)
\end{tikzpicture}
\end{align*}
Let us start with the first kind. Since $\mathcal{C}^{(m)}$ is $3$-homogeneous, without loss of generality we can assume that $\bar{u}=\icol{\bar{0}\\\bar{0}}$, $\bar{w}=\icol{\bar{0}\\\bar{e}_m}$, and $\bar{v}=\icol{\bar{e}_1\\\bar{e}_1}$.
\begin{lemma}\label{vert2}
Let $\bar{x}\in \mathbb{F}_2^{2m}$. Then $\bar{x}$ is a joint neighbour of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$ if and only if
\begin{enumerate}
\item $\bar{x}_2\neq\bar{0}$,
\item $\bar{x}_1^T\bar{x}_2 = 0$,
\item $\bar{x}_1(m)=0$,
\item $\bar{x}_1(1)\neq\bar{x}_2(1)$,
\item $\bar{x}_2(m)=1 \Longrightarrow (\bar{x}_2(1),\dots,\bar{x}_2(m-1))\neq\bar{0}^T$,
\item $\bar{x}_2(1)=1 \Longrightarrow (\bar{x}_2(2),\dots,\bar{x}_2(m))\neq\bar{0}^T$.
\end{enumerate}
\end{lemma}
\begin{proof}
Clear.
\end{proof}
In the following, by $M$ we will denote the set of joint neighbours of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$. Using Lemma~\ref{vert2} we partition $M$ into $6$ natural classes:
\begin{align*}
M_1 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_2(1)=0, \bar{x}_2(m)=0\},\\
M_2 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_2(1)=0, \bar{x}_2(m)=1\},\\
M_3 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_2(1)=1, \bar{x}_2(m)=0\},\\
M_4 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_2(1)=1, \bar{x}_2(m)=1, (\bar{x}_2(2),\dots,\bar{x}_2(m-1))\neq \bar{0}^T\},\\
M_5 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_1\neq\bar{0}, \bar{x}_2(1)=1, \bar{x}_2(m)=1, (\bar{x}_2(2),\dots,\bar{x}_2(m-1))= \bar{0}^T\},\\
M_6 &= \{\bar{x}\in M\mid \bar{x}_1=\bar{0}, \bar{x}_2=\bar{e}_1+\bar{e}_m\}.
\end{align*}
We claim that $(M_1,\dots,M_6)$ is an equitable partition of $\langle M\rangle_{\Gamma^{(m)}}$. For the proof of this claim consider now the projection
\begin{equation}\label{projection}
\Pi\colon \mathbb{F}_2^{2m}\twoheadrightarrow\mathbb{F}_2^{2m-4}:\quad
\begin{pmatrix}
\bar{x}_1\\
\bar{x}_2
\end{pmatrix}\mapsto
\begin{pmatrix}
\tilde{x}_1\\
\tilde{x}_2
\end{pmatrix},
\end{equation}
where $\tilde{x}_i$ is the unique element of $\mathbb{F}_2^{m-2}$, such that $\bar{x}_i=\icol{\bar{x}_i(1)\\\tilde{x}_i\\\bar{x}_i(m)}$ (where $i=1,2$). Observe that for each $i\in\{1,\dots,6\}$ we have that $\Pi\mathord{\upharpoonright}_{M_i}$ is one-to-one. Routine computations show that the projection of $\langle M\rangle_{\Gamma^{(m)}}$ in $\mathcal{C}^{(m-2)}$ looks as follows:
\[\scalebox{0.9}{
\begin{tikzpicture}[node distance=50mm, terminal/.style={rounded rectangle,minimum size=6mm,draw},tight/.style={inner sep=-2pt}]
\node (M5) [terminal,anchor=center] {$\Pi(M_5)=S_{m-2}\setminus\{\bar{0}\}$};
\node (M1) [terminal, above left= of M5,anchor=center] {$\Pi(M_1)=Q_{m-2}\setminus S_{m-2}$};
\node (M2) [terminal, above right= of M5,anchor=center] {$\Pi(M_2)=Q_{m-2}\setminus S_{m-2}$};
\node (M3) [terminal, below right= of M5,anchor=center] {$\Pi(M_3)=Q_{m-2}\setminus S_{m-2}$};
\node (M4) [terminal, below left= of M5,anchor=center] {$\Pi(M_4)=Q_{m-2}\setminus S_{m-2}$};
\node (M6) [terminal, below= 50mm of M5,anchor=center] {$\Pi(M_6)=\{\bar{0}\}$};
\path[thick, draw] (M1) edge ["$\varrho_4^{(m-2)}$" inner sep=0pt] (M5);
\path[thick, draw] (M1) edge [out=-60,in=160, "$\varrho_4^{(m-2)}$" inner sep=-1pt,near end,swap](M3);
\path[thick, draw] (M1) edge [swap, "$\varrho_4^{(m-2)}$"](M4);
\path[thick, draw] (M1) edge ["$\varrho_1^{(m-2)}\cup\varrho_2^{(m-2)}\cup\varrho_3^{(m-2)}$"](M2);
\path[thick, draw] (M2) edge [out=-120,in=20, "$\varrho_4^{(m-2)}$" tight, near start](M4);
\path[thick, draw] (M2) edge [swap, "$\varrho_4^{(m-2)}$" inner sep=-3pt] (M5);
\path[thick, draw] (M2) edge ["$\varrho_4^{(m-2)}$"](M3);
\path[thick, draw] (M3) edge [swap, "$\varrho_3^{(m-2)}$" inner sep=0pt] (M5);
\path[thick, draw] (M3) edge ["$\varrho_3^{(m-2)}$" tight](M6);
\path[thick, draw] (M3) edge ["$\varrho_1^{(m-2)}\cup\varrho_2^{(m-2)}\cup\varrho_3^{(m-2)}$"] (M4);
\path[thick, draw] (M4) edge ["$\varrho_3^{(m-2)}$" inner sep=-3pt] (M5);
\path[thick, draw] (M4) edge [swap,"$\varrho_3^{(m-2)}$" inner sep=0pt](M6);
\path[thick, draw] (M1) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M1);
\path[thick, draw] (M2) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M2);
\path[thick, draw] (M3) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M3);
\path[thick, draw] (M4) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M4);
\end{tikzpicture}}
\]
To be more precise, an edge in the above given diagram from $\Pi(M_i)$ to $\Pi(M_j)$ labelled by a relation $\sigma$ means that for all $\bar{x}\in M_i$, $\bar{y}\in M_j$, we have that $(\bar{x},\bar{y})\in E(\langle M\rangle_{\Gamma^{(m)}})$ if and only if $(\Pi(\bar{x}),\Pi(\bar{y}))\in\sigma$. An immediate consequence of this observation is that $(M_1,\dots, M_6)$ is an equitable partition of $\langle M\rangle_{\Gamma^{(m)}}$, as was claimed before. Moreover, its partition matrix is given by:
\[
\begin{pmatrix}\def\arraystretch{1.5}
p_{33}^3 & p_{31}^3+p_{32}^3+p_{33}^3 & p_{34}^3 & p_{34}^3 & p_{24}^3 & 0 \\
p_{31}^3+p_{32}^3+p_{33}^3 & p_{33}^3 & p_{34}^3 & p_{34}^3 & p_{24}^3 & 0 \\
p_{34}^3 & p_{34}^3 & p_{33}^3 & p_{31}^3+p_{32}^3+p_{33}^3 & p_{23}^3 & p_{13}^3\\
p_{34}^3 & p_{34}^3 & p_{31}^3+p_{32}^3+p_{33}^3 & p_{33}^3 & p_{23}^3 & p_{13}^3 \\
p_{34}^2 & p_{34}^2 & p_{33}^2 & p_{33}^2 & 0 & 0\\
0 & 0 & p_{33}^1 & p_{33}^1 & 0 & 0
\end{pmatrix}
\]
Here, to save space, we wrote just $p_{ij}^k$ instead of $p_{ij}^k(m-2)$ in each case.
It is now easy to compute the number of arcs in $\langle M\rangle_{\Gamma^{(m)}}$. It is
\begin{equation}\label{arcscase1}
\setlength\arraycolsep{2pt}
\begin{pmatrix}
\theta_m(2\theta_m-1) \\ \theta_m(2\theta_m-1) \\ \theta_m(2\theta_m-1) \\ \theta_m(2\theta_m-1) \\ 2\theta_m-1 \\ 1
\end{pmatrix}^T
\begin{pmatrix}
\theta_m(\theta_m-1) & \theta_m^2 & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m & 0\\
\theta_m^2 & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m & 0\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m^2 & \theta_m-1 & 1\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m^2 & \theta_m(\theta_m-1) & \theta_m-1 & 1\\
\theta_m^2 & \theta_m^2 & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & 0 & 0\\
0 & 0 & \theta_m(2\theta_m-1) & \theta_m(2\theta_m-1) & 0 & 0
\end{pmatrix}
\begin{pmatrix}
1 \\
1 \\
1 \\
1 \\
1 \\
1
\end{pmatrix},
\end{equation}
where the vector on the left hand side of this expression consists of the cardinalities of the $M_i$ ($i=1,\dots,6$).
In principle we know now the number of arcs in $\langle M\rangle_{\Gamma^{(m)}}$, but instead of computing this number outright, we stop at this point and start our consideration of the second type of embeddings of $\Delta$ into $\Gamma^{(m)}$:
Let $\kappa$ be an embedding of $\Delta$ into $\Gamma^{(m)} $ of the second kind. Since $\mathcal{C}^{(3)}$ is $3$-homogeneous, without loss of generality we may assume that $\bar{u}=\icol{\bar{0}\\\bar{0}}$, $\bar{w}=\icol{\bar{0}\\\bar{e}_1}$, and $\bar{v}=\icol{\bar{e}_1\\\bar{0}}$.
\begin{lemma}
Let $\bar{x}\in \mathbb{F}_2^{2m}$. Then $\bar{x}$ is a joint neighbour of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$ if and only if
\begin{enumerate}
\item $\bar{x}_2\neq\bar{0}$,
\item $\bar{x}_1^T\bar{x}_2 = 0$,
\item $\bar{x}_1(1)=\bar{x}_2(1)=0$.
\end{enumerate}
\end{lemma}
\begin{proof}
Clear.
\end{proof}
If we denote the set of joint neighbours of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$ by $N$, then as an immediate consequence we obtain that $\langle N\rangle_{\Gamma^{(m)}}$ is isomorphic to $\Gamma_1^{(m-1)}$. So in principle we can count the arcs in $\langle N\rangle_{\Gamma^{(m)}}$. However, in order to compare this number with the data computed in the first case it is more convenient if we give a description of $\langle N\rangle_{\Gamma^{(m)}}$ with respect to a suitable equitable partition: We partition $N$ into the following $6$ parts:
\begin{align*}
N_1 &= \{\bar{x}\in N\mid \bar{x}_1(m)=1, \bar{x}_2(m)=0\},\\
N_2 &= \{\bar{x}\in N\mid \bar{x}_1(m)=1, \bar{x}_2(m)=1\},\\
N_3 &= \{\bar{x}\in N\mid \bar{x}_1(m)=0, \bar{x}_2(m)=0\},\\
N_4 &= \{\bar{x}\in N\mid \bar{x}_1(m)=0, \bar{x}_2(m)=1,(\bar{x}_2(1),\dots,\bar{x}_2(m-1))\neq\bar{0}^T\},\\
N_5 &= \{\bar{x}\in N\mid \bar{x}_1(m)=0, \bar{x}_2(m)=1, \bar{x}_1\neq\bar{0}, (\bar{x}_2(1),\dots,\bar{x}_2(m-1))=\bar{0}^T\},\\
N_6 &= \{\bar{x}\in N\mid \bar{x}_1(m)=0, \bar{x}_2(m)=1, \bar{x}_1=\bar{0}, (\bar{x}_2(1),\dots,\bar{x}_2(m-1))=\bar{0}^T\}.
\end{align*}
As before, we examine what $\langle N\rangle_{\Gamma^{(m)}}$ looks like when projected by the projection $\Pi$ from \eqref{projection}. First we observe that the restrictions of $\Pi$ to the classes $N_i$ ($i=1,\dots,6$) are all one-to-one. The projection of $\langle N\rangle_{\Gamma^{(m)}}$ with respect to the projection $\Pi$ in $\mathcal{C}^{(m)}$ is given in the following diagram:
\[\scalebox{0.90}{
\begin{tikzpicture}[node distance=50mm, terminal/.style={rounded rectangle,minimum size=6mm,draw},tight/.style={inner sep=-2pt}]
\node (M5) [terminal,anchor=center] {$\Pi(N_5)=S_{m-2}\setminus\{\bar{0}\}$};
\node (M1) [terminal, above left= of M5,anchor=center] {$\Pi(N_1)=Q_{m-2}\setminus S_{m-2}$};
\node (M2) [terminal, above right= of M5,anchor=center] {$\Pi(N_2)=\mathbb{F}_2^{2m-2}\setminus Q_{m-1}$};
\node (M3) [terminal, below right= of M5,anchor=center] {$\Pi(N_3)=Q_{m-2}\setminus S_{m-2}$};
\node (M4) [terminal, below left= of M5,anchor=center] {$\Pi(N_4)=Q_{m-2}\setminus S_{m-2}$};
\node (M6) [terminal, below= 50mm of M5,anchor=center] {$\Pi(N_6)=\{\bar{0}\}$};
\path[thick, draw] (M1) edge ["$\varrho_4^{(m-2)}$" inner sep=0pt] (M5);
\path[thick, draw] (M1) edge [out=-60,in=160, "$\varrho_3^{(m-2)}$" inner sep=-1pt,near end,swap](M3);
\path[thick, draw] (M1) edge [swap, "$\varrho_4^{(m-2)}$"](M4);
\path[thick, draw] (M1) edge ["$\varrho_1^{(m-2)}\cup\varrho_2^{(m-2)}\cup\varrho_3^{(m-2)}$"](M2);
\path[thick, draw] (M2) edge [out=-120,in=20, "$\varrho_3^{(m-2)}$" tight, near start](M4);
\path[thick, draw] (M2) edge [swap, "$\varrho_3^{(m-2)}$" inner sep=-3pt] (M5);
\path[thick, draw] (M2) edge ["$\varrho_4^{(m-2)}$"](M3);
\path[thick, draw] (M3) edge [swap, "$\varrho_3^{(m-2)}$" inner sep=0pt] (M5);
\path[thick, draw] (M3) edge ["$\varrho_3^{(m-2)}$" tight](M6);
\path[thick, draw] (M3) edge ["$\varrho_1^{(m-2)}\cup\varrho_2^{(m-2)}\cup\varrho_3^{(m-2)}$"] (M4);
\path[thick, draw] (M4) edge ["$\varrho_3^{(m-2)}$" inner sep=-3pt] (M5);
\path[thick, draw] (M4) edge [swap,"$\varrho_3^{(m-2)}$" inner sep=0pt](M6);
\path[thick, draw] (M1) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M1);
\path[thick, draw] (M2) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M2);
\path[thick, draw] (M3) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M3);
\path[thick, draw] (M4) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M4);
\end{tikzpicture}}
\]
Again, an edge from $\Pi(N_i)$ to $\Pi(N_j)$ labelled by $\sigma$ means that for all $\bar{x}\in N_i$ and for all $\bar{y}\in N_j$ we have $(\bar{x},\bar{y})\in E(\langle N\rangle_{\Gamma^{(m)}})$ if and only if $(\Pi(\bar{x}),\Pi(\bar{y}))\in\sigma$. From the diagram we may conclude that $(N_1,\dots, N_6)$ is an equitable partition of $\langle N\rangle_{\Gamma^{(m)}}$. Its partition matrix is given by:
\[
\begin{pmatrix}
p_{33}^3 & p_{41}^3+p_{42}^3+p_{43}^3 & p_{33}^3 & p_{34}^3 & p_{24}^3 & 0 \\
p_{31}^4+p_{32}^4 + p_{33}^4 & p_{43}^4 & p_{34}^4 & p_{33}^4 & p_{23}^4 & 0 \\
p_{33}^3 & p_{44}^3 & p_{33}^3 & p_{31}^3+p_{32}^3+p_{33}^3 & p_{23}^3 & p_{13}^3\\
p_{34}^3 & p_{43}^3 & p_{31}^3+p_{32}^3+p_{33}^3 & p_{33}^3 & p_{23}^3 & p_{13}^3\\
p_{34}^2 & p_{43}^2 & p_{33}^2 & p_{33}^2 & 0 & 0 \\
0 & 0 & p_{33}^1 & p_{33}^1 & 0 & 0 \\
\end{pmatrix}
\]
Again, to save space, we wrote just $p_{ij}^k$ instead of $p_{ij}^k(m-2)$ in each case.
Thus, the number of arcs in $\langle N \rangle_{\Gamma^{(m)}}$ is equal to
\[
\setlength\arraycolsep{2pt}
\begin{pmatrix}
\theta_m(2\theta_m-1) \\ \theta_m(2\theta_m-1) \\ \theta_m(2\theta_m-1) \\ \theta_m(2\theta_m-1) \\ 2\theta_m-1 \\ 1
\end{pmatrix}^T
\begin{pmatrix}
\theta_m(\theta_m-1) & \theta_m^2 & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m & 0\\
\theta_m^2 & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m & 0\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m^2 & \theta_m-1 & 1\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m^2 & \theta_m(\theta_m-1) & \theta_m-1 & 1\\
\theta_m^2 & \theta_m^2 & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & 0 & 0\\
0 & 0 & \theta_m(2\theta_m-1) & \theta_m(2\theta_m-1) & 0 & 0
\end{pmatrix}
\begin{pmatrix}
1 \\
1 \\
1 \\
1 \\
1 \\
1
\end{pmatrix},
\]
where the vector on the left hand side consists of the cardinalities of the $N_i$ ($i=1,\dots,6$). However, this is the same expression as in \eqref{arcscase1}.
To sum up, $\#(\Gamma^{(m)},\mathbb{T}_3,\kappa)$ does not depend on the embedding $\kappa$. In other words, $\Gamma^{(m)}$ is $\mathbb{T}_3$-regular.
\subsubsection*{Proof of $\mathbb{T}_4$-regularity}
Let $\mathbb{T}_4$ be given by the following labelled diagram:
\[
\mathbb{T}_4\colon\quad\begin{tikzpicture}[scale=0.4,baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexMath
\SetVertexLabelOut
\Vertex[a=0*72-90,d=3,Lpos=0*72-90,Ldist=0,L=b]{B}
\Vertex[a=1*72-90,d=3,Lpos=1*72-90,Ldist=0,L=c]{C}
\Vertex[a=2*72-90,d=3,Lpos=2*72-90,Ldist=0,L=d]{D}
\Vertex[a=3*72-90,d=3,Lpos=3*72-90,Ldist=0,L=e]{E}
\Vertex[a=4*72-90,d=3,Lpos=4*72-90,Ldist=0,L=a]{A}
\Edges(A,E,B,D,C,E,D,A)
\AddVertexColor{black}{A,B,C}
\end{tikzpicture}
\]
Up to symmetries of $\mathbb{T}_4$ and of $\Gamma^{(m)}$ there are $3$ types of embeddings of $\Delta=\overline{K}_3$ into $\Gamma^{(m)}$: Fix an embedding $\kappa\colon\Delta\hookrightarrow\Gamma^{(m)}$. If we assume that $\kappa\colon a\mapsto\bar{u}, b\mapsto\bar{v}, c\mapsto\bar{w}$, then the subcolorgraph of $\mathcal{C}^{(m)}$ induced by $\{\bar{u},\bar{v},\bar{w}\}$ is one of the following:
\begin{align*}
(1) &:
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,Lpos=0*120-90,Ldist=0,L=\bar{w}]{c}
\Vertex[a=1*120-90,d=1,Lpos=1*120-90,Ldist=0,L=\bar{v}]{a}
\Vertex[a=2*120-90,d=1,Lpos=2*120-90,Ldist=0,L=\bar{u}]{z}
\Edge[labelstyle={MyLabel,swap}, label=$\varrho_2^{(m)}$](z)(a)
\Edge[labelstyle={MyLabel}, label={$\varrho_2^{(m)}$}](z)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_2^{(m)}$}](a)(c)
\end{tikzpicture}
& (2) &:
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,Lpos=0*120-90,Ldist=0,L=\bar{w}]{c}
\Vertex[a=1*120-90,d=1,Lpos=1*120-90,Ldist=0,L=\bar{v}]{a}
\Vertex[a=2*120-90,d=1,Lpos=2*120-90,Ldist=0,L=\bar{u}]{z}
\Edge[labelstyle={MyLabel,swap}, label=$\varrho_2^{(m)}$](z)(a)
\Edge[labelstyle={MyLabel}, label={$\varrho_4^{(m)}$}](z)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_4^{(m)}$}](a)(c)
\end{tikzpicture}
& (3) &:
\begin{tikzpicture}[baseline={(current bounding box.center)}]
\tikzset{VertexStyle/.style = {shape = circle, draw,minimum size = 2pt, inner sep = 2pt}}
\SetVertexLabelOut
\SetVertexMath
\SetVertexLabel
\Vertex[a=0*120-90,d=1,Lpos=0*120-90,Ldist=0,L=\bar{w}]{c}
\Vertex[a=1*120-90,d=1,Lpos=1*120-90,Ldist=0,L=\bar{v}]{a}
\Vertex[a=2*120-90,d=1,Lpos=2*120-90,Ldist=0,L=\bar{u}]{z}
\Edge[labelstyle={MyLabel,swap}, label=$\varrho_4^{(m)}$](z)(a)
\Edge[labelstyle={MyLabel}, label={$\varrho_4^{(m)}$}](z)(c)
\Edge[labelstyle={MyLabel,swap}, label={$\varrho_4^{(m)}$}](a)(c)
\end{tikzpicture}
\end{align*}
\paragraph{About the first type of embeddings:} Since $\mathcal{C}^{(m)}$ is $3$-homogeneous, without loss of generality we can assume that $\bar{u}=\icol{\bar{0}\\\bar{0}}$, $\bar{v}=\icol{\bar{e}_1\\\bar{0}}$, $\bar{w}=\icol{\bar{e}_2\\\bar{0}}$.
\begin{lemma}
Let $\bar{x}\in \mathbb{F}_2^{2m}$. Then $\bar{x}$ is a joint neighbour of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$ if and only if
\begin{enumerate}
\item $\bar{x}_2\neq\bar{0}$,
\item $\bar{x}_1^T\bar{x}_2 = 0$,
\item $\bar{x}_2(1)=\bar{x}_2(2)=0$.
\end{enumerate}
\end{lemma}
\begin{proof}
Clear.
\end{proof}
As before, the set $M$ of joint neighbours of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$ is subdivided into subsets:
\begin{align*}
M_1 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_1(2)=0, \bar{x}_2(1)=0, \bar{x}_2(2)=0\},\\
M_2 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_1(2)=1, \bar{x}_2(1)=0, \bar{x}_2(2)=0\},\\
M_3 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_1(2)=0, \bar{x}_2(1)=0, \bar{x}_2(2)=0\},\\
M_4 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_1(2)=1, \bar{x}_2(1)=0, \bar{x}_2(2)=0\}.
\end{align*}
Consider the projection
\begin{align} \label{projction2}
\Pi\colon\mathbb{F}_2^{2m}\twoheadrightarrow\mathbb{F}_2^{2m-4},\quad
\begin{pmatrix}
\bar{x}_1\\\bar{x}_2
\end{pmatrix}\mapsto
\begin{pmatrix}
\tilde{x}_1\\\tilde{x}_2
\end{pmatrix},
\end{align}
where $\tilde{x}_i$ is the unique vector from $\mathbb{F}_2^{m-2}$, such that $\bar{x}_i=\icol{\bar{x}_i(1)\\\bar{x}_i(2)\\\tilde{x}_i}$ ($i=1,2$). Note that for each $i\in\{1,\dots,4\}$ the restriction $\Pi\mathord{\upharpoonright}_{M_i}$ is one-to-one. The projection of $\langle M \rangle_{\Gamma^{(m)}}$ under $\Pi$ looks as follows:
\[\scalebox{0.75}{
\begin{tikzpicture}[node distance=20mm, terminal/.style={rounded rectangle,minimum size=6mm,draw},tight/.style={inner sep=-2pt}]
\node (M5) [anchor=center]{};
\node (M1) [terminal, above left= of M5,anchor=south east] {$\Pi(M_1)=Q_{m-2}\setminus S_{m-2}$};
\node (M2) [terminal, above right= of M5,anchor=south west] {$\Pi(M_2)=Q_{m-2}\setminus S_{m-2}$};
\node (M3) [terminal, below right= of M5,anchor=north west] {$\Pi(M_3)=Q_{m-2}\setminus S_{m-2}$};
\node (M4) [terminal, below left= of M5,anchor=north east] {$\Pi(M_4)=Q_{m-2}\setminus S_{m-2}$};
\path[thick, draw] (M1) edge ["$\varrho_3^{(m-2)}$" inner sep=-1pt,near end,swap](M3);
\path[thick, draw] (M1) edge [swap, "$\varrho_3^{(m-2)}$"](M4);
\path[thick, draw] (M1) edge ["$\varrho_3^{(m-2)}$"](M2);
\path[thick, draw] (M2) edge ["$\varrho_3^{(m-2)}$" tight, near start](M4);
\path[thick, draw] (M2) edge ["$\varrho_3^{(m-2)}$"](M3);
\path[thick, draw] (M3) edge ["$\varrho_3^{(m-2)}$"] (M4);
\path[thick, draw] (M1) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M1);
\path[thick, draw] (M2) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M2);
\path[thick, draw] (M3) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M3);
\path[thick, draw] (M4) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M4);
\end{tikzpicture}}
\]
Thus, $(M_1,M_2,M_3,M_4)$ is an equitable partition of $\langle M \rangle_{\Gamma^{(m)}}$. Its partition matrix is given by:
\[
\begin{pmatrix}
p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2)\\
p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2)\\
p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2)\\
p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2) & p_{33}^3(m-2)
\end{pmatrix}
\]
Thus, the number of arcs in $\langle M \rangle_{\Gamma^{(m)}}$ is given by:
\[
\begin{pmatrix}
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)
\end{pmatrix}^T
\begin{pmatrix}
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)
\end{pmatrix}
\begin{pmatrix}
1\\
1\\
1\\
1
\end{pmatrix}.
\]
\paragraph{About the second type of embeddings:} Since $\mathcal{C}^{(m)}$ is $3$-homogeneous, without loss of generality we can assume that $\bar{u}=\icol{\bar{0}\\\bar{0}}$, $\bar{v}=\icol{\bar{e}_1\\\bar{0}}$, $\bar{w}=\icol{\bar{e}_2\\\bar{e}_2}$.
\begin{lemma}
Let $\bar{x}\in \mathbb{F}_2^{2m}$. Then $\bar{x}$ is a joint neighbour of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$ if and only if
\begin{enumerate}
\item $\bar{x}_2\neq\bar{0}$,
\item $\bar{x}_1^T\bar{x}_2 = 0$,
\item $\bar{x}_2(1)=0$,
\item $\bar{x}_1(2)\neq\bar{x}_2(2)$,
\item $\bar{x}_2(2)=1\Longrightarrow(\bar{x}_2(3),\dots,\bar{x}_2(m))\neq\bar{0}^T$.
\end{enumerate}
\end{lemma}
\begin{proof}
Clear.
\end{proof}
As usually, we decompose the set $M$ of joint neighbours of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$:
\begin{align*}
M_1 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_1(2)=0, \bar{x}_2(1)=0, \bar{x}_2(2)=1\},\\
M_2 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_1(2)=1, \bar{x}_2(1)=0, \bar{x}_2(2)=0\},\\
M_3 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_1(2)=0, \bar{x}_2(1)=0, \bar{x}_2(2)=1\},\\
M_4 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_1(2)=1, \bar{x}_2(1)=0, \bar{x}_2(2)=0\},
\end{align*}
and show that $(M_1,M_2,M_3,M_4)$ forms an equitable partition of $\langle M\rangle_{\Gamma^{(m)}}$. For each $i\in\{1,\dots,4\}$, the restriction of the projection $\Pi$ from \eqref{projction2} to $M_i$ is one-to-one. The projection of $\langle M \rangle_{\Gamma^{(m)}}$ under $\Pi$ in $\mathcal{C}^{(m-2)}$ is given by
\[\scalebox{0.75}{
\begin{tikzpicture}[node distance=20mm, terminal/.style={rounded rectangle,minimum size=6mm,draw},tight/.style={inner sep=-2pt}]
\node(M5)[anchor=center]{};
\node (M1) [terminal, above left= of M5,anchor=south east] {$\Pi(M_1)=Q_{m-2}\setminus S_{m-2}$};
\node (M2) [terminal, above right= of M5,anchor=south west] {$\Pi(M_2)=Q_{m-2}\setminus S_{m-2}$};
\node (M3) [terminal, below right= of M5,anchor=north west] {$\Pi(M_3)=Q_{m-2}\setminus S_{m-2}$};
\node (M4) [terminal, below left= of M5,anchor=north east] {$\Pi(M_4)=Q_{m-2}\setminus S_{m-2}$};
\path[thick, draw] (M1) edge ["$\varrho_3^{(m-2)}$" inner sep=-1pt,near end,swap](M3);
\path[thick, draw] (M1) edge [swap, "$\varrho_4^{(m-2)}$"](M4);
\path[thick, draw] (M1) edge ["$\varrho_4^{(m-2)}$"](M2);
\path[thick, draw] (M2) edge ["$\varrho_3^{(m-2)}$" tight, near start](M4);
\path[thick, draw] (M2) edge ["$\varrho_4^{(m-2)}$"](M3);
\path[thick, draw] (M3) edge ["$\varrho_4^{(m-2)}$"] (M4);
\path[thick, draw] (M1) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M1);
\path[thick, draw] (M2) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M2);
\path[thick, draw] (M3) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M3);
\path[thick, draw] (M4) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M4);
\end{tikzpicture}}
\]
Consequently, $(M_1,\dots,M_4)$ is an equitable partition of $\langle M \rangle_{\Gamma^{(m)}}$. Its partition matrix is given by:
\[
\begin{pmatrix}
p_{33}^3(m-2) & p_{34}^3(m-2) & p_{33}^3(m-2) & p_{34}^3(m-2)\\
p_{34}^3(m-2) & p_{33}^3(m-2) & p_{34}^3(m-2) & p_{33}^3(m-2)\\
p_{33}^3(m-2) & p_{34}^3(m-2) & p_{33}^3(m-2) & p_{34}^3(m-2)\\
p_{34}^3(m-2) & p_{33}^3(m-2) & p_{34}^3(m-2) & p_{33}^3(m-2)
\end{pmatrix}
\]
Thus, the number of arcs in $\langle M \rangle_{\Gamma^{(m)}}$ is equal to
\[
\begin{pmatrix}
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)
\end{pmatrix}^T
\begin{pmatrix}
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)
\end{pmatrix}
\begin{pmatrix}
1\\
1\\
1\\
1
\end{pmatrix}.
\]
\paragraph{About the third type of embeddings:} Since $\mathcal{C}^{(m)}$ is $3$-homogeneous, without loss of generality we can assume that $\bar{u}=\icol{\bar{0}\\\bar{0}}$, $\bar{v}=\icol{\bar{e}_1\\\bar{e}_1}$, $\bar{w}=\icol{\bar{e}_1+\bar{e}_2\\\bar{e}_2}$.
\begin{lemma}
Let $\bar{x}\in \mathbb{F}_2^{2m}$. Then $\bar{x}$ is a joint neighbour of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$ if and only if
\begin{enumerate}
\item $\bar{x}_2\neq\bar{0}$,
\item $\bar{x}_1^T\bar{x}_2 = 0$,
\item $\bar{x}_1(1)\neq\bar{x}_2(1)$,
\item $\bar{x}_2(2)=\bar{x}_1(2)+\bar{x}_1(1)$,
\item $\bar{x}_2(1)=1\Longrightarrow (\bar{x}_2(2),\dots,\bar{x}_2(m))\neq\bar{0}^T$,
\item $\bar{x}_2(2)=1\Longrightarrow (\bar{x}_2(1),\bar{x}_2(3),\dots,\bar{x}_2(m))\neq\bar{0}^T$,
\item $\bar{x}_2(1)=\bar{x}_2(2)=1\Longrightarrow(\bar{x}_2(3),\dots,\bar{x}_2(m))\neq\bar{0}^T$.
\end{enumerate}
\end{lemma}
\begin{proof}
Clear.
\end{proof}
As usually, we decompose the set $M$ of joint neighbours of $\{\bar{u},\bar{v},\bar{w}\}$ in $\Gamma^{(m)}$:
\begin{align*}
M_1 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_1(2)=0, \bar{x}_2(1)=1, \bar{x}_2(2)=0\},\\
M_2 &= \{\bar{x}\in M\mid \bar{x}_1(1)=0, \bar{x}_1(2)=1, \bar{x}_2(1)=1, \bar{x}_2(2)=1\},\\
M_3 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_1(2)=0, \bar{x}_2(1)=0, \bar{x}_2(2)=1\},\\
M_4 &= \{\bar{x}\in M\mid \bar{x}_1(1)=1, \bar{x}_1(2)=1, \bar{x}_2(1)=0, \bar{x}_2(2)=0\}.
\end{align*}
For each $i\in\{1,\dots,4\}$, the restriction of the projection $\Pi$ from \eqref{projction2} to $M_i$ is one-to-one. The projection of $\langle M\rangle_{\Gamma^{(m)}}$ under $\Pi$ in $\mathcal{C}^{(m-2)}$ is given by
\[\scalebox{0.75}{
\begin{tikzpicture}[node distance=20mm, terminal/.style={rounded rectangle,minimum size=6mm,draw},tight/.style={inner sep=-2pt}]
\node (M5) [anchor=center]{};
\node (M1) [terminal, above left= of M5,anchor=south east] {$\Pi(M_1)=Q_{m-2}\setminus S_{m-2}$};
\node (M2) [terminal, above right= of M5,anchor=south west] {$\Pi(M_2)=Q_{m-2}\setminus S_{m-2}$};
\node (M3) [terminal, below right= of M5,anchor=north west] {$\Pi(M_3)=Q_{m-2}\setminus S_{m-2}$};
\node (M4) [terminal, below left= of M5,anchor=north east] {$\Pi(M_4)=Q_{m-2}\setminus S_{m-2}$};
\path[thick, draw] (M1) edge ["$\varrho_4^{(m-2)}$" inner sep=-1pt,near end,swap](M3);
\path[thick, draw] (M1) edge [swap, "$\varrho_4^{(m-2)}$"](M4);
\path[thick, draw] (M1) edge ["$\varrho_4^{(m-2)}$"](M2);
\path[thick, draw] (M2) edge ["$\varrho_4^{(m-2)}$" tight, near start](M4);
\path[thick, draw] (M2) edge ["$\varrho_4^{(m-2)}$"](M3);
\path[thick, draw] (M3) edge ["$\varrho_4^{(m-2)}$"] (M4);
\path[thick, draw] (M1) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M1);
\path[thick, draw] (M2) edge [loop above, min distance=20mm,in=45,out=135,"$\varrho_3^{(m-2)}$"] (M2);
\path[thick, draw] (M3) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M3);
\path[thick, draw] (M4) edge [loop below, min distance=20mm,in=-45,out=-135,swap, "$\varrho_3^{(m-2)}$"] (M4);
\end{tikzpicture}}
\]
Consequently, $(M_1,M_2,M_3,M_4)$ forms an equitable partition of $\langle M \rangle_{\Gamma^{(m)}}$. Its partition matrix is given by:
\[
\begin{pmatrix}
p_{33}^3(m-2) & p_{34}^3(m-2) & p_{34}^3(m-2) & p_{34}^3(m-2)\\
p_{34}^3(m-2) & p_{33}^3(m-2) & p_{34}^3(m-2) & p_{34}^3(m-2)\\
p_{34}^3(m-2) & p_{34}^3(m-2) & p_{33}^3(m-2) & p_{34}^3(m-2)\\
p_{34}^3(m-2) & p_{34}^3(m-2) & p_{34}^3(m-2) & p_{33}^3(m-2)
\end{pmatrix}
\]
Thus, the number of arcs in $\langle M \rangle_{\Gamma^{(m)}}$ is equal to
\[
\begin{pmatrix}
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)\\
\theta_m(2\theta_m-1)
\end{pmatrix}^T
\begin{pmatrix}
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)\\
\theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1) & \theta_m(\theta_m-1)
\end{pmatrix}
\begin{pmatrix}
1\\
1\\
1\\
1
\end{pmatrix}.
\]
Note that in all three cases we counted the same number of arcs in $\langle M \rangle_{\Gamma^{(m)}}$. Thus, the number $\#(\Gamma^{(m)},\mathbb{T}_4,\kappa)$ does not depend on $\kappa$. In other words, $\Gamma^{(m)}$ is $\mathbb{T}_4$-regular.
This finishes the proof that $\Gamma^{(m)}$ is $(3,5)$-regular.\qed
\section{Outlook}
In \cite{Iva94}, A.V.~Ivanov described another series of $(2,4)$-regular graphs whose existence is related to the unique (up to equivalence) non-degenerate quadratic form of Witt-index $m-1$ on $\mathbb{F}_2^{2m}$. Let $\widehat{q}^{(m)}$ be such a form.
By $\widehat{Q}_m$ we denote the quadric defined by $\widehat{q}^{(m)}$, and by $\widehat{S}_m$ a maximal singular subspace. The bilinear form associated with $\widehat{q}^{(m)}$ is given by $[\bar{x},\bar{y}]^{(m)}=\widehat{q}^{(m)}(\bar{x}+\bar{y})+\widehat{q}^{(m)}(\bar{x})+\widehat{q}^{(m)}(\bar{y})$. The given data give rise to the following five binary relations on $\mathbb{F}_2^{2m}$:
\begin{align*}
\sigma_1^{(m)} &= \{(\bar{v},\bar{w})\mid \bar{v}=\bar{w}\}, & \sigma_2^{(m)} &=\{(\bar{v},\bar{w})\mid \bar{v}+\bar{w}\in\widehat{S}_m\setminus\{\bar{0}\}\},\\
\sigma_3^{(m)} &=\{(\bar{v},\bar{w})\mid\bar{v}+\bar{w}\in\widehat{Q}_m\setminus\widehat{S}_m\}, & \sigma_4^{(m)} &=\{(\bar{v},\bar{w})\mid \bar{v}+\bar{w}\in\widehat{S}_m^\perp\setminus\widehat{S}_m\},\\
\sigma_5^{(m)} &=\{(\bar{v},\bar{w})\mid \bar{v}+\bar{w}\in \mathbb{F}_2^{2m}\setminus(\widehat{Q}_m\cup\widehat{S}_m^\perp)\}.
\end{align*}
It is known (cf.~\cite{Iva94}) that the relational structure $\widehat{\mathcal{C}}^{(m)}\coloneqq (\mathbb{F}_2^{2m},\sigma_1^{(m)},\sigma_2^{(m)},\sigma_3^{(m)},\sigma_4^{(m)},\sigma_5^{(m)})$ is a coherent configuration and that the graph $\widehat{\Gamma}^{(m)}\coloneqq (\mathbb{F}_2^{2m},\sigma_2^{(m)}\cup\sigma_5^{(m)})$ is $(2,4)$-regular. In the course of our research on the Brouwer--Ivanov--Klin graphs we also had a look at the graphs $\widehat{\Gamma}^{(m)}$. So far we were able to show that for all $m\ge 5$ we have that:
\begin{enumerate}
\item $\operatorname{Aut}(\widehat{\Gamma}^{(m)})=\operatorname{Aut}(\widehat{\mathcal{C}}^{(m)})$,
\item $\widehat{\mathcal{C}}^{(m)}$ is $3$-homogeneous,
\item $\widehat{\Gamma}^{(m)}$ is $(3,5)$-regular,
\item $\widehat{\Gamma}^{(m)}$ is not $2$-homogeneous.
\end{enumerate}
The proof is postponed to a subsequent publication, as it uses different techniques and would considerably increase the length of this paper.
\end{document}
|
\begin{document}
\title{A Two-stage Method for Inverse Medium Scattering}
\begin{abstract}
We present a novel numerical method to the time-harmonic inverse medium scattering
problem of recovering the refractive index from near-field scattered data. The approach
consists of two stages, one pruning step of detecting the scatterer support, and one
resolution enhancing step with mixed regularization. The first step is strictly direct
and of sampling type, and faithfully detects the scatterer support. The second step is an
innovative application of nonsmooth mixed regularization, and it accurately resolves the
scatterer sizes as well as intensities. The model is efficiently solved by a semi-smooth
Newton-type method. Numerical results for two- and three-dimensional examples indicate
that the approach is accurate, computationally efficient, and robust with respect to data
noise.
\\
\textbf{Key words}: inverse medium scattering problem, reconstruction algorithm, sampling
method, mixed regularization, semi-smooth Newton method
\end{abstract}
\section{Introduction}
In this work we study the inverse medium scattering problem (IMSP) of determining the
refractive index from near-field measurements for time-harmonic wave propagation
\cite{ColtonKress:1998}. Consider a homogeneous background space $\mathbb{R}^\mathrm{d}$
$(\mathrm{d}=2,3)$ that contains some inhomogeneous media occupying a bounded domain
$\Omega$. Let $u^{inc}=e^{ik\,x\cdot d}$ be an incident plane wave, with the incident
direction $d\in \mathbb{S}^{\mathrm{d}-1}$ and the wave number $k$. Then the total field
$u$ induced by the inhomogeneous medium scatterers satisfies the Helmholtz
equation~\cite{ColtonKress:1998}
\begin{equation}\label{eq:acoustic}
\Delta u+k^2\,n^2(x) u=0,
\end{equation}
where the function $n(x)$ is the refractive index, i.e., the ratio of the wave speed in
the homogeneous background to that in the concerned medium at location $x$. The model
describes not only time-harmonic acoustic wave propagation, but also electromagnetic wave
propagation in either the transverse magnetic or transverse electric modes
\cite[Appendix]{ItoJinZou:2011a}.
Next we let $\eta=(n^2-1)k^2$, which combines the relative refractive index $n^2-1$ with
the wave number $k$. Clearly the coefficient $\eta$ characterizes the material properties
of the inhomogeneity and is supported in the scatterer $\Omega \subset
\mathbb{R}^\mathrm{d}$. We denote by $I=\eta u$ the induced current, by $G(x,y)$ the
fundamental solution to the Helmholtz equation in the homogeneous background, i.e.,
\begin{equation*}\displaystyle
G(x,y)=\left\{
\begin{aligned}\frac{i}{4}H^1_0(k\,|x-y|), &\quad\mathrm{d} =2,\\
\frac{1}{4\pi}\frac{e^{ik|x-y|}}{|x-y|},&\quad \mathrm{d}=3,\\
\end{aligned}\right.
\end{equation*}
where the function $H_0^1$ denotes the Hankel function of the first kind of order zero.
Then we can express the total field $u$ as follows \cite{ColtonKress:1998}
\begin{equation} \label{int}
u=u^{inc}+\int_\Omega G(x,y)I(y)\,dy\,.
\end{equation}
By multiplying both sides of equation \eqref{int} by $\eta$, we arrive at the following
integral equation of the second kind for the induced current $I$
\begin{equation}\label{eqn:indcur}
I(x) = \eta u^{inc} + \eta \int_\Omega G(x,y) I (y)dy.
\end{equation}
The reformulation \eqref{eqn:indcur} is numerically amenable since all computation is
restricted to the scatterer support $\Omega$, which is much smaller than the whole space
$\mathbb{R}^\mathrm{d}$. Hence, the complexity is also very low. We will approximate the
solution to \eqref{eqn:indcur} by the mid-point rule (cf. Appendix \ref{app:int}).
Next we let $u^s= u-u^{inc}$ be the scattered field, which is measured on a closed
curve/surface $\Gamma$ enclosing the scatterers $\Omega$. Then the IMSP is to retrieve
the refractive index $n^2$ or equivalently the coefficient $\eta$ from (possibly very
noisy) measurements of the scattered field $u^s$, corresponding to one or several
incident fields. In the literature, a number of reconstruction techniques for the IMSP
have been developed. These methods can be roughly divided into two groups: support
detection and coefficient estimate. The former group (including MUSIC
\cite{Devaney:2006,Cheney:2001}, linear sampling method \cite{ColtonKirsch:1996,
CakoniColtonMonk:2011} and factorization method \cite{Kirsch:1998} etc.) usually is of
sampling type, and aims at detecting the scatterer support efficiently. The latter group
generally relies on the idea of regularization (including Tikhonov regularization
\cite{BaoLi:2005,RachowiczZdunek:2011}, iterative regularization method
\cite{Hohage:2001, BakushinskyKokurinKozlov:2005, Hohage:2006}, contrast source inversion
\cite{BergBroekhovenAbubakar:1999}, subspace regularization \cite{ChenZhong:2009} and
propagation-backpropagation method \cite{Vogeler:2003}), and aims at retrieving a
distributed estimate of the index function. These approaches generally are more
expensive, but their results may profile the inhomogeneities more precisely.
In this paper, we shall develop a novel two-stage numerical method for solving the IMSP.
The first step employs a direct sampling method, recently developed in
\cite{ItoJinZou:2011a}, to detect the scatterer support $\Omega$ stably and accurately.
It is based on the following index function
\begin{equation} \label{indx}
\Phi(x_p)=\frac{|\langle u^s, \,G(\cdot,x_p)\rangle_{L^2(\Gamma)}|}{
\|u^s\|_{L^2(\Gamma)}\|G(\cdot,x_p)\|_{L^2(\Gamma)}}\quad \forall \,x_p\in\widetilde{\Omega},
\end{equation}
where $\widetilde{\Omega}\supset\Omega$ is a sampling domain. Numerically, the method is
strictly direct and does not incur any linear matrix operations. The method can detect
reliably the scatterer support $\Omega$ even in the presence of a large amount of data
noises \cite{ItoJinZou:2011a}. In particular, a (much smaller) computational domain
$D\subset\widetilde{\Omega}$ can be determined from the index $\Phi$, and furthermore the
restriction $\Phi|_D$ may serve as a first approximation to the coefficient $\eta$.
The second step enhances the image resolution by a novel application of (nonsmooth) mixed
regularization. With the approximation $\Phi|_D$ from the sampling step at hand, equation
\eqref{eqn:indcur} gives an approximate induced current $\widehat{I}$ as well as an
approximate total field $\widehat{u}$. Then we seek a regularized solution $\eta$ to the
linearized scattering equation
\begin{equation} \label{scat}
\int_D G(x,y)\,\widehat{u}(y)\,\eta(y)\,dy=u^s(x),
\end{equation}
by an innovative regularization incorporating both $L^1$ and $H^1$ penalties. The $L^1$
penalty promotes the sparsity of the solution \cite{Tibshirani:1996,
CandesRombergTao:2006, Donoho:2006}. However, the estimate tends to be very spiky with
the $L^1$ penalty used alone. Meanwhile, the conventional $H^1$ penalty can only yield
globally smooth but often overly diffusive profiles. In this work we shall propose a
novel mixed model that consists of a suitable combination of the $L^1$ and $H^1$
penalties. As we will see, this mixed model produces well clustered and yet distributed
solutions, thereby overcoming the aforementioned drawbacks. It is the mixed model that
enables us to obtain a clear and accurate reconstruction of the inclusions: The
homogeneous background is vividly separated from the scatterers and both support and
intensity of the inclusions are accurately resolved.
Numerically, the $L^1$ penalty term gives rise to a nonsmooth optimality condition, which
renders its direct numerical treatment inconvenient. Fortunately, by using
complementarity functions, the optimality condition reduces to a coupled nonlinear system
for the sought-for coefficient $\eta$ and the Lagrangian multiplier, which is amenable to
efficient numerical solution. We shall develop an efficient and stable semi-smooth Newton
solver for the model via a primal-dual active-set strategy \cite{ItoKunisch:2008}.
Overall, the direct sampling method is very cheap and reduces the computational domain
$D$ in the mixed model (cf. \eqref{scat}) significantly, which in turn makes the
semi-smooth Newton method for the mixed model very fast. Hence, the proposed inverse
scattering method is computationally very efficient.
The rest of the paper is structured as follows. In Section \ref{sec:omega}, we recall a
novel direct sampling method for screening the scatterer support $\Omega$, and derive
thereby an initial guess to the coefficient $\eta$. Then in Section \ref{sec:tikh} we
develop an enhancement technique based on the idea of mixed regularization, and an
efficient semi-smooth Newton solver. Finally, we present numerical results for two- and
three-dimensional examples to demonstrate the accuracy and efficiency of the proposed
inverse scattering method.
\section{A direct sampling method}\label{sec:omega}
In this section, we describe a direct sampling method to determine the shape of the
scatterers, recently derived in \cite{ItoJinZou:2011a}. We only briefly recall the
derivation, but refer the readers to \cite{ItoJinZou:2011a} for more details. Consider a
circular curve $\Gamma$ ($d=2$) or a spherical surface $\Gamma$ ($d=3$). Let $G(x,x_p)$
be the fundamental solution in the homogeneous background. Then using the definitions of
the fundamental solutions $G(x,x_p)$ and $G(x,x_q)$ and Green's second identity, we
deduce
\begin{equation} \label{eqn:green}
2i\Im(G(x_p,x_q))=\int_{\Gamma}\left[\overline{G}(x,x_q)\frac{\partial G(x,x_p)}{\partial n}
-G(x,x_p)\frac{\partial \overline{G}(x,x_q)}{\partial n}\right]ds,
\end{equation}
where the points $x_p,x_q\in\Omega_\Gamma$, the domain enclosed by the boundary $\Gamma$.
Next we approximate the right hand side of identity \eqref{eqn:green} by means of the
Sommerfeld radiation condition for the Helmholtz equation, i.e.,
\begin{equation*}
\frac{\partial G(x,x_p)}{\partial n}=ikG(x,x_p) + \text{h.o.t.}
\end{equation*}
Consequently, we arrive at the following approximate relation
\begin{equation*}
\int_{\Gamma}G(x,x_p)\overline{G}(x,x_q)ds \approx k^{-1}\Im(G(x_p,x_q)),
\end{equation*}
which is valid if the points $x_p$ and $x_q$ are not close to the boundary $\Gamma$.
Now, we consider a sampling domain $\widetilde{\Omega}$ enclosing the scatterer support
$\Omega$. Upon dividing $\widetilde{\Omega}$ into small elements $\{\tau_j\}$, we may
approximate the integral in the scattering relation \eqref{int} by
\begin{equation} \label{eqn:appsc}
u^s(x)=\int_{\widetilde{\Omega}} G(x,y)I(y)dy\approx \sum_{j}w_j\,G(x,y_j),
\end{equation}
where the weight $w_j$ is given by $|\tau_j|I_j$ with $|\tau_j|$ being the volume of the
$j$th element $\tau_j$. The relation \eqref{eqn:appsc} is plausible if the induced
current $I$ is regular in each element and the elements $\{\tau_j\}$ are sufficiently
fine. It also admits a nice physical interpretation: the scattered field $u^s$ at any
fixed point $x\in\Gamma$ is a weighted average of that due to point scatterers located at
$\{y_j\}$.
Combining the preceding two relations yields
\begin{equation}\label{eqn:appind}
\int_\Gamma u^s(x)\overline{G}(x,x_p)ds\approx k^{-1}\sum_j w_j\Im(G(y_j,x_p)).
\end{equation}
Hence, if the sampling point $x_p$ is close to some point scatterer $y_j$, i.e.,
$y_j\in\Omega$, then $G(y_j,x_p)$ is nearly singular and takes a very large value,
contributing significantly to the sum in \eqref{eqn:appind}. Conversely, if the point
$x_p$ is far away from all the physical point scatterers, then the sum will be very
small due to the decay property of $G(y_j,x_p)$.
These facts lead us to the index function $\Phi(x_p)$ in \eqref{indx} for any $x_p$ in
the sampling region $\widetilde{\Omega}$. In practice, if a point $x_p$ satisfies
$\Phi(x_p)\approx 1$, then it likely lies within the support; whereas if
$\Phi(x_p)\approx 0$, then the point $x_p$ most probably lies outside the support. Hence
it serves as an indicator of the scatterer support. Consequently, we can determine a
domain $D\subset\widetilde{\Omega}$ as one approximate scatterer support, and moreover,
the restriction $\Phi|_D$ of the index $\Phi$ to the domain $D$ may be regarded as a
first approximation to the sought-for coefficient $\eta$. The subdomain $D$ may be chosen
as $D=\{x\in \widetilde{\Omega}: \Phi(x)\ge \mu \max_{x\in \widetilde{\Omega}} \Phi(x)\}$
with $\mu$ being a cut-off value, i.e., the union of elements whose index values are not
less than a specified fraction of the largest index value over the sampling region
$\widetilde{\Omega}$. This determination of subdomain $D$ will be adopted in our
numerical experiments.
This method is of sampling type (cf. \cite{Potthast:2006} for an overview of existing
sampling methods), and its flavor closely resembles \textit{multiple signal
classification} \cite{Schmidt:1986,Cheney:2001,Devaney:2006} and \textit{linear sampling
method} \cite{ColtonKirsch:1996,Kirsch:1998}. However, unlike these existing techniques,
it works with a few (e.g., one or two) incident waves, is highly tolerant with respect to
noise, and involves only computing inner products with fundamental solutions rather than
expensive matrix operations as in the other two techniques. The robustness of $\Phi$ is
attributed to the fact that the (high-frequency) noise is roughly orthogonal to the
(smooth) fundamental solutions.
\section{Mixed regularization}\label{sec:tikh}
The direct sampling method in Section \ref{sec:omega} extracts an accurate estimate $D$
to the scatterer support $\Omega$ as well as a reasonable initial guess to the medium
coefficient $\eta$, i.e., $\Phi|_D$. In this part we refine the approximation $\Phi|_D$
by exploiting the idea of nonsmooth mixed regularization. Given the approximation
$\Phi|_D$, we can compute the induced current $\widehat{I}$ via \eqref{eqn:indcur} for
each incident wave and the corresponding total field $\widehat{u}$ on the domain $D$ from
\eqref{int}. By substituting the approximation $\widehat{u}$ into equation \eqref{int},
we arrive at the following linearized problem
\begin{equation*}
\int_DG(x,y)\widehat{u}(y)\eta(y)dy = u^s(x)\,, \quad x\in\Gamma.
\end{equation*}
It is convenient to introduce a linear integral operator $K:L^2(D)\mapsto L^2(\Gamma)$
defined by
\begin{equation}\label{eqn:linint}
(K\eta)(x)=\int_DG(x,y)\widehat{u}(y)\,\eta(y)\,dy.
\end{equation}
We observe that the kernel $G(x,y)\widehat{u}(y)$ is smooth due to the analyticity of
fundamental functions $G(x,y)$ away from the singularities and standard Sobolev
smoothness of the total field $\widehat{u}(y)$ (following from elliptic regularity theory
\cite{ColtonKress:1998}). Hence, the linear operator $K:L^2(D)\mapsto L^2(\Gamma)$ is
compact. As a consequence, the linearized problem \eqref{eqn:linint} is ill-posed in the
sense that small perturbations in the data can lead to huge deviations in the solution,
according to the classical inverse theory \cite{TikhonovArsenin:1977}, which is
reminiscent of the severe ill-posedness of the IMSP, and its stable and accurate
numerical solution calls for regularization techniques.
We determine an enhanced estimate of the coefficient $\eta$ from the linearized problem
\eqref{eqn:linint} by solving the following variational problem:
\begin{equation} \label{mixed}
\min\quad \frac{1}{2}\int_\Gamma|K\eta -u^s|^2ds +
\alpha\int_D|\eta|dx+\frac{\beta}{2}\int_D|\nabla \eta|^2dx.
\end{equation}
In comparison with more conventional regularization techniques, the most salient feature
of the model \eqref{mixed} lies in two penalty terms: it contains both the $L^1$ penalty
and the $H^1$ penalty, which exert drastically different a priori knowledge of the
sought-for solution. The scalars $\alpha$ and $\beta$ are regularization parameters
controlling the strength of respective regularization.
This variational problem \eqref{mixed} allows us to determine a coefficient $\eta$ which
is distributed yet clustered, i.e., exhibiting a clear groupwise sparsity structure in
the canonical pixel basis. This a priori knowledge is plausible for localized
inclusions/inhomogeneities in a homogeneous background. The model \eqref{mixed} is
derived from the following widely accepted observations. The $L^1$ penalty promotes the
sparsity of the solution \cite{Tibshirani:1996,CandesRombergTao:2006,Donoho:2006}, i.e.,
the solution is very much localized. Hence, the estimated background is homogeneous.
However, if the $L^1$ penalty is used alone, the solution tends to be very spiky and may
miss numerous physically relevant pixels in the sought-for groups. That is, the desirable
groupwise structure is not preserved. Meanwhile, the more conventional $H^1$ penalty
\cite{TikhonovArsenin:1977} yields a globally smooth profile, but the solution is often
overly diffusive. Consequently, the overall structure stands out clearly, but the
retrieved background is very blurry, which may lead to erroneous diagnosis of the number
of the inclusions and their sizes. In order to preserve simultaneously these distinct
features of the sought-for coefficient, i.e., sparsely distributed groupwise structures
in a homogeneous background, a natural idea would be to combine the $L^1$ penalty with
the $H^1$ penalty, in the hope of retaining the strengths of both models. As we shall see
below, the idea does work very well, and the model is very effective for enhancing the
resolution of the estimate to the coefficient $\eta$.
The general idea of mixed regularization, i.e., using multiple penalties, has proved very
effective in promoting several distinct features simultaneously. This general idea has
been pursued in the imaging community \cite{ItoKunisch:2000,LuShenXu:2007}. However, to
the best of our knowledge, the model \eqref{mixed} has not been explored in the
literature, let alone its efficient and accurate numerical treatment. A detailed
mathematical analysis of the model \eqref{mixed} is beyond the scope of the present
paper. We refer interested readers to \cite{ItoJinTakeuchi:2011} for some preliminary
results on mixed regularization and to \cite{JinLorenzSchiffler:2009} for a related model
(elastic-net).
To fully explore the potentials of the model \eqref{mixed}, an efficient and accurate
solver is required. We shall develop a semi-smooth Newton type method, which allows extracting very
detailed features of the solutions to the model \eqref{mixed}. The starting point of the
algorithm is the necessary optimality condition of the variational problem \eqref{mixed},
which reads
\begin{equation} \label{fixed}
K^*K\eta-\beta\Delta\eta-K^\ast u^s \in -\alpha\partial \psi(\eta),
\end{equation}
where $\psi(\eta)=\|\eta\|_{L^1}$ and the subdifferential $\partial\psi(\eta)$
\cite{ItoKunisch:2008} is the set-valued signum function, which is defined pointwise as
\begin{equation*}
\partial\psi(\eta)(x)=\left\{\begin{array}{ll}
1, &\mbox{if }\eta(x)>0, \\
\,[-1,1\,], &\mbox{if }\eta(x)=0,\\
-1, &\mbox{if }\eta(x)<0.
\end{array}\right.
\end{equation*}
Due to the convexity of the functional, the relation \eqref{fixed} is also a sufficient
condition. Hence it suffices to solve the inclusion \eqref{fixed}, for which there are
several different ways, e.g., iterative soft shrinkage
\cite{DaubechiesDefrise:2004,WrightNowakFigueiredo:2009}, augmented Lagrangian
method/alternating direction method or semi-smooth Newton method \cite{ItoKunisch:2008}.
We shall develop a (new) semi-smooth Newton method to efficiently solve the inclusion
\eqref{fixed}. To this end, we first recall the complementarity condition
\cite{ItoKunisch:2008}
\begin{equation} \label{eqn:complrel}
\lambda=\frac{\lambda+c\eta}{\max(1,|\lambda+c\,\eta|)}
\end{equation}
for any $c>0$, which will be fixed at a constant in the implementation, and $\lambda$
serves as a Lagrange multiplier. It can be directly verified by pointwise inspection that
the complementarity condition \eqref{eqn:complrel} is equivalent to the inclusion
$\lambda\in\partial\psi(\eta)$ (cf.\,\cite{ItoKunisch:2008}). With the help of the
complementarity condition \eqref{eqn:complrel}, we arrive at the following equivalent
nonlinear system in the primal variable $\eta$ and dual variable $\lambda$:
\begin{equation*}
\left\{\begin{aligned}
K^*K\eta-\beta\Delta\eta-K^\ast u^s +\alpha\lambda &= 0,\\
\lambda-\frac{\lambda+c\eta}{\max(1,|\lambda+c\,\eta|)}&=0.
\end{aligned}\right.
\end{equation*}
Then we apply the semi-smooth Newton algorithm using a primal-dual active set strategy.
The complete implementation is listed in Algorithm \ref{alg:ssn}. The technical details
for deriving the crucial Newton step (Step 5) are deferred to Appendix \ref{app:ssn},
which involves damping and regularization. A natural choice of the stopping criterion at
Step 6 is based on monitoring the change of the active set $\mathcal{A}=\{x\in
D:|\lambda+c\,\eta|\le 1\}$: if the active sets for two consecutive iterations coincide,
then we can terminate the algorithm, cf. \cite{ItoKunisch:2008}.
\begin{algorithm}[h] \caption{Primal-dual active set method}\label{alg:ssn}
\begin{algorithmic}[1]
\STATE Initialize $\eta^0$ and $\lambda^0$, and set $c > 0$ and $k=0$.
\FOR {$k=0,\dots,K$}
\STATE Set the active set ${\cal A}^k$ and inactive set ${\cal I}^k$ respectively by
\begin{equation*}
\begin{aligned}
{\cal A}^k&=\{x\in D:|\lambda^k+c\,\eta^k|\le 1\},\\
{\cal I}^k&=\{x\in D: |\lambda^k+c\,\eta^k|>1\}.
\end{aligned}
\end{equation*}
\STATE Compute $a$ and $b$ by
\begin{equation*}
a=\frac{\lambda^k}{\max(|\lambda^k|,1)} \quad\mbox{and}\quad b=\frac{\lambda^k+c\,\eta^k}{|\lambda^k+c\,\eta^k|},
\end{equation*}
and set $d^k=|\lambda^k+c\eta^k|$ and $F^k=ab^t$.
\STATE Solve for $(\eta^{k+1},\lambda^{k+1})$ from the system
\begin{equation*}
\begin{aligned}
K^\ast K\eta^{k+1}-\beta\,\Delta\eta^{k+1}-K^\ast u^s+\alpha\,\lambda^{k+1}&=0\mbox{ on } {\cal I}^k,\\
\lambda^{k+1}-c\frac{1}{d^k-1}(I-F^k) \eta^{k+1}-\frac{\lambda^k}{\max(|\lambda^k|,1)}&=0,\\
\eta^{k+1}&=0 \mbox{ on }{\cal A}^k.
\end{aligned}
\end{equation*}
\STATE Check the stopping criterion.
\ENDFOR
\STATE {\textbf{output}} approximation $\eta^K$.
\end{algorithmic}
\end{algorithm}
The main computational effort of the algorithm lies in the Newton update at Step 5: each
iteration requires solving a (dense) linear system. We note that the dual variable
$\lambda$ can be expressed in terms of the primal variable $\eta$ and on the active set
$\mathcal{A}$, the coefficient $\eta$ vanishes identically. Thus in practice, we solve
only a linear system for $\eta$ on the inactive set $\mathcal{I}=D\setminus\mathcal{A}$.
An important feature of the algorithm is that the linear system becomes smaller and
smaller and also less and less ill-conditioned as the iteration goes on, while the
iterate captures more and more refined details of the nonhomogeneous medium regions. If
the exact solution is indeed sparse (many zero entries), then the system size, i.e.,
$|\mathcal{I}|$, usually shrinks quickly as the iteration proceeds. The numerical
experiments indicate that the convergence of the algorithm is rather steady and fast.
\section{Numerical experiments}
In this part, we present numerical results for several two- and three-dimensional
examples to showcase the proposed two-stage inverse scattering method, for both exact and
noisy data. The wave number $k$ is fixed at $2\pi$, and the wavelength is set to
$\lambda=1$. The exact scattered field $u^s$ is obtained by first solving the integral
equation \eqref{eqn:indcur} for the induced current $I$ and then substituting the current
$I$ into the integral representation \eqref{int}. Here the integral equation
\eqref{eqn:indcur} is discretized by a mid-point rule; see Appendix \ref{app:int} for
details. The noisy scattered data $u_\delta^s$ are generated pointwise by the formula
\begin{equation*}
u_\delta^s(x) = u^s(x) + \epsilon \zeta \max_{x\in\Gamma}|u^s(x)|,
\end{equation*}
where $\epsilon$ refers to the relative noise level, and both the real and imaginary
parts of the noise $\zeta=\zeta(x)$ follow the standard Gaussian distribution. The index
$\Phi$, its restriction $\Phi|_D$ and the enhanced approximation $\eta$ by the mixed
model will be displayed. As is mentioned in Section 2, we choose the subdomain $D$
(approximate scatterer support) based on the formula $D=\{x\in \widetilde{\Omega}:
\Phi(x)\ge \mu \max_{x\in \widetilde{\Omega}} \Phi(x)\}$, where the cut-off value $\mu$
is taken in the range $(0.5,0.7)$. The choice of the cutoff value $\mu$ directly affects
the size of the domain $D$, but does not have much effect on the reconstructions.
Like in any regularization technique, an appropriate choice of regularization parameters
$(\alpha,\beta)$ in the mixed model \eqref{mixed} is crucial for the success of the
proposed imaging algorithm. There have been a number of choice rules
\cite{ItoJinTakeuchi:2011b} for one single parameter, but very little is known about the
mixed model. We shall choose the pair $(\alpha,\beta)$ in a trial and error manner, which
suffices for our goal of illustrating the significant potentials of the mixed model for
inverse scattering. In Algorithm \ref{alg:ssn}, the parameter $c$ is set to $50$, and
both $\eta^0$ and $\lambda^0$ are initialized to zero. The maximum number $K$ of Newton
iterations is $50$. In all the experiments, the convergence of the algorithm is achieved
within about $10$ iterations. All the computations were performed on \textsc{MATLAB}
7.12.0 (R2011a) on a dual-core desktop computer with 2GB RAM.
\subsection{Two-dimensional examples}
Unless otherwise specified, one incident direction $d$ is employed for two-dimensional
problems, and it is fixed at $\frac{1}{\sqrt{2}} (1,1)^\mathrm{T}$. The scattered field
$u^s$ is measured at $30$ points uniformly distributed on a circle of radius $5$. The
sampling domain $\widetilde{\Omega}$ is fixed at $[-2,2]^2$, which is divided into a
uniform mesh consisting of small squares of width $h=0.01$. The subdomain $D$ for the
integral equation \eqref{eqn:linint} is divided into a coarser uniform mesh consisting of
small squares of width $0.02$.
\begin{figure}
\caption{Numerical results for Example \ref{exam:2sc}
\label{fig:2sca}
\end{figure}
Our first example illustrates the method for two separate scatterers.
\begin{exam}\label{exam:2sc}
We consider two separate square scatterers in the following two scenarios
\begin{itemize}
\item[(a)] The scatterers are of width $0.2$ and centered at $(-0.8,-0.7)$ and
$(0.3,0.9)$, respectively, and the coefficient $\eta$ in both regions is $1$.
\item[(b)] The scatterers are of width $0.3$ and centered at $(-0.25,0)$ and
$(0.25,0)$, respectively, and the coefficient $\eta$ in the former and latter is
$1.5$ and $1$, respectively.
\end{itemize}
\end{exam}
The two scatterers in Example \ref{exam:2sc}(a) are well apart from each other. The
recovery of the scatterer locations by the index $\Phi$ is quite satisfactory, especially
upon noting that we have just used one incident wave. Two distinct scatterers are
observed for both exact data and the data with $20\%$ noise, cf. Fig. \ref{fig:2sca}(b).
However, the magnitudes are inaccurate, and the estimate suffers from spurious
oscillations in the homogeneous background, due to the ill-posed nature of the IMSP and
the oscillating behavior of fundamental solutions. Nonetheless, two localized square
subregions $D$ (each of width $0.4$) encompass the modes of the index $\Phi$, see Fig.
\ref{fig:2sca}(c), and may be taken as an approximate scatterer support. {In Fig.
\ref{fig:2sca}(c), the entire sampling domain $\widetilde{\Omega}$ is shown, and the two
small squares represent the approximate support $D$. Outside of the domain $D$, the index
$\Phi|_D$ is set to zero, i.e., identical with homogeneous background, and will not be
updated during the enhancing step.} The enhancing step is initialized with $\Phi|_D$, and
the results are shown in Fig. \ref{fig:2sca}(d). The regularization parameters for
getting the reconstructions, which are determined in a trial-and-error manner, are
presented in Table \ref{tab:regpara}. The enhancement {of the approximation $\Phi|_D$ over
the domain $D$} is significant: the recovered background is now mostly homogeneous, and
the magnitudes and sizes of the recovered scatterers agree well with the exact ones. This
shows clearly the significant potentials of the proposed mixed regularization for inverse
scattering problems. {The numbers in Table \ref{tab:regpara} also shed valuable insights
into the mixed model \eqref{mixed}: the value of the regularization parameter $\alpha$ is
much larger than that of $\beta$. Hence, the $L^1$ penalty plays a predominant role in
ensuring the sparsity of the solution, whereas the $H^1$ penalty yields a locally smooth
structure.}
\begin{table}
\centering
\caption{Regularization parameters $(\alpha,\beta)$ for the examples.}
\begin{tabular}{ccccc}
\hline
example & \ref{exam:2sc}(a) & \ref{exam:2sc}(b) & \ref{exam:ring} & \ref{exam:cube}\\
\hline
$\epsilon = 0\%$ &(2.0e-6,1.5e-9) & (8.0e-6,1.4e-8) & (7.0e-6,1.0e-9) &(2.5e-9,4.0e-14)\\
$\epsilon = 20\%$&(3.0e-6,2.0e-9) & (8.5e-6,9.0e-9) & (7.0e-6,5.0e-9) &(2.5e-9,5.0e-14)\\
\hline
\end{tabular} \label{tab:regpara}
\end{table}
\begin{figure}
\caption{Numerical results for Example \ref{exam:2sc}
\label{fig:2scb}
\end{figure}
The two scatterers in Example \ref{exam:2sc}(b) stay very close to each other, and thus
it is rather challenging for precise numerical reconstruction. The detection of the
scatterer locations by the index $\Phi$, see Fig. \ref{fig:2scb}(b), is still very
impressive. In particular, it clearly distinguishes two separate scatterers with their
locations correctly retrieved, and this remains stable for data with up to $20\%$ noise.
The mixed regularization is performed on a square $D$ (of width $1$) enclosing the two
modes in the index $\Phi$, see Fig. \ref{fig:2scb}(c). This choice of the inversion
domain $D$ allows possibly connecting of the modes. However, the enhancement correctly
recognizes two separate scatterers, with their magnitudes and sizes in excellent
agreement with the exact ones. Also the estimated background is very crispy.
Surprisingly, the estimate deteriorates only slightly in that the right scatterer
elongates a little bit towards the left scatterer as the noise level $\epsilon$ increases
from $0$ to $20\%$. Although not presented, we would like to note that for this
particular example, the reconstructions are still reasonable for data with $30\%$ noise.
Hence the proposed inverse scattering method is very robust with respect to the data
noise.
Next we consider a ring-shaped scatterer.
\begin{exam}\label{exam:ring}
The scatterer is one ring-shaped square located at the origin, with the outer and inner
side lengths being $0.6$ and $0.4$, respectively. The coefficient $\eta$ of the scatterer
is $1$. Two incident directions $d_1=\frac{1}{\sqrt{2}}(1,1)^\mathrm{T}$ and
$d_2=\frac{1}{\sqrt{2}}(1,-1)^\mathrm{T}$ are considered.
\end{exam}
A ring-shaped scatterer represents one of the most challenging objects to recover, and it is
highly nontrivial even with multiple scattered field data sets, especially noting the
ring has a small thickness. It has been observed that one single incident field is
insufficient to completely resolve the ring structure, and only some parts of the ring
can be resolved, depending on the incident direction $d$ \cite{ItoJinZou:2011a}. Hence we
employ two incident waves in the directions $d_1=\frac{1}{\sqrt{2}}(1,1)^\mathrm{T}$ and
$d_2=\frac{1}{\sqrt{2}}(1,-1)^\mathrm{T}$ in order to yield sufficient amount of
information about the scatterer, and accordingly, the index function $\Phi$ is defined as
follows
\begin{equation*}
\Phi(x_p)=\max_i\{\Phi_i(x_p)\}\quad \forall x_p\in\widetilde{\Omega},
\end{equation*}
where the function $\Phi_i$ refers to the index for the $i$th data set. The numerical
results with the exact data and $20\%$ noise in the data are shown in Fig.
\ref{fig:ring}. With just two incident waves, the index $\Phi$ can provide a quite
reasonable estimate of the ring shape. Despite some small oscillations, the overall
profile stands out clearly, and remains very stable for up to $20\%$ noise in the data.
The enhancing step via mixed regularization provides a very crispy estimate of the ring
structure: the recovered scatterer has a clear ring structure, which agrees excellently
with the exact one in terms of both magnitude and size. The presence of $20\%$ data noise
causes visible deterioration to the reconstruction, see Fig. \ref{fig:ring}(d).
Nonetheless, the enhanced reconstruction still exhibits a clear ring shape, and it
represents a very good approximation to the true scatterer upon noting the large amount
of data noise.
\begin{figure}
\caption{Numerical results for Example \ref{exam:ring}
\label{fig:ring}
\end{figure}
\subsection{Three-dimensional example}
Our last example shows the feasibility of the method for three-dimensional problems.
\begin{exam}\label{exam:cube}
We consider two cubic scatterers of width $0.1$ centered at $(0.35,0.15,0.15)$ and
$(-0.35,0.15,0.15)$, respectively. One single incident field with direction
$d=\frac{1}{\sqrt{3}}(1,1,1)^\mathrm{T}$ is used, and the coefficient $\eta$ of the
scatterers is taken to be $1$.
\end{exam}
The scattered field $u^s$ is measured at $600$ points uniformly distributed on the
surface $\Gamma$ of a cube of width $5$ (i.e., $10$ points in each direction). To
simulate the scattered field data, we take the sampling domain $\widetilde{\Omega}$ to be
the cube $[-1,1]^3$, which is divided into a uniform mesh consisting of small cubes of
width $h=0.01$. The inversion domain $D$ for the integral equation \eqref{eqn:linint} is
divided into a coarser mesh consisting of small cubes of width $0.03$.
The numerical results for Example \ref{exam:cube} with exact data are shown in Fig.
\ref{fig:cubex}(b), where each row represents a cross-sectional image along the second
coordinate axis $x_2$. The scatterer support estimated by the index $\Phi$ agrees
reasonably with the exact one, and away from the boundary of the true scatterers, the
magnitude of $\Phi$ decreases quickly. However, the reconstructed profile is slightly
diffusive in comparison with the exact one, which is reminiscent of the decay property of
fundamental solutions. The nonsmooth mixed regularization \eqref{mixed} is carried out on
two cubic subregions (of width $0.36\lambda$), cf. Fig. \ref{fig:cubex}(c). Like before,
a significant improvement in the resolution is observed: the sparse estimate is much more
localized in comparison with the index $\Phi$, and also the magnitude is close to the
exact one; see Fig. \ref{fig:cubex}(d). The presence of $20\%$ data noise does not worsen
much the index $\Phi$ and the sparse reconstruction, cf. Fig. \ref{fig:cubn20}. Hence the
reconstruction algorithm is highly tolerant with respect to data noise.
Lastly, we briefly comment on the computational efficiency of the overall procedure. The
first step with the index involves only computing inner products and is embarrassingly
cheap and easily parallelized. The accuracy of the support detection is quite
satisfactory, and thus a large portion of the sampling domain $\widetilde{\Omega}$ can be
pruned from inversion, i.e., $|D|\ll|\widetilde{\Omega}|$. Hence, the enhancement via
mixed regularization is also rather efficient.
\begin{figure}
\caption{Numerical results for Example \ref{exam:cube}
\label{fig:cubex}
\end{figure}
\begin{figure}
\caption{Numerical results for Example \ref{exam:cube}
\label{fig:cubn20}
\end{figure}
\section{Concluding remarks}
We have presented a novel two-stage inverse scattering method for the inverse medium
scattering problem of recovering the refractive index from near-field scattered data. The
efficiency and accuracy of the method stem from accurate support detection by the
sampling strategy and group sparsity-promoting of the mixed regularization technique. The
former is computationally very efficient, and reduces greatly the computational domain
for the more expensive inversion via nonsmooth mixed regularization, while the latter
achieves an enhanced resolution with the magnitudes and sizes comparable with the exact
ones. The numerical results for two- and three-dimensional examples clearly confirm these
observations.
These promising experimental results raise a number of interesting questions for further
studies. First, the potentials of mixed regularization have been clearly demonstrated. It
is of great interest to shed theoretical insights into the model as well as to design
efficient acceleration strategies, which for three-dimensional problems remains very
challenging. Some partial theoretical results can be found in \cite{ItoJinTakeuchi:2011}.
Also of much practical relevance is an automated choice of regularization parameters.
Second, the reconstructions were obtained with the linearized model, which represents
only an approximation to the genuine nonlinear IMSP model. It would be interesting to
justify the excellent performance of the linearization procedure. Third, the robustness
of the approach to noise is outstanding when compared with more conventional inverse
scattering algorithms, especially noting the limited data for inversion. The mechanism of
the robustness is not yet clear.
\section{Numerical method for forward scattering}\label{app:int}
We denote by $\mathbb{J}$ the index set of grid points of a uniformly distributed mesh
with a mesh size $h>0$ and consider square cells
\begin{equation*}
B_j=B_{j_1,j_2}=(x^1_{j_1},x^2_{j_2})+[-\tfrac{h}{2},\tfrac{h}{2}]\times [-\tfrac{h}{2},\tfrac{h}{2}]
\end{equation*}
for every tuple $j=(j_1,j_2)$ belonging to the index set $\mathbb{J}$. Assume that the
domain $\cup_{j \in \mathbb{J}} \,B_j$ contains the scatterer support $\Omega$. We use
the mid-point quadrature rule to evaluate the operator $K$, and hence the integral
\eqref{eqn:indcur} is approximated by
\begin{equation*}
I_k - \eta_{k}\, \sum_{j \in \mathbb{J}} G_{k,j} I_jh^2 = \eta_k \, u^{inc}(x_k)
\end{equation*}
where $I_k=I(x_k)$ and $\eta_k=\eta(x_k)$, and the off-diagonal entries $G_{k,j}$ and the
diagonal entries $G_{k,k}$ are given by $G_{k,j}=G(x_k,x_j)$ and
\begin{equation*}
G_{k,k}=\frac{1}{h^2} \int_{(-\tfrac{h}{2},\tfrac{h}{2})^2} G(x,0)dx,
\end{equation*}
respectively. The diagonal entries can be accurately computed by tensor-product Gaussian quadrature rules.
The resulting system can be solved using standard numerical solvers, e.g., Gaussian
elimination if the cardinality of the index set $\mathbb{J}$ is moderate, or iterative
solvers like GMRES otherwise. The extension of the procedure to 3D problems is straightforward.
\section{Semi-smooth Newton method}\label{app:ssn}
In this part, we derive a semi-smooth Newton method for minimizing \eqref{mixed}. The
optimality condition of the variational problem reads
\begin{equation*}
\left\{\begin{aligned}
K^*K\eta+\alpha\lambda-\beta\Delta \eta - K^\ast u^s &= 0,\\
\lambda -\frac{\lambda+c\eta}{\max(1,|\lambda+c\eta|)}&=0,
\end{aligned}\right.
\end{equation*}
where $\lambda$ is the Lagrange multiplier (dual variable). The second line, the
complementarity function, equivalently expresses the inclusion $\lambda\in
\partial\|\eta\|_{L^1}$, {which can be checked directly by pointwise
inspection. Thereby, we effectively transform the inclusion \eqref{fixed} into a
numerically amenable nonlinear system}.
It follows directly from the complementarity relation
\begin{equation}\label{eqn:complem}
\lambda =\frac{\lambda+c\eta}{\max(1,|\lambda+c\eta|)}
\end{equation}
that on the active set $\mathcal{A}=\{x\in D: |\lambda+c\eta|(x)\leq1\}$, $\eta$ vanishes
identically. Otherwise, both the dual variable $\lambda$ and the primal variable $\eta$
need to be solved. We shall solve the system by a semi-smooth Newton method
\cite{ItoKunisch:2008}. First observe that the Newton step (with the increments for
$\lambda$ and $\eta$ denoted by $\delta\lambda$ and $\delta\eta$, respectively) applied
to the following reformulation of equation \eqref{eqn:complem} (on the set
$\mathcal{I}=D\setminus\mathcal{A}$)
\begin{equation*}
\lambda|\lambda+c\eta| - \lambda + c\eta=0
\end{equation*}
is given by
\begin{equation*}
|\lambda+c\eta|\delta\lambda+\lambda\frac{\lambda+c\eta}{|\lambda+c\eta|}[\delta\lambda+c\delta\eta]-(\delta\lambda+c\delta
\eta)+\lambda|\lambda+c\eta|-(\lambda+c\eta)=0,
\end{equation*}
or equivalently with the notation $\lambda^+=\lambda+\delta\lambda$ and
$\eta^+=\eta+\delta\eta$, we have
\begin{equation*}
\lambda^+|\lambda+c\eta|+\lambda\frac{\lambda+c\eta}{|\lambda+c\eta|}[\lambda^++c\eta^+]=\lambda|\lambda+c\eta|+[\lambda^++c\eta^+].
\end{equation*}
Next we apply the idea of damping and regularization to the equation and thus get
\begin{equation*}
\lambda^+|\lambda+c\eta|+\theta[\lambda^++c\eta^+]\frac{\lambda+c\eta}{|\lambda+c\eta|}\frac{\lambda}{\max(|\lambda|,1)}
=[\lambda^++c\eta^+]+\theta|\lambda+c\eta|\frac{\lambda}{\max(|\lambda|,1)}.
\end{equation*}
Here, the purpose of the regularization step $\frac{\lambda}{\max(|\lambda|,1)}$ is to
automatically constrain the dual variable $\lambda$ to $[-1,1]$. The damping factor
$\theta$ is selected automatically so as to achieve stability. To this end, we let
$d=|\lambda+c\eta|$, $\widetilde{\eta}=d-1$, $a=\tfrac{\lambda}{\max(|\lambda|,1)}$, and
$b=\tfrac{\lambda+c\eta}{|\lambda+c\eta|}$. We arrive at
\begin{equation*}
\lambda^+(\widetilde{\eta}+1)+\theta[\lambda^++c\eta^+]ab=
[\lambda^++c\eta^+]+\theta ad.
\end{equation*}
Thus we have
\begin{equation*}
\lambda^+=\frac{1}{\widetilde{\eta}+\theta ab}[1-\theta ab]c\eta^++\frac{\theta
d}{\widetilde{\eta}+\theta ab}a.
\end{equation*}
To arrive at a simple iteration scheme, we set $\tfrac{\theta d}{\widetilde{\eta}+\theta
ab}=1$, i.e., $\theta=\tfrac{d-1}{d-ab}\leq1$. Consequently, we obtain a simple
iteration
\begin{equation*}
\lambda^+=\frac{1-ab}{d-1}c\eta^++\frac{\lambda}{\max(|\lambda|,1)},
\end{equation*}
where we have used the relation $\frac{1-\theta ab}{\widetilde\eta+\theta
ab}=\frac{1-ab}{d-1}$. Substituting this into the first equation gives
\begin{equation}\label{eqn:ssneq}
K^\ast K\eta^+ + \alpha c\frac{1-ab}{d-1} \eta^+ -\beta \Delta \eta^+ = K^\ast u^s-\alpha\frac{\lambda}{\max(|\lambda|,1)}.
\end{equation}
We note that one only needs to solve equation \eqref{eqn:ssneq} on the inactive set
$\mathcal{I}$, since on the active set $\mathcal{A}$, there always holds $\eta^+=0$. This
has an enormous computational consequence: the size of the linear system in
\eqref{eqn:ssneq} can be very small if $|\mathcal{I}|$ is small, i.e., the solution is
sparse. This last relation also clearly shows the sparsity of the solution, and thus
provides a crisp estimate of the background. Upon obtaining the solution $\eta^+$, one
can update $\lambda^+$ on the sets $\mathcal{I}$ and $\mathcal{A}$ according to the
second and the first equation, respectively. Lastly, we would like to remark on the
consistency of the scheme: if the sequence generated by the semi-smooth Newton method
converges, then the limit satisfies the complementarity relation \eqref{eqn:complem} as
desired.
\end{document}
|
\begin{document}
\title{Multi-Matrix Post-Processing for Quantum Key Distribution}
\author{Chao-hui Gao}
\author{Dong Jiang}
\email{[email protected]}
\affiliation{State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210046, P.R.China}
\author{Liang-liang Lu}
\email{[email protected]}
\affiliation{School of Physics, Nanjing University, Nanjing, 210093, P. R. China}
\author{Yu Guo}
\author{Li-jun Chen}
\email{[email protected]}
\affiliation{State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210046, P.R.China}
\date{\today}
\begin{abstract}
Post-processing is a significant step in quantum key distribution (QKD), which is used for correcting the
quantum-channel noise errors and distilling identical corrected keys between two distant legitimate parties.
Efficient error reconciliation protocol, which can lead to an increase in the secure key generation rate, is one of the main performance indicators of QKD setups. In this paper, we propose a multi-low-density parity-check
codes based reconciliation scheme, which can provide remarkable perspectives for highly efficient
information reconciliation. With testing our approach through data simulation, we show that the proposed scheme combining multi-syndrome-based error rate estimation allows a more accurate estimation about the error rate as compared with random sampling and single-syndrome estimation techniques before the error correction, as well as a significant increase in the efficiency of the procedure without compromising security and sacrificing
reconciliation efficiency.
\end{abstract}
\pacs{}
\maketitle
\section{Introduction}
Quantum Key Distribution (QKD) is a class of protocols where the two separated users, Alice and Bob, can share identical secret keys which are secure from the eavesdropper (Eve) \cite{gisin2002quantum}.
Since it provides unconditional security guaranteed by laws of quantum mechanics \cite{scarani2009security}, QKD has attracted wide attention and many advanced works have been published over recent years \cite{lo2005decoy,lo2012measurement,liao2017satellite,Wang2005Beating}.
Generally, a QKD protocol can be divided into quantum and classical parts.
In the former part, Alice generates and transmits a set of raw key through the quantum channel.
Due to Eve's attacks\cite{Bennet1984Quantum}, channel noise, and device imperfection \cite{Gerhardt2010Full, Weier2011Quantum, Jain2011Device}, the keys are weakly correlated and partially secure, and Eve may obtain some information about the keys.
The classical part, also known as post-processing, is used to correct the errors, and to remove information leakage.
Post-processing consists of base sifting \cite{Bennet1984Quantum}, error estimation \cite{Wang2005Beating,treeviriyanupab2014rate,2018arXiv181005841K}, key reconciliation \cite{Luby1998Improved} and privacy amplification \cite{Bennett1988Privacy,Bennett1995Generalized}.
During base sifting, the bits measured with correct measurement bases in the raw key are kept and constitute the sifted key.
Subsequently, Bob uses a key reconciliation algorithm to correct the errors in the sifted key based on the estimated error rate.
Finally, Alice and Bob implement privacy amplification to remove information leakage and obtain the final key, which is secure from Eve.
In error estimation, the accuracy of the estimated quantum bit error rate (QBER) affects the operational efficiency of post-processing.
If the actual QBER for a given block is larger than the estimate, Bob might end up with a wrong final key.
A common method to obtain the QBER for legitimate users is to exchange and compare randomly sampled sifted key bits, which can lower the
key generation rate due to the disclosed bits. Recently, Kiktenko \emph{et al}.\ \cite{2018arXiv181005841K} proposed a distinct approach
based on the use of syndromes of low-density parity-check (LDPC) codes to obtain the QBER for each block of the sifted key,
allowing more accurate estimation. The suggested algorithm is also suitable for irregular LDPC codes.
In parallel, key reconciliation is the most crucial step of post-processing, which is responsible for correcting the errors in Bob's sifted key,
in such a way that it ensures consistency between Alice's and Bob's sifted keys.
Belief Propagation (BP) \cite{Luby1998Improved} is the most widely used key reconciliation algorithm, and has attracted intensive study \cite{Kou2001Low,Zhang2002Shuffled,Hocevar2004,Sharon2004An,Zhang2004A,Chang2008Lower,Park2009Shuffled,Wu2010Alternate,Aslam2017Edge}.
There are three criteria for judging a key reconciliation algorithm, namely, convergence speed, bit error rate (BER) and success rate.
However, it is hard to meet the three criteria at the same time, which often appears if the syndrome decoding, based on
an iterative BP algorithm, fails to converge
within the predefined number of iterations (e.g., it could
be caused by an inappropriate choice of the LDPC parity-matrices
relative to the actual errors in raw keys).
This makes key reconciliation the bottleneck of QKD and severely affects the key generation rate for industrial QKD systems.
In this paper, we extend the blind information reconciliation \cite{kiktenko2017symmetric} to multiple LDPC codes and estimate the QBER more
accurately by virtue of multiple syndromes without disclosing redundant bits.
Experimental results show a significant increase in the efficiency of the procedure, i.e., faster convergence speed with higher success rate.
To prevent extra information leakage in our post-processing scheme, we also give a multiple LDPC codes construction method.
Security analysis shows that our key reconciliation scheme does not reveal extra information.
The rest of the paper is organized as follows: in Section \uppercase\expandafter{\romannumeral2}, a brief review of error estimation and key reconciliation is given, followed by a detailed description of the process and advantages of our scheme. Section \uppercase\expandafter{\romannumeral3}
provides the novel multi-matrix post-processing approach for error estimation and correction. In Section \uppercase\expandafter{\romannumeral4} a set of data simulation are carried out to fully evaluate these advantages. The proposed construction method of multiple matrices and the security analysis of the proposed scheme are given in the appendix.
\section{Preliminaries}
\label{sec:examples}
In this section, we will first review error estimation and reconciliation. Other parts of post-processing can be referred to \cite{Bennet1984Quantum,Bennett1988Privacy,Bennett1995Generalized}.
\subsection{Error Estimation}
We assume that Alice and Bob possess random sifted keys of equal length, and Bob needs to estimate the error rate $e$ of the sifted keys before executing key reconciliation, since $e$ is an important input parameter of reconciliation algorithms. The estimation accuracy of $e$ directly affects the operational efficiency of post-processing.
If $e$ is overestimated, Alice will place superfluous information on her syndrome, i.e., more leakage needed to be removed during privacy amplification, leading to relatively low key generation rate.
On the contrary, if $e$ is underestimated, less information is provided, so Bob spends more time to correct errors during key reconciliation or even end up with wrong final key.
Error estimation can be executed in the several ways.
The most well-known method is the random sampling \cite{Wang2005Beating}.
But its drawback is that if Alice and Bob want to estimate more accurate error rate, they inevitably sacrifice key bits.
To solve this problem, P.Treeviriyanupab \emph{et al}. proposed a new method \cite{treeviriyanupab2014rate}.
In this protocol, Alice and Bob use their syndromes $z^{A}=[z_1^{A}, \cdots, z_m^{A}]$ and $z^{B}=[z_1^{B}, \cdots, z_m^{B}]\ (z_j^{A},z_j^{B}\in \{0,1\}, j\in\{1,\cdots,m\})$ as input to calculate the maximum likelihood estimation of the error rate.
Syndromes are generated from a kind of data structure, LDPC code \cite{Gallager1962Low}, which can be presented by a $m\times n$ matrix or a Tanner Graph (TG) \cite{Tanner1981A}.
In Fig. \ref{fig:LDPC-TG} (a), an example of binary LDPC matrix $H_{m\times n}$ is given.
The variable nodes $v_i~(i\in\{1,\cdots,n\})$ (blue circles ) and check nodes $c_j~(j\in\{1,\cdots,m\})$ (yellow squares) represent bits of key and parity-check equations, respectively \cite{Gallager1962Low}.
TG corresponding to this matrix is shown in Fig. \ref{fig:LDPC-TG} (b).
An edge connecting a variable node and a check node indicates that the variable node participates in the parity-check equation.
In a LDPC code, the degree of a variable node (or check node) is the number of check nodes (or variable nodes) connected to it.
\begin{figure}
\caption{A binary m $\times$ n LDPC matrix (a) and its corresponding TG (b).}
\label{fig:LDPC-TG}
\end{figure}
The syndromes, $z^{A}$ (or $z^{B}$), are simply obtained by multiplying a LDPC matrix and Alice's (or Bob's) sifted key.
But the method \cite{treeviriyanupab2014rate} is applicable only to regular LDPC code, in which all of the variable nodes have the same degrees and so does all check nodes. So Kiktenko \emph{et al}. extend the scope of application \cite{2018arXiv181005841K} (hereinafter referred to as the single-syndrome error estimation), which is also suitable for irregular LDPC code.
\subsection{Key Reconciliation}
BP \cite{Luby1998Improved}, also known as the Sum Product (SP) algorithm, can be used for error-correction.
Due to its relatively high decoding efficiency and low executing complexity, BP has been widely adopted in QKD to correct the key errors caused by Eve's attacks, channel noise, etc.
In QKD, if Bob uses BP to correct his sifted key $y^{T}=[y_1,\ldots,y_n]$, he first needs to initialize $P^{b}_i~(b\in\{0,1\})$, $v_i$ and variable-to-check (V2C) information $L_{v_i\to c_j}$ as follows,
\begin{equation}
\begin{cases}
P^{0}_i=1-e, P^{1}_i=e & y_i=0\\
P^{0}_i=e, P^{1}_i=1-e & y_i=1
\end{cases},
\label{equ:bp_initial}
\end{equation}
\begin{equation}
L_{P_i}=\log \frac{P^{0}_i}{P^{1}_i},
\label{equ:L-Pi}
\end{equation}
\begin{equation}
L_{v_i\to c_j}=L_{P_i},
\label{equ:init-Lq}
\end{equation}
\noindent where $P^{b}_i~(b\in\{0,1\})$ is the prior probability of the candidate value $b$ of $v_i$, $e$ is the result of error estimation, $L_{P_i}$ represents the log likelihood ratio of $P_i^b$.
Secondly, as shown in Fig. \ref{fig:C2VandV2C} (a), he generates and propagates check-to-variable (C2V) information $L_{c_j\to v_i}$ by
\begin{equation}
\begin{split}
L_{c_j\to v_i}=\mathrm{sign}(z_j)\cdot 2\mathrm{tanh}^{-1}(\prod_{v_{i^{'}}\in{N(c_j)\backslash i}}\mathrm{tanh}(\frac{1}{2}L_{v_{i^{'}}\to c_j})),
\label{equ:L-rji}
\end{split}
\end{equation}
\noindent where $z$ denotes Alice's syndrome \cite{Mackay1999Good}, which is the product of $H_{m\times n}$ and Alice's sifted key, $\mathrm{tanh()}$ is the hyperbolic tangent function, $\mathrm{tanh}^{-1}()$ is the inverse function of $\mathrm{tanh}()$, $v_{i^{'}}\in{N(c_j)\backslash i}$ represents the set of adjacent variable nodes of check nodes $c_j$ except $v_i$, $\mathrm{sign}()$ is a sign function defined as follows:
\begin{equation}
\mathrm{sign}(z_j)=
\begin{cases}
+1& z_j=0 \\
-1& z_j=1
\end{cases}.
\end{equation}
Thirdly, as plotted in Fig. \ref{fig:C2VandV2C} (b), Bob updates and propagates V2C information by substituting the generated C2V information into the following equation.
\begin{equation}
L_{v_i\to c_j}=L_{P_i}+\sum_{c_{j^{'}}\in{N(v_i)\backslash j}}L_{c_{j^{'}}\to v_i},
\label{equ:L-qij}
\end{equation}
where, $c_{j^{'}}\in{N(v_i)\backslash j}$ represents the set of adjacent check nodes of $v_i$ except $c_j$. All of $L_{c_j\to v_i}$ and $L_{v_i\to c_j}$ contain information of posterior probabilities of $v_i$.
\begin{figure}
\caption{(a)Generated C2V information $L_{c_j\to v_i}
\label{fig:C2VandV2C}
\end{figure}
Finally, he calculates the soft-decision value of every variable node $v_i$ as follows,
\begin{equation}
L_{v_i}=L_{P_i}+\sum_{c_j\in{N(v_i)}}L_{c_j\to v_i},
\end{equation}
then performs the decoding decision on every variable node according to the following equation,
\begin{equation}
y_i=
\begin{cases}
1& L_{v_i}>0 \\
0& L_{v_i}<0
\end{cases}.
\label{equ:decode-decide}
\end{equation}
Bob iterates the last three steps until the decoding is successful (i.e., the equation $z=H_{m\times n }\cdot y$ is satisfied) or the number of iterations reaches the pre-set upper limit.
In each iteration, BP can use different scheduling strategies, which can be divided into three categories \cite{Casado2007Informed}: Flooding, Shuffled, and Layer.
Flooding first goes through all the check nodes and generates C2V information, then traverses all the variable nodes and updates V2C information. Shuffled uses variable nodes as the traversal sequence, sequentially updates C2V and V2C information between variable nodes and their adjacent check nodes.
Layer, on the contrary, uses check nodes as the traversal sequence, sequentially updates C2V and V2C information between check nodes and their adjacent variable nodes.
In practical applications, BP, Shuffled Belief Propagation (SBP) \cite{Zhang2002Shuffled}, and Layer Belief Propagation (LBP) \cite{Hocevar2004,Sharon2004An} are the typical representatives of the above three scheduling strategies. For convenience, the algorithms based on single matrix are hereinafter referred to as the single-matrix reconciliation.
\section{Multi-matrix Post-processing}
In this section, we propose a post-processing scheme where users estimate error rate with multiple syndromes and correct errors with multiple matrices (hereinafter referred to as the multi-matrix post-processing). In the multi-matrix post-processing, base sifting and privacy amplification are the same as the original post-processing (hereinafter referred to as the single-matrix post-processing). Here we introduce only error estimation and key reconciliation in the frame of multiple syndromes.
\subsection{Multi-syndrome Error Estimation}
Each bit of a syndrome represents the relationship of the parity-check equation and the key. By comparing Alice's syndrome and his own syndrome, Bob can extract some information about error rate. If he uses multiple matrices, he can obtain multiple syndromes, which can be used to estimate the error rate more accurately.
Above all, Bob obtains $u$ syndromes from Alice and performs XOR as follows,
\begin{equation}
\triangle z^{k}=z^{A|k} \oplus z^{B|k},k\in \{1,\cdots,u\},
\label{equ:syndromes}
\end{equation}
where $\oplus$ is the XOR operation, $z^{A|k}$ and $z^{B|k}$ is the $k^{th}$ syndromes of Alice and Bob respectively. Then Bob calculates the maximum likelihood estimation of $e$ by,
\begin{equation}
e=\arg \max_{e^{'}\in[0,threshold]}M_{e^{'}|\triangle Z},
\label{equ:errorRates}
\end{equation}
where $e^{'}$ is a possible value that $e$ may take, $\triangle Z=[\triangle z^{1},\triangle z^{2},\cdots ,\triangle z^{u}]$. In equation (\ref{equ:errorRates}), $M_{e^{'}|\triangle Z}$ can be obtained via,
\begin{equation}
M_{e^{'}|\triangle Z}=\prod_{k=1}^{u}\prod_{j=1}^{m}[1-\triangle z_j^{k}+(2\triangle z_j^{k}-1)p(e^{'},d_{c_j}^{k})],
\end{equation}
\begin{equation}
\begin{split}
p(e^{'},d_{c_j}^{k})&=Pr(\triangle z_j^{k}=1)\\&
=\sum_{\substack{i=1\\ i\bmod 2=1}}^{d_{c_j}^{k}}\dbinom{d_{c_j}^{k}}{i}e^{'i}(1-e^{'})^{d_{c_j}^{k}-i},
\label{equ:probabilities}
\end{split}
\end{equation}
where $M_{e^{'}|\triangle Z}$ is the likelihood function of $e^{'}$, $p(e^{'},d_{c_j}^{k})$ is the prior probability that $z_j^{A|k}$ and $z_j^{B|k}$ differ, $\triangle z_j^{k}$ is the $j^{th}$ bit of $\triangle z^{k}$, $z_j^{A|k}$ is the $j^{th}$ bit of $z^{A|k}$, $z_j^{B|k}$ is the $j^{th}$ bit of $z^{B|k}$, $d_{c_j}^{k}$ is the degree of check node $c_j$ of the $k^{th}$ matrix. As shown in equation (\ref{equ:errorRates}), $e$ evaluates to the $e^{'}$ that maximizes $M_{e^{'}|\triangle Z}$. The \lq\lq threshold\rq\rq\ \cite{richardson2001capacity,richardson2001design} is the upper limit of error rate that can be acceptable. If $e$ exceeds the \lq\lq threshold\rq\rq, the sifted key will be abandoned.
Our method (hereinafter referred to as the multi-syndrome error estimation) is based on the single-syndrome error estimation, but can bring out higher accuracy of estimation. Meanwhile, compared with the random sampling, our method doesn't need to discard any key bit.
\subsection{Multi-matrix Key Reconciliation}
Although, theoretical analysis and simulation results show that the single-matrix reconciliation can correct the errors to some extent \cite{Sharon2007Efficient},
the performances of convergence speed and BER are still limited \cite{Casado2007Informed,Casado2010LDPC}, and the success rate is decreased when LDPC code is not cycle-free \cite{Tanner1981A,Yazdani2004Improving}.
To overcome these problems, we propose a new reconciliation strategy that uses two or more matrices to correct errors in parallel.
Let us take multi-matrix BP (MBP) as an example to show the detailed process and advantages of our strategy.
Suppose Alice and Bob have prepared and shared $u$ LDPC codes $H_1, \ldots, H_u$. After obtaining the sifted key $x^{T}=[x_1, \ldots, x_n]~(x_i\in\{0,1\})$, Alice calculates $u$ syndromes according to the following equation:
\begin{equation}
\begin{split}
(z^k)^T=[z_1^k,\ldots,z_m^k]=H_k\cdot x,~ &k\in\{1,\dots,u\},\\
&z_j^k\in\{0,1\},
\end{split}
\end{equation}
\noindent and sends them to Bob over the classical channel. Because of Eve's attacks, channel noise, or device imperfection,
Bob inevitably obtains a sifted key different from Alice's, denoted as $y^{T}=[y_1, \ldots,y_n],~(y_i\in\{0,1\})$.
In our strategy, Bob first initializes the prior probabilities $P^{b}_i~(b\in\{0,1\})$, log likelihood ratios $L^k_{P_i}$ and V2C information $L^k_{v_i\to c_j}$ for all matrices according to equations (\ref{equ:bp_initial}), (\ref{equ:L-Pi}) and (\ref{equ:init-Lq}), respectively.
Secondly, Bob generates and propagates C2V information $L^{k}_{c_j\to v_i}$ according to equation (\ref{equ:L-rji}).
Thirdly, by substituting C2V information into equation (\ref{equ:L-qij}), Bob updates and propagates V2C information.
Finally, he goes through all variable nodes to obtain their soft-decision values by
\begin{equation}
L_{v_i}=L_{P_i}+\sum^{u}_{k=1}\sum_{c_j\in{N_{k}(v_i)}}{L^k_{c_j\to v_i}},
\label{equ:M-decision}
\end{equation}
\noindent and makes decoding decisions according to equation (\ref{equ:decode-decide}).
Once Bob's key is corrected, i.e., $y$ is equal to $x$, all his syndromes satisfy $z^k=H_k\cdot y$. Thus he randomly selects a matrix $H_k$, and judges whether $z^k$ is equal to $H_k\cdot y$. If so, Bob terminates the algorithm and stores $y$. Otherwise, he starts another iteration. The reconciliation is considered a failure when the number of iterations exceeds the upper limit.
There is an important figure called the reconciliation efficiency $f$ \cite{kiktenko2017symmetric}. It shows the ratio of practical information leakage to theoretical floor for successful reconciliation. It serves to imply the efficiency and security of a reconciliation strategy and help privacy amplification to remove information leakage. For the single-matrix reconciliation, the reconciliation efficiency $f$ is represented as
\begin{equation}
f=\frac{m}{nh(e)}>1,
\label{equ:f-single}
\end{equation}
where $m$ and $n$ are the numbers of check nodes and variable nodes of the LDPC code, $e$ is the result of error estimation, and $h$ is the Shannon binary entropy:
\begin{equation}
h(e)=-e\log_2e-(1-e)\log_2(1-e).
\label{equ:entropy}
\end{equation}
For the multi-matrix reconciliation, however, $f$ is given by
\begin{equation}
f=\frac{\alpha m}{nh(e)}>1,(\alpha\geq 1),
\label{equ:f-multi}
\end{equation}
where $\alpha$ is a constant which is relative to $u$ and the structures of $u$ matrices. Fortunately, if the construction method of multiple matrices (see Appendix B) is used, it can be proved that the practical information leakage is equal to $m$ (see Appendix A), i.e., $\alpha$ is equal to $1$, without sacrificing the reconciliation efficiency compared
with single-matrix post-processing.
Obviously, our strategy is portable; it can be easily applied to SBP, LBP (see Appendix C), and other algorithms to achieve the following improvements:
\begin{enumerate}[label=\arabic*.,leftmargin=*]
\item\textbf{ Faster Convergence Speed} \ \ In our strategy, when Bob generates C2V and updates V2C information, all matrices operate in parallel. And as shown in equation (\ref{equ:M-decision}), Bob obtains the soft-decision value of each variable node $v_i$ by gathering all the C2V information sent to $v_i$ in every matrix. The amount of C2V information gathered within one iteration in the multi-matrix reconciliation is equal to information gathered in numerous iterations in the single-matrix reconciliation.
\item\textbf{ Higher Success Rate} \ \ Once C2V and V2C information of a matrix are trapped in a cycle, the other matrices without this cycle can help the trapped matrix jump out the cycle, leading to higher success rate.
\item\textbf{ Lower BER} \ \ The value of each key bit is determined according to the information provided by multiple matrices. The accuracy of error-correction is effectively improved, resulting in lower BER.
\end{enumerate}
\section{Experimental Evaluation}
To fully evaluate the above advantages of multi-matrix post-processing, in this section we first give some detailed comparisons among three methods of error estimation.
Then for the other three parts, the experiments about the three criteria of key reconciliation algorithms are carried out.
All simulation data used in our experiments are generated by real random number generator IDQ EasyQuantis 2.1.
For comparison, we also set the upper limit of iterations to $100$, which is similar to existing implementations \cite{Zhang2012Verification,Djordjevic2012Evaluation}, and the code rate and code length of LDPC codes are set to $0.8$ and $10000$, respectively.
\subsection{Error Estimation}
We have described the three methods of error estimation hereinbefore, including the random sampling, the single-syndrome error estimation and the multi-syndrome error estimation. To compare these three methods, we generate 2000 sets of keys at error rates of $0.0068$, $0.0166$, and $0.0267$, respectively. The sampling rate of random sampling is set to 0.5. For any set of key, we use these methods to get three error rates. As shown in Fig. \ref{fig:errorEstimate}, it is clear that our method (black lines) is more accurate and stable than the random sampling (magenta lines) and the single-syndrome error estimation (red lines).
\begin{figure*}
\caption{ Comparison of random sampling, single-syndrome and multi-syndrome for error estimation with 2000 sets of keys at the QBER of 0.0068 (top), 0.0166 (middle), and 0.0267 (bottom), respectively.
(a) Results of multi-syndrome error estimation (black lines) and random sampling method (magenta lines).
(b) Results of multi-syndrome error estimation (black lines) and single-syndrome error estimation (red lines).}
\label{fig:errorEstimate}
\end{figure*}
\subsection{Convergence Speed}
For key reconciliation, since the faster the convergence speed is, the smaller the average number of iterations becomes,
we evaluate the convergence speed of different algorithms by calculating their average numbers of iterations under different error rates.
We first prepare a matrix for the single-matrix algorithms, then add four more matrices for the multi-matrix algorithms (see the next section for the detailed method of generating LDPC codes). At a certain error rate, we generate 100 sets of keys, perform each algorithm on the keys, and calculate the average number of iterations.
The results are shown in Fig.~\ref{fig:Iteration}. Clearly, under different error rates, the average numbers of iterations of the multi-matrix algorithms are significantly decreased compared with their single-matrix versions. MBP cuts down 43.15$\sim$46.06\% of average iteration number of BP, while MLBP is 38.16$\sim$40.21\% and MSBP is 47.87$\sim$53.38\%.
We can further increase the convergence speed of the multi-matrix algorithms by adjusting two factors. One is the number of matrices used in reconciliation. We generate 100 sets of keys with error rate 0.0246, run the multi-matrix algorithms with different number of matrices to correct these keys. The relationship between the average number of iterations and the number of matrices is plotted in Fig. \ref{fig:Number}. Clearly, the average number of iterations and the number of matrices are inversely proportional.
Another factor is the number of waves.
The variable nodes with larger degrees can get more information, thus can be corrected earlier and can provide useful information to help other variable nodes.
This process spreads from large-degree to small-degree variable nodes, behaving like a wave, so it is called the wave effect \cite{Luby2001Improved}.
For a multi-matrix algorithm, multiple waves can be formed simultaneously to correct errors. We refer to this phenomenon as the multi-wave effect, which obviously leads to faster convergence speed.
However, if the waves are close to each other, they spread as one wave. This greatly discounts the performance of the multi-wave effect.
On the contrary, if the large-degree variable nodes are dispersed in different matrices, the multiple waves spread and correct errors at the same time, resulting in faster convergence speed.
We construct $5$ matrices with close waves to compare with $5$ matrices with separated ones, and plot the results in Fig.~\ref{fig:Wave}. Clearly, the algorithms using matrices with separated waves outperform the others.
Therefore, our strategy can significantly improve the convergence speed compared with the single-matrix reconciliation, and the speed can be further improved if Bob uses more or designs better matrices.
\begin{figure}
\caption{Comparison about convergence speed of 6 reconciliation algorithms by calculating their average numbers of iterations for different error rates.}
\label{fig:Iteration}
\end{figure}
\begin{figure}
\caption{ Relationship about the convergence speed and the number of matrices (1$\sim$5) in reconciliation.
The error rate for data simulation is 0.0246.
}
\label{fig:Number}
\end{figure}
\begin{figure}
\caption{The convergence speed of the multi-matrix algorithms relative to the number of waves is shown.
We generate 100 sets of keys at each error rate, perform each algorithm on the keys using 5 matrices with compact and separated waves respectively,
and calculate the average number of iterations.}
\label{fig:Wave}
\end{figure}
\subsection{Success Rate}
The success rate of reconciliation may be negatively impacted by the cycles.
For example, suppose Alice's sifted key is $x^{T}=[1,0,1,0,1]$, Bob's sifted key is $y^{T}=[1,0,0,0,1]$, the error rate $e$ is 0.2, and the LDPC code has $5$ variable nodes labeled as $\{v_1,\dots,v_5\}$ and $4$ check nodes denoted as $\{c_1,\dots,c_4\}$. As shown in Fig. \ref{fig:cycle} (a), in the LDPC code there is a 4-member cycle, which is represented by a blue circle and red edges, respectively. If Bob uses the BP algorithm to correct the key, the reconciliation fails in each iteration. This is because there is always a difference between the signs of the soft-decision values of $v_2$ and $v_4$. Therefore, they cannot be decoded as $1$ at the same time. The 4-member cycle makes new information always be excluded and old information always loop in the cycle. Thus, as recorded in Tab. \ref{tab:ring-assoliation}, no matter how large the upper limit of iterations is, the single-matrix reconciliation always fails.
However, as shown in Fig.~\ref{fig:cycle}~(b), if Bob adds two matrices to correct the key, then since there is no cycle between $v_2$ and $v_4$ in the new matrices, the data of the new matrices help $v_2$ and $v_4$ break out of the 4-member cycle, resulting in a successful reconciliation. As shown in Tab.~\ref{tab:ring-multi}, MBP corrects the error within two iterations.
\begin{figure}
\caption{ One matrix with a 4-member cycle (a) and two additional matrices (b). }
\label{fig:cycle}
\end{figure}
\begin{table}[htbp]
\centering
\caption{\bf Soft-decision Values of $\mathbf{v_2}$ and $\mathbf{v_4}$}
\begin{tabular}{|p{1.2cm}|p{2.6cm}|p{2.6cm}|p{1.2cm}|}
\hline
Iteration number & Soft-decision value of $v_2$ & Soft-decision value of $v_4$ & Result \\
\hline
1 & -0.753772 & 0.753772 & fail \\
2 & 0.753772 & -0.753772 & fail \\
3 & -0.728434 & 0.728434 & fail \\
4 & -0.728434 & 0.728434 & fail \\
5 & -0.704088 & 0.704088 & fail \\
$\cdots$ & $\cdots$ & $\cdots$ & $\cdots$ \\
96 & 0.166115 & -0.166115 & fail \\
97 & -0.160981 & 0.160981 & fail \\
98 & 0.160981 & -0.160981 & fail \\
99 & -0.156007 & 0.156007 & fail \\
100 & 0.156007 & -0.156007 & fail \\
\hline
\end{tabular}
\label{tab:ring-assoliation}
\end{table}
\begin{table*}[htbp]
\centering
\caption{\bf Soft-decision Values of $\mathbf{v_2}$ and $\mathbf{v_4}$ in 3-matrix reconciliation}
\begin{tabular}{|p{1.2cm}|p{1.7cm}|p{1.7cm}|p{1.7cm}|p{1.7cm}|p{1.7cm}|p{1.7cm}|p{1.7cm}|p{1.7cm}|p{1.2cm}|}
\hline
Iteration number & Soft-decision value of $v_2$ in $H_1$ & Soft-decision value of $v_2$ in $H_2$ & Soft-decision value of $v_2$ in $H_3$ & Soft-decision value of $v_2$ & Soft-decision value of $v_4$ in $H_1$ & Soft-decision value of $v_4$ in $H_2$ & Soft-decision value of $v_4$ in $H_3$ & Soft-decision value of $v_4$ & Result \\
\hline
1 & -0.753772 & -0.753772 & -0.753772 & -5.0339 & 0.753772 & -2.01882 & -1.38629 & 0.121249 & fail \\
2 & 0.753772 & -3.46963 & -2.56496 & -8.0534 & -0.753772 & -2.77259 & -3.52636 & -4.28013 & success \\
\hline
\end{tabular}
\label{tab:ring-multi}
\end{table*}
We carry out a test to fully represent the performance of reducing the impact of cycles.
In this test, we generate 1000 sets of keys with error rate 0.0275, perform the 6 reconciliation algorithms on the generated keys, and calculate the success rate. As shown in Fig. \ref{fig:success}, the average success rate of the multi-matrix algorithms is 96.33\%, nearly double that, 48.83\%, of the single-matrix algorithms.
\begin{figure}
\caption{Reconciliation success rate for single- and multi-matrix algorithms.
1000 sets of keys with error rate of 0.0275 are generated for the comparison.}
\label{fig:success}
\end{figure}
\subsection{Bit Error Rate}
Compared with the single-matrix reconciliation, the multi-matrix algorithms decode the key according to information provided by multiple matrices. The decoding results are more accurate and reliable. We generate 100 sets of keys with error rate 0.0267, perform BP and MBP on the generated keys to calculate the number of corrected bits $N_c$ and the number of misjudged bits $N_m$ in each iteration, and plot the valid number of corrected bits $N_c-N_m$ in Fig. \ref{fig:right_wrong}.
We can see that MBP can correct more errors in each iteration, and most of the errors are corrected at the beginnings of the iterations. It achieves faster convergence speed and lower BER compared with BP.
\begin{figure}
\caption{The valid number of corrected bits $N_c-N_m$ ($N_c$ the number of corrected bits; $N_m$ is the number of misjudged bits) in each iteration for single- and multi-matrix algorithms. 100 sets of keys with error rate of 0.0267 are considered.}
\label{fig:right_wrong}
\end{figure}
To further evaluate the BER performances of the multi-matrix algorithms, five QBER values ranging from 0.0202 to 0.0256 are selected. At each error rate, we generate 1000 sets of keys, perform 5-matrix algorithms and their single-matrix versions on these generated keys. After 5 iterations, we calculate BERs of different algorithms according to the following equation,
\begin{equation}
\mathrm{BER}=\frac{\mathrm{number\ of\ error\ bits}}{\mathrm{1000 * length\ of\ code}},
\label{equ:BER}
\end{equation}
\noindent and draw the results in Fig. \ref{fig:BER}. It is obvious that all three multi-matrix algorithms achieve lower BERs under different error rates compared with their single-matrix versions.
For example, the BER of SBP is 0.0030832 when the error rate is 0.0202, while that of MSBP is 0.0000045, a difference of three orders of magnitude.
\begin{figure}
\caption{The BER performances of the multi-matrix algorithms after 5 iterations are shown. Five QBER ranging from 0.0202 to 0.0256 are selected. For each error rate, we generate 1000 sets of keys, perform 5-matrix algorithms and single-matrix versions on these generated keys.}
\label{fig:BER}
\end{figure}
\section{Conclusion}
In this paper, a highly efficient error reconciliation protocol for QKD is proposed, whose core is using likelihood of multiple syndromes
obtained from multiple LDPC codes for QBER estimation and correction. Security analysis and multi-matrix construction method are provided.
Evaluation results show that
the proposed approach allows improving the accuracy of QBER estimation in contrast to previous works. Additionally, the scheme
can greatly increase the convergence speed, success rate, and significantly improve the BER performance during key reconciliation without compromising
the reconciliation efficiency and significant expenditure of authentication and time resources. Our
findings can lower the complexity for post-processing procedure, thus will promote the commercialization
of QKD.
\appendix
\section{Appendix A\\ Security Analysis}
The security of single-matrix reconciliation is guaranteed by the following theorems.
\textbf{Theorem 1}: Let $x$ and $z$ be Alice's sifted key and syndrome, respectively. $H_{m\times n}$ is the matrix used in reconciliation. Once Eve gets $z$, she can extract at most $m$ bits of information about $x$, i.e.,
\begin{equation}
I(x;z)\leq m.
\label{equ:prove-1}
\end{equation}
\noindent \textbf{Proof of Theorem}: The amount of information that Eve can obtain from $z$ about $x$ is
\begin{equation}
I(x;z)=H(z)-H(z|x).
\label{equ:prove-2}
\end{equation}
\noindent Assuming that Eve knows $H_{m\times n}$, she would obtain $z$ if she knows $x$, i.e.,
\begin{equation}
H(z|x)=0.
\label{equ:prove-3}
\end{equation}
\noindent When a random variable is uniformly distributed, its discrete entropy reaches the maximum value, so
\begin{equation}
\begin{split}
I(x;z)&=H(z)\leq -\sum_1^{2^{m}}(\frac{1}{2^{m}}\log{\frac{1}{2^{m}}})\\
&=\log{2^{m}}=m.
\end{split}
\label{equ:prove-4}
\end{equation}
\textbf{Theorem 2}: If the random variable $X$ of Alice's sifted key $x$ obeys uniform distribution, i.e.,
\begin{equation}
P(X=x)=\frac{1}{2^{n}},
\label{equ:prove-5}
\end{equation}
\noindent then there are at least $t$ bits of information about $x$ unknown to Eve, even though she has obtained $z$, i.e.,
\begin{equation}
H(x|z)\ge t.
\label{equ:prove-6}
\end{equation}
\noindent where $t = n - m $.
\noindent \textbf{Proof of Theorem}: The random variable $X$ of Alice's sifted key obeys uniform distribution, so
\begin{equation}
H(x)=-\sum_1^{2^{n}}(\frac{1}{2^{n}}\log{\frac{1}{2^{n}}})=\log{2^{n}}=n.
\label{equ:prove-7}
\end{equation}
\noindent From equations (\ref{equ:prove-1}), (\ref{equ:prove-2}), and (\ref{equ:prove-3}), we derive
\begin{equation}
\begin{split}
H(x|z)&=H(x)-H(z)\\
&=H(x)-I(x;z)\ge n-m=t.
\label{equ:prove-8}
\end{split}
\end{equation}
According to \textbf{Theorem 1} and \textbf{Theorem 2}, Eve can get at most $m$ bits of information. Thus, if these $m$ bits are discarded
during privacy amplification, the security of the key can be guaranteed.
Generally, Alice and Bob can use the following method to eliminate the $m$-bit information leakage.
If the matrix $H_{m\times n}$ has the following structure,
\begin{equation}
H_{m\times n}=(H_{m\times t}^{'},E_m),
\label{equ:prove-9}
\end{equation}
\noindent where $H_{m\times t}^{'}$ is a matrix which has $m$ rows and $t$ columns, $E_m$ is an $m$-order identity matrix, then $H_{m\times n}$ is called a system code.
In other words, $m$ vectors of $E_m$ are linearly independent in $H_{m\times n}$. Under this circumstance, Alice can calculate and send the syndrome by
\begin{equation}
\begin{split}
z&=(H_{m\times t}^{'},E_m)\cdot x\\
&=H_{m\times t}^{'}\cdot\begin{bmatrix} x_1 \\ x_2 \\ \vdots \\ x_t \end{bmatrix}\oplus\begin{bmatrix} x_{t+1} \\ x_{t+2} \\ \vdots \\ x_n \end{bmatrix}=\begin{bmatrix} z_1 \\ z_2 \\ \vdots \\ z_m \end{bmatrix}.
\label{equ:prove-10}
\end{split}
\end{equation}
\noindent From \textbf{Theorem 1}, we know that Eve can obtain at most $m$ bits of information about $x$.
Assume these $m$ bits of information are $m$ bits of $x$.
And for Eve, it is in her best interests if the $m$ bits of $x$ are $[x_{t+1}, \cdots, x_n]^{T}$.
Then Eve has to solve an underdetermined system of equations, which has no unique solution.
Moreover, after Alice and Bob discard the $m$ bits key $[x_{t+1}, \cdots, x_n]^{T}$, Eve cannot even form the system of equation and get any information about $[x_1, \cdots, x_t]^{T}$, even if she knows $H_{m\times t}^{'}$ and $z$.
If the matrix $H_{m\times n}$ is a non-system code, a system code can be formed by a series of elementary row transformations and column exchanges based on
\begin{equation}
H_{m\times n}=A\cdot(H_{m\times t}^{'},E_m)\cdot B,
\label{equ:prove-11}
\end{equation}
\noindent where $A$ is an $m$-order invertible square matrix representing a series of elementary row transformations, and $B$ is an $n$-order square matrix representing a series of column exchanges. Denote ${z^{'}}^{T}={(A^{-1}\cdot z)}^{T}=[z_1^{'}, \cdots, z_m^{'}]$ and ${x^{'}}^{T}={(B\cdot x)}^{T}=[x_1^{'}, \cdots, x_n^{'}]$, then
\begin{equation}
\begin{split}
z^{'}&=(H_{m\times t}^{'},E_m)\cdot x^{'}\\
&=H_{m\times t}^{'}\cdot\begin{bmatrix} x_1^{'} \\ x_2^{'} \\ \vdots \\ x_t^{'} \end{bmatrix}\oplus\begin{bmatrix} x_{t+1}^{'} \\ x_{t+2}^{'} \\ \vdots \\ x_n^{'} \end{bmatrix}=\begin{bmatrix} z_1^{'} \\ z_2^{'} \\ \vdots \\ z_m^{'} \end{bmatrix}.
\label{equ:prove-12}
\end{split}
\end{equation}
\noindent Similarly, after Alice and Bob abandon the $m$ bits key $[x_{t+1}^{'}, \cdots, x_n^{'}]^{T}$, even if Eve knows $H_{m\times t}^{'}$ and $z^{'}$, she will not be able to get any information about $[x_1^{'}, \cdots, x_t^{'}]^{T}$.
From the above analysis, we can see that if we first select $m$ linearly independent columns in $H_{m\times n}$ and then discard the bits corresponding to these columns, the $m$-bit information leakage can be removed, thus ensuring the security of the key. Therefore, we design a multiple-matrix construction method as shown in Appendix B. All matrices used in the simulation are prepared according to this method.
Through the above method, we can construct a series of matrices $(H_1, \cdots, H_u)$ of the same size. Let $H_i$ and $H_j$ denote any two matrices from $(H_1, \cdots, H_u)$. They can be represented as follows:
\begin{equation}
H_i=A_i\cdot (H_i^{'},E_m)\cdot B_i,
\label{equ:prove-13}
\end{equation}
\begin{equation}
H_j=A_j\cdot (H_j^{'},E_m)\cdot B_j,
\label{equ:prove-14}
\end{equation}
\noindent Their syndromes $z^{'i}$ and $z^{'j}$ can be represented as:
\begin{equation}
\begin{split}
z^{'i}&=(H_{i}^{'},E_m)\cdot x^{'i}\\
&=H_{i}^{'}\cdot\begin{bmatrix} x_{1}^{'i} \\ x_{2}^{'i} \\ \vdots \\ x_{t}^{'i} \end{bmatrix}\oplus\begin{bmatrix} x_{t+1}^{'i} \\ x_{t+2}^{'i} \\ \vdots \\ x_{n}^{'i} \end{bmatrix}=\begin{bmatrix} z_{1}^{'i} \\ z_{2}^{'i} \\ \vdots \\ z_{m}^{'i} \end{bmatrix},
\label{equ:prove-15}
\end{split}
\end{equation}
\begin{equation}
\begin{split}
z^{'j}&=(H_{j}^{'},E_m)\cdot x^{'j}\\
&=H_{j}^{'}\cdot\begin{bmatrix} x_{1}^{'j} \\ x_{2}^{'j} \\ \vdots \\ x_{t}^{'j} \end{bmatrix}\oplus\begin{bmatrix} x_{t+1}^{'j} \\ x_{t+2}^{'j} \\ \vdots \\ x_{n}^{'j} \end{bmatrix}=\begin{bmatrix} z_{1}^{'j} \\ z_{2}^{'j} \\ \vdots \\ z_{m}^{'j} \end{bmatrix},
\label{equ:prove-16}
\end{split}
\end{equation}
\noindent More precisely, ${z^{'i}}^{T}={(A_i^{-1}\cdot z^{i})}^{T}=[z_{1}^{'i},\cdots,z_{m}^{'i}]$, ${x^{'i}}^{T}={(B_i\cdot x)}^{T}=[x_{1}^{'i},\cdots,x_{n}^{'i}]$, ${z^{'j}}^{T}={(A_j^{-1}\cdot z^{j})}^{T}=[z_{1}^{'j},\cdots,z_{m}^{'j}]$, and ${x^{'j}}^{T}={(B_j\cdot x)}^{T}=[x_{1}^{'j},\cdots,x_{n}^{'j}]$. From the above matrices construction method, we can see that $[x_{t+1}^{'i},\cdots,x_{n}^{'i}]$ and $[x_{t+1}^{'j},\cdots,x_{n}^{'j}]$ are not equal, but their corresponding variable nodes sets are the same.
Similarly, assume Eve knows $[x_{t+1}^{'i},\cdots,x_{n}^{'i}]$ and $[x_{t+1}^{'j},\cdots,x_{n}^{'j}]$; then she has to solve the corresponding system of equations.
Because $H_i$ and $H_j$ are constructed with the method in Appendix B, the two sets of underdetermined systems of equations in equations (\ref{equ:prove-15}) and (\ref{equ:prove-16}) are the same.
In other words, it is impossible to form a determined or overdetermined system of equations.
After Alice and Bob discard those $m$ bits, even if Eve knows $H_i$, $H_j$, $z^{i}$, and $z^{j}$, she cannot obtain any information about $[x_{1}^{'i},\cdots,x_{t}^{'i}]$ and $[x_{1}^{'j},\cdots,x_{t}^{'j}]$. In fact, any two matrices constructed by this method will not reveal extra information during reconciliation.
Accordingly, in the case of reconciliation with more than two matrices, because the discarded $m$ bits information is corresponding to the same $m$ linearly independent columns, multiple syndromes transmitted through the classical channel do not reveal extra information, i.e.
\begin{equation}
H(z^{i}|z^{i-1},\cdots,z^{1})=0,
\forall z^{i}\in \{z^{2},\cdots,z^{u}\},
\label{equ:prove-17}
\end{equation}
\noindent then we get
\begin{equation}
\begin{aligned}
I(x;Z)&=I(x;z^{1})+I(x;z^{2}|z^{1})+I(x;z^{3}|z^{2} z^{1})+ \\
&\cdots+I(x;z^{u}|z^{u-1} \cdots\ z^{1}) \\
&=H(z^{1})-H(z^{1}|x)+H(z^{2}|z^{1})-H(z^{2}|x\ z^{1})+ \\
&\cdots+H(z^{u}|z^{u-1}\cdots z^{1})-H(z^{u}|x\ z^{u-1}\cdots z^{1}) \\
&=H(z^{1})+H(z^{2}|z^{1})+\cdots +H(z^{u}|z^{u-1}\cdots z^{1}) \\
&=H(z^{1})
\end{aligned}
\label{equ:prove-18}
\end{equation}
\noindent where $Z=\{z^{1},z^{2},\cdots,z^{u}\}$. Therefore, if Alice and Bob use our method to construct matrices, they can guarantee the security of the key, i.e., guarantee the security of the multi-matrix post-processing.
\section{Appendix B\\ Multiple Matrices Construction Method}
\begin{enumerate}
\item The first LDPC matrix called $H_1$ is constructed;
\item By a series of elementary row transformation and column exchanges, $H_1$ is transformed into a system code, such that $m$ linearly independent columns can be determined.
These columns correspond to variable nodes $[v_{(1)}, \cdots, v_{(m)}]$ in $H_1$. Let the remaining variable nodes be $[v_{[1]}, \cdots, v_{[t]}]$;
\item The rest $u-1$ parity check matrices $(H_2, \cdots ,H_u)$ can be constructed based on $H_1$: First, rearrange the columns of the variable nodes $[v_{(1)}, \cdots, v_{(m)}]$ in $H_1$. Then rearrange the columns of the variable nodes $[v_{[1]}, \cdots, v_{[t]}]$ in $H_1$. It is clear that, in this way, the set of the positions of the linearly independent columns is identical for all of the $u$ matrices.
\end{enumerate}
\section{Appendix C\\ Pseudocode of MBP, MSBP and MLBP}
\textbf{MBP algorithm}
\begin{algorithmic}[1]
\State $Initialize\ L_{v_i\to c_j}^{k}=L_{P_i}^{k}$
\For{$every\ parity\textrm{-}check\ matrix\ H_k$}
\For{$j=1$ to $m$}
\For{$every\ v_i^{k}\in neighborhood\ of\ c_j^{k}$}
\State $Generate\ and\ propagate\ L_{c_j\to v_i}^{k}$
\EndFor
\EndFor
\For{$i=1$ to $n$}
\For{$every\ c_j^{k}\in neighborhood\ of\ v_i^{k}$}
\State $Generate\ and\ propagate\ L_{v_i\to c_j}^{k}$
\EndFor
\EndFor
\EndFor
\State $Make\ decoding\ decisions$
\If{$ stopping\ rule\ is\ not\ satisfied$}
\State $Go\ back\ to\ line\ 2$
\EndIf
\end{algorithmic}
~\\
\textbf{MSBP algorithm}
\begin{algorithmic}[1]
\State $Initialize\ L_{v_i\to c_j}^{k}=L_{P_i}^{k}$
\For{$every\ parity\textrm{-}check\ matrix\ H_k$}
\For{$i=1$ to $n$}
\For{$every\ c_j^{k}\in neighborhood\ of\ v_i^{k}$}
\State $Generate\ and\ propagate\ L_{c_j\to v_i}^{k}$
\EndFor
\For{$every\ c_j^{k}\in neighborhood\ of\ v_i^{k}$}
\State $Generate\ and\ propagate\ L_{v_i\to c_j}^{k}$
\EndFor
\EndFor
\EndFor
\State $Make\ decoding\ decisions$
\If {$stopping\ rule\ is\ not\ satisfied$}
\State $Go\ back\ to\ line\ 2$
\EndIf
\end{algorithmic}
~\\
\textbf{MLBP algorithm}
\begin{algorithmic}[1]
\State $Initialize\ L_{v_i\to c_j}^{k}=L_{P_i}^{k}$
\For{$every\ parity\textrm{-}check\ matrix\ H_k$}
\For{$j=1$ to $m$}
\For{$every\ v_i^{k}\in neighborhood\ of\ c_j^{k}$}
\State $Generate\ and\ propagate\ L_{c_j\to v_i}^{k}$
\For{$every\ c_l^{k}\in neighborhood of\ v_i^{k}\ except\ c_j^{k}$}
\State $Generate\ and\ propagate\ L_{v_i\to c_l}^{k}$
\EndFor
\EndFor
\EndFor
\EndFor
\State $Make\ decoding\ decisions$
\If{$stopping\ rule\ is\ not\ satisfied$}
\State $Go\ back\ to\ line\ 2$
\EndIf
\end{algorithmic}
\end{document}
|
\begin{document}
\title{On homogeneous pinning models and penalizations}
\author{Mihai Gradinaru \and Samy Tindel}
\keywords{Polymer models, penalization method, random walk, renewal theorem}
\subjclass[2000]{60K35, 82B41, 82B44}
\address{
{\it Mihai Gradinaru:}
{\rm IRMAR, Universit\'e de Rennes 1, Campus de Beaulieu,
35042 Rennes Cedex.
}
\newline
$\mbox{ }$\hspace{0.1cm}
{\it Email: }{\tt [email protected]}
\newline
$\mbox{ }$\hspace{0.1cm}
\newline
$\mbox{ }$\hspace{0.1cm}
{\it Samy Tindel:}
{\rm Institut {\'E}lie Cartan Nancy, B.P. 239,
54506 Vand{\oe}uvre-l{\`e}s-Nancy Cedex, France}.
\newline
$\mbox{ }$\hspace{0.1cm}
{\it Email: }{\tt [email protected]}
}
\begin{abstract}
In this note, we show how the penalization method, introduced in order to
describe some non-trivial changes of the Wiener measure, can be applied
to the study of some simple polymer models such as the pinning model.
The bulk of the analysis is then focused on the study of a martingale which
has to be computed as a Markovian limit.
\end{abstract}
\maketitle
\section{Introduction}~~
Our motivation for writing the current note is the following: on the one hand,
in the past few years, some interesting advances have seen the light concerning
various kinds of polymer models, having either an interaction with a random environment or
a kind of intrinsic self-interaction. Among this wide class of models, we will be interested
here in some polymers interacting with a given interface, as developed for instance
in \cite{BdH,MGO}. For this kind of polymer, the introduction of some generalized renewal
tools has yielded some very substantial progress in the analysis of the model, and a quite
complete picture of their asymptotic behaviour in terms of localization near the
interface is now available e.g. in
\cite{CGZ,GT} and in the monograph \cite{Gi}.
On the other hand, and a priori in a different context, the series of papers starting with
\cite{RVY} and ending with the recent monograph \cite{RY} presents a rather simple
method in order to quantify the penalization of a Brownian (or Bessel) path by a functional
of its trajectory (such as the one-sided supremum or the number of excursions).
This method can then be applied in a wide number of natural situations, getting a very complete
description of some Gibbs type measures based on the original Brownian motion.
More specifically, when translated in a random walk context, the penalization method
can be read as follows:
let $\{b_n;\, n\ge 0 \}$ be a symmetric random walk on $\mathds{Z}$,
defined on a stochastic basis $(\Omega,\mathcal F,(\mathcal F_n)_{n\ge 1},(\mathds{P}_z)_{z\in\mathds{Z}})$.
For $n\ge 0$, let also $e^{H_n}$ be a bounded positive
measurable functional of the path $(b_0,\ldots,b_n)$.
Then, for $\beta\in\mathds{R}$, $n\ge p \ge 0$, we are concerned with a generic Gibbs type measure
$\rho_n$ on $\mathcal F_p$ defined, for $\Gamma_p\in\mathcal F_p$, by
\begin{equation}\label{def:hat-rho-n}
\rho_n(\Gamma_p)
=\frac{\mathds{E}_{0}\left[\mathds{1}_{\Gamma_p} e^{\beta H_n} \right]}{Z_{n}} ,
\quad\mbox{ where }\quad
Z_{n}= \mathds{E}_{0}\left[e^{\beta H_n}\right].
\end{equation}
In its general formulation, the penalization principle, which allows an asymptotic study
of $\rho_n$, can be stated as follows:
\begin{prop}\label{prop:general-penalization}
Suppose that the process $(b_n,H_n)$ is a $\mathds{Z}\times\mathds{R}_+$-valued
Markov process, and let $\Lambda_n$ be its semi-group.
Assume that, for any $p\ge 0$, the function $M_p$ defined by
\begin{equation}\label{markov:lim}
M_p(w,z):=\lim_{n\to\infty}
\frac{[\Lambda_{n-p}f](w,z)}{[\Lambda_n f](0)},
\quad\mbox{ where }\quad
f(w,z)=e^{-\beta z}
\end{equation}
exists, for any $(w,z)\in \mathds{Z}\times\mathds{R}_+$, and that
\begin{equation*}
\frac{[\Lambda_{n-p}f](w,z)}{[\Lambda_n f](0)}
\le
C(p,w,z),
\quad\mbox{ where }\quad
\mathds{E}_{0}[C(p,b_p,\ell_p)]<\infty.
\end{equation*}
Then:
\begin{enumerate}
\item
the process $M_p:=M_p(b_p,\ell_p)$ is a non-negative $\mathds{P}_{0}$-martingale;
\item
for any $p\ge 0$, when $n\to\infty$, the measure $\rho_n$ defined by (\ref{def:hat-rho-n})
converges weakly on $\mathcal F_p$ to a measure $\rho$, where $\rho$ is defined
by
\begin{equation*}
\rho(\Gamma_p)=\mathds{E}_{0}\left[\mathds{1}_{\Gamma_p}M_p\right],
\quad\mbox{ for }\quad
\Gamma_p\in\mathcal F_p.
\end{equation*}
\end{enumerate}
\end{prop}
This last proposition can be seen then as an invitation
to organize the asymptotic study of the measure $\rho_n$
in the following way: first compute explicitly the limit of the ratio
$[\Lambda_{p-n}f](w,z)/[\Lambda_p f](0)$ when $p\to\infty$, which should define also an asymptotic
measure $\rho$ in the infinite volume regime. Then try to read the basic properties of $\rho$
by taking advantage of some simple relations on the martingale $M_p$.
It is easily seen that some links exist between the polymer measure theory as mentioned above
and the penalization method. Furthermore, we believe that the two theories can interact in a
fruitful way. Indeed, the penalizing scheme offers a simple and systematic framework for
the study of Gibbs measures based on paths, and it is also quite pleasant to be able to read the
main features of the limiting measure $\rho$ on the martingale $M_p$, which is usually a simple
object. Without presenting a completely new result, this article will thus try to make a bridge
between the two aspects of the topic, by studying the simplest of the interface-based polymers,
namely the polymer pinned at an interface, through a purely penalizing scheme. Let us be
more specific once again, and describe our model and the main results we shall obtain:
denote by $\ell_n$ the local time at 0 of $b$, that is
$$
\ell_n=\sharp \{p\le n;\,b_p=0\}.
$$
For $\beta\in\mathds{R}$, $n\ge p \ge 0$, we are concerned here with the Gibbs type measure
$\mathds{Q}_{0}^{(n,\beta)}$ on $\mathcal F_p$ defined, for $\Gamma_p\in\mathcal F_p$, $p<n$, by
\begin{equation}\label{def:rho-n}
\mathds{Q}_{0}^{(n,\beta)}(\Gamma_p)
=\frac{\mathds{E}_{0}\left[\mathds{1}_{\Gamma_p}e^{\beta \ell_n} \right]}{Z_{n}^{f}} ,
\quad\mbox{ where }\quad
Z_{n}^{f}= \mathds{E}_{0}\left[e^{\beta\ell_n}\right].
\end{equation}
Finally, we will need to introduce a slight variation of the Bessel walk of dimension 3,
which is defined as a random walk $R$ on $\mathds{N}$ starting from 0, such that
$\mathds{P}_{0}(R_0=0)=\mathds{P}_{0}(R_1=1)=1$, and whenever $j\ge 1$,
\begin{equation}\label{eq:transition-bessel-walk}
\mathds{P}_{0}(R_{n+1}=j\pm 1\, |\, R_{n}=j)= \frac{j\pm 1}{2j}.
\end{equation}
With these notations in hand, the main result we shall obtain is then the following:
\begin{thm}\label{thm:desription-gibbs-limit}
For $\beta\in\mathds{R}$, $n\ge p \ge 0$, let $\mathds{Q}_{0}^{(n,\beta)}$ be the measure defined by
(\ref{def:rho-n}). Then, for any $p\ge 0$, the measure $\mathds{Q}_{0}^{(n,\beta)}$ on $\mathcal F_p$ converges
weakly, as $n\to\infty$, to a measure $\mathds{Q}_{0}^{(\beta)}$ defined by
\begin{equation}\label{def:rho}
\mathds{Q}_{0}^{(\beta)}(\Gamma_p)=\mathds{E}_{0}\left[\mathds{1}_{\Gamma_p}M_p^{(\beta)}\right],
\quad\mbox{ for }\quad
\Gamma_p\in\mathcal F_p.
\end{equation}
According to the sign of $\beta$ the two following
situations can occur:
\noindent
{\bf (1)} When $\beta<0$ (delocalized phase): set $\alpha=-\beta$. Then $M_p^{(\beta)}$ has
the following expression:
$$
M_p^{(\beta)}=e^{-\alpha\ell_p}\left[ (1-e^{-\alpha})|b_p| + 1 \right].
$$
Moreover, under the probability $\mathds{Q}_{0}^{(\beta)}$, the process $b$ and its local time $\ell$
can be described in the following way:
\begin{enumerate}
\item[a)]
The random variable $\ell_{\infty}$ is finite almost surely, and is distributed
according to a geometric law with parameter $1-e^{-\alpha}$.
\item[b)]
Let $g=\sup\{r\ge 0;\, b_r=0\}$. Then $g$ is finite almost surely, and the two processes
$b^{(-)}=\{b_r;\, r\le g\}$ and $b^{(+)}=\{b_{r+g};\, r\ge 0\}$ are independent.
\item[c)]
The process $|b^{(+)}|$ is a Bessel random walk as defined by the transition law
{\rm (\ref{eq:transition-bessel-walk})}, and $\mbox{sign}(b^{(+)})=\pm 1$ with probability $1/2$.
\item[d)]
Given the event $\ell_{\infty}=l$ for $l\ge 1$, the process $b^{(-)}$ is a standard random
walk, stopped when its local time reaches $l$.
\end{enumerate}
\noindent
{\bf (2)} When $\beta>0$ (localized phase): in this case, the martingale $M_p^{(\beta)}$
can be written as:
\begin{equation}\label{eq:def-martingale-localized}
M_p^{(\beta)}=\exp\left\{\beta{\hat\ell}_{p}-c_{+,\beta}\,|b_{p}|-c_{-,\beta}\,p\right\},
\end{equation}
where $c_{\pm,\beta}=(\nicefrac{1}{2})[\beta\pm\ln(2-e^{-\beta})]$,
and where $\hat\ell_p$ is a slight modification of $\ell_p$ defined by
$\hat\ell_p=\ell_p-\mathds{1}_{b_p=0}$.
Furthermore, under the probability $\mathds{Q}_{0}^{(\beta)}$, the process $b$ can be decomposed
as follows:
\begin{enumerate}
\item[a)]
Let $\tau=(\tau_0^j)_{ j\ge 1}$ be the successive return times of $b$ at 0, and set
$\tau_0^0=0$, $\tau_0^1=\tau_0$.
Then the sequence $\{\tau_0^j-\tau_0^{j-1};\, j\ge 1\}$ is i.i.d, and the law of $\tau_0$
is defined by its Laplace transform (\ref{new:laplace}). In particular, $\tau_0$
has a finite mean, whose equivalent, as $\beta\to\infty$, is $1-e^{-\beta}/2$.
\item[b)]
Given the sequence $\tau$, the excursions $(b^{j})_{ j\ge 1}$, defined by
$b_r^j=b_{\tau_0^{j-1}+r}$ for $r\le \tau_0^j-\tau_0^{j-1}$, are independent.
Moreover, each $b^j$ is distributed as a random walk starting from 0, constrained
to go back to 0 at time $\tau_0^j-\tau_0^{j-1}$.
\end{enumerate}
\end{thm}
As mentioned above, the results presented in this note are not really new. In the penalization
literature, the random walk weighted by a functional of its local time has been considered
by Debs in \cite{De} for the delocalized phase, and we only cite his result here in order
to give a complete picture of our polymer behaviour. We shall thus concentrate on the
localized phase $\beta>0$ in the remainder of the article.
However, in this case the results concerning
homogeneous polymers can be considered now as classical,
and the first rigorous treatment of our
pinned model can be traced back at least to \cite{BdH}. The results we obtain for the
localized part of our theorem can also be found, in an (almost) explicit way, in
\cite{CGZ,Gi}. But once again, our goal here is just to show that the penalization
method can be applied in this context, and may shed a new light on the polymer problem.
Furthermore, we believe that this method may be applied to other continuous
or discrete inhomogeneous models, hopefully leading to some simplifications in their analysis.
These aspects will be handled in a subsequent publication.
Let us say now a few words about the way our article is structured:
at Section \ref{sec:classical-random-walk}, we will recall some basic identities in law
for the simple symmetric random walk on $\mathds{Z}$. In order to apply our penalization
program, a fundamental step is then to get some sharp asymptotics for the semi-group
$\Lambda_n$ mentioned at Proposition \ref{prop:general-penalization}. This will be done
at Section \ref{sec:laplace-local-time}, thanks to the renewal trick introduced e.g. in
\cite{Gi}. This will allow us to describe our infinite volume limit at Section \ref{sec:gibbs-limit}
in terms of the martingale $M_p^{(\beta)}$. The description of the process $b$
under the infinite volume measure given at Theorem \ref{thm:desription-gibbs-limit}
will then be proved, in terms of the behavior of $M_p^{(\beta)}$,
at Section \ref{sec:process-new-pb-measure}.
\section{Classical facts on random walks}\label{sec:classical-random-walk}~~
\setcounter{equation}{0}
Let us first recall some basic results about the random walk $b$: for $n\ge 0$
and $z\in\mathds{Z}$, set
\begin{equation*}
S_n=\sup\{ b_p;\, p\le n \},
\quad
T_z=\inf\{n\ge 0;\, b_n=z\}
\quad\mbox{ and }\quad
\tau_z=\inf\{n\ge 1;\, b_n=z\}.
\end{equation*}
Let us denote by $\mathds{D}$ the set of even integers in $\mathds{Z}$, and for $(n,r)\in\mathds{N}
\times\mathds{Z}$, recall that $p_{n,r}:=\mathds{P}_{0}(b_n=r)$ is given by:
\begin{equation*}
p_{n,r}
=\left(\frac{1}{2}\right)^n \begin{pmatrix}\nicefrac{(n+r)}{2}\\n\end{pmatrix}
\mathds{1}_{\mathds{D}}(n+r)
\mathds{1}_{\{ |r|\le n \}}.
\end{equation*}
Then it is well-known (see e.g. \cite{Fe,De}) that
\begin{equation}\label{distribution:Sn-Tr}
\mathds{P}_{0}(S_n=r)=p_{n,r}\vee p_{n,r+1}
\quad\mbox{ and }\quad
\mathds{P}_{0}(T_r=n)=
\frac{r}{n}\left(\frac{1}{2}\right)^n \begin{pmatrix}\nicefrac{(n+r)}{2}\\n\end{pmatrix}.
\end{equation}
Moreover, the distribution of $\ell_n$ can be expressed in terms of these quantities:
\begin{equation}\label{dist:loc-time}
\mathds{P}_{0}(\ell_n=k)
=\mathds{P}_{0}(S_{n-k}=k)+\mathds{P}_{0}(T_{n+1}=n-k),
\end{equation}
and the following asymptotic results hold true:
\begin{lem}\label{equiv:sn-ln}
Let $p\in\mathds{N}$ and set $\kappa=(2/\pi)^{\nicefrac{1}{2}}$. Then
\begin{equation*}
\lim_{n\to\infty}n^{\nicefrac{1}{2}}\mathds{P}_{0}(S_n=p)
=\lim_{n\to\infty}n^{\nicefrac{1}{2}}\mathds{P}_{0}(\ell_n=p)
=\kappa,
\quad\mbox{ and }\quad
\lim_{n\to\infty}n^{\nicefrac{3}{2}}\mathds{P}_{0}(T_z=n) = \kappa z.
\end{equation*}
\end{lem}
For our further computations, we will also need the following expression for
the Laplace transform of $T_r$ and $\tau_{r}$:
\begin{lem}\label{laplace:hitt-time}
Let $r\in\mathds{N}$, $\delta>0$.
Then
\begin{equation}\label{laplace:T}
\mathds{E}_{0}[ e^{-\delta T_r}] = \exp\left\{-r\arg\cosh(e^\delta)\right\}
\end{equation}
and
\begin{equation}\label{laplace:tau}
\mathds{E}_{0}[ e^{-\delta \tau_r}] =
\left\{
\begin{array}{ll}
\exp\left\{-r\arg\cosh(e^\delta)\right\},&\mbox{ if }\,r\geq 1\\
\exp\left\{-\delta-\arg\cosh(e^\delta)\right\},&\mbox{ if }\,r=0
\end{array}
\right.
\end{equation}
\end{lem}
\begin{proof}
This is an elementary computation based on the fact that
$\{\exp(\eta b_n-\delta n);\,n\geq 1\}$ is a martingale.
Also, note that $\tau_{0}$ has the same law as $1+T_{1}$.
\end{proof}
\section{Laplace transform of the local time}\label{sec:laplace-local-time}~~
\setcounter{equation}{0}
Our aim in this section is to find an asymptotic equivalent for the Laplace transform
$Z_{n}^{f}$ of $\ell_n$. However, for computational purposes, we will also have to
consider the following constrained Laplace transform:
\begin{equation*}
Z_{2m}^{c}:=\mathds{E}_{0}\left[e^{\beta\ell_{2m}}\mathds{1}_{\{b_{2m}=0\}}\right]\quad(\beta\geq 0).
\end{equation*}
With this notation in hand, here is our first result about the exponential moments of
the local time:
\begin{lem}\label{lem:equiv-laplace:tl0}
For any $\beta>0$, we have
\begin{equation}\label{equiv:laplace:tl0c}
\lim_{m\to\infty}\left(e^{-\beta}(2-e^{-\beta})\right)^{m}Z_{2m}^{c}
= c_\beta^c,
\quad\mbox{ where }\quad
c_\beta^c:=\frac{2(1-e^{-\beta})}{2-e^{-\beta}},
\end{equation}
and
\begin{equation}\label{equiv:laplace:tl0f}
\lim_{n\to\infty}\left(e^{-\beta}(2-e^{-\beta})\right)^{\lfloor\nicefrac{n}{2}\rfloor}Z_{n}^{f}
= c_\beta^f,
\quad\mbox{ where }\quad
c_\beta^f:=\frac{2}{2-e^{-\beta}}.
\end{equation}
\end{lem}
\begin{proof}
According to (1.9)-(1.10) in \cite[p. 9]{Gi}, by using the renewal theorem, we can write
\begin{equation}\label{equa:renouv}
Z_{2m}^{c}=\mathds{E}_{0}\left[e^{\beta\ell_{2m}}\mathds{1}_{\{b_{2m}=0\}}\right]
=\sum_{k=1}^{m}\sum_{{\bf r}\in A_{k,m}}
\prod_{j=1}^{k}e^{\beta}\mathds{P}_{0}(\tau_{0}=2r_{j})
\underset{m\to\infty}{\sim}
\frac{e^{m\mbox{\small{\tt F}}(\beta)}}{\sum_{m}m{\tilde K}_{\beta}(m)},
\end{equation}
where we denoted $A_{k,m}=\{{\bf r}=(r_{1},\ldots,r_{k}),\sum_{j=1}^{k}r_{j}=m\}$.
Here
\begin{equation}\label{K}
{\tilde K}_{\beta}(m):=\exp\left(\beta-m\mbox{\small\tt F}(\beta)\right)K(m),
\quad\mbox{ where }\quad K(m):=\mathds{P}_{0}(\tau_{0}=2m),
\end{equation}
and $\mbox{\small\tt F}(\beta)$ is the solution of the following equation (see also (1.6), p. 8 in
\cite{Gi})
\begin{equation}\label{equa:F}
\sum_{m}e^{-m\mbox{\small\tt F}(\beta)}K(m)=e^{-\beta}
\quad\mbox{ i.e. }\quad
\mathds{E}_0\left[ e^{\nicefrac{-\mbox{\small\tt F}(\beta)\tau_0}{2}} \right]
= e^{-\beta}.
\end{equation}
Notice that in our case, equation (\ref{equa:F}) can be solved explicitly: thanks to relation
(\ref{laplace:tau}), it can be transformed into:
\begin{equation*}
\exp\left(\nicefrac{-\mbox{\small\tt F}(\beta)}{2}
-\arg\cosh\left(e^{\nicefrac{\mbox{\small\tt F}(\beta)}{2}}\right)\right)=e^{-\beta}
\Leftrightarrow
\cosh\left(\beta-\nicefrac{\mbox{\small\tt F}(\beta)}{2}\right)
=e^{\nicefrac{\mbox{\small\tt F}(\beta)}{2}}
\Leftrightarrow
e^{\beta-\mbox{\small\tt F}(\beta)}+e^{-\beta}=2,
\end{equation*}
and thus, the solution of (\ref{equa:F}) is given by
\begin{equation}\label{expr:F}
\mbox{\small\tt F}(\beta)=\beta-\ln(2-e^{-\beta}).
\end{equation}
On the other hand,
\begin{equation*}
\sum_{m}me^{-\lambda m}\mathds{P}_{0}(\tau_{0}=2m)
=-\frac{d}{d\lambda}\mathds{E}_{0}\left[e^{\nicefrac{-\lambda\tau_{0}}{2}}\right]
=-\frac{d}{d\lambda}\left(1-e^{\nicefrac{-\lambda}{2}}(e^{\lambda}-1)^{\nicefrac{1}{2}}\right)
=\frac{e^{-\lambda}}{2(1-e^{-\lambda})^{\nicefrac{1}{2}}},
\end{equation*}
as we can see again by (\ref{laplace:tau}) and simple computation. Therefore, taking
$\lambda=\mbox{\small\tt F}(\beta)$, we obtain
\begin{equation}\label{tilde:espe}
\sum_{m}m{\tilde K}_{\beta}(m)
=e^{\beta}\sum_{m}m e^{-m\mbox{\small\tt F}(\beta)}\mathds{P}_{0}(\tau_{0}=2m)
=\frac{2-e^{-\beta}}{2(1-e^{-\beta})},
\end{equation}
since, according to (\ref{expr:F}),
$e^{-\mbox{\small\tt F}(\beta)}=e^{-\beta}(2-e^{-\beta})=1-(1-e^{-\beta})^{2}$.
Putting together (\ref{equa:renouv}), (\ref{expr:F}) and (\ref{tilde:espe}) we get
the equivalent for the constrained Laplace transform (\ref{equiv:laplace:tl0c}).
We proceed now with the study of the free Laplace transform, called $Z_{n}^{f}$. Set
$\overline{K}(n):=\sum_{j>n}K(j)$.
We can write
\begin{multline*}
Z_{2m}^{f}
=\sum_{j=0}^{m}\mathds{E}_{0}\left[e^{\beta\ell_{2m}}\mathds{1}_{\max\{k\leq m,b_{2k}=0\}=j}\right]
=\sum_{j=0}^{m}\mathds{E}_{0}\left[e^{\beta\ell_{2j}}\mathds{1}_{\{b_{2j}=0\}}
\mathds{1}_{\{\tau_{0}\circ\theta_{2j}>2(m-j)\}}\right]\\
=\sum_{j=0}^{m}\mathds{E}_{0}\left[e^{\beta\ell_{2j}}\mathds{1}_{\{b_{2j}=0\}}\right]
\mathds{P}_{0}\left(\tau_{0}>2(m-j)\right)
=\sum_{j=0}^{m}\mathds{E}_{0}\left[e^{\beta\ell_{2(m-j)}}\mathds{1}_{\{b_{2(m-j)}=0\}}\right]
{\overline K}(j)\\
=\sum_{j=0}^{m}Z_{2(m-j)}^{c}{\overline K}(j)
=e^{m\mbox{\small\tt F}(\beta)}\sum_{j=0}^{m}e^{-(m-j)\mbox{\small\tt F}(\beta)}\,Z_{2(m-j)}^{c}
e^{-j\mbox{\small\tt F}(\beta)}{\overline K}(j).
\end{multline*}
In order to use (\ref{equiv:laplace:tl0c}) on the right hand side of the latter equality we need
to apply the dominated convergence theorem. This is allowed by the inequality
\begin{equation}\label{eq:up-bnd-zmc}
e^{-(m-j)\mbox{\small\tt F}(\beta)}\,Z_{2(m-j)}^{c}\leq 1,
\end{equation}
which is valid since $e^{-j\mbox{\small\tt F}(\beta)} Z_{2j}^{c}$ represents
the probability that a random walk with positive increments with law ${\tilde K}_{\beta}$
passes by $j$ (see also (1.9) in \cite{Gi}, p. 9).
Therefore, according to (\ref{equiv:laplace:tl0c}) and (\ref{laplace:tau}),
\begin{multline*}
Z_{2m}^{f}
\underset{m\to\infty}{\sim}
c_{\beta}^{c}e^{m\mbox{\small\tt F}(\beta)}
\sum_{j=0}^{\infty}e^{-j\mbox{\small\tt F}(\beta)}\sum_{i=j+1}^{\infty}K(i)
=c_{\beta}^{c}e^{m\mbox{\small\tt F}(\beta)}
\sum_{i=1}^{\infty}K(i)\sum_{j=0}^{i-1}e^{-j\mbox{\small\tt F}(\beta)}\\
=\frac{c_{\beta}^{c}}{1-e^{-\mbox{\small\tt F}(\beta)}}e^{m\mbox{\small\tt F}(\beta)}
\left(\sum_{i=1}^{\infty}K(i)-\sum_{i=1}^{\infty}K(i)e^{-i\mbox{\small\tt F}(\beta)}\right)
=\frac{c_{\beta}^{c}e^{m\mbox{\small\tt F}(\beta)}
(1-e^{-\beta})}{1-e^{-\mbox{\small\tt F}(\beta)}}
=\frac{2e^{m\mbox{\small\tt F}(\beta)}}{2-e^{-\beta}}
\end{multline*}
and we get (\ref{equiv:laplace:tl0f}), by using (\ref{expr:F}).
To finish the proof, let us note that, for any $\beta>0$,
\begin{equation}\label{laplace:impair-free}
\mathds{E}_{0}\left[e^{\beta\ell_{2m+1}}\right]=\mathds{E}_{0}\left[e^{\beta\ell_{2m}}\right]=Z_{2m}^{f}
\underset{m\to\infty}{\sim}c_{\beta}^{f}e^{m\mbox{\small\tt F}(\beta)}.
\end{equation}
\end{proof}
We will now go one step further and give an equivalent of $\mathds{E}_{x}\left[e^{\beta\ell_{n}}\right]$
for an arbitrary $x\in\mathds{Z}$. Let us denote by $\mathds{O}$ the set of odd integers in
$\mathds{Z}$.
\begin{lem}\label{equiv:laplace:issux}
Let $x\in\mathds{Z}$ be the starting point for $b$ and recall that the constant $c_{\beta}^{f}$
has been defined at relation (\ref{equiv:laplace:tl0f}). Then, for any $\beta>0$,
\begin{equation}\label{equiv:laplace:tlx}
\mathds{E}_{x}\left[e^{\beta\ell_{n}}\right]
\underset{n\to\infty}{\sim}
c_{\beta}^{f}
\exp\left\{\frac{\mbox{\small\tt F}(\beta)}{2}\,
(n+|x|-\mathds{1}_{\mathds{O}}(n+x))-\beta|x|\right\}.
\end{equation}
\end{lem}
\begin{proof}
First of all, notice that, by symmetry of the random walk,
$\mathds{E}_{x}\left[e^{\beta\ell_{n}}\right]=\mathds{E}_{-x}\left[e^{\beta\ell_{n}}\right]$. We will thus treat the
case of a strictly positive initial condition $x$ without loss of generality.
\noindent
\underline{Case $x,n\in\mathds{D}$.~} Let us split $\mathds{E}_{x}\left[e^{\beta\ell_{2m}}\right]$ into
\begin{equation*}
\mathds{E}_{x}\left[e^{\beta\ell_{2m}}\right]
=\mathds{P}_{x}\left(T_0>2m\right)
+
\mathds{E}_{x}\left[e^{\beta\ell_{2m}}\mathds{1}_{\{T_0\leq 2m\}}\right]
=:
D_1(2m)+D_2(2m).
\end{equation*}
Then, on the one hand,
\begin{equation*}
D_1(2m)=\mathds{P}_{0}\left(T_x>2m\right)=\mathds{P}_{0}\left(S_{2m}< x\right),
\end{equation*}
and thus, owing to Lemma \ref{equiv:sn-ln}, we have
\begin{equation}\label{equiv:d1n}
D_1(2m)\underset{m\to\infty}{\sim}\kappa xm^{\nicefrac{-1}{2}}.
\end{equation}
On the other hand, setting $g(p)=\mathds{E}_{0}\left[e^{\beta\ell_{p}}\right]$, we can write
\begin{multline*}
D_2(2m)=\mathds{E}_{x}\left[\mathds{1}_{\{T_{0}\leq 2m\}}g(2m-T_{0})\right]
=\sum_{k=0}^{m}\mathds{P}_{x}(T_{0}=2k)g(2(m-k))\\
=e^{m\mbox{\small\tt F}(\beta)}\sum_{k=0}^{m}\mathds{P}_{x}(T_{0}=2k)e^{-k\mbox{\small\tt F}(\beta)}
g(2(m-k))e^{-(m-k)\mbox{\small\tt F}(\beta)}\\
\underset{m\to\infty}{\sim}
c_{\beta}^{f}e^{m\mbox{\small\tt F}(\beta)}
\mathds{E}_{0}\left[\exp\left\{-\mbox{\small\tt F}(\beta)\frac{T_{x}}{2}\right\}\right]
=c_{\beta}^{f}\exp\left\{m\mbox{\small\tt F}(\beta)
-\arg\cosh\left(\frac{\mbox{\small\tt F}(\beta)}{2}\right)x\right\}\\
=c_{\beta}^{f}\exp\left\{\frac{\mbox{\small\tt F}(\beta)}{2}(2m+x)-\beta x\right\},
\end{multline*}
which is (\ref{equiv:laplace:tlx}). Here we used the dominated convergence theorem allowed again
by the fact that $g(2(m-k))e^{-(m-k)\mbox{\small\tt F}(\beta)}\leq 1$
(this inequality being obtained by a little elaboration of (\ref{eq:up-bnd-zmc})).
\noindent
\underline{Case $x\in\mathds{D},n\in\mathds{O}$.~}
Clearly, invoking the latter result, we have
\begin{equation*}
\mathds{E}_{x}\left[e^{\beta\ell_{n}}\right]=\mathds{E}_{x}\left[e^{\beta\ell_{n-1}}\right]
\underset{n\to\infty}{\sim}
c_{\beta}^{f}\exp\left\{\frac{\mbox{\small\tt F}(\beta)}{2}(n-1+x)-\beta x\right\}.
\end{equation*}
\noindent
\underline{Case $x\in\mathds{O},n\in\mathds{D}$.~}
Following a similar reasoning as for the first case, we see that it is enough to study the
term $D_2(2m)$:
\begin{multline*}
D_2(2m)=\mathds{E}_{x}\left[\mathds{1}_{\{T_{0}\leq 2m\}}g(2m-T_{0})\right]
=\sum_{k=1}^{m}\mathds{P}_{x}(T_{0}=2k-1)g(2m-2k+1)\\
=\sum_{k=1}^{m}\mathds{P}_{x}(T_{0}=2k-1)g(2(m-k))
\underset{m\to\infty}{\sim}
c_{\beta}^{f}e^{m\mbox{\small\tt F}(\beta)}
\sum_{k=1}^{\infty}\mathds{P}_{x}(T_{0}=2k-1)e^{-k\mbox{\small\tt F}(\beta)}\\
=c_{\beta}^{f}e^{m\mbox{\small\tt F}(\beta)}
\mathds{E}_{x}\left[\exp\left\{-\mbox{\small\tt F}(\beta)\frac{1+T_{0}}{2}\right\}\right]
=c_{\beta}^{f}e^{(m-\nicefrac{1}{2})\mbox{\small\tt F}(\beta)}
\mathds{E}_{0}\left[\exp\left\{-\mbox{\small\tt F}(\beta)\frac{T_{x}}{2}\right\}\right]\\
=c_{\beta}^{f}e^{(m-\nicefrac{1}{2})\mbox{\small\tt F}(\beta)}
\exp\left\{\left(\frac{\mbox{\small\tt F}(\beta)}{2}-\beta\right)\right\}
=c_{\beta}^{f}\exp\left\{\frac{\mbox{\small\tt F}(\beta)}{2}(2m-1+x)-\beta x\right\}.
\end{multline*}
Here we used again the dominated convergence theorem and the fact that $\ell_{2(m-k)+1}$
and $\ell_{2(m-k)}$ have the same law under $\mathds{P}_{0}$.
\noindent
\underline{Case $x,n\in\mathds{O}$.~}
Again, by using the preceding result
\begin{equation*}
\mathds{E}_{x}\left[e^{\beta\ell_{n}}\right]=\mathds{E}_{x}\left[e^{\beta\ell_{n+1}}\right]
\underset{n\to\infty}{\sim}
c_{\beta}^{f}\exp\left\{\frac{\mbox{\small\tt F}(\beta)}{2}(n+x)-\beta x\right\}.
\end{equation*}
\end{proof}
\section{Gibbs limit}\label{sec:gibbs-limit}~~
\setcounter{equation}{0}
Let us turn now to the asymptotic behaviour of the measure $\mathds{Q}_{0}^{(n,\beta)}$ defined at
(\ref{def:rho-n}). To this purpose, we will need an additional definition: for
$n\ge 0$, let ${\hat\ell}_{n}$ be the modified local time given by:
\begin{equation*}
{\hat\ell}_{n}=\ell_n-\mathds{1}_{\{b_n=0\}},
\end{equation*}
and notice that this modified local time appears here because $\ell$ satisfies the relation
\begin{equation*}
\ell_n={\hat\ell}_p+\ell_{n-p}\circ \theta_p
\quad\mbox{ instead of }\quad
\ell_n=\ell_p+\ell_{n-p}\circ \theta_p.
\end{equation*}
Indeed, it is readily checked that one zero is doubly counted in the latter relation if $b_p=0$.
With this notation in hand, the limit of $\mathds{Q}$ is given by the following:
\begin{prop}
For any $p\ge 0$, the measure $\mathds{Q}_{0}^{(n,\beta)}$ converges weakly on $\mathcal F_{p}$, as
$n\to\infty$, to the measure $\mathds{Q}_{0}^{(\beta)}$ given by
\begin{equation}\label{new:prob}
\mathds{Q}_{0}^{(\beta)}(\Gamma_{p})=\mathds{E}_{0}\left[\mathds{1}_{\Gamma_{p}}M_{p}^{(\beta)}\right],
\quad\mbox{ for }\quad
\Gamma_{p}\in\mathcal F_{p},
\end{equation}
with $M^{(\beta)}$ a positive martingale defined by
\begin{equation}\label{martingale}
M_p^{(\beta)}=\exp\left\{\beta{\hat\ell}_{p}-c_{+}(\beta)\,|b_{p}|-c_{-}(\beta)\,p\right\},
\end{equation}
where
\begin{equation}\label{les-constantes}
c_{\pm}(\beta)=(\nicefrac{1}{2})[\beta\pm\ln(2-e^{-\beta})].
\end{equation}
\end{prop}
\begin{proof}
For $n\ge p$, let us decompose $\ell_n$ into
$$
\ell_n={\hat\ell}_p+\ell_{n-p}\circ \theta_p.
$$
Thanks to this decomposition, we obtain, for a given $\Gamma_p\in\mathcal F_p$,
\begin{equation}\label{exp2:rho-n-ga}
\mathds{Q}_{0}^{(n,\beta)}(\Gamma_p)
=\mathds{E}_{0}\left[\mathds{1}_{\Gamma_p} e^{\beta{\hat\ell}_p} U_{n,p}(b_p)\right],
\quad\mbox{ with }\quad
U_{n,p}(x)=
\frac{\mathds{E}_x\left[e^{\beta\ell_{n-p}}\right]}{\mathds{E}_0\left[e^{\beta\ell_{n}}\right]}.
\end{equation}
Moreover, according to relation (\ref{equiv:laplace:tlx}), we have, for any $x\in\mathds{Z}$,
\begin{equation}\label{equiv:unpx}
U_{n,p}(x)
\underset{n\to\infty}{\sim}
\left\{\begin{array}{ll}
\exp\{\frac{\mbox{\small\tt F}(\beta)}{2}(|x|-p)-\beta|x|-\mathds{1}_{\mathds{O}}(x+p)\}
&\mbox{ if }\,n\in\mathds{D}\\\\
\exp\{\frac{\mbox{\small\tt F}(\beta)}{2}(|x|-p)-\beta|x|+\mathds{1}_{\mathds{O}}(x+p)\}
&\mbox{ if }\,n\in\mathds{O},
\end{array}\right.
\end{equation}
where we used the symmetry on $x$. To apply the dominated convergence theorem
let us note that
\begin{equation*}
\mathds{E}_x\left[e^{\beta\ell_{n-p}}\right]\leq \mathds{E}_0\left[e^{\beta\ell_{n-p}}\right],\,\forall x\in\mathds{Z}
\quad\Rightarrow\quad
U_{n,p}(x)\leq 1,\,\forall x\in\mathds{Z}
\quad\Rightarrow\quad
U_{n,p}(b_p)\leq 1.
\end{equation*}
Therefore, we obtain that
\begin{equation*}
M_{p}^{(\beta)}
=\exp\left\{\frac{\mbox{\small\tt F}(\beta)}{2}(b_{p}-p)-\beta b_{p}+\beta{\hat\ell}_{p}\right\},
\end{equation*}
and we deduce (\ref{martingale}).
It is now easily checked that the process $M^{(\beta)}$ is a martingale. Indeed, setting
$N_{p}^{(\beta)}=\ln(M_{p}^{(\beta)})$, and noting that $c_{+}(\beta)+c_{-}(\beta)=\beta$, we have
\begin{multline*}
N_{p+1}^{(\beta)}=\beta{\hat\ell}_{p+1}-c_{+}(\beta)\,|b_{p+1}|-c_{-}(\beta)\,(p+1)\\
=\mathds{1}_{\{b_{p}=0\}}[\beta({\hat\ell}_{p}+1)-\beta-c_{-}(\beta)\,p]
+\mathds{1}_{\{b_{p}\neq 0\}}
[\beta{\hat\ell}_{p}-c_{+}(\beta)\,(|b_{p}|+\xi_{p+1})-c_{-}(\beta)\,(p+1)],
\end{multline*}
where $\xi_{p+1}$ is a symmetric $\pm 1$-valued random variable independent of $\mathcal F_{p}$,
representing the increment of $b$ at time $p+1$.
Hence
\begin{equation}\label{logmartingale}
N_{p+1}^{(\beta)}
=\mathds{1}_{\{b_{p}=0\}}N_{p}^{(\beta)}+\mathds{1}_{\{b_{p}\neq 0\}}[N_{p}^{(\beta)}
-c_{+}(\beta)\,\xi_{p+1}-c_{-}(\beta)].
\end{equation}
Thus
\begin{equation*}
\mathds{E}_{0}[M_{p+1}^{(\beta)}\mid\mathcal F_{p}]=\mathds{1}_{\{b_{p}=0\}}M_{p}^{(\beta)}
+\mathds{1}_{\{b_{p}\neq 0\}}M_{p}^{(\beta)}\,\cosh(c_{+}(\beta))\exp(-c_{-}(\beta)),
\end{equation*}
from which the martingale property is readily obtained from the definition (\ref{les-constantes}).
\end{proof}
\begin{rem}
It should be noticed that the convergence of $\mathds{Q}_0^{(n,\beta)}$
we have obtained on $\mathcal F_p$ is stronger than the weak convergence. In fact, we have
been able to prove that, for any $\Gamma_p\in\mathcal F_p$, we have
$\lim_{n\to\infty}\mathds{Q}_0^{(n,\beta)}(\Gamma_p)=\mathds{Q}_0^{(\beta)}(\Gamma_p)$.
This property is classical in the penalization theory.
\end{rem}
\section{The process under the new probability measure}
\label{sec:process-new-pb-measure}~~
\setcounter{equation}{0}
It must be noticed that $\mathds{Q}_{0}^{(\beta)}$ is a probability measure on $(\Omega,\mathcal F,(\mathcal F_{n})
_{n\geq 1})$, since $M_{0}^{(\beta)}=1$. In this section we study the process $\{b_{n};n\geq 1\}$
under the new probability measure $\mathds{Q}_{0}^{(\beta)}$, which recovers the results of Theorem
\ref{thm:desription-gibbs-limit}, part 2.
\begin{prop}
Let $\mathds{Q}_{0}^{(\beta)}$ be the probability measure defined by {\rm (\ref{new:prob})} with
$M^{(\beta)}$ given by {\rm (\ref{martingale})}. Then, under $\mathds{Q}_{0}^{(\beta)}$:
\begin{enumerate}
\item[a)] $\{b_{n};n\geq 1\}$ is a Markov process on the state space $\mathds{Z}$
having some transition probabilities given by
\begin{equation}\label{new:naiss}
\mathds{Q}_{0}^{(\beta)}(b_{n}=r\mid b_{n-1}=r-1)=
\left\{\begin{array}{ll}
\nicefrac{e^{-\beta}}{2}&\mbox{ if }r>1\\\\
1-\nicefrac{e^{-\beta}}{2}&\mbox{ if }r<-1,
\end{array}\right.
\end{equation}
\begin{equation}\label{new:mort}
\mathds{Q}_{0}^{(\beta)}(b_{n}=r\mid b_{n-1}=r+1)=
\left\{\begin{array}{ll}
1-\nicefrac{e^{-\beta}}{2}&\mbox{ if }r\geq 0\\\\
\nicefrac{e^{-\beta}}{2}&\mbox{ if }r<-1
\end{array}\right.
\end{equation}
and
\begin{equation}\label{new:0}
\mathds{Q}_{0}^{(\beta)}(b_{n}=1\mid b_{n-1}=0)=\mathds{Q}_{0}^{(\beta)}(b_{n}=-1\mid b_{n-1}=0)=\nicefrac{1}{2}.
\end{equation}
\item[b)] the Laplace transform of the first return time in 0 is given by
\begin{equation}\label{new:laplace}
\mathds{E}_{0}^{(\beta)}\left[e^{-\delta\tau_{0}}\right]
=e^{\beta}\left(e^{\delta+\mbox{\small\tt F}(\beta)}
-\left[e^{2(\delta+\mbox{\small\tt F}(\beta))}-1\right]^{\nicefrac{1}{2}}\right).
\end{equation}
In particular, $\mathds{E}_{0}^{(\beta)}[\tau_0]<\infty$ for any $\beta>0$, and
\begin{equation}\label{eq:equiv-mean-tau0}
\mathds{E}_{0}^{(\beta)}[\tau_0]\sim 1-e^{-\beta/2},
\quad\mbox{ when }\quad
\beta\to\infty.
\end{equation}
\item[c)] the distribution law of the excursion between two successive zeros of the process
$\{b_{n};n\geq 1\}$ is the same as under $\mathds{P}_{0}$.
\end{enumerate}
\end{prop}
\begin{proof}
a) Let $\Gamma_{n-2}\in\mathcal F_{n-2}$ be arbitrary. Then
\begin{multline}\label{new:markov}
\mathds{Q}_{0}^{(\beta)}(b_{n}=r\mid b_{n-1}=r-1,\Gamma_{n-2})
=\frac{\mathds{Q}_{0}^{(\beta)}(b_{n}=r,b_{n-1}=r-1,\Gamma_{n-2})}{\mathds{Q}_{0}^{(\beta)}(b_{n-1}=r-1,\Gamma_{n-2})}\\
=\frac{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n}=r\}}\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}
M_{n}^{(\beta)}\right]}{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}
M_{n-1}^{(\beta)}\right]}
=\frac{\mathds{E}_{0}\left\{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n}=r\}}\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}
M_{n}^{(\beta)}\mid\mathcal F_{n-1}\right]\right\}}
{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}M_{n-1}^{(\beta)}\right]}.
\end{multline}
First, assume that $r=1$ in the latter equality. Since $M_{n}^{(\beta)}=M_{n-1}^{(\beta)}$
if $b_{n-1}=0$, then
\begin{multline}\label{0vers1}
\mathds{Q}_{0}^{(\beta)}(b_{n}=1\mid b_{n-1}=0,\Gamma_{n-2})
=\frac{\mathds{E}_{0}\left\{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n}=1\}}\mathds{1}_{\{b_{n-1}=0\}}\mathds{1}_{\Gamma_{n-2}}
M_{n-1}^{(\beta)}\mid\mathcal F_{n-1}\right]\right\}}
{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n-1}=0\}}\mathds{1}_{\Gamma_{n-2}}M_{n-1}^{(\beta)}\right]}\\
=\mathds{E}_{0}\left[\mathds{1}_{\{b_{n}=1\}}\mid\mathcal F_{n-1}\right]=\frac{1}{2}.
\end{multline}
The same kind of computations can be performed with $\Gamma_{n-2}=\Omega$, which gives
\begin{equation}\label{Markov:0vers1}
\mathds{Q}_{0}^{(\beta)}(b_{n}=1\mid b_{n-1}=0,\Gamma_{n-2})
=\mathds{Q}_{0}^{(\beta)}(b_{n}=1\mid b_{n-1}=0).
\end{equation}
Second, assume that $r>1$ in (\ref{new:markov}). In this case, invoking (\ref{logmartingale})
we have
\begin{multline}\label{versdroite}
\mathds{Q}_{0}^{(\beta)}(b_{n}=r\mid b_{n-1}=r-1,\Gamma_{n-2})\\
=\frac{\mathds{E}_{0}\left\{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n}=r\}}\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}
M_{n-1}^{(\beta)}e^{-\xi_{n}c_{+}(\beta)-c_{-}(\beta)}\mid\mathcal F_{n-1}\right]\right\}}
{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}M_{n-1}^{(\beta)}\right]}\\
=\frac{\mathds{E}_{0}\left\{\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}
M_{n-1}^{(\beta)}\mathds{E}_{0}\left[\mathds{1}_{\{b_{n}=r\}}e^{-\xi_{n}c_{+}(\beta)-c_{-}(\beta)}\mid\mathcal F_{n-1}\right]
\right\}}{\mathds{E}_{0}\left[\mathds{1}_{\{b_{n-1}=r-1\}}\mathds{1}_{\Gamma_{n-2}}M_{n-1}^{(\beta)}\right]}\\
=\mathds{E}_{r-1}\left[\mathds{1}_{\{b_{1}=r\}}e^{-\xi_{1}c_{+}(\beta)-c_{-}(\beta)}\right]
=\frac{1}{2}e^{-(c_{+}(\beta)+c_{-}(\beta))}=\frac{1}{2}e^{-\beta}.
\end{multline}
Again, we can get that
\begin{equation}\label{Markov:versdroite}
\mathds{Q}_{0}^{(\beta)}(b_{n}=r\mid b_{n-1}=r-1,\Gamma_{n-2})
=\mathds{Q}_{0}^{(\beta)}(b_{n}=r\mid b_{n-1}=r-1).
\end{equation}
Hence (\ref{Markov:0vers1}) and (\ref{Markov:versdroite}) prove the Markovian
feature of the process $\{b_{n};n\geq 1\}$ under $\mathds{Q}_{0}^{(\beta)}$, while (\ref{0vers1})
and (\ref{versdroite}) prove the first equalities in
(\ref{new:naiss}) and (\ref{new:0}). The other equalities can be obtained in a similar way.
\noindent
b) We can write
\begin{equation*}
\mathds{Q}_{0}^{(\beta)}(\tau_{0}=2k)
=\mathds{E}_{0}\left[\mathds{1}_{\{\tau_{0}=2k\}}M_{2k}^{(\beta)}\right]
=e^{\beta-2kc_{-}(\beta)}\mathds{P}_{0}(\tau_{0}=2k)
=e^{\beta-k\mbox{\small\tt F}(\beta)}\mathds{P}_{0}(\tau_{0}=2k),
\end{equation*}
where we used (\ref{martingale}) and the fact that $2c_{-}(\beta)=\mbox{\small\tt F}(\beta)$.
Clearly, the latter equality defines a probability measure
since, thanks to (\ref{equa:F}),
\begin{equation*}
\sum_{k\geq 1}e^{\beta-k\mbox{\small\tt F}(\beta)}\mathds{P}_{0}(\tau_{0}=2k)
=e^{\beta}\mathds{E}_{0}\left[e^{\nicefrac{-\mbox{\small\tt F}(\beta)\tau_{0}}{2}}\right]
=1.
\end{equation*}
Moreover, we can compute the Laplace transform of $\tau_{0}$
\begin{multline}\label{new:laplacetau}
\mathds{E}_{0}^{(\beta)}\left[e^{-\delta\tau_{0}}\right]
=\sum_{k\geq 1}e^{-2\delta k}e^{\beta-2kc_{-}(\beta)}\mathds{P}_{0}(\tau_{0}=2k)
=e^{\beta}\mathds{E}_{0}\left[e^{-(\delta+\mbox{\small\tt F}(\beta))\tau_{0}}\right]\\
=\exp\left\{\beta-\arg\cosh\left(e^{\delta+\mbox{\small\tt F}(\beta)}\right)\right\}
=\frac{e^{\beta}}{e^{\delta+\mbox{\small\tt F}(\beta)}
+\left[e^{2(\delta+\mbox{\small\tt F}(\beta))}-1\right]^{\nicefrac{1}{2}}}\\
=e^{\beta}\left\{e^{\delta+\mbox{\small\tt F}(\beta)}-\left[e^{2(\delta+\mbox{\small\tt F}(\beta))}-1\right]^{\nicefrac{1}{2}}\right\}.
\end{multline}
We deduce
\begin{equation}\label{new:esperance}
\mathds{E}_{0}^{(\beta)}\left[\tau_{0}\right]
=-\frac{d}{d\delta}\mathds{E}_{0}^{(\beta)}\left[e^{-\delta\tau_{0}}\right]_{\mid\delta=0}
=e^{\beta+\mbox{\small\tt F}(\beta)}
\left\{\frac{1}{\left[1-e^{-2\mbox{\small\tt F}(\beta)}\right]^{\nicefrac{1}{2}}}-1\right\}.
\end{equation}
By (\ref{new:esperance}) we also get that $\lim_{\beta\to\infty}\mathds{E}_{0}^{(\beta)}\left[\tau_{0}\right]
=1=\lim_{\beta\to\infty}\nicefrac{1}{\mbox{\small\tt F}'(\beta)}$, by using also (\ref{expr:F}),
while $\mathds{E}_{0}^{(\beta)}\left[\tau_{0}\right]\neq\nicefrac{1}{\mbox{\small\tt F}'(\beta)}$.
The equivalent (\ref{eq:equiv-mean-tau0}) is also easily deduced from
(\ref{new:esperance}).
\noindent
c) Thanks to the Markov property it is enough to describe the first excursion of $b$ between 0 and
$\tau_{0}$. For any positive Borel function $f$, we have
\begin{equation*}
\mathds{E}_{0}^{(\beta)}\left[f(b_{0},\ldots,b_{n})\mid\tau_{0}=n\right]
=\frac{\mathds{E}_{0}\left[f(b_{0},\ldots,b_{n})\mathds{1}_{\{\tau_{0}=n\}}M_{\tau_{0}}\right]}
{\mathds{E}_{0}\left[\mathds{1}_{\{\tau_{0}=n\}}M_{\tau_{0}}\right]}.
\end{equation*}
Since $M_{\tau_{0}}=e^{\beta-c_{-}(\beta)n}$ if $\tau_{0}=n$, we obtain that
\begin{equation*}
\mathds{E}_{0}^{(\beta)}\left[f(b_{0},\ldots,b_{n})\mid\tau_{0}=n\right]
=\mathds{E}_{0}\left[f(b_{0},\ldots,b_{n})\mid\tau_{0}=n\right].
\end{equation*}
\end{proof}
\begin{thebibliography}{99}
\bibitem{BdH}
E. Bolthausen, F. Den Hollander:
Localisation transition for a polymer near an interface.
{\it Ann. Probab.} {\bf 25} (1997), 1334-1366.
\bibitem{De}
P. Debs,
{\sl P{\'e}nalisation de la marche al{\'e}atoire standard par une fonction du maximum
unilat{\`e}re, du temps local en z{\'e}ro et de la longueur des excursions},
Preprint IECN, 2007.
\bibitem{Fe}
W. Feller,
{\sl An introduction to probability theory and its applications}
vol. 1, 3rd edition, Wiley, New York, 1970.
\bibitem{Gi}
G. Giacomin,
{\sl Random Polymer Models}
Imperial College Press, World Scientific, London, 2007.
\bibitem{CGZ}
F. Caravenna, G. Giacomin, L. Zambotti:
Sharp asymptotic behavior for wetting models in (1+1)-dimension.
{\it Electronic J. Probab.} {\bf 11} (2006), 345-362.
\bibitem{GT}
G. Giacomin; F. Toninelli,
Smoothing effect of quenched disorder on polymer depinning transitions.
Preprint.
\bibitem{MGO}
C. Monthus, T. Garel, H. Orland:
Copolymer at a selective interface and two dimensional wetting: a grand canonical approach.
{\it Eur. Phys. J. B} {\bf 17} (2000), 121-130.
\bibitem{RVY}
B. Roynette; P. Vallois; M. Yor:
Limiting laws associated with Brownian motion perturbed by its
maximum, minimum and local time. II. {\it Stud. Math. Hungarica} {\bf 43} (2006), 295-360.
\bibitem{RY}
B. Roynette, M. Yor:
Penalising Brownian paths : rigorous results and Meta-theorems, book in progress, 2007.
\end{thebibliography}
\end{document}
|
\begin{document}
\begin{abstract}
We consider two-dimensional versions of the Keller--Segel model for the chemotaxis with either classical (Brownian) or fractional (anomalous) diffusion. Criteria for blowup of solutions in terms of suitable Morrey spaces norms are derived. Moreover, the impact of the consumption term on the global-in-time existence of solutions is analyzed for the classical Keller--Segel system.
\end{abstract}
\keywords{chemotaxis, blowup of solutions, global existence of solutions}
\subjclass[2010]{35Q92, 35B44, 35K55}
\date{\today}
\thanks{ This work is accepted for publication in DCDS-A }
\maketitle
\baselineskip=17pt
\section{Introduction}
We consider in this paper the following version of the parabolic-elliptic Keller--Segel model of chemotaxis in two space dimensions
\begin{align}
u_t+(-\Delta)^{\alpha/2}u+\nabla\cdot(u\nabla v)&=0,\ \ x\in {\mathbb R}^2,\ t>0,\label{equ}\\
\Delta v-\gamma v+u &= 0,\ \ x\in {\mathbb R}^2,\ t>0,\label{eqv}
\end{align}
supplemented with the initial condition
\begin{equation}
u(x,0)=u_0(x)\label{ini}.
\end{equation}
Here the unknown variables $u=u(x,t)$ and $v=v(x,t)$ denote the density of the population and the density of the chemical secreted by the microorganisms, respectively, and the given consumption (or degradation) rate of the chemical is denoted by $\gamma\ge 0$. The diffusion operator is described either by the usual Laplacian ($\alpha=2$) or by a fractional power of the Laplacian $(-\Delta)^{\alpha/2}$ with $\alpha\in(0,2)$. The initial data are nonnegative functions $u_0\in L^1(\mathbb R^2)$ of the total mass
\begin{equation}
M=\intop\limits u_0(x)\,{\rm d}x.\label{M}
\end{equation}
Our main results include criteria for blowup of nonnegative solutions of problem \rf{equ}--\rf{ini} expressed in terms of a local concentration of data (Theorem \ref{blow}), and the existence of global-in-time solutions for the initial condition of an arbitrary mass $M$ and each sufficiently large $\gamma$ (Theorem \ref{ex}). The novelty of these blowup results consists in using local properties of solutions instead of a comparison of the total mass and moments of a solution as was done in {\it e.g.} \cite{N}, \cite{KS-JEE}, \cite{LR07,LR08,LRZ}, \cite{BW}, \cite{BKL}, \cite{KS-AM}, and \cite{BK-JEE}. In particular, we complement the result in \cite{KS-JEE} saying that solutions of \rf{equ}--\rf{ini} with $\alpha=2$, fixed $\gamma\ge 0$ and sufficiently well concentrated $u_0$ with $M>8\pi$ blow up in a finite time, by showing that solutions of that system with $u_0$ of arbitrary $M>0$ and all sufficiently large $\gamma$ are global-in-time.
Many previous works have dealt with the existence of global-in-time solutions with small data in critical Morrey spaces, i.e.~those which are scale-invariant under a natural scaling of the chemotaxis model, cf. {\it e.g.} \cite{B-SM} and \cite{Lem}. Our criteria for a blowup of solutions with large concentration can be expressed by Morrey space norms (see Remark \ref{r3} below for more details), and we have found that the size of such a norm is critical for the global-in-time existence versus finite time blowup. The analogous question for radially symmetric solutions of the $d$-dimensional Keller--Segel model with $d\ge 3$ has been recently studied in \cite{BKZ}.
\section{Statement of results}
It is well-known that problem \rf{equ}--\rf{ini} with $\alpha=2$ has a unique
mild solution $u\in {\mathcal C}([0,T); L^1(\mathbb{R}^2))$
for every $u_0\in L^1(\mathbb{R}^2)$ and $\gamma\geq 0$.
Here, as usual, a mild solution satisfies a suitable integral formulation \rf{D} of the Cauchy problem \rf{equ}--\rf{ini} as recalled at the beginning of Section 5. Moreover, given $u=u(x,t)$, we define $v=(-\Delta +\gamma)^{-1}u$, see Lemma \ref{H}, below.
Due to a parabolic regularization effect
(following {\it e.g.} \cite[Th. 4.2]{GMO}), this solution is smooth for $t>0$, hence, it satisfies the Cauchy problem in the classical sense. Moreover, it conserves the total mass \rf{M}
$$
\intop\limits_{\mathbb{R}^2} u(x,t)\,{\rm d}x=\intop\limits_{\mathbb{R}^2}u_0(x)\,{\rm d}x\qquad \text{for all $t\in [0,T)$},
$$
and is nonnegative when $u_0\geq 0$. Proofs of these classical results can be found {\it e.g.} in \cite{KS-JEE, Lem, K-O, KS-AM,BDP},
see also Section 5 of this work.
Analogous results on local-in-time solutions to the Cauchy problem \rf{equ}--\rf{ini} with $\alpha\in (1,2)$ have been obtained in \cite{BK-JEE}, \cite[Th. 2]{Lem}.
To the best of our knowledge, \cite[Th. 1.1]{LRZ} and a recent \cite[Th. 1, Th. 2]{SYK} are the only results on local-in-time classical solutions of the Cauchy problem \rf{equ}--\rf{ini} with $\alpha\in (0,1]$, $d\ge 2$.
Thus, the case (iii) of Theorem \ref{blow} asserts
that such a solution cannot be global-in-time for initial data satisfying \rf{a:cond}.
In our first result,
we formulate new sufficient conditions for blowup
({i.e.} nonexistence for all $t>0$)
of such local-in-time solutions of problem \rf{equ}--\rf{eqv}.
\begin{equation}gin{theorem}\label{blow}
Consider $u\in {\mathcal C}([0,T); L^1(\mathbb{R}^2))$ --- a local-in-time nonnegative classical solution of problem \rf{equ}--\rf{ini} with a nonnegative $u_0\in L^1(\mathbb{R}^2)$.
\noindent
(i) If $\alpha=2$, $\gamma=0$ (the scaling invariant Keller--Segel model), then for each $M>8\pi$ the solution $u$ blows up in a finite time.
\noindent
(ii) Let $\alpha=2$ and $\gamma>0$ (the Keller--Segel model with the consumption).
If $M>8\pi$ and if $u_0$ is well concentrated around a point $x_0\in \mathbb{R}^2$, namely, there exists $R>0$ such that
\begin{equation}
{\rm e}^{-\sqrt{\gamma}R}\intop\limits_{\{|y-x_0|<R\}}u_0(y)\,{\rm d}y>8\pi\ \ {\rm and\ \ } \intop\limits_{\{|y-x_0|\ge R\}}u_0(y)\,{\rm d}y<\nu
\label{ass:blow}
\end{equation}
with an explicitly computed small constant $\nu>0$, then the solution $u$ blows up in a finite time.
\noindent
(iii) Let $\alpha\in(0,2)$ and $\gamma\ge 0$ (the Keller--Segel model with fractional diffusion). If there exist $x_0\in\mathbb{R}^2$ and $R>0$ such that
\begin{equation}\label{a:cond}
R^{\alpha-2} \intop\limits_{\{|y-x_0|<R\}} u_0(y)\,{\rm d}y>C \ \ {\rm and\ \ }\intop\limits_{\{|y-x_0|\ge R\}}u_0(y)\,{\rm d}y<\nu,
\end{equation}
for some explicit constants: small $\nu>0$ and big $C>0$, then the solution $u$ ceases to exist in a finite time.
\end{theorem}
\begin{remark} \label{r2}
The result (i) for $\alpha=2$ and $\gamma=0$ is, of course, well known, but the proof below slightly differs from the previous ones.
The case (ii) $\alpha=2$ and $\gamma>0$ has been considered in \cite{KS-JEE} but the sufficient conditions for blowup were expressed in terms of \emph{globally} defined quantities: i.e. mass $M>8\pi$ and the moment $\intop\limits u_0(x)|x|^2 \,{\rm d}x$.
\end{remark}
\begin{remark} \label{r3}
The case (iii) $\alpha<2$.
Recall that the (homogeneous) Morrey space $M^p(\mathbb{R}^2)$ is defined as the space of locally integrable functions such that
$$
|\!\!| u|\!\!|_{M^p}=\sup_{R>0,\, x\in\mathbb{R}^2}R^{2(1/p-1)}\intop\limits_{\{|y-x|<R\}}u(y)\,{\rm d}y<\infty.
$$
The first condition in \eqref{a:cond} is equivalent to a sufficiently large Morrey norm of $u_0$ in the space $M^{2/\alpha}(\mathbb{R}^2)$.
Indeed, obviously we have
$$|\!\!| u_0|\!\!|_{M^{2/\alpha}}\ge
R^{\alpha-2} \intop\limits_{\{|y-x_0|<R\}} u_0(y)\,{\rm d}y$$ for every $x_0$ and $R>0$, but also there is
$x_0\in\mathbb{R}^2$ and $R>0$ such that $$|\!\!| u_0|\!\!|_{M^{2/\alpha}}\le
2 R^{\alpha-2} \intop\limits_{\{|y-x_0|<R\}} u_0(y)\,{\rm d}y.$$
Thus, our blowup condition in terms of the Morrey norm seems to be new and complementary to that guaranteeing the global-in-time existence of solutions, where smallness of initial conditions in the $M^{2/\alpha}$-Morrey norm has to be imposed, cf. prototypes of such results in \cite[Theorem 1]{B-SM} and \cite[Remark 2.7]{BK-JEE}.
\end{remark}
\begin{remark} \label{r4}
A natural scaling for system \rf{equ}--\rf{eqv} with $\gamma=0$:
$$
u_\lambda(x,t)=\lambda^\alpha u(\lambda x,\lambda^\alpha t),$$
leads to the equality $\intop\limits u_\lambda \,{\rm d}x = \lambda^{\alpha-2}\intop\limits u \,{\rm d}x$, i.e.
mass of rescaled solution $u_\lambda$ can be chosen arbitrarily with a suitable $\lambda>0$. Thus, the conditions in Theorem
\ref{blow}.iii are insensitive to the actual value of $M$, so w.l.o.g. we may suppose that $M=1$.
\end{remark}
\begin{remark} \label{r5}
The second parts of the condition \rf{ass:blow} and \rf{a:cond} are not scaling invariant. However, we believe that these assumptions are not necessary for the conclusion in Theorem \ref{blow}.ii, \ref{blow}.iii. In fact, one can prove it for $\alpha$ close to 2 by an inspection of methods in \cite{BKZ,BZ}.
\end{remark}
Next, we show that
the first condition in the concentration assumptions \rf{ass:blow} is in some sense optimal to obtain a blowup of solutions. In the following theorem we show that for every initial integrable function $u_0$, even with its $L^1$-norm above $8\pi$, the corresponding mild solution to the model \rf{equ}--\rf{ini} with $\alpha=2$ is global-in-time for all sufficiently large consumption rates $\gamma>0$.
\begin{theorem}\label{ex}
Let $\alpha=2$, $\gamma>0$.
For each $u_0\in L^1(\mathbb R^2)$, there exists $\gamma(u_0)>0$ such that for all $\gamma\ge\gamma(u_0)$ the Cauchy problem \rf{equ}--\rf{ini} has a global-in-time mild solution satisfying $u\in {\mathcal C}([0,\infty);L^1(\mathbb R^2))$. This is a classical solution of system \rf{equ}--\rf{eqv} for $t>0$, and satisfies for each $p\in[1,\infty)$ the decay estimates
\begin{equation}
\sup_{t>0}t^{1-1/p}\|u(t)\|_p<\infty.\label{esti}
\end{equation}
\end{theorem}
Thus, for each $u_0$ (not necessarily nonnegative) and $\gamma$ large enough depending on $u_0\in {L^1(\mathbb R^2)}$, solutions of the Cauchy problem are global-in-time, so there is no critical value of mass which leads to a blowup of solutions.
On the other hand, if $M>8\pi$, then for $0\le\gamma\ll 1$ the solutions blow up in a finite time, as it is seen from the sufficient conditions for blowup in Theorem \ref{blow}.ii.
\section{Notation and preliminaries}
In the sequel, $\|\cdot\|_q$ denotes the usual $L^q(\mathbb R^2)$ norm, and $C$'s are generic constants independent of $t$, $u$, $z$, ... which may, however, vary from line to line.
Integrals with no integration limits are meant to be calculated over the whole plane.
Let us denote by $G$ the Gauss--Weierstrass kernel of the heat semigroup ${\rm e}^{t\Delta}$ on $L^p({\mathbb R}^2)$
space
\begin{equation}
G(x,t)=(4\pi t)^{-1}\exp\left(-\frac{|x|^2}{4t}\right).\label{GW}
\end{equation}
As it is well known the convolution with $G$, denoted by $G(t)\ast z={\rm e}^{t\Delta}z$, satisfies the following $L^q-L^p$ estimates
\begin{equation}
\|{\rm e}^{t\Delta}z\|_p\le Ct^{1/p-1/q}\|z\|_q\label{lin1}
\end{equation}
and
\begin{equation}
\|\nabla{\rm e}^{t\Delta}z\|_p\le Ct^{-1/2+1/p-1/q}\|z\|_q\label{lin2}
\end{equation}
for all $1\le q\le p\le\infty$, $t>0$.
Moreover, for each $p>1$ and $z\in L^1(\mathbb{R}^2)$ the following relation holds
\begin{equation}
\lim_{t\to 0}t^{1-1/p}\|{\rm e}^{t\Delta}z\|_p=0\label{function}
\end{equation}
which is, {\it e.g.}, noted in \cite[Lemma 4.4]{GMO}.
\begin{lemma} \label{H}
For every $\gamma>0$,
the operator $(-\Delta+\gamma)^{-1}$ solving the Helmholtz equation \rf{eqv} satisfies
\begin{equation}
\|\nabla (-\Delta+\gamma)^{-1} z\|_q\le C\gamma^{1/p-1/q-1/2}\|z\|_p,\label{Kgamma}
\end{equation}
for every $1\le p<2< q<\infty$ such that $\frac1p-\frac1q< \frac12$ and some $C$ independent of $\gamma$. In the critical case $\frac1p-\frac1q= \frac12$ inequality \eqref{Kgamma} also holds provided $p>1$.
Moreover, the Bessel kernel $K_\gamma$ of $(-\Delta+\gamma)^{-1}$ has the following pointwise behavior at $0$ and $\infty$
\begin{eqnarray}
\nabla K_\gamma(x)&\sim&-\frac{1}{2\pi}\frac{x}{|x|^2}\ \ {\rm as}\ \ x\to 0,\label{0}\\
|\nabla K_\gamma(x)|&\le& C\frac{1}{|x|}{\rm e}^{-\sqrt{\gamma}|x|}\ \ {\rm as} \ \ x\to\infty,\label{infty}
\end{eqnarray}
and satisfies the global one-sided bound
\begin{equation}
x\cdot\nabla K_\gamma(x)\le -\frac{1}{2\pi}{\rm e}^{-\sqrt{\gamma}|x|}.\label{point}
\end{equation}
\end{lemma}
\proof
The proof of inequality \rf{Kgamma} requires separate arguments in two cases, $\frac1p-\frac1q< \frac12$ and $\frac1p-\frac1q= \frac12$. In the first case, the result is a consequence of inequalities \rf{lin1} and \rf{lin2} by representing the operator $(-\Delta+\gamma)^{-1}$ as the Laplace transform
\begin{equation}
(-\Delta+\gamma)^{-1}=\intop\limits_0^\infty {\rm e}^{-\gamma s}{\rm e}^{s\Delta}\,{\rm d}s.\label{K}
\end{equation}
Indeed, we have the following representation of $K_\gamma$ in the Fourier variables
$$
\widehat{(K_\gamma\ast z)}(\xi)=\frac{1}{|\xi|^2+\gamma}\hat z(\xi) =\intop\limits_0^\infty {\rm e}^{-\gamma s}{\rm e}^{-s|\xi|^2}\hat z(\xi)\,{\rm d}s,
$$
so that
\begin{eqnarray}
\|\nabla (-\Delta+\gamma)^{-1} z\|_q&\le& C\intop\limits_0^\infty {\rm e}^{-\gamma s}s^{1/q-1/p-1/2}\,{\rm d}s\, \|z\|_p\nonumber\\
&\le& C\gamma^{1/p-1/q-1/2}\intop\limits_0^\infty {\rm e}^{-s}s^{-1/2+1/q-1/p}\,{\rm d}s\, \|z\|_p,\nonumber
\end{eqnarray}
the latter integral is finite due to the assumption on $p$ and $q$.
When $\frac1p-\frac1q= \frac12$, inequality \rf{Kgamma} follows from the end-point case of the Sobolev inequality $\left\|\nabla (-\Delta)^{-1}u\right\|_q\leq C\left\|u\right\|_p$.
\noindent
For properties \rf{0}, \rf{infty} and \rf{point}, see {\it e.g.} \cite[Lemma 3.1]{KS-JEE} and \cite[Ch. V, Sec. 6.5]{S}.
$\square$
\begin{remark} \label{r6}
Let us note that the reference \cite[Theorem 2.9]{KS-AM} provides us with precise conditions on radial convolution kernels $K$ leading to a blowup of solutions of general diffusive aggregation equations with the Brownian diffusion of the form
$u_t-\Delta u+\nabla\cdot(u(\nabla K\ast u))=0$. They are strongly singular, i.e. they have the singularity at $0$:
$\limsup_{x\to 0} x\cdot\nabla K (x)<0$, and are of moderate growth at $\infty$:
$| x\cdot\nabla K (x)|\le C|x|^2$. Of course, the Bessel kernel $K_\gamma$ is strongly singular in the sense of \cite{KS-AM}, as it is seen from \rf{point}.
\end{remark}
\section{Blowup of solutions }
In this section we prove Theorem \ref{blow} using the method of truncated moments which is reminiscent of that in the papers \cite{N1}, \cite{K-O}.
First, we define the ``bump'' function $\psi$ and its rescalings for $R>0$
\begin{equation}
\psi(x)=(1-|x|^2)_+^2
=\left\{
\begin{array}{ccc}
(1-|x|^2)^2& \text{for}& |x|<1,\\
0& \text{for}& |x|\geq 1,
\end{array}
\right.
\qquad \psi_R(x)=\psi\bigg(\frac{x}{R}\bigg). \label{bump}
\end{equation}
The function $\psi$ is piecewise ${\mathcal C}^2(\mathbb R^2)$, with ${\rm supp}\,\psi=\{|x|\le 1\}$, and satisfies
\begin{eqnarray}
\nabla\psi(x)&=&-4x(1-|x|^2)\ \ {\rm for\ }\ |x|<1, \label{gradpsi}\\
\Delta\psi(x)&=&(-8+16|x|^2) \ge -8\psi(x)\ge -8\,\ {\rm for\ \ }{|x|<1}.\label{lappsi}
\end{eqnarray}
We will use in the sequel the
fact that $\psi$ is strictly concave in a neighbourhood of $x=0$.
\begin{lemma}\label{prop1}
For each $\varepsilon\in\left(0,\frac{1}{\sqrt{3}}\right)$, the function $\psi$ defined in \eqref{bump} is strictly concave for all $|x|\le \varepsilon$. More precisely, $\psi$ satisfies
\begin{equation}\label{0.1}
H\psi\leq -\theta(\varepsilon) I
\end{equation}
for all $|x|\le \varepsilon$,
where $H\psi$ is the Hessian matrix of second derivatives of $\psi$,
$\theta(\varepsilon) =4\left(1-3\varepsilon^2\right)$, and $I$ is the identity matrix.
In particular, we have
\begin{equation}\label{wazne}
\theta(\varepsilon)\nearrow 4\quad \mbox{as}\quad \varepsilon\searrow 0.
\end{equation}
\end{lemma}
\proof
For every $\xi\in \mathbb{R}^2$ the following identity holds
\[
\xi\cdot H\psi\, \xi= 4\left(-|\xi|^2\left(1-|x|^2\right)+2(x\cdot\xi)^2\right).
\]
Thus, by the Schwarz inequality, we have
$\xi\cdot H\psi\, \xi \le 4|\xi|^2\left(3|x|^2-1\right)$.
$\square$
Next, we recall a well-known property of concave functions.
\begin{lemma}\label{prop2}
For every function $\Psi:\mathbb{R}^2\rightarrow \mathbb{R}$ which is strictly concave on a domain $\Omega\subset\mathbb{R}^2$ we have for all $x,y\in \mathbb{R}^2$
\begin{equation}\label{concave}
(x-y)\cdot\left(\nabla \Psi(x)-\nabla \Psi(y)\right)\leq -\theta |x-y|^2,
\end{equation}
where $\theta>0$ is the constant of strict concavity of $\Psi$ on $\Omega$, i.e. satisfying $H\Psi\le -\theta \,I$.
\end{lemma}
\proof
By the concavity, we obtain
\[
\Psi(x)\leq \Psi(y)+\nablabla\Psi(y)\cdot(x-y)-\frac{\theta}{2!}|x-y|^2.
\]
Summing this inequality with its symmetrized version (with $x,\,y$ interchanged) leads to the claim.
$\square$
We have the following scaling property of the fractional Laplacian
\begin{equation}
(-\Delta)^{\alpha/2}\psi_R(x)=R^{-\alpha}\big((-\Delta)^{\alpha/2}\psi\big)_R,\label{scal}
\end{equation}
and we notice the following boundedness property of $(-\Delta)^{\alpha/2}\psi$.
\begin{lemma}\label{Getoor}
For every $\alpha\in(0,2]$ there exists a constant $k_\alpha>0$ such that
\begin{equation}
\left|(-\Delta)^{\alpha/2}\psi(x)\right|\le k_\alpha.\label{deltapsi}
\end{equation}
Moreover, $(-\Delta)^{\alpha/2}\psi(x)\le 0$ for $|x|\ge 1$.
In particular, for $\alpha=2$ we have $k_2=8$.
\end{lemma}
\proof
For $\alpha=2$, this is an obvious consequence of the explicit form of $\psi$, hence we assume $\alpha\in(0,2)$.
To show estimate \eqref{deltapsi} for $\alpha\in (0,2)$,
it suffices to use the following well-known representation of the fractional Laplacian with $\alpha\in(0,2)$
\begin{equation}
(-\Delta)^{\alpha/2}\psi(x)=-c_\alpha\, {\rm P.V.} \intop\limits \frac{\psi(x+y)-\psi(x)}{|y|^{2+\alpha}}\,{\rm d}y \nonumber
\end{equation}
for certain explicit constant $c_\alpha>0$.
Now, using the Taylor formula together with
the fact that $\psi,\, D^2\psi\in L^\infty(\mathbb{R}^2)$, we immediately obtain that the integral on the right-hand side is finite and uniformly bounded in $x\in\mathbb{R}^2$.
Since $\psi(x)\ge 0$ and $\psi(x)=0$ for $|x|\ge 1$ we have
$$
(-\Delta)^{\alpha/2}\psi(x)=-c_\alpha\, {\rm P.V.}\intop\limits \frac{\psi(x+y)}{|y|^{2+\alpha}}\,{\rm d}y\le 0
$$
for $|x|\ge 1$.
$\square$
Now, we formulate a crucial inequality in our proof of the blowup result.
\begin{lemma}\label{prop3}
For the Bessel kernel $K_\gamma$ with $\gamma\geq 0$ and a strictly concave function $\Psi$ we have for all $x,y$ on the domain of the strict concavity of $\Psi$
\begin{equation}\label{1.1}
\nabla K_\gamma (x-y)\cdot \left(\nabla \Psi(x)-\nabla \Psi(y)\right)\geq \frac{\theta}{2\pi}g_{\gamma}(|x-y|),
\end{equation}
where $\theta$ is the constant of the strict concavity of $\Psi$ introduced in Lemma \ref{prop2}, and $g_\gamma$ is a radially symmetric continuous function, such that
\begin{equation}\label{bessel}
\nabla K_\gamma(x)=-\frac{1}{2\pi}\frac{x}{|x|^2}g_\gamma(|x|).
\end{equation}
In particular, $g_\gamma(0)=1$, the profile of $g_\gamma$ decreases,
and $g_\gamma(|x|)\le C{\rm e}^{-\sqrt{\gamma}|x|}$.
\end{lemma}
\proof
Combining Lemma \ref{prop2} with equation \eqref{bessel}
and properties \rf{0}, \rf{infty} and \rf{point}
we arrive immediately at the claimed formula.
$\square$
We are in a position to prove our main blowup result.
\noindent {\bf Proof of Theorem \ref{blow}.} We consider the quantity
$$w_R(t)=\intop\limits u(x,t)\psi_R(x)\,{\rm d}x,$$
a \emph{local} moment of $u(.,t)$, where $\psi_R(x)$ is
defined in \rf{bump} for each $R>0$.
Let
\begin{equation}\label{Mt}
M_R(t)\equiv \intop\limits_{\{|x|<R\}}u(x,t)\,{\rm d}x \geq w_R(t)
\end{equation}
denote the mass of the distribution $u$ contained in the ball $\{|x|<R\}$ at the moment $t$.
Now, using equation \rf{equ} we determine the evolution of $w_R(t)$
\begin{eqnarray}
\frac{\rm d}{{\rm d}t}w_R(t)
&=& -\intop\limits(-\Delta)^{\alpha/2}u(x,t)\psi_R(x)\,{\rm d}x +\intop\limits u(x,t)\nabla v(x,t)\cdot\nabla\psi_R(x)\,{\rm d}x\nonumber\\
&=&-\intop\limits u(x,t)(-\Delta)^{\alpha/2}\psi_R(x)\,{\rm d}x \label{J}\\
&\qquad& +\frac12\iint u(x,t)u(y,t)\nabla K_\gamma(x-y)\cdot\big(\nabla\psi_R(x) -\nabla\psi_R(y)\big)\,{\rm d}y\,{\rm d}x,\nonumber
\end{eqnarray}
where we applied the formula $v=K_\gamma\ast u$, and the last expression follows by the symmetrization of the double integral: $x\mapsto y$, $y\mapsto x$.
Since $u(x,t)\ge 0$, by the scaling relation \rf{scal} and Lemma \ref{Getoor}, we obtain
\begin{equation}\label{J0}
-\intop\limits u(x,t)(-\Delta)^{\alpha/2}\psi_R(x)\,{\rm d}x\ge -R^{-\alpha} k_\alpha\intop\limits_{\{|x|\le R\}}u(x,t)\,{\rm d}x.
\end{equation}
Now, let $\varepsilon\in\left(0, \frac1{\sqrt3}\right)$. By Lemma \ref{prop1}, the weight function $\psi_R$ in \rf{bump} is concave for $|x|\le \varepsilon R$ with a concavity constant $\theta=\theta(\varepsilon).$
Thus, by Lemma~\ref{prop3}, we have
\[
\nabla K_\gamma (x-y)\cdot \left(\nabla \psi_R(x)-\nabla \psi_R(y)\right)\geq R^{-2}\frac{\theta(\varepsilon)}{2\pi}g_{\gamma}(|x-y|)
\]
for $|x|, |y|<\varepsilon R$.
Hence, the bilinear term on the right-hand side of \rf{J} satisfies
\begin{equation}\label{bilinear}
\begin{split}
\frac12\iint u(x,t)&u(y,t)\nabla K_\gamma(x-y)\cdot\big(\nabla\psi_R(x) -\nabla\psi_R(y)\big)\,{\rm d}y\,{\rm d}x\\
\ge
&R^{-2}\frac{\theta(\varepsilon)}{4\pi} \intop\limits_{\{|x|<\varepsilon{R}\}}\intop\limits_{\{|y|< \varepsilon{R}\}} g_\gamma(|x-y|)u(x,t)u(y,t)\,{\rm d}y\,{\rm d}x + \frac{1}{2}J,
\end{split}
\end{equation}
where the letter $J$ denotes the integral
$$
J=\iint_{{\mathbb R}^2\times\mathbb{R}^2\setminus\left(\{|x|<\varepsilon{R}\}\times\{|y|< \varepsilon{R}\}\right)}u(x,t)u(y,t)\nabla K_\gamma(x-y)\cdot (\nabla\psi_R(x)-\nabla\psi_R(y))\,{\rm d}y\,{\rm d}x.
$$
We estimate the first integral on the right-hand side of \eqref{bilinear}
in the following way
\begin{equation}\label{JJJ}
\begin{split}
&\intop\limits_{\{|x|<\varepsilon{R} \}}\intop\limits_{\{|y|< \varepsilon{R} \}} g_\gamma(|x-y|)u(x,t)u(y,t)\,{\rm d}y\,{\rm d}x \\
&\ge g_\gamma(2\varepsilon R)\bigg(M_R(t)-\intop\limits_{\{\varepsilon R\leq |x|\le {R} \}}u(x,t)\,{\rm d}x\bigg)^2\\
&\ge g_\gamma(2\varepsilon R)M_R^2(t)-2 g_\gamma(2\varepsilon R) M_R(t)\intop\limits_{\{\varepsilon R\leq |x|\le {R} \}}u(x,t)\frac{1-\psi_R(x)}{\inf_{\{|x|\ge \varepsilon{R} \}} \big(1-\psi_R(x)\big) }{\,{\rm d}x}\\
&\ge g_\gamma(2\varepsilon R) M_R(t)^2-2 C_\varepsilon M_R(t)(M-w_R(t)),
\end{split}
\end{equation}
where $C_\varepsilon=\left(\inf_{\{|x|\ge \varepsilon{R} \}} \big(1-\psi_R(x)\big)\right)^{-1}= \big(1-(1-\varepsilon^2)^2\big)^{-1}$. In the above inequalities we used the fact that $g_\gamma$ is a continuous decreasing function and $0\leq g_\gamma \leq 1$.
Next,
since we have the inclusion
\begin{eqnarray}
&&{\mathbb R}^2\times\mathbb{R}^2\setminus\Big(\{|x|<\varepsilon{R}\}\times\{|y|< \varepsilon{R}\}\Big)\subset \nonumber\\
&&\Big(\{|x|<R\}\times\{|y|\ge\varepsilon{R}\}\Big) \cup \Big(\{|x|\ge \varepsilon{R} \}\times\{|y|<R\}\Big) \cup \Big(\{|x|\ge R\}\times\{|y|\ge R\}\Big)\nonumber
\end{eqnarray}
and the factor with $\nabla\psi_R$ vanishes on the set $\{|x|\ge R\}\times\{|y|\ge R\}$, we obtain immediately the estimate
\begin{eqnarray}
|J|&\le& 2CR^{-2}\intop\limits_{\{|x|<R\}}\intop\limits_{\{|y|\ge \varepsilon{R} \}}u(x,t)u(y,t) \frac{1-\psi_R(y)}{\inf_{\{|y|\ge \varepsilon{R} \}}\big(1-\psi_R(y)\big)}{\,{\rm d}x\, \,{\rm d}y}\nonumber\\
&\le& 2R^{-2}C C_\varepsilon M_R(t) \intop\limits u(y,t)\big(1-\psi_R(y)\big)\,{\rm d}y\nonumber\\
&\le& 2R^{-2}C C_\varepsilon M_R(t)(M-w_R(t)), \label{JJ}
\end{eqnarray}
where $C=\sup|z\cdot\nabla K_\gamma(z)|\, \|D^2\psi\|_\infty$.
Finally, estimates \rf{J0}--\rf{JJ} as well as inequality \eqref{Mt} applied to equation \rf{J} lead to the inequalities
\begin{equation}\label{ineq}
\begin{split}
\frac{\rm d}{{\rm d}t}w_R(t)&\ge R^{-\alpha}M_R(t)\bigg(-k_\alpha + \frac{\theta(\varepsilon)}{4\pi} R^{\alpha-2}g_\gamma(2\varepsilon R)M_R(t)+C(\varepsilon) R^{\alpha-2}(w_R(t)-M)\bigg)\\
&\ge R^{-\alpha}w_R(t)\bigg(-k_\alpha + \frac{\theta(\varepsilon)}{4\pi} R^{\alpha-2}g_\gamma(2\varepsilon R)w_R(t)+C(\varepsilon) R^{\alpha-2}(w_R(t)-M)\bigg),
\end{split}
\end{equation}
whenever the expression in the parentheses is nonnegative, with $C(\varepsilon)=3CC_\varepsilon= 3C\big(1-(1-\varepsilon^2)^2\big)^{-1}$.
Now, notice that the linear function of $w_R$ in the parentheses on the right-hand side of \eqref{ineq} is monotone increasing. Thus,
if at the initial moment $t=0$ the right-hand side of \rf{ineq} is positive, then
$w_R(t)$ will increase indefinitely in time. Consequently, after a moment $T={\mathcal O}\bigg( R^\alpha \big(\frac{M}{w_R(0)}-1\big)\bigg)$ the function $w_R(t)$ will become larger than the total mass $M$. This is a~contradiction with the global-in-time existence of a nonnegative solution $u$ since it conserves mass \rf{M}.
Now, let us analyze the cases when the right-hand side of inequality \rf{ineq} is strictly positive.
{\it Case (i)}: $\alpha=2$ and $\gamma=0$.
We recall that by Lemma \ref{Getoor} $k_2=8$ holds.
For $\gamma=0$, the Bessel potential $K_\gamma$ should be replaced
by the fundamental solution $E_2(x)$ of Laplacian on $\mathbb{R}^2$ which satisfies $\nablabla E_2(x)=-\frac{1}{2\pi}\frac{x}{|x|^2}$, so that
$g_0(2\varepsilon R)=1$.
In view of \eqref{wazne}, the quantity $\frac{\theta(\varepsilon)}{4\pi}$ is close to $\frac1{\pi} $ at the expense of taking sufficiently small $\varepsilon>0$.
Choosing $\varepsilon>0$ small enough, we get blowup in the optimal range $M>8\pi$. Indeed, if $M>8\pi$, then there exists $\varepsilon>0$ small, and $R\ge R(\varepsilon)>0$ sufficiently large so that $w_R(0)$ is sufficiently close to $M$ and we have
\[
-8+\frac1{\pi} w_R(0)+C(\varepsilon)(w_R(0)-M)>0.
\]
{\it Case (ii)}: $\alpha=2$, $\gamma>0$. If $M>8\pi$ and $u_0$ is sufficiently well concentrated near the origin, i.e. $\frac{\theta(\varepsilon)}{4\pi}g_\gamma(2\varepsilon R)w_R(0)>k_2=8$ and, at the same time $C(\varepsilon)(M-w_R(0))$ is sufficiently small, then the solution $u$ cannot be global-in-time.
{\it Case (iii)}:
In the case $\alpha<2$, the blowup occurs if for some $R>0$ the quantity \newline $R^{\alpha-2}\intop\limits_{\{|x|<R\}}u_0(x)\,{\rm d}x$ is large enough, and simultaneously $u_0$ is well concentrated, i.e. $C(\varepsilon)(M-w_R(0))$ is small.
$\square$
\section{Global existence of large mass solutions}
In this section we prove Theorem \ref{ex}.
Our proof splits naturally into several parts. The first one is a construction of local-in-time mild solutions with initial data in $L^1$ with an estimate of the existence time \emph{uniform} in $\gamma$.
The second step consists in proving the continuation of such a local solution to a global-in-time one satisfying the (nonoptimal) decay estimate $\limsup_{t\to\infty} t^{1/\sigma-1/p}\|u(t)\|_p<\infty$ for each fixed $p\in(4/3,2)$ and any $\sigma\in(1,p)$.
Finally, we will prove a uniform global $L^1$ bound $\sup_{t>0}\|u(t)\|_1<\infty$
as well as the optimal decay (hypercontractive)
estimate $\sup_{t>0}t^{1-1/p}\|u(t)\|_p<\infty$.
First of all, the Cauchy problem \rf{equ}--\rf{ini} is studied via the integral equation (a.~k.~a. the Duhamel formula)
\begin{equation}
u(t)={\rm e}^{t\Delta}u_0+B(u,u)(t),\label{D}
\end{equation}
whose solutions are called \emph{mild}\, solutions of the original Cauchy problem.
Here, the bilinear term $B$ is defined as
\begin{equation}
B(u,z)(t)=-\intop\limits_0^t\left(\nabla{\rm e}^{(t-s)\Delta}\right)\cdot\left(u(s)\, \nabla(-\Delta+\gamma)^{-1}z(s)\right)\,{\rm d}s.\label{form}
\end{equation}
Then, to solve equation \rf{D} in a Banach space $(\mathcal E,|\!|\!|\,.\,|\!|\!|)$ of vector-valued functions, it is sufficient to prove the boundedness of the bilinear form $B:{\mathcal E}\times{\mathcal E}\to {\mathcal E}$
\begin{equation}
|\!|\!| B(u,z)|\!|\!|\le \eta |\!|\!| u|\!|\!| |\!|\!| z|\!|\!|,\label{form2}
\end{equation}
with a constant $\eta$ independent of $u$ and $z$.
The first and the second steps toward the proof of Theorem \ref{ex} are based on a lemma which is convenient to formulate in the following way:
\begin{lemma}\label{fix}
If $|\!|\!| B(u,z)|\!|\!|\le \eta|\!|\!| u|\!|\!|\, |\!|\!| z |\!|\!|$ and
$|\!|\!| {\rm e}^{t\Delta}u_0|\!|\!| \le R<\frac{1}{4\eta}$, then equation \rf{D} has a solution which is unique in the ball of radius $2R$ in the space $\mathcal E$.
Moreover, these solutions depend continuously on the initial data, i.e. $|\!|\!| u-\tilde u|\!|\!|\le C|\!|\!| {\rm e}^{t\Delta}(u_0-\tilde u_0)|\!|\!|$.
\end{lemma}
The detailed proof of Lemma \ref{fix} can be found in \cite{Lem}, \cite{B-SM}. The reasoning involves the Banach contraction theorem, the unique solution being achieved as a limit in ${\mathcal E}$ of the sequence of successive approximations
\begin{equation}
w_0(t)={\rm e}^{t\Delta}u_0,\ \ w_{n+1}=w_0+B(w_n,w_n).\label{c_sequence}
\end{equation}
\subsection*
{Step 1. Local-in-time solutions with the initial data in $L^1$}
\begin{lemma}\label{lem:loc}
For every $u_0\in L^1(\mathbb{R}^2)$ and
$p\in\big(\frac43,2\big)$, there exists $T>0$ independent of $\gamma$, such that equation \eqref{D} has a solution $u=u(x,t)$ in the space
\begin{equation}
{\mathcal E}=\{u\in L^\infty_{\rm loc}((0,T); L^p(\mathbb R^2)):\ \ \sup_{0<t\le T}t^{1-1/p}\|u(t)\|_p<\infty\},\label{ET}
\end{equation}
endowed with the norm
\begin{equation}
|\!|\!| u|\!|\!|\equiv \sup_{0<t\le T}t^{1-1/p}\|u(t)\|_p<\infty.\label{normT}
\end{equation}
\end{lemma}
\begin{proof}
Let $\frac1r=\frac2p-\frac12$, so that $r\in(1,2)$. Moreover, denote by $q$ a number satisfying $\frac1p +\frac1q=\frac1r$. We estimate the bilinear form $B$ for each $t\in(0,T)$ using \rf{lin2} and \rf{Kgamma}
\begin{equation} \nonumber
\begin{split}
\|B(u,z)(t)\| _p
&\le C\intop\limits_0^t(t-s)^{-1/2+1/p-1/r}\|u(s)\nabla(-\Delta+\gamma)^{-1}z(s)\|_r\,{\rm d}s \\
&\le C\intop\limits_0^t(t-s)^{-1/2+1/p-2/p+1/2}\|u(s)\|_p\|\nabla(-\Delta+\gamma)^{-1}z(s)\|_q\,{\rm d}s\\
&\le C\gamma^{-1/2-1/q+1/p}\intop\limits_0^t(t-s)^{-1/p} s^{2(1/p-1)}\left(\sup_{0<s\le t}s^{1-1/p}\|u(s)\|_p\right)\\
&\qquad \times \,\left(\sup_{0<s\le t}s^{(1-1/p)}\|z(s)\|_p\right)\,{\rm d}s, \\
&\le t^{1/p-1}C|\!|\!| u|\!|\!|\, |\!|\!| z|\!|\!|
\end{split}
\end{equation}
with a constant $C>0$ \emph{independent} of $\gamma>0$ (and also of $T>0$) since $-\frac12-\frac1q+\frac1p=0$.
The last inequality is a consequence of the fact that
\[
\intop\limits_0^t(t-s)^{-1/p}s^{2(1/p-1)}\,{\rm d}s= Ct^{1/p-1}.
\]
Finally, given $u_0\in L^1(\mathbb R^2)$, by \rf{function} we may choose $T>0$ so small to have
$$\sup_{0<t\le T}t^{1-1/p}\|{\rm e}^{t\Delta}u_0\|_p<\frac{1}{4C}.$$
Thus, the existence of a solution in $\mathcal E$ follows by an application of Lemma \ref{fix}.
\end{proof}
\begin{lemma}[The $L^1$-bound]\label{L-1}
The solution constructed in Lemma \ref{lem:loc} satisfies
\begin{equation}
\sup_{0<t\le T}\|u(t)\|_1<\infty.\label{L1}
\end{equation}
\end{lemma}
\begin{proof}
Let us take the sequence $w_n$ of approximations of $u$ as in \eqref{c_sequence}. By Lemma \ref{lem:loc} we know that
there exists a constant $C_0$ such that
\begin{equation}
|\!|\!| w_n|\!|\!| \label{ogr}\le C_0<\infty.
\end{equation}
Next, we take any $r\in [\frac43, 2)$ such that $r<p$, and interpolate the $L^r$ norm
\begin{equation}
\left\|w_n\right\|_r\leq\left\|w_n\right\|_p^{1-\theta}\left\|w_n\right\|_1^{\theta},\label{interpolacja}
\end{equation}
where $\theta=\frac{\frac1r-\frac1p}{1-\frac1p}$, and thus $1-\theta=\frac{1-\frac1r}{1-\frac1p}$.
Next, observe that for $\frac1r+\frac1q=1$, by \rf{c_sequence} and \rf{lin2} we have
\begin{eqnarray}\label{osz3}
\left\|w_{n+1}(t)\right\|_1& \leq &\left\|u_0\right\|_1+C\intop\limits_0^t (t-s)^{-1/2}\left\|w_n(s)\nabla (-\Delta+\gamma)^{-1}w_n(s)\right\|_1\,{\rm d}s\\ \nonumber
& \leq &\left\|u_0\right\|_1+C\intop\limits_0^t (t-s)^{-1/2}\left\|w_n(s)\right\|_r\left\|\nabla(-\Delta+\gamma)^{-1}w_n(s) \right\|_q\,{\rm d}s\\ \nonumber
& \leq &\left\|u_0\right\|_1+C\intop\limits_0^t (t-s)^{-1/2}\left\|w_n(s)\right\|_r^2\,{\rm d}s,\\ \nonumber
& \leq &\left\|u_0\right\|_1+C\intop\limits_0^t (t-s)^{-1/2}\left\|w_n(s)\right\|_p^{2(1-\theta)}\left\|w_n(s)\right\|_1^{2\theta}\,{\rm d}s\\ \nonumber
&\le &\|u_0\|_1+C\intop\limits_0^t(t-s)^{-1/2}s^{2(1/r-1)}\|w_n(s)\|_1^{2\theta}\,{\rm d}s,
\end{eqnarray}
where we used inequality \eqref{Kgamma} for $r\geq\frac43$ and \eqref{ogr}.
Now, for $t\le T$ we take $r=\frac43$ so that $q=4$ and note that $\intop\limits_0^t(t-s)^{-1/2}s^{-1/2}\,{\rm d}s=\pi=\,{\rm const}$.
Let us define
$$
A_0(t)=\sup_{0<s\le t}\|w_0(s)\|_1,\quad \dots,\quad
A_n(t)=\sup_{0<s\le t}\|w_n(s)\|_1,
$$
so that $A_0(t)<\infty$, and as a consequence of \rf{osz3} we arrive at
\[
A_{n+1}(t)\le C_1+C_2A_n(t)^\varrho<\infty,
\]
where $\varrho=2\theta<1$, since $r=4/3$ and $p<2$.
Therefore $A_n(t)$ is uniformly bounded in $L^\infty(0,T)$ (with a bound which depends on $C_1$ and $C_2$ but is independent of $n$) by an easy recurrence argument. From the fact that $|\!|\!| w_n-u|\!|\!|\rightarrow 0$ when $n\rightarrow \infty$ we infer that for any $t>0$ the family $w_n(\cdot,t)$ converges to $u(\cdot,t)$ in $L^1_{\rm loc}(\mathbb{R}^2)$.
Applying the Fatou lemma we see that
$
\left\|u(t)\right\|_1\leq \liminf_{n}A_n(t)\leq C
$
for every fixed $0<t\le T$.
\end{proof}
\subsection*{Step 2. Global-in-time solutions for $\gamma$ large}
\begin{lemma}\label{lem:nonopt}
Now we keep $p\in\big(\frac43,2\big)$ and take any $\sigma\in(1,p)$. For every $u_0\in L^\sigma(\mathbb{R}^2)$, there exists a constant $\gamma(u_0)>0$ such that for all $\gamma\geq \gamma(u_0)$ equation \eqref{D} has a unique solution in the
new functional space
\begin{equation}
\widetilde{\mathcal E}=\{u\in L^\infty_{\rm loc}((0,\infty); L^p(\mathbb R^2)):\ \ \sup_{t>0}t^{1/\sigma-1/p}\|u(t)\|_p<\infty\},\label{E}
\end{equation}
endowed with the norm
\begin{equation}
|\!|\!| u|\!|\!|\equiv \sup_{t>0}t^{1/\sigma-1/p}\|u(t)\|_p<\infty.\label{norm}
\end{equation}
\end{lemma}
\begin{proof}
Let $\frac1r=\frac2p+\frac12-\frac1\sigma$ for some suitable $\sigma\in(1,p)$ so that $r\in(1,2)$. Moreover, denote by $q$ a number satisfying $\frac1p +\frac1q=\frac1r$. Under this choice of parameters we make sure that $\frac1p-\frac1q<\frac12$ and $q>2$, so that we can use \rf{Kgamma} to estimate the bilinear form $B$
\begin{equation} \nonumber
\begin{split}
&\|B(u,z)(t)\| _p
\le C\intop\limits_0^t(t-s)^{-1/2+1/p-1/r}\|u(s)\nabla(-\Delta+\gamma)^{-1}z(s)\|_r\,{\rm d}s \\
&\le C\intop\limits_0^t(t-s)^{2/\sigma-1/p-1}\|u(s)\|_p\|\nabla(-\Delta+\gamma)^{-1}z(s)\|_q\,{\rm d}s\\
&\le C\gamma^{-1/2-1/q+1/p}\intop\limits_0^t(t-s)^{1/\sigma-1/p-1} s^{2/p-2/\sigma}\\
&\times\left(\sup_{0<s\le t}s^{1/\sigma-1/p}\|u(s)\|_p\right)
\left(\sup_{0<s\le t}s^{1/\sigma-1/p}\|z(s)\|_p\right)\,{\rm d}s \\
&\le t^{1/p-1/\sigma}C\gamma^{1/\sigma-1}|\!|\!| u|\!|\!|\, |\!|\!| z|\!|\!|.
\end{split}
\end{equation}
Thus, we obtained inequality \rf{form2}
with the norm defined in \eqref{norm} and
with $\eta=C\gamma^{1/\sigma-1}$.
We may choose $\gamma(u_0)$ so large to have
$$\sup_{t>0} t^{1/\sigma-1/p}\|e^{t\Delta}u_0\|_p<\gamma^{1-1/\sigma}/4C$$
for all $\gamma\ge\gamma(u_0)$, which is possible due to estimate
\eqref{lin1}. Then, the proof is completed by applying Lemma \ref{fix}.
\end{proof}
\subsection*{Step 3. Proof of Theorem \ref{ex} and optimal hypercontractive estimates}
\
By Lemma \ref{lem:loc}, we have a local-in-time solution on an interval $(0,T]$ with $T>0$ independent of $\gamma$.
Moreover,
$u\left(\frac{T}{2}\right)\in L^\sigma(\mathbb{R}^2)\subset L^1\cap L^p$.
Thus, we may continue
this local-in-time solution $u(t)$ to the whole half-line $(0,\infty)$ choosing $\gamma>0$ sufficiently large. We notice that on the interval $\left(\frac{T}2,T\right)$ solutions obtained in Lemma \ref{lem:loc} and Lemma \ref{lem:nonopt} coincide as a consequence of uniqueness assertion of Lemma \ref{lem:loc}.
It remains to prove optimal decay estimates.
{\it The optimal $L^1$-bound.}
Next, we show that the solution satisfies the uniform global estimate
\begin{equation}
\sup_{t>0}\|u(t)\|_1<\infty.\label{ell-1}
\end{equation}
For $t\ge T$, similarly to the proof of Lemma \ref{L-1}, we consider a sequence of approximations of $u$, combine estimates \rf{interpolacja} and an analog of \rf{osz3} (this time on $(T,\infty)$) with $1<\sigma<\frac43<r<p<2<q$, $\theta=\frac{\frac1r-\frac1p}{1-\frac1p}$. Moreover, we define
\begin{equation}
\varepsilon=\frac{1-\frac1\sigma}{1-\frac1p}\;, \;\mbox{so that}\; t^{1/\sigma-1/p}=t^{(1-1/p)(1-\varepsilon)}. \label{wybor_e}
\end{equation}
Again, we arrive at an estimate similar to \varepsilonqref{L1}
\[
\left\|w_{n+1}(t)\right\|_1\leq \left\|u_0\right\|_1+\intop\limits_0^t(t-s)^{-1/2}\left\|w_n(s)\right\|_p^{2(1-\theta)}\left\|w_n(s)\right\|_1^{2\theta}\,{\rm d}s,
\]
where $\varrho=2\theta<1$ since we can choose $\frac2r<1+\frac1p$. We split the integral on the right-hand side into two integrals, one over the interval $\left(0,\frac{T}2\right)$ and the other over $\left(\frac{T}2,t\right)$. The first one is estimated by $C(T)$ in view of Lemma \ref{lem:loc} and \eqref{L1}. To estimate the second integral, we notice that
\begin{equation} \nonumber
\begin{split}
&\intop\limits_{T/2}^t(t-s)^{-1/2}\left\|w_n(s)\right\|_p^{2(1-\theta)}\left\|w_n(s)\right\|_1^{2\theta}\,{\rm d}s\\
&\stackrel{\eqref{norm}}{\leq} C A_n(t)^{\varrho}\intop\limits_{T/2}^t(t-s)^{-1/2}\left(s-\frac{T}2\right)^{2(1-\theta)(1/p-1/\sigma)}\,{\rm d}s,
\end{split}
\end{equation}
but
\[
\intop\limits_{T/2}^t(t-s)^{-1/2}\left(s-\frac{T}2\right)^{2(1-\theta)(1/p-1/\sigma)}\,{\rm d}s\leq C\left(t-\frac{T}2\right)^{1/2-2\left(1-1/r\right)(1-\varepsilon)}
\]
holds with $\varepsilon$ as in \eqref{wybor_e}. We notice that choosing $\sigma>1$ close enough to $1$ and $r>\frac43$ close enough to $\frac43$ we ensure that
$1/2-2(1-1/r)(1-\varepsilon)<0$. We proceed further as in the proof of Lemma \ref{L-1} and arrive at \eqref{ell-1}.
\begin{remark}
One can show by a standard method that
$u\in {\mathcal C}([0,T);L^1(\mathbb R^2))$. Here, it suffices to use the continuity of the bilinear form $B$ as, {\it e.g.}, in the proof in \cite[Theorem~1.1]{BGK}.
\end{remark}
{\it The optimal hypercontractive estimate for $p>1$. }
First, we improve the decay estimates from Lemma \ref{lem:nonopt}.
\begin{lemma}\label{step3}
For each $p\in(\frac43,2)$, the solution of \eqref{equ}--\eqref{eqv} with $u_0\in L^1(\mathbb{R}^2)$ satisfies
\begin{equation}
\sup_{t>0}t^{1-1/p}\left\|u(\cdot,t)\right\|_p<\infty.\label{osz6}
\end{equation}
\end{lemma}
\noindent
\proof \ \
By definition \eqref{E} of the space $\widetilde{\mathcal E}$, one immediately sees that it is enough to prove \eqref{osz6} for $t\geq T$.
By the Duhamel formula \rf{D}, \rf{lin2} and \rf{Kgamma}, we have
\begin{equation}\label{osz7}
\begin{split}
&\left\|u(t)\right\|_p \leq \left\|e^{t\Delta}u_0\right\|_p+C\intop\limits_0^t (t-s)^{-1/2-1+1/p}\left\|u(s)\nabla (-\Delta+\gamma)^{-1}u(s)\right\|_1\,{\rm d}s\\
\leq &\left\|e^{t\Delta}u_0\right\|_p+C\intop\limits_0^t (t-s)^{-1/2-1+1/p}\left\|u(s)\right\|_p\left\|\nabla (-\Delta+\gamma)^{-1}u(s)\right\|_q\,{\rm d}s\\
\leq &Ct^{1/p-1}\left\|u_0\right\|_1+C\intop\limits_0^{T/2} (t-s)^{-3/2+1/p}\left\|u(s)\right\|_p^2\,{\rm d}s+C\intop\limits_{T/2}^t (t-s)^{-3/2+1/p}\left\|u(s)\right\|_p^2\,{\rm d}s.\\
\end{split}
\end{equation}
Using \eqref{E} we estimate the second term on the right-hand side of \eqref{osz7} as
\[
\intop\limits_0^{T/2} (t-s)^{-3/2+1/p}\left\|u(s)\right\|_p^2\,{\rm d}s
\leq C\intop\limits_0^{T/2} (t-s)^{-3/2+1/p}s^{2(1/p-1)}\,{\rm d}s\leq C(T)\left(t-\frac{T}2\right)^{-3/2+1/p}.
\]
Hence for $t\geq T$ and in view of the fact that
\begin{equation}
\mbox{for}\;\; t\geq T \;\mbox{it holds}\;\;\frac{t}{2}\leq t-\frac{T}2\label{tt}
\end{equation}
relation \eqref{osz7} reads
\[
t^{1-1/p}\left\|u(t)\right\|_p\leq C\left\|u_0\right\|_1+Ct^{-1/2}+Ct^{1-1/p}\intop\limits_{T/2}^t (t-s)^{-3/2+1/p}\left\|u(s)\right\|_p^2\,{\rm d}s.
\]
In turn, in view of \eqref{E} and owing to the definition of $\varepsilon$ in \eqref{wybor_e}, for $t\geq T$ we arrive at
\begin{equation}
t^{1-1/p}\left\|u(t)\right\|_p\leq C\left\|u_0\right\|_1+C+Ct^{1-1/p}\intop\limits_{T/2}^t (t-s)^{-3/2+1/p}\left(s-\frac{T}2\right)^{-2(1-1/p)(1-\varepsilon)}\,{\rm d}s.\label{osz8}
\end{equation}
Next we use the inequality
\[
\intop\limits_{T/2}^t (t-s)^{-3/2+1/p}\left(s-\frac{T}2\right)^{-2(1-1/p)(1-\varepsilon)}\,{\rm d}s\leq C\left(t-\frac{T}2\right)^{1/p-1/2-2(1-1/p)(1-\varepsilon)},
\]
to see that by \eqref{tt} for $t\geq T$ \eqref{osz8} yields
\[
t^{1-1/p}\left\|u(t)\right\|_p\leq C+Ct^{1/2}\left(t-\frac{T}2\right)^{-2(1-1/p)(1-\varepsilon)}\leq C+Ct^{1/2-2(1-1/p)(1-\varepsilon)}.
\]
Since $p\in(\frac43,2)$, it is enough to pick $\varepsilon>0$ small enough to ensure that $\frac12-2\left(1-\frac1p\right)(1-\varepsilon)<0$, and we arrive at
\eqref{osz6} for $t\geq T$; Lemma \ref{step3} is proved.
$\square$
{\it The optimal decay estimate for other $p\in\left(1,\infty\right)$.}
In view of Lemma \ref{step3}, we see that Theorem \ref{ex} is true for $p=1$ and $p\in(\frac43,2)$. For $p\in (1, \frac43]$, we have by interpolation
\[
t^{1-1/p}\left\|u(t)\right\|_p\leq \left\|u(t)\right\|_1^\vartheta \left(t^{1-1/\bar{p}}\left\|u(t)\right\|_{\bar{p}}\right)^{1-\vartheta},
\]
where $\bar{p}<2$, $\vartheta=\frac{\frac1p-\frac{1}{\bar{p}}}{1-\frac{1}{\bar{p}}}$, and therefore Theorem \ref{ex} holds also for $p\in (1,\frac43]$.
Now, we can interpolate estimate \rf{osz6} for $p\in \left(\frac43,2\right)$ and \rf{ell-1} to get \rf{osz6} with any $p\in[1,2)$.
The last step of the proof of Theorem \ref{ex} is the extrapolation of the hypercontractive estimates \rf{osz6} for $q\in [2, \infty)$. Actually, it is enough to obtain the decay estimate for $q\in (2, \infty)$, the remaining case $q=2$ will follow by simple interpolation.
Taking $q\in (2,\infty)$, we know that
\begin{equation}\label{osz9}
\begin{split}
&\left\|u(t)\right\|_q \leq Ct^{1/q-1}\left\|u_0\right\|_1+C\intop\limits_0^t (t-s)^{-1/2-1/r+1/q}\left\|u(s)\nabla (-\Delta+\gamma)^{-1}u(s)\right\|_r\,{\rm d}s\\
\leq &Ct^{1/q-1}\left\|u_0\right\|_1+C\intop\limits_0^t (t-s)^{-1/2-1/r+1/q}\left\|u(s)\right\|_\sigma\left\|\nabla (-\Delta+\gamma)^{-1}u(s)\right\|_\rho \,{\rm d}s\\
\leq &Ct^{1/q-1}\left\|u_0\right\|_1+C\intop\limits_0^{t} (t-s)^{-1/2-1/r+1/q}\left\|u(s)\right\|_\sigma^2\,{\rm d}s.\\
\end{split}
\end{equation}
Here $r$ is chosen in such a way that $\frac1r=\frac{2}{\sigma}-\frac12$, so that for $\sigma\in (\frac43,2)$ we have $r\in(1,2)$, $r$ close to $2$. At the same time $\frac1\rho+\frac1\sigma=\frac1r$ and $\frac1\rho=\frac1\sigma-\frac12$; the above choice of parameters allows us to apply \eqref{Kgamma} to \eqref{osz9}. Relation \eqref{osz6} with $\sigma\in\left(\frac43,2\right)$,
$\left\|u(s)\right\|_\sigma\leq Cs^{1/\sigma -1}$,
applied to \eqref{osz9} yields
\[
\left\|u(t)\right\|_q\leq Ct^{1/q-1}\left\|u_0\right\|_1+C\intop\limits_0^{t} (t-s)^{-1/2-1/r+1/q}s^{2\left(1/\sigma -1\right)}\,{\rm d}s.
\]
Since
\[
\intop\limits_0^{t} (t-s)^{-1/2-1/r+1/q}s^{2\left(1/\sigma -1\right)}\,{\rm d}s= Ct^{-1/2-1/r+1/q+2/\sigma-1},
\]
we notice that
$\left\|u(t)\right\|_q\leq Ct^{1/q-1}\left\|u_0\right\|_1+Ct^{1/q-1}$
holds in view of the relation
\[
-\frac12-\frac1r+\frac1q+\frac2\sigma-1=\frac1q-1.
\]
Thus, the decay estimate for $q>2$ is proved.
$\square$
\section*{Acknowledgments}
This work was initiated during the visits of T. Cie\'slak at Uniwersytet Wroc{\l}awski; T.C. would like to express his gratitude for support and hospitality.
The first, the third and the fourth authors were supported by the NCN grant 2013/09/B/ST1/04412.
The second author was partially supported by the Polish Ministry of Science and Higher Education under the Iuventus Plus grant No. 0073/IP3/2011/71. The fourth author was also supported by the grant DEC-2012/05/B/ST1/00692.
\begin{thebibliography}{99}
\bibitem{B-SM}(MR1333870)
\newblock P. Biler,
\newblock The Cauchy problem and self-similar solutions for a nonlinear parabolic equation,
\newblock \emph{Studia Math.} \textbf{114}, 181--205 (1995).
\bibitem{BGK}(MR3411100) [10.3934/cpaa.2015.14.2117]
\newblock P. Biler, I. Guerra, G. Karch,
\newblock Large global-in-time solutions of the parabolic-parabolic Keller-Segel system on the plane,
\newblock \emph{Comm. Pure Appl. Analysis} \textbf{14} (2015), 2117--2126.
\bibitem{BK-JEE} (MR2643796) [10.1007/s00028-009-0048-0]
\newblock P. Biler, G. Karch,
\newblock Blowup of solutions to generalized Keller--Segel model,
\newblock \emph{J. Evol. Equ.} \textbf{10} (2010), 247--262.
\bibitem{BKL}(MR2519677) [10.1088/0951-7715/22/7/003]
\newblock P. Biler, G. Karch, P. Lauren\c cot,
\newblock Blowup of solutions to a~diffusive aggregation model,
\emph{ Nonlinearity} \textbf{22} (2009), 1559--1568.
\bibitem{BKZ} [10.1088/0951-7715/28/12/4369]
\newblock P. Biler, G. Karch, J. Zienkiewicz,
\newblock Optimal criteria for blowup of radial and $N$-symmetric solutions of chemotaxis systems,
\newblock \emph{Nonlinearity} \textbf{28} (2015), 4369--4387.
\bibitem{BW}(MR1661243)
\newblock P. Biler, W. A. Woyczy\'nski,
\newblock Global and exploding solutions of nonlocal quadratic evolution problems,
\newblock \emph{SIAM J. Appl. Math.} \textbf{59} (1999), 845--869.
\bibitem{BZ} (MR3411404)[10.4064/ba63-1-6]
\newblock
P. Biler, J. Zienkiewicz,
\newblock Existence of solutions for the Keller-Segel model of chemotaxis with measures as initial data,
\newblock \emph{Bull. Polish Acad. Sci. Mathematics} \textbf{63}, (2015) 41--51.
\bibitem{BDP}(MR2226917)
\newblock A. Blanchet, J. Dolbeault, B. Perthame,
\newblock Two-dimensional {K}eller-{S}egel model: optimal critical mass and qualitative properties of the solutions,
\newblock \emph{Electron. J. Differential Equations} \textbf{44}, 32 pp. (2006).
\bibitem{GMO} (MR1017289)
\newblock Y. Giga, T. Miyakawa, H. Osada,
\newblock Two-dimensional Navier-Stokes flow with measures as initial vorticity,
\newblock \emph{Arch. Rational Mech. Anal.} \textbf{104} (1988), 223--250.
\bibitem{KS-AM}(MR2812192) [10.4064/am38-3-1]
\newblock G. Karch, K. Suzuki,
\newblock Blow-up versus global existence of solutions to aggregation equations,
\newblock \emph{Appl. Math. (Warsaw)} \textbf{38} (2011), 243--258.
\bibitem{KS-JEE} (MR2407206) [10.1007/s00028-008-0375-6]
\newblock H. Kozono, Y. Sugiyama,
\newblock {\it Local existence and finite time blow-up of solutions in the 2-D Keller-Segel system,}
\newblock \emph{J. Evol. Equ.} \textbf{8} (2008), 353--378.
\bibitem{K-O} (MR1972874)
\newblock M. Kurokiba, T. Ogawa,
\newblock Finite time blow-up of the solution for a nonlinear parabolic equation of drift-diffusion type,
\newblock \emph{Differ. Integral Equ.} {\bf 16} (2003), 427--452.
\bibitem{Lem}(MR3129022)
\newblock P.-G. Lemari\'e-Rieusset,
\newblock Small data in an optimal Banach space for the parabolic-parabolic and parabolic-elliptic Keller-Segel equations in the whole space,
\newblock \emph{Adv. Diff. Eq.} \textbf{18} (2013), 1189--1208.
\bibitem{LR07}(MR2481755) [10.1007/s00220-008-0669-0]
\newblock{D. Li, J. L. Rodrigo},
\newblock Finite-time singularities of an aggregation equation in $\mathbb{R}^n$ with fractional dissipation,
\newblock \emph{Comm. Math. Phys.} \textbf{287} (2009), 687--703.
\bibitem{LR08}(MR2493179) [10.1016/j.aim.2008.10.016]
\newblock{D. Li, J. L. Rodrigo},
\newblock Refined blowup criteria and nonsymmetric blowup of an aggregation equation,
\newblock \emph{Adv. Math.} {\bf 220} (2009), 1717--1738.
\bibitem{LRZ} (MR2666316)
\newblock{ D. Li, J. L. Rodrigo, X. Zhang},
\newblock Exploding solutions for a nonlocal quadratic evolution problem,
\newblock \emph{Rev. Mat. Iberoam.} \textbf{26} (2010), 295--332.
\bibitem{N} (MR1783582)
\newblock T. Nagai,
\newblock Behavior of solutions to a parabolic-elliptic system modelling chemotaxis,
\newblock \emph{J. Korean Math. Soc.} \textbf{37} (2000), 721--732.
\bibitem{N1}(MR1887324) [10.1155/S1025583401000042]
\newblock T. Nagai,
\newblock Blowup of nonradial solutions to parabolic-elliptic systems modeling chemotaxis in two-dimensional domains,
\newblock \emph{J. Ineq. Appl.} \textbf{6} (2001), 37--55.
\bibitem{S}(MR0290095)
\newblock E. M. Stein,
\newblock \emph{Singular integrals and differentiability properties of functions},
\newblock Princeton Mathematical Series, No. 30, Princeton University Press, Princeton, N.J., 1970.
\bibitem{SYK}(MR3317626)[DOI:10.1016/j.jde.2014.12.033]
\newblock Y. Sugiyama, M. Yamamoto, K. Kato,
\newblock Local and global solvability and blow up for the drift-diffusion equation with the fractional dissipation in the critical space,
\newblock \emph{J. Diff. Eq.} \textbf{258} (2015), 2983--3010.
\end{thebibliography}
\varepsilonnd{document}
|
\begin{document}
\title{On the importance of data encoding in quantum Boltzmann methods}
\begin{abstract}
In recent years, quantum Boltzmann methods have gained more and more interest as they might provide a viable path towards solving fluid dynamics problems on quantum computers once this emerging compute technology has matured and fault-tolerant many-qubit systems become available.
The major challenge in developing a start-to-end quantum algorithm for the Boltzmann equation consists in encoding relevant data efficiently in quantum bits (qubits) and formulating the streaming, collision and reflection steps as one comprehensive unitary operation. The current literature on quantum Boltzmann methods mostly proposes data encodings and quantum primitives for individual phases of the pipeline assuming that they can be combined to a full algorithm.
In this paper we disprove this assumption by showing that for encodings commonly discussed in literature either the collision or the streaming step cannot be unitary. Building on this landmark result we propose a novel encoding in which the number of qubits used to encode the velocity depends on the number of time steps one wishes to simulate, with the upper bound depending on the total number of grid points.
In light of the non-unitarity result established for existing encodings, our encoding method is to the best of our knowledge the only one currently known that can be used for a start-to-end quantum Boltzmann solver where both the collision and the streaming step are implemented as a unitary operation. Furthermore our theoretical unitarity result can serve as a guideline on which types of encodings to consider or whether a `stop-and-go' method with repeated measurements and re-initializations is the method of choice.
\end{abstract}
\section{Introduction}
Since the first quantum computing boom in the 1990s, quantum computational fluid dynamics (QCFD) has been a field of interest to researchers worldwide. Due to the high computational demands of classical CFD, the exponential potential of quantum computers in combination with quantum parallelism and quantum indeterminacy has caused interest in the application. The first QCFD algorithms were proposed by Yepez and his co-workers around the turn of the century \cite{Yepez1998,Yepez2001,YepezBoghosian2001, Yepez2002, Pravia2003}. These algorithms are based on a quantum distributed computing approach assuming that many small-scale quantum computers are more realistic than one large many-qubit system. The core idea of the so-called quantum lattice-gas model is that each grid point of position-space gets its own 6-qubit quantum computer associated to it (which can also be groups of 6 qubits of a future many-qubit quantum computer). The benefit of this approach is that the possible quantum circuit depth and stable entanglement required remains very low, making it a realistic and relatively near-term approach given the capabilities of current quantum devices. Its downside is that to encode a grid of size $N$ a total of $6N$ qubits are required, which means that the number of qubits required grows linearly with the size of the grid. Given the limited number of quantum devices available and the large number of grid points required for solving practical problems with modern Boltzmann methods, this distributed approach presents a significant drawback. Furthermore, as we will show below, the computational basis state encoding of the velocity vector adopted in the aforementioned papers does not allow for implementing the streaming step as a unitary operator so that measurement and state re-initialization is mandatory after each time step.
After these early results by Yepez et al., the QCFD field became stagnant for about a decade until its recent resurgence, in particular, in the form of quantum Boltzmann methods.
Most recent are the methods presented in \cite{Todorova2020, Budinski2020, Budinski2021, Moawad2022, Schalkers2022, Steijl2023}, that all have their own strengths and weaknesses. Some papers include a streaming and specular reflection mechanism, but no collision methods yet \cite{Todorova2020, Budinski2020, Schalkers2022}. Other approaches have implemented a collision method using the linear combination of unitary approach \cite{Childs2012}, causing the algorithm to require a measurement-and-restart strategy after each time step \cite{Budinski2021}. Due to the high costs of quantum state preparation and the chance of measurement errors this `stop-and-go' strategy is hardly usable in practice. Other algorithms have managed to create a unitary collision operator, but have not yet been able to combine this with a streaming step into one start-to-end algorithm \cite{Moawad2022, Steijl2023}.
What remained an open problem is the development of a full-fledged quantum Boltzmann method (QBM) that implements both the streaming and the collision step as unitary operations. In this paper we present the first-of-its-kind full-fledged QBM building on a novel encoding scheme of the velocity vector that scales with the number of time steps. Furthermore, we prove rigorously that for the encoding schemes considered for universal quantum computers in all previous publications it is impossible to implement both streaming and collision as a unitary, downgrading them as candidates for any practical QBM. Taking both contributions of this paper together, our new encoding and the theoretical (negative) result on existing encodings, we hope to stimulate a paradigm shift in QBM research from focusing on encodings and algorithms for individual steps of the pipeline to developing full-fledged QBM algorithms.
\section{Lattice Boltzmann method}
In the Boltzmann method the macroscopic behavior of a fluid is simulated by considering the microscopic behavior of the fluid particles as they move through space and deriving the macroscopic quantities via averaging-based post-processing, instead of encoding the macroscopic variables directly, as is commonly done in other CFD methods like the finite volume method.
In this paper we consider the discrete lattice Boltzmann method, where a particle can only move with specific velocities taken from a finite set of discrete velocities. We define the structure of the method using the D$n$Q$m$ system, where $n$ represents the number of spatial dimensions and $m$ the number of discrete velocities considered. Figure \ref{fig:multiple_DnQm} gives examples of the commonly used D1Q2, D1Q3, D2Q5 and D2Q9 systems, respectively, in standard Boltzmann convention. For an in-depth review of the lattice Boltzmann method we refer to the book \cite{Krüger2017}.
\begin{figure}
\caption{Four examples of different types of D$n$Q$m$ possible. The top picture on the left portrays the D1Q2 setting and the top picture on the right portrays the D1Q3 setting (where a stationary particle can be included). The picture on the left bottom portrays the D2Q5 setting and the picture on the right below shows the D2Q9 setting. }
\label{fig:multiple_DnQm}
\end{figure}
Boltzmann methods simulate the macroscopic behavior of a fluid or gas by implementing a streaming step followed by particle collision on the microscopic level in each time step. When obstacles are present an additional reflection step is performed in each time step. For brevity we omit a detailed description of the latter and refer the interested reader to our recent work \cite{Schalkers2022} on this topic.
The streaming step is implemented by letting the particles move by one grid point per time step in the direction they are traveling currently. Figure \ref{fig:D1Q2_streaming} illustrates how the particles travel in one time step from the point $x$ to $x\pm 1$ respectively for the D1Q2 case. Similar illustrations can be constructed for two- and three-dimensional cases but are omitted here for brevity reasons.
To implement the collision step we define so-called equivalence classes of streaming patterns which have the same total mass and momentum and are thus considered to be equivalent. A combination of colliding particles can therefore be transformed into any combination from the same equivalence class upon collision without changing the total mass and momentum. Figure \ref{fig:equivalence_class} shows an example of two equivalent velocity combinations for the D2Q5 (and D2Q4) case.
\begin{figure}
\caption{Illustration of the streaming step for the D1Q2 case. Figure (a) shows the velocity vectors at position $x$ at time $t$. Figure (b) shows the same after configuration at time $t+1$ after particles have moved to positions $x-1$ and $x+1$, respectively. Red and blue colors identify the different streaming directions and their propagation pattern.}
\label{fig:y equals x}
\label{fig:three sin x}
\label{fig:D1Q2_streaming}
\end{figure}
In Section \ref{sec:novel_encoding}, we present a lattice Boltzmann encoding for which both the collision and the streaming step can be performed through unitary operations and thus admit a straightforward implementation on a sufficiently large fault-tolerant quantum computer. Before that, in Section \ref{sec:proof}, we provide rigorous mathematical proofs that show that such a unitary treatment of both streaming \emph{and} collision is impossible with the encodings adopted in current literature, thereby underpinning the uniqueness of our proposed encoding.
\begin{figure}
\caption{Illustration of two velocity combinations of the D2Q5 (and D2Q4) velocity spectrum that belong to the same equivalence class with total momentum 0 and mass 2: (a) particles streaming in the $q_1$ and $q_3$ direction, and (b) particles streaming in the $q_2$ and $q_4$ direction. }
\label{fig:equivalence_class}
\end{figure}
\section{Data encoding}\label{sec:proof}
As in any computational field, data encoding is pivotal for reaching a good result. More than five decades of classical CFD research and application have established `good practices' for storing field data such as densities and velocities at, e.g., the grid points or cell centers as floating-point numbers following the IEEE-754 standard. Every now and then new hardware developments stimulate research into non-standard formats, like reduced or mixed-precision \cite{Freytag2022}, but, in general, data encoding is not considered to be an open problem.
Not so in QCFD and, in particular, quantum Boltzmann methods. In this section we will review the main data encodings currently used for QBM and show that in all of them either the streaming step or the collision step cannot be unitary. This result, though discouraging at first sight, should be interpreted as wake-up call that novel quantum encodings for CFD states are imperative for devising full-fledged QCFD applications in the future. We propose one such novel encoding in Section \ref{sec:novel_encoding} and discuss its potential and limitations.
The two mainstream encodings of the velocity vector are the amplitude based encoding and the computational basis state encoding. In what follows, we will consider both approaches separately and show how they both lead to a contradiction in the unitarity of either the collision or the streaming operation.
\subsection{Amplitude based encoding}\label{ssec:amplitude_based_encoding}
The first type of encoding we consider is the so-called amplitude based encoding, used for several quantum Boltzmann methods \cite{Todorova2020, Budinski2020, Budinski2021, Schalkers2022}.
The amplitude based encoding of the velocity vector is such that at each location $\ket{x}$ there can be multiple particles with different velocities, for instance $\ket{v_0}$, $\ket{v_1}$, $\ket{v_2}$ and $\ket{v_3}$ for D2Q4. Here and below, $\ket{i}$ denotes the representation of $i$ as bit string. The state of the system at this point $x$ can then be encoded as\footnote{Note that we distinguish in our notation between the grid point $x$ and its representation $\ket{x}$ as part of the quantum register.}
\begin{equation}
\ket{x}\left (\alpha_0\ket{v_0} + \alpha_1\ket{v_1} + \alpha_2\ket{v_2} + \alpha_3\ket{v_3} \right ),
\end{equation}
where $\alpha_0$, $\alpha_1$, $\alpha_2$ and $\alpha_3$ are complex numbers that simply represent the relative weight or amount of particles traveling at the given velocity at grid point $x$. For simplicity we will assume that $|\alpha_0|^2+|\alpha_1|^2+|\alpha_2|^2+|\alpha_3|^2=1$, and so in this example there are only particles at grid point $x$ but the proof extends trivially to the general case with particles spread around the grid.
In order to show that this encoding of the velocity vector inevitably leads to non-unitary collision operators, let us assume a system in a specific state $\ket{\psi_1}$, with only particles traveling with velocities $\ket{v_0}$ and $\ket{v_1}$, meaning that $|\alpha_0|,|\alpha_1| > 0$ and $\alpha_2=\alpha_3=0$. Then we can write the state of the system as
\begin{equation}\label{eq:sys1}
\ket{\psi_1} = \ket{x}\left (\alpha_0\ket{v_0} + \alpha_1\ket{v_1} \right ).
\end{equation}
Now assume that an equivalent velocity combination exists consisting of particles traveling with velocities $\beta_2\ket{v_2} + \beta_3\ket{v_3}$, where we have $|\beta_2|,|\beta_3| > 0$. To realize this potential outcome of a collision as a quantum algorithm, we need to implement the transformation between both equivalent states as a unitary operation
$U_\text{col}$ which changes the states of the velocity encodings as follows
\begin{equation}
\begin{split}
\ket{\psi_1^\prime} &= I \otimes U_\text{col} \ket{\psi_1} \\& = \ket{x}\otimes U_\text{col}\left (\alpha_0\ket{v_0} + \alpha_1\ket{v_1} \right ) \\ & =\ket{x} \left ( \gamma_0(\alpha_0\ket{v_0} + \alpha_1\ket{v_1}) + \gamma_1(\beta_2\ket{v_2} + \beta_3\ket{v_3}) \right ).
\end{split}
\end{equation}
Here, if $\gamma_0=1$ and $\gamma_1=0$ no collision is taking place (and we simply implement an identity operation) and if $\gamma_1=1$ we fully change from the original velocities to its alternative representative from the same equivalence class.\footnote{Here $\gamma_0$, $\gamma_1$ are chosen to reflect the fact that a collision operation should switch weight of a combination of velocities in an equivalent class to another combination of velocities in the same equivalence class. This equation could be written in a less restrictive way by splitting $\gamma_0$ and $\gamma_1$ up into separate amplitudes $\gamma_i$ for all the basis states $\ket{v_i}$, the same contradiction of unitarity as presented below however could be reached.} Note that to preserve unitarity $|\gamma_0|^2+|\gamma_1|^2=1$ must hold.
Let us now consider another system in state $\ket{\psi_2} = \ket{x}\ket{v_2}$. Applying the unitary operation $U_\text{col}$ should not affect the state at all as a single speed is only in an equivalence class with itself, and so the required behavior for $U_\text{col}$ is
\begin{equation}\label{eq:sys2_end}
\begin{split}
\ket{\psi_2^\prime} &=I \otimes U_\text{col}\ket{\psi_2} \\ & = \ket{x}U_\text{col}\ket{v_2} \\ & = e^{i\theta}\ket{x}\ket{v_2},
\end{split}
\end{equation}
with $\theta\in (0,2\pi]$. That is, the collision operator must preserve the single-velocity state except for changes in the phase factor $e^{i\theta}$ that can be neglected.
Now that we have identified the required behavior for $U_\text{col}$ to implement a collision operation, we can prove that any $U_\text{col}$ that meets both requirements simultaneously cannot be unitary. Here, we resort to the characterization $U_\text{col}^\dagger U_\text{col}=I$ of unitary operators, with superscript $\dagger$ denoting the adjoint operator.
\begin{proof}
To reach a contradiction, assume that $U_\text{col}$ is a unitary operator. Then it must preserve the inner product for all possible states $\ket{\phi_1}$ and $\ket{\phi_2}$
\begin{equation}
\braket{\phi_1 | \phi_2} = \bra{\phi_1} U_\text{col}^\dagger U_\text{col} \ket{\phi_2}.
\end{equation}
However, for a collision operation $U_\text{col}$ that behaves as expected on the system states described in Equations \eqref{eq:sys1} to \eqref{eq:sys2_end}, it follows that
\begin{equation}
\begin{split}
0 &=\braket{\psi_1|\psi_2} \\ & = \bra{\psi_1} \left ( I \otimes U_\text{col} \right )^\dagger \left ( I \otimes U_\text{col} \right ) \ket{\psi_2} \\ & = e^{i\theta}\left ( \gamma_0(\alpha_0\bra{v_0} + \alpha_1\bra{v_1}) + \gamma_1(\beta_2\bra{v_2} + \beta_3\bra{v_3}) \right ) \bra{x} \ket{x} \ket{v_2} \\ & = e^{i\theta}\gamma_1\beta_2.
\end{split}
\end{equation}
The first equality follows from the fact that $\ket{\psi_1}$ and $\ket{\psi_2}$ are orthogonal by construction. The second one holds under the assumption of $U_\text{col}$ being unitary, which is disproved by the fact that the entire equality chain only holds for the trivial case $\gamma_1=0$ (as $|\beta_2|>0$ by definition of the state $\ket{\psi_1}$), that is, when $U_\text{col}$ does not implement the collision operation.
From this we can conclude that an amplitude based encoding of the velocity does not allow for a unitary implementation of the collision operation.
\end{proof}
Notice that this proof works for any amplitude based encoding of $v$ where the different possible velocities at a position are all represented by their own basis state as there will always be a case with only a single incoming velocity, for which an identity operation up to a phase shift should take place, while at the same time there will be combinations of velocities for which we want some weight of the system to change from one combination of velocities to another combination of velocities in the same equivalence class. These two antagonizing requirements will always lead to the same contradiction of unitarity proven above and we further expand on this intuition in Section \ref{ssec:intuition}.
\subsection{Computational basis state encoding}\label{ssec:comp_basis_encoding}
The second type of encoding of a quantum state considered is the computational basis encoding, used in several quantum lattice Boltzmann papers such as \cite{Yepez1998, Yepez2001, YepezBoghosian2001, Yepez2002, Pravia2003, Moawad2022, Steijl2023}. Using this encoding the contradiction of unitarity in the collision operation can be avoided by encoding the velocity of the qubits at a position $\ket{x}$ in space by identifying each direction particles could be streamed from with its own qubit, which will be set to one if and only if there is a particle streaming from that direction.
As an example consider the D2Q4 lattice depicted in Figure \ref{fig:D2Q4_ex}. In this case the velocity can be encoded using four qubits $q_0$, $q_1$, $q_2$ and $q_3$ where the state
\begin{equation}
\ket{x}\ket{v} = \ket{x}\ket{q_0q_1q_2q_3} = \ket{x}\ket{0110}
\end{equation}
is such that from the center point $(1,1)$, there is a particle streaming to $(1,2)$ and a particle streaming to $(0,1)$ but not to $(2,1)$ or $(1,0)$.
\begin{figure}
\caption{Illustration of the computational basis state encoding for the D2Q4 lattice. For each grid point $x$ we set the respective qubit $q_j$ to one if and only if there is a particle streaming in that direction, i.e. $\ket{v}=\ket{q_0q_1q_2q_3}$.}
\label{fig:D2Q4_ex}
\end{figure}
Using this encoding the collision step can be defined quite naturally as a unitary operation. However, we run into trouble when attempting to define a unitary streaming step $U_\text{str}$ as we demonstrate in what follows.
To simplify notation let us restrict ourselves to the D1Q2 lattice and consider the two settings at time $t$ from Figures \ref{fig:D1Q2ex_1} and \ref{fig:D1Q2ex_2}, which can be encoded as
\begin{align}
\ket{\psi_1} &= \frac{1}{2}\sum_{x=0}^3 \ket{x}\ket{v_x}\\ &= \frac{1}{2}\left ( \ket{00}\ket{00} + \ket{01}\ket{11} +
\ket{10}\ket{10} + \ket{11}\ket{10}\right ),
\label{eq:psi1_str}
\end{align}
and
\begin{equation}
\ket{\psi_2} = \frac{1}{2} \left ( \ket{00}\ket{01} + \ket{01}\ket{01} +
\ket{10}\ket{00} + \ket{11}\ket{11} \right ),
\label{eq:psi2_str}
\end{equation}
respectively. It then follows directly that
\begin{equation}
\braket{\psi_1|\psi_2} = 0.
\label{eq:orthogonal}
\end{equation}
Upon streaming, the systems from Figures \ref{fig:D1Q2ex_1} and \ref{fig:D1Q2ex_2} change from their state at time $t$ (top lattice) to that at time $t+1$ (bottom lattice), i.e.
\begin{equation}
\ket{\psi_1^\prime} = \frac{1}{2} \left ( \ket{00}\ket{11} + \ket{01}\ket{00} +
\ket{10}\ket{10} + \ket{11}\ket{10} \right ),
\label{eq:psiprime1_str}
\end{equation}
and
\begin{equation}
\ket{\psi_2^\prime} = \frac{1}{2} \left ( \ket{00}\ket{11} + \ket{01}\ket{00} +
\ket{10}\ket{01} + \ket{11}\ket{01} \right ) ,
\label{eq:psiprime2_str}
\end{equation}
respectively. As in the previous section, we will show by contradiction that any operation $U_\text{str}$ for which $U_\text{str}\ket{\psi_1} = \ket{\psi_1^\prime}$ and $U_\text{str}\ket{\psi_2} = \ket{\psi_2^\prime}$ cannot be unitary.
\begin{proof}
Let us assume that $U_\text{str}$ is unitary, i.e. it preserves the inner product
\begin{equation}
\braket{\phi_1 | \phi_2} = \bra{\phi_1} U^\dagger_\text{str} U_\text{str} \ket{\phi_2},
\end{equation}
for all states $\ket{\phi_1}, \ket{\phi_2}$. Substituting the states \eqref{eq:psi1_str} and \eqref{eq:psi2_str} on the left side, and \eqref{eq:psiprime1_str} and \eqref{eq:psiprime2_str} into the right inner product we arrive at the contradiction
\begin{equation}
0=\braket{\psi_1 | \psi_2} = \bra{\psi_1}U_\text{str}^\dagger U_\text{str} \ket{\psi_2} =
\braket{\psi_1^\prime | \psi_2^\prime} = \frac{1}{2}.
\end{equation}
The first equality follows from the orthogonality property \eqref{eq:orthogonal}, and the second one from the assumption that $U_\text{str}$ is a unitary operator. The resulting contradiction shows that no such unitary $U_\text{str}$ can exist.
\end{proof}
\begin{figure}
\caption{D1Q2 example setting 1. The binary encoding above the arrows indicate whether or not a particle is flowing there in that time step. 1 indicates that there is a particle streaming there and 0 indicates that there is no particle. In the example setting we consider periodic boundary conditions. The top figure shows the state of the system at time $t$. The figure below shows the state of the system at time $t+1$.}
\label{fig:D1Q2ex_1_1}
\label{fig:D1Q2ex_1_2}
\label{fig:D1Q2ex_1}
\end{figure}
\begin{figure}
\caption{D1Q2 example setting 2, the binary encoding above the arrows indicate whether or not a particle is flowing there in that time step. 1 indicates that there is a particle streaming there and 0 indicates that there is no particle. In the example setting we consider periodic boundary conditions. The top figure shows the state of the system at time $t$. The figure below shows the state of the system at time $t+1$.}
\label{fig:y equals x}
\label{fig:three sin x}
\label{fig:D1Q2ex_2}
\end{figure}
As in Section \ref{ssec:amplitude_based_encoding} this proof extends to any computational basis encoding where each possible combination of velocities at a specific lattice point is encoded using its own basis state, as one can always construct two situations with no overlap at time $t$ that will have non-zero overlap after streaming at time $t+1$. This proof also extends trivially to any other D$n$Q$m$ setting as the streaming possibilities of D1Q2 are essentially a subset of any other system and thus the same example can be used by setting the other streaming directions to 0.
\subsection{Intuition and extension of non-unitarity proofs}\label{ssec:intuition}
In this section we expand on our non-unitarity proofs by providing physical intuition behind the proofs presented above. It is intended to give insight into what types of encodings our non-unitarity proof extends to, and what physical features of the system necessarily lead to the non-unitarity for these encodings.
Consider the proof from Section \ref{ssec:amplitude_based_encoding} that shows that the amplitude based encoding, where each velocity direction is identified through its own basis state leaving the total velocity at a position $x$ to be a superposition of such basis states, prevents the collision operator $U_\text{col}$ from being unitary. Since it encodes each streaming direction as a different basis state, the quantum encodings of the velocity directions are all orthogonal to one another. This is also necessary, since if the basis states of the possible streaming directions are not orthogonal, we cannot fully distinguish between them. However, this orthogonality of the different velocity directions leads directly to the non-unitarity of $U_\text{col}$. Since a collision operator rotates a given linear combination of basis states into a linear combination of other basis states in such a way that the represented streaming patterns belong to the same equivalence class, it will also rotate `pure' velocities represented by a single basis state into another basis state, leading to a nonphysical and undesired change of velocities.
Following this line of argumentation it can be seen that the non-unitarity of $U_\text{col}$ is not so much a result of a specific choice of encoding but an inherent non-unitarity of the collision step itself that directly leads to the idea of computational basis state encoding, where each velocity pattern (i.e. the combination of velocities) at a grid point is encoded as its own basis state, and not as a unitary combination of all the basis states representing a non-zero contribution.
When encoding the velocity pattern at each grid point as a basis state, naturally, the non-unitarity of collision falls away and we can find a straightforward unitary operator to implement the collision step.
However, such an encoding will always lead to non-unitarity of streaming due to the non-local nature of a streaming operation. Consider an arbitrary point in space $x$ and imagine two different scenarios with two different combinations of speeds $\ket{v_1}$ and $\ket{v_2}$ at this point. Then the inner product between $\ket{x}\ket{v_1}$ and $\ket{x}\ket{v_2}$ must be 0, as these are different basis states. However, the velocity states of the systems at position $x$ in the next time step do not depend on the current velocity states in the lattice point. In fact, they only depend on the velocity states of the neighboring lattice points. Since the inner product of the states at the point $x$ at the next time step does not depend on the current states at the point $x$, in the next time step the velocity at the point $x$ of the two systems could be identical, and hence, the inner product could be one. There is no way of ensuring that this can only happen when the inner product at some other point $x^\prime$ of the systems was non-zero before as each grid point has velocity vectors in multiple directions determining its associated velocity basis state.
This shows that any quantum encoding that successfully implements both streaming and collision as a unitary operation must belong to one of the following three types. The first type is an amplitude based type encoding, where the different velocities are not orthogonal and thus not entirely distinguishable. The second type is a computational basis state encoding where the non-locality of streaming is somehow avoided. The last type is a completely novel encoding method that avoids both non-unitarity problems entirely.
In the next section we will present precisely one such idea.
\section{Space-time data encoding}\label{sec:novel_encoding}
In this section we propose a novel space-time data encoding that enables unitary collision \emph{and} streaming at the same time. To the best of our knowledge, this is the first-of-its-kind start-to-end quantum Boltzmann algorithm that does not require measurement and quantum-state re-initialization after each time step.
In what follows, we adopt an extended computational basis state encoding, where at each location $x$ we take into account the velocities at all grid points in the vicinity of $x$. Here, `in the vicinity of $x$' means that a particle can theoretically reach the grid point $x$ within the number of time steps still to be performed before measurement. Mathematically speaking being `in the vicinity of $x$' means being, respectively, in the so-called extended von Neumann, Moore or hexagonal neighborhood of the point $x$, depending on the lattice structure.\footnote{The von Neumann neighborhood of extent $r$ defines the diamond-shaped set of points at a Manhattan distance of up to $r$ from the point $x$. It applies to, e.g., D2Q4, D2Q5, D3Q6, and D3Q7. The Moore distance extends the former one by diagonal directions and applies to, e.g., D2Q8, D2Q9, D3Q26, and D3Q27. As its name suggests, the hexagonal neighborhood applies to D2Q6 and its extension to its three-dimensional counterpart.}
This leads to a trade-off between the number of time steps that can be performed between measurements and the number of qubits required to encode the velocity at each grid point $x$. The more time steps one wishes to take between measurement-and-re-initialization cycles, the more qubits are required for our space-time encoding. Obviously the maximum number of qubits required to implement the velocity without any in-between measurements must be such that the entire grid is spanned. For a D$n$Q$m$ lattice this will be $mN_g$, where $N_g$ is the total number of grid points. When encoding the proposed method on a classical computer $mN_g$ bits would also be required, so when encoding the full domain there is no quantum benefit in terms of (qu)bit numbers. The quantum improvement comes from exploiting quantum parallelism, which is done as long as we do not encode the whole space.
In what follows, let $N_t$ denote the number of streaming steps to be performed between (re-)initialization and measurement. We extend the computational basis state encoding of velocity directions from Section \ref{ssec:comp_basis_encoding} to take into account all the speed states from grid points in the neighborhood of $x$ that can (at least theoretically) reach $x$ within $N_t$ streaming steps. This takes away the non-locality of the streaming operator, which led to the non-unitarity of $U_\text{str}$ for the `regular' computational basis state encoding at the cost of increasing the number of qubits required to encode all required velocity data.
We will give a detailed description of this encoding for the D2Q4 lattice, but want to note that it can be extended naturally to any other choice of D$n$Q$m$. Consider the D2Q4 lattice given in Figure \ref{fig:D2Q4} with qubit $q_j$ set to one if and only if there is a particle traveling with velocity direction $j$ from grid point $x$ into a neighboring grid point in the current time step. We now extend this encoding to include \emph{all} possible velocities at positions `in the vicinity of $x$' for the total of $N_t$ time steps in order to obtain a unitarily streamable encoding. This is illustrated in Figure \ref{fig:D2Q4_quantum} for a single time step, i.e. $N_t=1$ yielding the encoding
\begin{equation}
\ket{x}\ket{q_{19}q_{18} \dots q_0}.
\end{equation}
For D2Q4, the number of qubits encoding the possible velocity states per grid location $x$ grows with the number of time steps (still) to be taken as
\begin{equation}\label{eq:von_Neumann}
n_v = 4 + \sum_{i=1}^{N_t}16i = 8N_t^2 + 8N_t + 4,
\end{equation}
where the maximum number of qubits required to encode all velocity directions over the entire grid equals $4N_g$ as stated before.\footnote{Note that the growth rate of qubit numbers per time step depends on the choice of D$n$Q$m$. The number of qubits required is equal to the number of points in the extended Von Neumann, Moore or hexagonal neighborhood, depending on which choice of $n$ and $m$ considered.}
Similarly it can be shown that for $d$ dimensions the growth rate is of the order $\mathcal{O}\left (N_t^d \right)$.
We can now encode the collision step by first identifying the equivalence class for the D2Q4 lattice. We note that at each grid point $x$ as represented in Figure \ref{fig:D2Q4} the states $\ket{q_0q_1q_2q_3} = \ket{1010}$ and $\ket{q_0q_1q_2q_3} = \ket{0101}$ belong to the same equivalence class (cf. Figure \ref{fig:equivalence_class}), as they have the same total mass and momentum.\footnote{The other equivalence classes are $\ket{q_0q_1q_2q_3} = \ket{1000}$ and $\ket{q_0q_1q_2q_3} = \ket{1100}$ and all cyclic shifts of these patterns, and $\ket{q_0q_1q_2q_3} = \ket{1111}$. However, they all have just a single representative so that we define the collision operator based on the ambiguous case.} We implement the collision step by defining a unitary operator $U_\text{col}$ which performs the following mappings
\begin{align}
U_\text{col}\ket{1010} &= \phantom{-}\alpha\ket{1010} + \beta\ket{0101},\\
U_\text{col}\ket{0101} &= -\beta\ket{1010} + \alpha\ket{0101},
\end{align}
with $\alpha, \beta \in \mathbb{C}$ and $|\alpha|^2+|\beta|^2=1$,
while acting as the identity operation on any other basis state. It can easily be verified that this operation is unitary. With the so-defined $U_\text{col}\in \mathbb{C}^{2^4 \times 2^4}$, we can write the total collision operation for an encoding of the velocity states $v$ consisting of $n_v=4k$ qubits as $k$-fold Kronecker products of $U_\text{col}$ operations, i.e. $U_\text{col}^\text{tot}=U_\text{col} \otimes \dots \otimes U_\text{col}$. Since each $U_\text{col}$ requires a few CNOT and a single triple controlled rotation gate the total collision operator can be efficiently implemented even on near-term devices.\footnote{We can implement the described collision operator by first applying three CNOT operations to the system turning the states into $\ket{1010} \mapsto \ket{1110}$ and $\ket{0101} \mapsto \ket{1111}$. Subsequently a triple controlled rotation operation of choice is applied to the right-most qubit (controlled on the three left-most qubits). Finally the initial three CNOT operations are applied in reverse order to reset all velocity states correctly.}
In practice the total collision operator $U_\text{col}^\text{tot}$ differs per time step, since its local counterpart $U_\text{col}$ only needs to be applied to velocity states `in the vicinity of $x$'.
In the first out of the $N_t$ time steps it is important for all qubits representing velocity states `in the vicinity of $x$' to be updated correctly. In the very last time step, however, it is only important for the qubits $q_0$, $q_1$, $q_2$ and $q_3$ to end up in the correct state. The more time steps $t$ have been taken, the less time steps $N_t-t$ are still to be taken and so the 4-qubit local collision operator $U_\text{col}$ only needs to be applied to the remaining qubits relevant for encoding the `directly connected' velocity states as given in Equation \eqref{eq:von_Neumann}.
With this logic we can define a collision operator per time step $t$ as
\begin{equation}
U_{\text{col},t}^\text{tot}=\underbrace{U_\text{col} \otimes \dots \otimes U_\text{col}}_{\text{$c$ collision operations}} \otimes \underbrace{I \otimes \dots \otimes I}_{\text{identity operations}},
\end{equation}
where $c = 2(N_t-t)^2 + 2(N_t-t) +1$ and the identity operations are added to avoid dimensionality issues. In practice no operation will be applied on the qubits encoding velocity states not `in the vicinity of $x$' within $N_t-t$ time steps.
Our space-time encoding enables different manners of implementing the streaming step. It can easily be seen that the way the streaming method should be implemented differs per time step $t$ depending on which positions will be `in the vicinity of $x$' in the next time step as well. At the first time step it is important for (almost) all qubits to be streamed to a very specific position, whereas in the last time step it is only important for the qubits $q_0$, $q_1$, $q_2$ and $q_3$ to end up in the correct state. For the example shown below we are only considering a total of one step to be taken (i.e. $N_t=1$) and so we only need to consider the speeds that will stream to location $x$ in one time step. In this case that means that streaming consists of performing a swap operation between the following qubit pairs $q_0$ and $q_{12}$, $q_1$ and $q_{17}$, $q_2$ and $q_6$ as well as $q_3$ and $q_{11}$.
Also in general (i.e. $N_t>1$), the streaming step can be implemented by a combination of swap gates. Following the same in-the-vicinity-of-$x$ argument as was used for the collision step, a total of
\begin{equation}
n_\text{swap}(t) = 4+\sum_{i=1}^{N_t-t }16i = 8 \left ( N_t - t \right )^2 + 8\left ( N_t - t \right ) +4
\end{equation}
swap gates are required to update as many velocity-encoding qubits in time step $t$, whereby these swap operations can be performed largely in parallel.\footnote{In each time step the swap operations in the 4 (or generally speaking $m$) different directions can be performed in parallel. Furthermore the swap operations for the velocities in the same direction but not in the same `line of streaming' can all be performed in parallel. Therefore we only need to take into account the velocities in the same line of streaming and the depth of the circuit is determined by the longest `line of streaming', which is equal to $N_t-t$. In each layer of the swap operations at least half of the $N_t-t$ velocities can be swapped to the correct position. Therefore a total of $\log_2 \left (N_t-t \right )$ swap operations needs to be performed in the $t$-th time step.}
The depth of the streaming circuit at time $t$ will amount to
\begin{equation}
d_\text{str}(t) = \log_2 \left ( N_t-t \right )
\end{equation}
swap operations at time $t$.
\begin{figure}
\caption{Illustration of the computational basis state encoding for D2Q4.}
\label{fig:D2Q4}
\end{figure}
\begin{figure}
\caption{Illustration of the space-time encoding for D2Q4 for a single time step.}
\label{fig:D2Q4_quantum}
\end{figure}
\section{Conclusion}
In this paper we have shown that current data encoding methods considered for quantum Boltzmann methods do not allow for treating both streaming and collision as unitary quantum operations. We have provided both a mathematical proof of its impossibility, and insight into the physical properties of the system and encodings that lead to this behavior. Using this insight we subsequently developed a new space-time data encoding method that does allow for both streaming and collision to be implemented as a unitary operation. This paper should serve as a guideline on where (not) to look for successful quantum encodings of the lattice Boltzmann and other QCFD methods.
\printbibliography
\end{document}
|
\begin{document}
\title{Functional equations for Selberg zeta functions with Tate motives}
\author{Shin-ya Koyama\footnote{Department of Biomedical Engineering, Toyo University,
2100 Kujirai, Kawagoe, Saitama, 350-8585, Japan.} \ \& Nobushige Kurokawa\footnote{Department of Mathematics, Tokyo Institute of Technology,
Oh-okayama, Meguro-ku, Tokyo, 152-8551, Japan.}}
\maketitle
\begin{abstract}
For a compact Riemann surface $M$ of genus $g\ge 2$, we study the functional equations
of the Selberg zeta functions attached with the Tate motives $f$.
We prove that certain functional equations hold if and only if $f$ has the absolute automorphy.
\end{abstract}
Key Words: Selberg zeta functions, functional equations, Tate motives
AMS Subject Classifications: 11M06, 11M41, 11F72
\section*{Introduction}
For a compact Riemann surface $M$ of genus $g\ge2$ the standard Selberg zeta function $Z_M(s)$ is constructed as
$$
Z_M(s)=\prod_{P\in\Prim(M)}\prod_{n=0}^\infty\l(1-N(P)^{-s-n}\r),
$$
where $\Prim(M)$ denotes the set of primitive closed geodesics and the norm $N(P)$ is defined by
$$
N(P)=\exp(\mathrm{length}(P)).
$$
It has the functional equation under $s\rightarrow 1-s$:
$$
Z_M(1-s)=Z_M(s)\exp\l((4-4g)\int_0^{s-\f12}\pi t \tan(\pi t)dt\r).
$$
This functional equation was proved by Selberg \cite{S1, S2} and the following symmetric version was found later:
$$
\hat Z_M(1-s)=\hat Z_M(s),
$$
where
$$
\hat Z_M(s)=Z_M(s)\G_M(s)
$$
with
$$
\G_M(s)=(\G_2(s)\G_2(s+1))^{2g-2}.
$$
This double gamma function $\G_2(s)$ is the normalized one used in \cite{KK} and we will recall the proof of the
symmetric functional equation for $Z_M(s)$ in the text.
Now, the simple Euler product
$$
\z_M(s)=\prod_{P\in\Prim(M)}(1-N(P)^{-s})^{-1}=\f{Z_M(s+1)}{Z_M(s)}
$$
was also studied later and it is a more natural analog of the usual Euler product for the Riemann zeta function
$$
\z(s)=\prod_{p:\,\text{primes}}(1-p^{-s})^{-1}.
$$
Especially the proof of the prime number theorem
$$
\pi(x)\sim\f x{\log x}\quad(x\to\infty)
$$
applied to $\z_M(s)$ gives the prime geodesic theorem
$$
\pi_M(x)\sim\f x{\log x}\quad(x\to\infty),
$$
where
$$
\pi_M(x)=\#\{P\in\Prim(M)\ |\ N(P)\le x\}.
$$
The functional equation of $\z_M(s)$ has the following form:
$$
\z_M(-s)=\z_M(s)^{-1}(2\sin(\pi s))^{4-4g}.
$$
In this paper we study the functional equations for $\z_{M(f)}(s)$ with Tate motives $f$.
Here we define $\z_{M(f)}(s)$ as
$$
\z_{M(f)}(s)=\prod_k\z_M(s-k)^{a(k)}
$$
for a Laurent polynomial
$$
f(x)=\sum_{k\in\Z}a(k)x^k\in\Z[x,x^{-1}].
$$
It may be suggestive to consider $x=\mathbb T$ the Tate twist.
Of course $\z_{M(1)}(s)=\z_M(s)$ in our notation.
We describe results on $\z_{M(f)}(s)$ only for ``odd'' $f$ here in Introduction.
See the text concerning the ``even'' cases.
\noindent{\bf Theorem}
Let $M$ and $f$ be as above.
For each integer $D$ the following conditions are equivalent.
\begin{enumerate}[\rm (1)]
\item $\z_{M(f)}(D-s)=\z_{M(f)}(s)$.
\item $f(x^{-1})=-x^{-D}f(x)$.
\end{enumerate}
\noindent{\bf Remark}
Condition (2) is called the {\it absolute automorphy} \cite{KT}.
In the paper \cite{KT} the definition of absolute automorphic forms are described in a more general setting
for any function $f$ on positive real numbers, and
the theory of absolute zeta functions $\z_f(s)$ is developed,
which are the automorphic $L$-functions constructed from the absolute automorphic forms $f$.
It is in the framework of absolute mathematics \cite{DKK, KO}.
For example, let $f(x)=(x-1)^r$ for an odd integer $r\ge1$. Then we see that
$$
f(x^{-1})=-x^{-r}f(x).
$$
Hence Theorem gives the functional equation of $\z_{M(f)}(s)$ as
$$
\z_{M(f)}(r-s)=\z_{M(f)}(s).
$$
A remarkable point is that we need no ``gamma factors'' here.
In the simplest case $r=1$ we get the functional equation for
$$
\z_{M(f)}(s)=\f{\z_{M}(s-1)}{\z_{M}(s)}
$$
as
$$
\z_{M(f)}(1-s)=\z_{M(f)}(s).
$$
We remark that the study of the functional equations for
$$
Z_{M(f)}(s)=\prod_k Z_{M}(s-k)^{a(k)}
$$
is quite similar.
We add a few more comments on $Z_{M(f)}(s)$. Let
$$
f(x)=\sum_k a(k)x^k\in\Z[x,x^{-1}]
$$
satisfying
$$
f(x^{-1})=Cx^{-D}f(x)
$$
with $C=\pm1$. Then
$$
Z_{M(f)}(s)=\prod_k Z_M(s-k)^{a(k)}
$$
has the functional equation
$$
Z_{M(f)}(D+1-s)=Z_{M(f)}(s)^C S_{M(f)}(s)^C,
$$
where
$$
S_{M(f)}(s)=\prod_k S_{M}(s-k)^{a(k)}
$$
with
$$
S_M(s)=\f{\G_M(s)}{\G_M(1-s)}=(S_2(s)S_2(s+1))^{2-2g}.
$$
Here
$$
S_2(s)=\f{\G_2(2-s)}{\G_2(s)}
$$
is the normalized double sine function of \cite{KK}.
For example $f(x)=x^{-1}-1$ $(C=-1$, $D=-1$) gives the functional equation for
$$
Z_{M(f)}(s)=\f{Z_M(s+1)}{Z_M(s)}=\z_M(s)
$$
as
$$
\z_M(-s)=\z_M(s)^{-1}(2\sin(\pi s))^{4-4g}
$$
where the result
$$
S_{M(f)}(s)=\f{S_M(s+1)}{S_M(s)}=\l(\f{S_2(s+2)}{S_2(s)}\r)^{2-2g}=(2\sin(\pi s))^{4-4g}
$$
is used. Similarly we obtain the functional equation for
$$
Z_{M(f^2)}(s)=\f{Z_M(s+2)Z_M(s)}{Z_M(s+1)}=\f{\z_M(s+1)}{\z_M(s)}=\z_{M(f)}(s)
$$
as
$$
\z_{M(f)}(-1-s)=\z_{M(f)}(s)
$$
that is
$$
Z_{M(f^2)}(-1-s)=Z_{M(f^2)}(s)
$$
with no gamma factors.
\section{Selberg zeta functions}
We describe the needed functional equations for $Z_M(s)$ and $\z_M(s)$ with simple proofs.
Let $\G_r(s)$ be the normalized gamma function of order $r$ defined by
$$
\G_r(s)=\exp\l(\l.\f\partial{\partial w}\z_r(w,s)\r|_{w=0}\r)
$$
with the Hurwitz zeta function of order $r$
$$
\z_r(w,s)=\sum_{n_1,\cdots,n_r\ge0}(n_1+\cdots+n_r+s)^{-w}.
$$
The normalized sine function $S_r(s)$ of order $r$ is constructed as
$$
S_r(s)=\G_r(s)^{-1}\G_r(r-s)^{(-1)^r}:
$$
see \cite{KK} for detailed properties with proofs.
\begin{theorem}
\begin{enumerate}[\rm (1)]
\item Let
$$
\hat Z_M(s)=Z_M(s)\G_M(s)
$$
with
$$
\G_M(s)=(\G_2(s)\G_2(s+1))^{2g-2}.
$$
Then
$$
\hat Z_M(1-s)=\hat Z_M(s).
$$
\item
$$
\z_M(-s)=\z_M(s)^{-1}(2\sin(\pi s))^{4-4g}.
$$
\end{enumerate}
\end{theorem}
{\it Proof.}
(1) From the functional equation for $Z_M(s)$ due to Selberg \cite{S1, S2}
$$
Z_M(1-s)=Z_M(s)\exp\l((4-4g)\int_0^{s-\f12}\pi t \tan(\pi t)dt\r)
$$
we see that it is sufficient to show the identity
$$
\exp\l((4-4g)\int_0^{s-\f12}\pi t \tan(\pi t)dt\r)=\f{\G_M(s)}{\G_M(1-s)}.
$$
We first show that
\begin{equation}
\exp\l((4-4g)\int_0^{s-\f12}\pi t \tan(\pi t)dt\r)
=(S_2(s)S_2(s+1))^{2-2g}.
\end{equation}
Since both sides are equal to 1 at $s=\f12$ (note that
$S_2(\f32)=\G_2(\f32)\G_2(\f12)^{-1}=S_2(\f12)^{-1}$),
it suffices to show the coincidence of logarithmic derivatives.
The left hand side becomes
$$
(4-4g)\pi\l(s-\f12\r)\tan\l(\pi\l(s-\f12\r)\r)
=(2-2g)\pi(1-2s)\cot(\pi s).
$$
Concerning the right hand side, the differential equation
$$
S_2'(s)=\pi(1-s)\cot(\pi s)S_2(s)
$$
proved in \cite{KK} gives
\begin{align*}
\lefteqn{(2-2g)\l(\f{S_2'(s)}{S_2(s)}+\f{S_2'(s+1)}{S_2(s+1)}\r)}\\
&=(2-2g)\l(\pi(1-s)\cot(\pi s)+\pi(-s)\cot(\pi(s+1))\r)\\
&=(2-2g)\pi(1-2s)\cot(\pi s).
\end{align*}
Thus we obtain (1.1).
Next from (1.1) we get
\begin{align*}
\exp\l((4-4g)\int_0^{s-\f12}\pi t \tan(\pi t)dt\r)
&=(S_2(s)S_2(s+1))^{2-2g}\\
&=\l(\f{\G_2(2-s)}{\G_2(s)}\cdot\f{\G_2(1-s)}{\G_2(s+1)}\r)^{2-2g}\\
&=\f{(\G_2(s)\G_2(s+1))^{2g-2}}{(\G_2(1-s)\G_2(2-s))^{2g-2}}\\
&=\f{\G_M(s)}{\G_M(1-s)}.
\end{align*}
Hence we have the functional equation
$$
Z_M(1-s)=Z_M(s)\f{\G_M(s)}{\G_M(1-s)}
$$
that is
$$
\hat Z_M(1-s)=\hat Z_M(s)
$$
as desired.
(2) Since
$$
\z_M(s)=\f{Z_M(s+1)}{Z_M(s)}
$$
we have
\begin{align*}
\z_M(-s)\z_M(s)
&=\f{Z_M(1-s)}{Z_M(-s)}\cdot \f{Z_M(s+1)}{Z_M(s)}\\
&=\f{Z_M(1-s)}{Z_M(s)}\cdot \f{Z_M(s+1)}{Z_M(-s)}.
\end{align*}
Hence (1) gives
\begin{align*}
\z_M(-s)\z_M(s)
&=\f{\G_M(s)}{\G_M(1-s)}\cdot \f{\G_M(-s)}{\G_M(s+1)}\\
&=(S_2(s)S_2(s+1))^{2-2g}(S_2(s+1)S_2(s+2))^{2g-2}\\
&=\l(\f{S_2(s+2)}{S_2(s)}\r)^{2g-2}.
\end{align*}
Recall the relations proved in \cite{KK}:
\begin{align*}
S_2(s+2)
&=S_2(s+1)S_1(s+1)^{-1}\\
&=S_2(s+1)(-2\sin(\pi s))^{-1}
\end{align*}
and
\begin{align*}
S_2(s)
&=S_2(s+1)S_1(s)\\
&=S_2(s+1)(2\sin(\pi s)).
\end{align*}
Thus we get the functional equation for $\z_M(s)$:
$$
\z_M(-s)\z_M(s)
=(2\sin(\pi s))^{4-4g}
$$
that is
$$
\z_M(-s)=\z_M(s)^{-1}(2\sin(\pi s))^{4-4g}.
$$
\qed
\section{Functional equation for $\z_{M(f)}(s)$}
Let
$$
\z_{M(f)}(s)=\prod_k\z_M(s-k)^{a(k)}
$$
for
$$
f(x)=\sum_{k}a(k)x^k\in\Z[x,x^{-1}].
$$
We prove the following theorem.
\begin{theorem}
For each integer $D$ the following conditions are equivalent:
\begin{enumerate}[\rm(1)]
\item $\z_{M(f)}(D-s)=\z_{M(f)}(s)$.
\item $f(x^{-1})=-x^{-D}f(x)$.
\item $a(D-k)=-a(k)$ for all $k$.
\end{enumerate}
\end{theorem}
{\it Proof.}
We first show the equivalence $(2)\Longleftrightarrow(3)$.
Let
$$
f(x)=\sum_{k}a(k)x^k.
$$
Then
\begin{align*}
x^D f(x^{-1})
&=\sum_k a(k)x^{D-k}\\
&=\sum_k a(D-k)x^k,
\end{align*}
where we needed the exchange $k\longleftrightarrow D-k$. Hence
$$
x^Df(x^{-1})=-f(x)
$$
is equivalent to
$$
a(D-k)=-a(k)\qquad \text{for all }k.
$$
Next we show the equivalence $(1)\Longleftrightarrow(3)$. Since
\begin{align*}
\z_{M(f)}(D-s)
&=\prod_k\z_M((D-s)-k)^{a(k)}\\
&=\prod_k\z_M((D-k)-s)^{a(k)}\\
&=\prod_k\z_M(k-s)^{a(D-k)},
\end{align*}
the functional equation for $\z_M(s)$ gives
\begin{align*}
\z_{M(f)}(D-s)
&=\prod_k (\z_M(s-k)^{-1}(2\sin(\pi s))^{4-4g})^{a(D-k)}\\
&=\l(\prod_k\z_M(s-k)^{-a(D-k)}\r)(2\sin(\pi s))^{(4-4g)f(1)},
\end{align*}
where we used
$$
f(1)=\sum_k a(k)=\sum_k a(D-k).
$$
Hence we have the following expression
\begin{equation}
\f{\z_{M(f)}(D-s)}{\z_{M(f)}(s)}
=\l(\prod_k\z_M(s-k)^{-a(D-k)-a(k)}\r)(2\sin(\pi s))^{(4-4g)f(1)}.
\end{equation}
From this expression the equivalence $(1)\Longleftrightarrow(3)$ is shown as follows.
First the condition (3) (or equivalently (2)) implies $f(1)=0$ and that
$a(D-k)+a(k)=0$ for all $k$. Hence (2.1) gives
$$
\f{\z_{M(f)}(D-s)}{\z_{M(f)}(s)}=1,
$$
which is (1).
Now assume (1). Then from (2.1) we have the identity
\begin{equation}
\prod_k\z_M(s-k)^{a(D-k)+a(k)}=(2\sin(\pi s))^{(4-4g)f(1)}.
\end{equation}
Since $\z_M(s)$ is non-zero holomorphic in $\Re(s)>1$, the left hand side of (2.2) is
non-zero holomorphic for sufficiently large $\Re(s)$. Hence looking at the left hand side at
sufficiently large $s\in\Z$ we see $f(1)=0$. Then (2.2) gives
\begin{equation}
\prod_k\z_M(s-k)^{a(D-k)+a(k)}=1.
\end{equation}
We remark that (2.3) is actually written as
\begin{equation}
\prod_{k\le K}\z_M(s-k)^{a(D-k)+a(k)}=1
\end{equation}
for some $K\in\Z$, since $f(x)\in\Z[x,x^{-1}]$. Hence we have the identity
\begin{equation}
\z_M(s-K)^{a(D-K)+a(K)}=\prod_{k< K}\z_M(s-k)^{-a(D-k)-a(k)}.
\end{equation}
Look at (2.5) at $s=K+1$, then the right hand side is
$$
\prod_{k< K}\z_M(1+(K-k))^{-a(D-k)-a(k)},
$$
which is a finite non-zero value. Hence looking at the left hand side of (2.5) we see that
$$
a(D-K)+a(K)=0
$$
since $\z_M(s-K)$ has a simple pole at $s=K+1$. Thus (2.5) becomes
\begin{equation}
\prod_{k\le K-1}\z_M(s-k)^{a(D-k)+a(k)}=1
\end{equation}
Inductively we see (3).
\qed
Theorem 2 treated ``odd'' $f$.
The next theorem deals with the other case for ``even'' $f$.
\begin{theorem}
For each integer $D$ the following conditions are equivalent:
\begin{enumerate}[\rm(1)]
\item $\z_{M(f)}(D-s)=\z_{M(f)}(s)^{-1}(2\sin(\pi s))^{(4-4g)f(1)}$.
\item $f(x^{-1})=x^{-D}f(x)$.
\item $a(D-k)=a(k)$ for all $k$.
\end{enumerate}
\end{theorem}
{\it Proof.}
The equivalence $(2)\Longleftrightarrow(3)$ is shown exactly as in the proof of Theorem 2.
Now we show $(1)\Longleftrightarrow(3)$. Notice that
\begin{align*}
\z_{M(f)}(D-s)
&=\prod_k\z_M((D-s)-k)^{a(k)}\\
&=\prod_k\z_M((D-k)-s)^{a(k)}\\
&=\prod_k\z_M(k-s)^{a(D-k)}\\
&=\prod_k\l(\z_{M}(s-k)^{-1}(2\sin(\pi s))^{4-4g}\r)^{a(D-k)}\\
&=\l(\prod_k \z_{M}(s-k)^{-a(D-k)}\r)(2\sin(\pi s))^{(4-4g)f(1)},
\end{align*}
where we used that
$$
\sum_k a(D-k)=f(1).
$$
\underline{\it Proof of $(3)\Longrightarrow(1)$.}
From (3) we have
\begin{align*}
\z_{M(f)}(D-s)
&=\l(\prod_k \z_M(s-k)^{-a(k)}\r)(2\sin(\pi s))^{(4-4g)f(1)}\\
&=\z_{M(f)}(s)^{-1}(2\sin(\pi s))^{(4-4g)f(1)},
\end{align*}
which is (1).
\underline{\it Proof of $(1)\Longrightarrow(3)$.}
Since
$$
\z_{M(f)}(D-s)=\l(\prod_k \z_M(s-k)^{-a(D-k)}\r)(2\sin(\pi s))^{(4-4g)f(1)}
$$
as above, we have
$$
\f{\z_{M(f)}(D-s)}{\z_{M(f)}(s)^{-1}(2\sin(\pi s))^{(4-4g)f(1)}}
=\prod_k \z_M(s-k)^{a(k)-a(D-k)}.
$$
Hence from the assumption (1) we get
$$
\prod_k \z_M(s-k)^{a(k)-a(D-k)}=1,
$$
which can be written as
$$
\prod_{k\le K} \z_M(s-k)^{a(k)-a(D-k)}=1
$$
that is
$$
\z_M(s-K)^{a(K)-a(D-K)}=\prod_{k< K} \z_M(s-k)^{a(D-k)-a(k)}.
$$
Then we obtain $a(D-K)=a(K)$ and inductively $a(D-k)=a(k)$ for all $k$ exactly as in the proof of Theorem 2.
\qed
{\bf Example.}
Let $f(x)=(x-1)^r$ for an even integer $r\ge0$. Then we see that
$$
f(x^{-1})=x^{-r}f(x).
$$
Hence we obtain the functional equation
$$
\z_{M(f)}(r-s)=\z_{M(f)}(s)^{-1}\times
\begin{cases}
(2\sin(\pi s))^{4-4g}&(r=0),\\
1&(r\ge2,\text{ even}).
\end{cases}
$$
Of course the $r=0$ case gives the functional equation of $\z_M(s)$.
{\bf Remark.}
Let $f(x)=(x-1)^r$ for an integer $r\ge0$. Then $\z_M(s)$ is written explicitly as
$$
\z_{M(f)}(s)=\prod_{k=0}^r \z_M(s-k)^{(-1)^{r-k}\binom rk}.
$$
In this case another suggestive notation would be
$$
\z_{M(f)}(s)=\z_{M\otimes\mathbb G_m^r}(s)
$$
since $(x-1)^r$ is the counting function of $\mathbb G_m^r$;
see \cite{CC, KO, DKK}.
\begin{bibdiv} \begin{biblist}
\bib{CC}{article}{
author={A. Connes},
author={C. Consani},
title={Schemes over $\F_1$ and zeta functions},
journal={Compositio Math.},
volume={146},
date={2010},
pages={1383-1415},
}
\bib{DKK}{article}{
author={A. Deitmar},
author={S. Koyama},
author={N. Kurokawa},
title={Counting and zeta functions over $\F_1$},
journal={Abh. Math. Seminar Univ. Hamburg},
volume={85},
date={2015},
pages={59-71},
}
\bib{KK}{article}{
author={S. Koyama},
author={N. Kurokawa},
title={Multiple sine functions},
journal={Forum Math.},
volume={15},
date={2003},
pages={839--876},
}
\bib{KO}{article}{
author={Kurokawa, N.},
author={Ochiai, H.},
title={Dualities for absolute zeta functions and multiple gamma functions},
journal={Proc. Japan Acad.},
volume={89A},
date={2013},
pages={75-79},
}
\bib{KT}{article}{
author={Kurokawa, N.},
author={H. Tanaka},
title={Absolute zeta functions and the automorphy},
journal={Kodai Math. J.},
volume={40},
date={2017},
pages={584-614},
}
\bib{S1}{article}{
author={Selberg, A.},
title={Harmonic analysis and discontinuous groups in weakly symmetric Riemannian spaces with applications to Dirichlet series},
journal={J. Indian Math. Soc.},
volume={20},
date={1956},
pages={47-87},
}
\bib{S2}{inproceedings}{
author={Selberg, A.},
title={G\"ottingen lectures},
publisher={Springer Verlag},
booktitle={Collected Works, Vol. I},
date={1989},
pages={626-674},
}
\end{biblist} \end{bibdiv}
\end{document}
|
\begin{document}
\title{\LARGE\bf{Normalized ground states solutions for nonautonomous Choquard equations}$\thanks{{\small This work was partially supported by NSFC(11901532,11901531).}}$ }
\date{}
\author{ Huxiao Luo, Lushun Wang$\thanks{{\small Corresponding author. E-mail: [email protected] (H. Luo), [email protected] (L. Wang).}}$\\
\small Department of Mathematics, Zhejiang Normal University, Jinhua, Zhejiang, 321004, P. R. China
}
\maketitle
\begin{center}
\begin{minipage}{13cm}
\par
\small {\bf Abstract:} In this paper, we study normalized ground state solutions for the following nonautonomous Choquard equation:
\[
\left\{
\begin{array}{ll}
\aligned
&-\Delta u-\lambda u=\left(\frac{1}{|x|^{\mu}}\ast A|u|^{p}\right)A|u|^{p-2}u,\\
&\int_{\mathbb{R}^{N}}|u|^{2}dx=c,\quad u\in H^1(\mathbb{R}^N,\mathbb{R}),
\endaligned
\end{array}
\right.
\]
where $c>0$, $0<\mu<N$, $\lambda\in\mathbb{R}$, $A\in C^1(\mathbb{R}^N,\mathbb{R})$. For $p\in(2_{*,\mu}, \bar{p})$, we prove that the Choquard equation possesses ground state normalized solutions, and the set of ground states is orbitally stable.
For $p\in (\bar{p},2^*_\mu)$, we find a normalized solution, which is not a global minimizer. $2^*_\mu$ and $2_{*,\mu}$ are the upper and lower critical exponents due to the Hardy-Littlewood-Sobolev inequality, respectively. $\bar{p}$ is the $L^2$-critical exponent.
Our results generalize and extend some related results.
\vskip2mm
\par
{\bf Keywords:} Nonautonomous Choquard equation; Variational methods; Normalized solution; Orbitally stable.
\vskip2mm
\par
{\bf MSC(2010): }35J50; 58E30
\end{minipage}
\end{center}
{\section{Introduction}}
\setcounter{equation}{0}
Consider the time dependent nonautonomous Choquard equation
\begin{equation}\label{t1.1.0}
\left\{
\begin{array}{ll}
\aligned
&i\partial_t\psi=-\Delta \psi-\left(\frac{1}{|x|^\mu}\ast A|\psi|^{p}\right)A|\psi|^{p-2}\psi,\quad t\in\mathbb{R},~ x\in\mathbb{R}^N,\\
&\psi(0,x)=\psi_0(x)\in H^1(\mathbb{R}^N,\mathbb{C}),
\endaligned
\end{array}
\right.
\end{equation}
where $N\in\mathbb{N}$ denotes space dimension, $0<\mu<N$, $A\in L^\infty(\mathbb{R}^N,\mathbb{R})$, $p\in(2_{*,\mu},2^*_\mu)$, where
\begin{equation*}
\left\{
\begin{array}{ll}
\aligned
&2_{*,\mu}:=\frac{2N-\mu}{N}, \\
& 2^*_\mu:=\frac{2N-\mu}{(N-2)_+}=
\left\{
\begin{array}{ll}
\aligned
&\frac{2N-\mu}{N-2}~\text{if}~N\geq 3,\\
&+\infty~\text{if}~N=1,2.
\endaligned
\end{array}
\right.
\endaligned
\end{array}
\right.
\end{equation*}
Equation $(\ref{t1.1.0})$ has several physical origins. In particular, when $N = 3$, $p = 2$, $\mu = 1$ and $A(x)\equiv 1$, (\ref{t1.1.0}) appeared at least as early as in 1954, in a work by S. I. Pekar \cite{MR2561169,pekar} describing the quantum mechanics of a polaron at rest. In 1976, P. Choquard \cite{MR471785} used $(\ref{t1.1.0})$ to describe an electron trapped in its own hole, in a certain approximation to Hartree-Fock theory of one component plasma.
Twenty years later, R. Penrose proposed $(\ref{t1.1.0})$ as a model of self-gravitating matter, in a programme in
which quantum state reduction is understood as a gravitational phenomenon, see \cite{MR1649671}.
For our setting, $A(x)$ is a real-valued bounded function and not necessarily a constant function. However, according to \cite{cazenave,feng}, by testing equation (\ref{t1.1.0}) against $\bar{\psi}$ (the complex conjugate of $\psi$) and $\partial_t\bar{\psi}$, it is easy to obtain the conservation property of mass $\int_{\mathbb{R}^N}|\psi|^2dx$ and of energy
$$\frac{1}{2}\int_{\mathbb{R}^N}|\nabla \psi|^2dx-\frac{1}{2p}\int_{\mathbb{R}^N}(|x|^{-\mu}\ast A|\psi|^p)A|\psi|^pdx.$$
And similar to \cite[Theorem 1.1]{feng}, for $0<\mu<\min\{N,4\}$ and $2\leq p<2^*_\mu$, we have from Hardy-Littlewood-Sobolev inequality and H\"{o}lder inequality that
$$\|(|x|^{-\mu}\ast A|u|^p)A|u|^{p-2}u -(|x|^{-\mu}\ast A|v|^p)A|v|^{p-2}v\|_{\frac{2Np}{2Np-2N+\mu}}
\leq C(\| u\|^{2p-2}_{\frac{2Np}{2N-\mu}} + \|v\|^{2p-2}_{\frac{2Np}{2N-\mu}} )\|u-v\|_{\frac{2Np}{2N-\mu}},$$
where $\|\cdot\|_{q}$ denotes the usual norm of the Lebesgue space $L^{q}(\mathbb{R}^N,\mathbb{R})$.
Then by the Strichartz estimate and a fixed point argument \cite[Theorem 3.3.9]{cazenave}, (\ref{t1.1.0}) is locally well-posed in $H^1(\mathbb{R}^N,\mathbb{C})$. Moreover, if $2\leq p<\bar{p}$, it is standard to get global existence by the conservation of mass and energy and the Gagliardo-Nirenberg inequality of convolutional type, see \cite[Theorem 1.2]{feng}.
Here, $\bar{p}:=\frac{2N-\mu+2}{N}$ denotes the $L^2$-critical (mass-critical) exponent.
In general, equation (\ref{t1.1.0}) admits special regular solutions, which are called solitary (standing)
waves. More precisely, these solutions have the form $\psi(t, x) = e^{-i\lambda t}u(x)$, where $-\lambda\in\mathbb{R}$
is the frequency and $u(x)$ solves the following elliptic equation
\begin{equation}\label{1.1.0}
\left\{
\begin{array}{ll}
\aligned
&-\Delta u-\lambda u=\left(\frac{1}{|x|^{\mu}}\ast A|u|^{p}\right)A|u|^{p-2}u \quad \text{in}~\mathbb{R}^N, \\
&\int_{\mathbb{R}^{N}}|u|^{2}dx=\int_{\mathbb{R}^N}|\psi_0|^2dx:=c,\\
&u\in H^1(\mathbb{R}^N,\mathbb{R}).
\endaligned
\end{array}
\right. \tag{P}
\end{equation}
Here the constraint $\int_{\mathbb{R}^{N}}|u|^{2}dx=c$ is natural due to the conservation of mass.
To study (\ref{1.1.0}) variationally, we need to recall the following Hardy--Littlewood--Sobolev inequality \cite[Theorem 4.3]{MR1817225}.
\begin{lemma}
(Hardy--Littlewood--Sobolev inequality.) Let $s,r>1$ and $0<\mu<N$ with $1/s+\mu/N+1/r=2$, $f\in L^{s}(\mathbb{R}^N,\mathbb{R})$ and $h\in L^{r}(\mathbb{R}^N,\mathbb{R})$. There exists a sharp constant $C(N,\mu,s,r)$, independent of $f,h$, such that
\begin{equation}\label{HLS1}
\int_{\mathbb{R}^{N}}\int_{\mathbb{R}^{N}}\frac{f(x)h(y)}{|x-y|^{\mu}}dxdy\leq C(N,\mu,s,r) \|f\|_{s}\|h\|_{r}.
\end{equation}
\end{lemma}
If $A(x)$ is bounded in $\mathbb{R}^N$, then by (\ref{HLS1}) and Sobolev inequality, the integral
$$
\int_{\mathbb{R}^{N}}\int_{\mathbb{R}^{N}}\frac{ A(x) |u(x)|^pA(y) |u(y)|^p}{|x-y|^\mu} \mathrm{d} x\mathrm{d} y
$$
is well defined in $H^1(\mathbb{R}^N,\mathbb{R})$ for
$$
2_{*,\mu}=\frac{2N-\mu}{N}\leq p\leq2^*_\mu=\frac{2N-\mu}{(N-2)_+}.
$$
As a result, the functional $I: H^1(\mathbb{R}^N,\mathbb{R})\mapsto\mathbb{R}$,
\begin{equation}\label{I}
I(u):=\frac{1}{2} \int_{\mathbb{R}^{N}}|\nabla u|^{2} \mathrm{d} x-\frac{1}{2p}\int_{\mathbb{R}^{N}}\int_{\mathbb{R}^{N}}\frac{ A(x) |u(x)|^pA(y) |u(y)|^p}{|x-y|^\mu} \mathrm{d} x\mathrm{d} y
\end{equation}
is well defined. Furthermore, by a standard argument, we have $I \in C^1(H^1(\mathbb{R}^N,\mathbb{R}), \mathbb{R})$.
Due to the constraint $\int_{\mathbb{R}^{N}}|u|^{2}dx=c$, the solution for \eqref{1.1.0} is called normalized solution, which can be found by looking for critical points of the functional $I$ on the constraint
$$
\mathcal{S}(c)=\left\{u \in H^{1}\left(\mathbb{R}^{N},\mathbb{R}\right) :\int_{\mathbb{R}^N}|u|^2dx=c\right\}.
$$
In this situation, the frequency $-\lambda\in\mathbb{R}$ can no longer be fixed but instead appears
as a Lagrange multiplier, and each critical point $u_c \in \mathcal{S}(c)$ of $I|_{\mathcal{S}(c)}$ corresponds to a
Lagrange multiplier $\lambda_c \in \mathbb{R}$ such that $(u_c, \lambda_c)$ solves (weakly) \eqref{1.1.0}.
Due to physical application, we are particularly interested in normalized ground state solutions, defined as follows:
\begin{definition}
For any fixed $c > 0$, we say that $u_c\in \mathcal{S}(c)$ is a normalized
ground state solution to (\ref{1.1.0}) if $I'|_{\mathcal{S}(c)}(u_c) = 0$ and
$$I(u_c) = \inf\{I(u): u \in \mathcal{S}(c), I'|_{\mathcal{S}(c)}(u) = 0\}.$$
\end{definition}
For any $c>0$, we set $$\sigma(c):=\inf\limits_{u\in \mathcal{S}(c)} I(u).$$
If the minimizers of $\sigma(c)$ exist, then all minimizers are critical points
of $I|_{\mathcal{S}(c)}$ as well as normalized ground state solutions to \eqref{1.1.0}.
\begin{remark}
If $\sigma(c)$ admits a global minimizer, then this definition of ground
states naturally extends the notion of ground states from linear quantum mechanics.
\end{remark}
There is a lot of literature studying ground states to the autonomous Choquard equations. For example, the existence
of ground states to the autonomous Choquard equation
\begin{equation}\label{e1.1.2}
-\Delta u-\lambda u=\left(\frac{1}{|x|^{\mu}}\ast |u|^{p}\right)|u|^{p-2}u \quad \text{in}~\mathbb{R}^N
\end{equation}
is established by Moroz and Van Schaftingen \cite{MR3056699} under $\lambda=-1$ and $2_{*,\mu}< p<2^*_\mu$. In \cite{MR3642765}, Ye obtained sharp existence results of the normalized solution to (\ref{e1.1.2}). Precisely,
\begin{itemize}
\item[(i)] If $p \in \left(2_{*,\mu}, \bar{p}\right)$, $\sigma(c)$ has at least one
minimizer for each $c > 0$ and $\sigma(c) > -\infty$;
\item[(ii)] If $p\in\left(\bar{p}, 2^*_\mu\right)$, $\sigma(c)$ has no minimizer for each $c > 0$ and $\sigma(c)=-\infty$;
\item[(iii)] The $L^2-$critical case $p = \bar{p}$ is complicated, see \cite{MR3642765} for details.
\end{itemize}
As far as we know, the normalized solutions of the nonautonomous Choquard equation \eqref{1.1.0} have not been studied.
In this paper, we are interested in normalized solutions for the nonautonomous Choquard equation (\ref{1.1.0}) under two cases: (i) $L^2-$subcritical case, i.e., $p\in(2_{*,\mu},\bar{p})$; (ii) $L^2-$supercritical case, i.e., $p\in(\bar{p}, 2^*_\mu)$.
For the $L^2-$subcritical case, we generalize the result in \cite{MR3642765} to the nonautonomous setting.
\begin{theorem}\label{th1.1} Let $N\geq1$, $0<\mu<N$ and $2_{*,\mu}<p<\bar{p}$. Suppose that
\begin{itemize}
\item[($A_{1}$)] $A\in C^1(\mathbb{R}^N, \mathbb{R})$, $\lim\limits_{|x|\to+\infty}A(x):=A_\infty\in(0,+\infty)$, and
$A(x)\geq A_\infty$ for all $x\in\mathbb{R}^N$;
\item[($A_{2}$)] there exists a constant $\varrho> 0$ such that $t^{\frac{N-\mu+2\varrho(p-1)}{2}}A(t x)$ is nondecreasing on $t \in (0, +\infty)$ for every $x\in \mathbb{R}^N$.
\end{itemize}
Then $I$ admits a critical point $\bar{u}_c$ on $\mathcal{S}(c)$ which is a negative global minimum of $I$. Moreover, for the above critical point $\bar{u}_c$, there exists Lagrange multiplier $\lambda_c $ such that $(\bar{u}_c, \lambda_c)$ is
a solution of $(\ref{1.1.0})$.
\end{theorem}
\begin{remark}
For autonomous situation $A\equiv1$, Ye \cite{MR3642765} proved that the Lagrange multiplier $\lambda_c<0$. However, in our nonautonomous setting, we cannot be sure that Lagrange multiplier $\lambda_c $ is negative due to the complexity in the Poho\v{z}aev identity of nonautonomous equation.
\end{remark}
Compared with \cite{MR3642765}, the proof of Theorem \ref{th1.1} is more complex due to more general
nonlinearity in (\ref{1.1.0}). The main difficulty is to prove the compactness of a minimizing
sequence of $\sigma(c)= \inf\limits_{u\in \mathcal{S}(c)}I$. To do that, inspired by \cite{MR2826402,MR4081327,MR778970,MR778974}, we shall establish the following subadditivity inequality:
\begin{equation}\label{c1.11}
\sigma(c)<\sigma(\alpha)+\sigma(c-\alpha), \quad \forall 0<\alpha<c
\end{equation}
with the help of the scaling
\begin{equation}\label{A1}
s\mapsto u_{s}:=s^{\varrho}u(x/s).
\end{equation}
Let $Z_c$ denote the set of the normalized ground state solutions for (\ref{t1.1.0}). We are also interested in the stability and instability of normalized ground state solutions, defined as follows:
\begin{definition} $Z_{c}$ is orbitally stable if for every $\varepsilon > 0$ there exists $\delta > 0$ such that, for
any $\psi_0 \in H^1(\mathbb{R}^N, \mathbb{C})$ with $\inf\limits_{u\in Z_{c}} \|\psi_0 - u\|_{H^1(\mathbb{R}^N, \mathbb{C})} < \delta$, we have
$$\inf\limits_{u\in Z_{c}}
\|\psi(t,\cdot)-u\|_{H^1(\mathbb{R}^N, \mathbb{C})}< \varepsilon \quad \forall t > 0,$$
where $\psi(t, \cdot)$ denotes the solution to (\ref{t1.1.0}) with initial datum $\psi_0$.
A standing wave $e^{-i\lambda t} u$ is strongly unstable if for every $\varepsilon > 0$ there exists $\psi_0\in H^1(\mathbb{R}^N, \mathbb{C})$
such that $\|u -\psi_0\|_{H^1(\mathbb{R}^N, \mathbb{C})} < \varepsilon$, and $\psi(t, \cdot)$ blows-up in finite time.
\end{definition}
Following the same argument as in \cite{MR677997}, we can deduce that $Z_c$ is orbitally stable provided that any minimizing sequence to $\sigma(c)$ is compact in $H^1(\mathbb{R}^N,\mathbb{R})$.
Note that due to the presence of the coefficients $A(x)$ in (\ref{1.1.0}), our minimization problems are
not invariant under the action of translations. To overcome this difficulty, we adopt the method used to study the nonautonomous Schr\"{o}dinger equation in \cite{bellazzini}. The main point is the analysis of the compactness of minimizing sequences
to suitable constrained minimization problem related to (\ref{1.1.0}).
More precisely,
\begin{theorem}\label{th1.1.5}
Let $N\geq1$, $0<\mu<2$, $2\leq p<\bar{p}$. Suppose that
\begin{itemize}
\item[($A'_{1}$)] $A\in L^\infty(\mathbb{R}^N, \mathbb{R})$, $A(x)\geq0$ for almost every $x\in\mathbb{R}^N$, and there is $A_0>0$ such that
$meas\{ A(x)>A_0\}\in(0,+\infty)$.
\end{itemize}
Then there exists $c_0> 0$ such that all the minimizing sequences for $I|_{\mathcal{S}(c)}$ are compact
in $H^1(\mathbb{R}^N,\mathbb{R})$ provided that $c> c_0$. In particular, $Z_c$ is a nonempty compact set
and it is orbitally stable.
\end{theorem}
\begin{remark}
The condition $0<\mu<2$ in Theorem \ref{th1.1.5} is to ensure $2<\bar{p}=\frac{2N-\mu+2}{N}$.
If $p<2$, the nonlocal term $\left(|x|^{-\mu}\ast A|\psi|^p\right)A|\psi|^{p-2}\psi$ in dispersion equation (\ref{t1.1.0}) is singular, the existence of local-well posedness of (\ref{t1.1.0}) is invalid, similar to the autonomous equation ($A\equiv 1$) in \cite{feng}.
\end{remark}
In the second part of this article, we consider the $L^2-$supercritical case. Since $\sigma(c) = -\infty$ for $p\in(\bar{p}, 2^*_\mu)$, it is impossible to search for a minimum of $I$ on $\mathcal{S}(c)$. So it is natural to look for a critical point of $I$ having a minimax characterization. For example, for the following Schr\"{o}dinger equation
\begin{equation}\nonumber
-\Delta u-\lambda u=f(u),~~
u\in H^{1}(\mathbb{R}^N,\mathbb{R}),
\end{equation}
Jeanjean \cite{MR1430506} constructed mountain-pass geometrical structure on
$\mathcal{S}(c)\times \mathbb{R}$ to an auxiliary functional
\begin{equation}\label{c1.8}
\tilde{I}(u, t)=\frac{e^{2 t}}{2} \int_{\mathbb{R}^{N}}|\nabla u|^{2} \mathrm{d} x-\frac{1}{e^{t N}} \int_{\mathbb{R}^{N}} F\left(e^{\frac{t N}{2}} u\right) \mathrm{d} x,
\end{equation}
where $F(u)=\int_0^uf(t)dt$.
Then applying the Ekeland principle to the auxiliary functional, the author obtained a
sequence $\{(v_n,s_n)\} \subset \mathcal{S}(c)\times\mathbb{R}$ which can be used to construct a bounded Palais-Smale sequence $\{u_n\}\subset \mathcal{S}(c)$ for $I$ at the M-P level.
By using Jeanjean's method \cite{MR1430506}, Li and Ye \cite{MR3390522} obtained the normalized solutions
to the Choquard equation:
\begin{equation}
-\Delta u-\lambda u=\left(|x|^{-\mu}\ast F(u)\right)f(u),
\end{equation}
where $\lambda\in\mathbb{R}$, $N \geq3$, $\mu\in(0,N)$, and $F(u)$ behaves like $|u|^p$ for $\frac{2N-\mu+2}{N}<p<\frac{2N-\mu}{N-2}$.
However, for the nonautonomous equation (\ref{1.1.0}), the method of constructing a Poho\v{z}aev-Palais-Smale sequence in \cite{MR1430506} fails. To overcome this difficulty, we adopt the method in \cite{MR4081327}. More precisely, we assume
that
\begin{itemize}
\item[($A_{3}$)] $t \mapsto (Np-2N+\mu)A(t x) - 2\nabla A(t x) \cdot (t x)$
is nonincreasing on $(0,\infty)$ for every $x\in\mathbb{R}^N$;
\item[($A_{4}$)] $t^{\frac{2p-(Np-2N+\mu)}{2}}A(t x)$ is strictly increasing on $t \in (0, \infty)$ for every $x \in \mathbb{R}^N$.
\end{itemize}
Besides $A\equiv$constant, there are indeed many functions which satisfy
$(A_1), (A_3)$ and $(A_4)$. For example
\begin{itemize}
\item[($i$)] $A_1(x) = 1 + be^{-\tau|x|}$ with $0 < b \leq e\cdot\frac{2p- (Np- 2N+\mu)}{2}$ and $\tau> 0$;
\item[($ii$)] $A_2(x) = 1 + \frac{b}{1+|x|}$ with $0 < b \leq 2[2p- (Np- 2N+\mu)]$.
\end{itemize}
Under $(A_1), (A_3)$ and $(A_4)$, we shall establish the existence of normalized ground
state solutions to the nonautonomous Choquard equation (\ref{1.1.0}) by taking a minimum
on the manifold
\begin{equation}\label{c1.9}
\mathcal{M}(c)=\left\{u \in \mathcal{S}(c) : J(u) :=\left.\frac{\mathrm{d}}{\mathrm{d} t} I\left( u^{t}\right)\right|_{t=1}=0\right\},
\end{equation}
where $u^{t}(x) :=t^{N / 2}u(t x)$ for all $t > 0$ and $x\in\mathbb{R}^N$, and $u^t \in \mathcal{S}(c)$ if $u \in \mathcal{S}(c)$.
\begin{theorem}\label{th1.2} Suppose that $N\geq1$, $0<\mu<N$, $\bar{p}<p<2^*_\mu$, $(A_1), (A_3)$ and $(A_4)$ hold. Then for any $c > 0$, (\ref{1.1.0})
has a couple of solutions $\left(\overline{u}_{c}, \lambda_{c}\right) \in \mathcal{S}(c) \times \mathbb{R}$ such that
$$
I\left(\overline{u}_{c}\right)=\inf _{u \in \mathcal{M}(c)} I(u)=\inf _{u \in \mathcal{S}(c)} \max _{t>0} I\left( u^{t}\right)>0.
$$
\end{theorem}
To address the lack of compactness, we should consider the \emph{limit equation} of (\ref{1.1.0}):
\begin{equation}\label{1.1.2}
-\Delta u-\lambda u=A_\infty^2\left(|x|^{-\mu}\ast|u|^p\right)|u|^{p-2}u,~~
u\in H^{1}(\mathbb{R}^N).
\tag{P0}
\end{equation}
The energy functional is defined as follows:
\begin{equation}\label{c1.13}
I_\infty(u)=\frac{1}{2} \int_{\mathbb{R}^{N}}|\nabla u|^{2} \mathrm{d} x-\frac{A_\infty^2}{2p}\int_{\mathbb{R}^{N}}\int_{\mathbb{R}^{N}}\frac{ |u(x)|^p |u(y)|^p}{|x-y|^\mu} \mathrm{d} x\mathrm{d} y.
\end{equation}
Similar to (\ref{c1.9}), we define
\begin{equation}\label{c1.14}
\mathcal{M}_\infty(c)=\left\{u \in \mathcal{S}(c) : J_\infty(u) :=\left.\frac{\mathrm{d}}{\mathrm{d} t} I_\infty\left( u^{t}\right)\right|_{t=1}=0\right\}.
\end{equation}
\begin{remark}
Compared to \cite{MR4081327}, the main difficulty in our nonlocal setting:
When proving that $\inf _{u \in \mathcal{M}(c)} I(u)$ can be achieved, it needs to be compared (\ref{1.1.0}) with the limit equation. The difference between $I$ and $I_\infty$ is more complicated than that of the Schr\"{o}dinger equation.
\end{remark}
Finally, we give our future research directions about this article: \\
For $A\equiv1$ and $\bar{p}<p<2^*_\mu$, by using the blow up for a class of initial data with nonnegative energy, Chen and Guo \cite{chen} proved that the standing wave of (\ref{t1.1.0}) must be strongly unstable. For nonautonomous situation ($A\not\equiv$constant), the method in \cite{chen} is invalid. We will study the problem in the future.
This paper is organized as follows. In section 2, we prove Theorem \ref{th1.1} and Theorem \ref{th1.1.5}. In section 3, we show Theorem \ref{th1.2}.
\vskip2mm
\par\noindent
In this paper, we make use of the following notation: \\
$\diamondsuit$ $C, C_i , i = 1, 2, \cdot\cdot\cdot,$ will be repeatedly used to denote various positive constants whose exact values are irrelevant. \\
$\diamondsuit$ \begin{equation*}
2^*=
\left\{
\begin{array}{ll}
\aligned
&\frac{2N}{N-2}~&\text{if}~N\geq 3\\
&+\infty~&\text{if}~N=1,2
\endaligned
\end{array}
\right.
\end{equation*} denotes the Sobolev critical exponent. \\
$\diamondsuit$ $o(1)$ denotes the infinitesimal as $n\to+\infty$.\\
$\diamondsuit$ For the sake of simplicity, integrals over the whole $\mathbb{R}^N$ will be often written $\int$.
\vskip4mm
{\section{ $L^2-$subcritical case }}
\setcounter{equation}{0}
First, we prove a nonlocal version of the Brezis-Lieb lemma, which will be used below in the proofs of both the $L^2-$subcritical and the $L^2-$supercritical cases. We need the following classical Brezis-Lieb lemma \cite{bogachev}.
\begin{lemma} (\cite{bogachev}) Let $N\in\mathbb{N}$ and $q\in [2, 2^*]$.
If $u_n\rightharpoonup u$ in $H^1(\mathbb{R}^N,\mathbb{R})$, then
\begin{equation}\label{BL}
\aligned
\int|u_{n}-u|^qdx-\int|u_{n}|^qdx=\int|u|^qdx+o(1).
\endaligned
\end{equation}
\end{lemma}
\begin{lemma}\label{lm2.11} Let $N\in\mathbb{N}$, $\mu\in(0, N)$, $p\in [2_{*,\mu}, 2^*_\mu]$, and $A,B\in L^\infty(\mathbb{R}^N,\mathbb{R})$.
If $u_n\rightharpoonup u$ in $H^1(\mathbb{R}^N,\mathbb{R})$, then
\begin{equation}\label{c2.18}
\aligned
&\int(|x|^{-\mu}\ast A|u_n-u|^p)B|u_{n}-u|^pdx-\int(|x|^{-\mu}\ast A|u_n|^p)B|u_{n}|^pdx\\
=&\int(|x|^{-\mu}\ast A|u|^p)B|u|^pdx+o(1).
\endaligned
\end{equation}
\end{lemma}
\begin{proof} For every $n\in\mathbb{N}$, one has
\begin{equation*}
\aligned
&\int(|x|^{-\mu}\ast A|u_n|^p)B|u_{n}|^pdx-\int_{\mathbb{R}^N}(|x|^{-\mu}\ast A|u_n-u|^p)B|u_{n}-u|^pdx\\
=&\int(|x|^{-\mu}\ast A(|u_n|^p-|u_n-u|^p))B(|u_n|^p-|u_n-u|^p)dx \\
&+2\int(|x|^{-\mu}\ast A(|u_n|^p-|u_n-u|^p))B|u_n-u|^pdx\\
&-\int(|x|^{-\mu}\ast A|u_n|^p)B|u_n-u|^pdx\\
&+\int(|x|^{-\mu}\ast A|u_n-u|^p)B|u_n|^pdx\\
:=&I_1+I_2+I_3+I_4.
\endaligned
\end{equation*}
By the classical Brezis-Lieb lemma with $q = \frac{2Np}{2N-\mu}$, we have $|u_n|^p - |u_n - u|^p \to |u|^p$, strongly
in $L^{\frac{2N}{2N-\mu}}(\mathbb{R}^N)$ as $n \to\infty$.
Then, the Hardy-Littlewood-Sobolev inequality implies
that $$|x|^{-\mu}\ast A(|u_n|^p - |u_n - u|^p)\to |x|^{-\mu}\ast A|u|^p~~\text{in}~L^{\frac{2N}{\mu}}(\mathbb{R}^N,\mathbb{R})~\text{as}~n \to\infty.$$
$$I_1\to \int(|x|^{-\mu}\ast A|u|^p)B|u|^pdx~\text{as}~n \to\infty.$$
On the other hand, $I_2$, $I_3$ and $I_4$ all converge to $0$, since $|u_n - u|^p\rightharpoonup0$ weakly in $L^{\frac{2N}{2N-\mu}}(\mathbb{R}^N,\mathbb{R})$, while $|x|^{-\mu}\ast A|u_n|^p$ and $B|u_n|^p$ are bounded in $L^{\frac{2N}{\mu}}(\mathbb{R}^N,\mathbb{R})$.
Thus, (\ref{c2.18}) holds.
\end{proof}
\vskip4mm
{\subsection{The proof of Theorem \ref{th1.1} }}
In this section, we prove Theorem \ref{th1.1} under the conditions $(A_1)$-$(A_2)$ and $p\in(2_{*,\mu}, \bar{p})$. Since $A(x) \equiv A_\infty$ satisfies $(A_1)$-$(A_2)$, all the following conclusions on $I$ are also true for $I_\infty$.
For $u \in \mathcal{S}(c)$, set $u^s(x) = s^{\frac{N}{2}} u(sx)$ $\forall s> 0$. Then
$$\|u^s\|_2^2=\|u\|_2^2=c, \quad \|\nabla u^s\|_2^2=s^2\|\nabla u\|_2^2,$$
\begin{equation}\label{I2}
I(u^s)=\frac{1}{2}s^2\|\nabla u\|_2^2-\frac{1}{2p}s^{Np-2N+\mu}\int\int\frac{ A(s^{-1}x)|u(x)|^p A(s^{-1}y) |u(y)|^p}{|x-y|^\mu}dxdy.
\end{equation}
and
\begin{equation}\label{J}
\aligned
J(u)=&\frac{dI(u^s)}{ds}|_{s=1}\\
=&\|\nabla u\|_2^2-\frac{1}{2p}\int\int\frac{ \left[(Np-2N+\mu)A(x)-2\nabla A(x)\cdot x\right]A(y)|u(x)|^p |u(y)|^p}{|x-y|^\mu}dxdy.
\endaligned
\end{equation}
\begin{lemma}\label{lm3.1} For any $c > 0$, $\sigma(c) =
\inf\limits_{u\in \mathcal{S}(c)}I(u)$ is well defined and $\sigma(c) < 0$.
\end{lemma}
\begin{proof}
By the Gagliardo-Nirenberg inequality
\begin{equation}\label{GN2}
\|u\|_q\leq C(N,q)\|\nabla u\|_2^{\frac{N(q-2)}{2q}}\|u\|_2^{1-\frac{N(q-2)}{2q}}~\forall q\in(2,2^*),
\end{equation}
Hardy--Littlewood--Sobolev inequality and $(A_1)$, for $u\in \mathcal{S}(c)$ we have
\begin{equation}\label{c3.1}
I(u)\geq\frac{1}{2}\|\nabla u\|_2^2-C(N,\mu)\|u\|_{\frac{2Np}{2N-\mu}}^{2p}\geq \frac{1}{2}\|\nabla u\|_2^2-C(N,\mu,p)c^{\frac{2N-\mu-(N-2)p}{2}}\|\nabla u\|^{Np-2N+\mu}_{2}.
\end{equation}
Since $$p<\bar{p}\Rightarrow Np-2N+\mu< 2,$$
thus $I$ is bounded from below on $\mathcal{S}(c)$ for any $c > 0$, and $\sigma(c)$ is well defined. For any $c> 0$, we can choose a function $u_0\in\mathcal{C}^\infty_0(\mathbb{R}^N, [-M, M])$ satisfying $\|u_0\|_2^2 = c$ for
some constant $M> 0$. Then it follows from $(A_1)$ and $(\ref{I2})$ that
\begin{equation}\label{c3.3}
I({u_0}^{t})\leq \frac{t^2}{2}\|\nabla u_0\|_2^2-\frac{A_\infty^2t^{Np-2N+\mu}}{2p}\int\int\frac{ |u_0(x)|^p |u_0(y)|^p}{|x-y|^\mu}dxdy,\quad \forall t\in(0,1].
\end{equation}
Since $0 < Np-2N+\mu < 2$, $(\ref{c3.3})$ implies that $I({u_0}^{t}) < 0$ for small
$t\in(0, 1)$. Jointly with the fact that $\|{u_0}^t\|_2 = \|u_0\|_2$, we obtain
$$\sigma(c) \leq\inf\limits_{t\in(0,1]} I({u_0}^t) < 0.$$
\end{proof}
\begin{lemma}\label{lm3.2} $\sigma(c)$ is continuous on $(0, +\infty)$.
\end{lemma}
\begin{proof}
For any $c > 0$, let $c_n > 0$ and $c_n \to c$. For every $n\in\mathbb{N}$, let $u_n\in\mathcal{S}(c_n)$ such
that $I(u_n) < \sigma(c_n) + \frac{1}{n} < \frac{1}{n}$. Then $(\ref{c3.1})$ implies that $\{u_n\}$ is bounded in $H^1(\mathbb{R}^N,\mathbb{R})$.
Moreover, we have
\begin{equation}\label{c3.4}
\begin{aligned} \sigma(c) & \leq I\left(\sqrt{\frac{c}{c_{n}}} u_{n}\right) \\
&=\frac{c}{2 c_{n}}\left\|\nabla u_{n}\right\|_{2}^{2}-\frac{c^{p}}{2pc_n^{p}}\int\int\frac{ A(x)|u_{n}(x)|^p A(y)|u_{n}(y)|^p}{|x-y|^\mu}dxdy \\
&=I\left(u_{n}\right)+o(1) \leq \sigma\left(c_{n}\right)+o(1). \end{aligned}
\end{equation}
On the other hand, given a minimizing sequence $\{v_n\}\subset\mathcal{S}(c)$ for $I$, we have
$$
\sigma\left(c_{n}\right) \leq I\left(\sqrt{\frac{c_{n}}{c}} v_{n}\right) \leq I\left(v_{n}\right)+o(1)=\sigma(c)+o(1),
$$
which together with $(\ref{c3.4})$, implies that $\lim\limits_{n\to+\infty} \sigma(c_n) = \sigma(c)$.
\end{proof}
From \cite{MR778970,MR778974}, we know that subadditivity inequality implies the compactness of the minimizing sequence for $\sigma(c)$ (up to translations). Although $I$ is not invariant by translations, by using the following subadditivity inequality and comparing with the limit equation we can still verify that $\sigma(c)$ has a minimizer.
\begin{lemma}\label{lm3.3} For each $c > 0$,
\begin{equation}\label{c3.5}
\sigma(c)<\sigma(\alpha)+\sigma(c-\alpha), \quad \forall 0<\alpha<c.
\end{equation}
\end{lemma}
\begin{proof}
Letting $\{u_n\}\subset\mathcal{S}(c)$ be such that $I(u_n)\to\sigma(c)$, it follows from $(\ref{c3.1})$ and
Lemma \ref{lm3.1} that $\sigma(c) < 0$, and $\{u_n\}$ is bounded in $H^1(\mathbb{R}^N,\mathbb{R})$. Now, we claim that there
exists a constant $\rho_0 > 0$ such that
\begin{equation}\label{c3.6}
\liminf _{n \rightarrow \infty}\left\|\nabla u_{n}\right\|_{2}>\rho_{0}.
\end{equation}
Otherwise, if $(\ref{c3.6})$ is not true, then up to a subsequence, $\|\nabla u_n\|_2 \to 0$, and so $(\ref{c3.1})$
yields $$0 > \sigma(c) = \lim\limits_{n\to+\infty} I(u_n) = 0.$$
This contradiction shows that $(\ref{c3.6})$ holds.
Let ${u_n}_t=t^\varrho u_n(x/t)$ $\forall t>0$, the constant $\varrho$ is given in the condition $(A_2)$.
Then by $(A_2)$, we have
\begin{equation}\label{c3.7}
\begin{aligned}
I\left({u_n}_t\right)&=I\left(t^\varrho u_{n}(x/t)\right) \\
&=\frac{t^{2 \varrho+N-2}}{2}\left\|\nabla u_{n}\right\|_{2}^{2}-\frac{t^{2N-\mu+2\varrho p}}{2p} \int\int\frac{A(tx)\left|u_{n}(x)\right|^pA(ty)\left|u_{n}(y)\right|^p}{|x-y|^\mu} dxdy \\
&\leq \frac{t^{2 \varrho+N-2}}{2}\left\|\nabla u_{n}\right\|_{2}^{2}-\frac{t^{2 \varrho+N}}{2p} \int\int\frac{A(x)\left|u_{n}(x)\right|^pA(y)\left|u_{n}(y)\right|^p}{|x-y|^\mu} dxdy \\
&=t^{2 \varrho+N} I\left(u_{n}\right)+\frac{t^{2 \varrho+N}\left(t^{-2}-1\right)}{2}\left\|\nabla u_{n}\right\|_{2}^{2}, \quad \forall t>1.
\end{aligned}
\end{equation}
Since $\|{u_n}_t\|_2^2 = t^{2\varrho+N}\|u_{n}\|_2^2 =t^{2\varrho+N}c$ for all $t > 0$, then it follows from $(\ref{c3.6})$ and $(\ref{c3.7})$
that
\begin{equation*}
\begin{aligned} \sigma\left(t^{2\varrho+N} c\right) & \leq I\left({u_n}_{t}\right) \leq t^{2 \varrho+N} \sigma(c)+\frac{t^{2 \varrho+N}\left(t^{-2}-1\right)}{2} \rho_{0}^{2}+o(1), \quad \forall t>1, \end{aligned}
\end{equation*}
which implies
\begin{equation}\label{c3.8}
\sigma(tc) < t\sigma(c),~~\forall t > 1.
\end{equation}
Moreover, it follows from $(\ref{c3.8})$ that
$$
\sigma(c)=\frac{\alpha}{c} \sigma(c)+\frac{c-\alpha}{c} \sigma(c)<\sigma(\alpha)+\sigma(c-\alpha), \quad \forall 0<\alpha<c.
$$
This completes the proof.
\end{proof}
\begin{lemma}\label{lm3.4} $\sigma(c) \leq\sigma_\infty(c)$ for any $c > 0$.
\end{lemma}
\begin{proof}
Let $c > 0$ be given and let $\{u_n\}\subset\mathcal{S}(c)$ be such that $I_\infty(u_n)\to \sigma_\infty(c)$. Since
$A_\infty\leq A(x)$ for all $x\in\mathbb{R}^N$, it follows from $(\ref{I})$ that
$$\sigma(c) \leq I(u_n) \leq I_\infty(u_n) = \sigma_\infty(c) + o(1),$$
which implies that $\sigma(c)\leq\sigma_\infty(c)$ for any $c > 0$.
\end{proof}
\begin{lemma}\label{lm3.5} For each $c > 0$, $\sigma(c)$ has a minimizer.
\end{lemma}
\begin{proof}
In view of Lemma \ref{lm3.1}, we have $\sigma(c) < 0$. Let $\{u_n\}\subset\mathcal{S}(c)$ be such that
$I(u_n)\to\sigma(c)$. Then $(\ref{c3.1})$ implies that $\{u_n\}$ is bounded in $H^1(\mathbb{R}^N,\mathbb{R})$. We then may
assume that for some $\bar{u}\in H^1(\mathbb{R}^N,\mathbb{R})$ such that up to a subsequence, $u_n\rightharpoonup\bar{u}$ in $H^1(\mathbb{R}^N,\mathbb{R})$.
Case (i): $\bar{u}= 0$. Then $u_n \to 0$ in $L^s_{\text{loc}}(\mathbb{R}^N,\mathbb{R})$ for $1\leq s < 2^*$
and $u_n \to 0$ a.e. in $\mathbb{R}^N$. By $(A_1)$, it is easy to check that
\begin{equation}\label{c3.10}
\int\int\frac{\left(A_\infty^2-A(x)A(y)\right)|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy\to0\quad\text{as}~n\to\infty.
\end{equation}
Then (\ref{I}), (\ref{c1.13}), and (\ref{c3.10}) imply
\begin{equation}\label{c3.11}
I_\infty(u_n)\to\sigma(c)\quad\text{as}~n\to\infty.
\end{equation}
Next, we show that
\begin{equation}\label{c3.12}
\delta:= \limsup\limits_{n\to+\infty}
\sup\limits_{y\in\mathbb{R}^N}\int_{B_1(y)} |u_n|^2dx > 0.
\end{equation}
In fact, if $\delta=0$, by Lions' concentration compactness principle \cite{MR778970,MR778974},
we have $u_n \to 0$ in $L^q(\mathbb{R}^N,\mathbb{R})$ for $2 < q < 2^*$, and so $(A_1)$ and $(A_2)$ imply that
$$\int\int\frac{A(x)A(y)|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy\to0\quad\text{as}~n\to\infty.$$
Then by (\ref{I}), we have
$$0 > \sigma(c) = \lim\limits_{n\to+\infty} I(u_n) = \lim\limits_{n\to+\infty}\frac{1}{2}\|\nabla u_n\|_2^2 \geq 0,$$
which is impossible. Hence, we have $\delta> 0$, and there exists a sequence $\{y_n\}\subset\mathbb{R}^N$
such that
\begin{equation}\label{c3.13}
\int_{B_1(y_n)} |u_n|^2dx >\frac{\delta}{2}.
\end{equation}
Let $\hat{u}_n(x) = u_n(x + y_n)$. Then (\ref{c3.11}) leads to
\begin{equation}\label{c3.14}
\hat{u}_n \in \mathcal{S}(c),\quad I_\infty(\hat{u}_n) \to \sigma(c).
\end{equation}
In view of (\ref{c3.13}), we may assume that there exists $\hat{u}\in H^1(\mathbb{R}^N,\mathbb{R})\setminus\{0\}$ such that, passing
to a subsequence,
\begin{equation}\label{c3.15}
\hat{u}_n\rightharpoonup \hat{u}~\text{in}~H^1(\mathbb{R}^N,\mathbb{R}), \quad\hat{u}_n \to \hat{u}~\text{in}~L^q_{\text{loc}}(\mathbb{R}^N,\mathbb{R})~\forall q\in [1, 2^*), \quad
\hat{u}_n\to \hat{u}~a.e.~\text{in}~\mathbb{R}^N.
\end{equation}
Then it follows from (\ref{c3.14}), (\ref{c3.15}), Lemmas \ref{lm3.2}, \ref{lm3.4} and \ref{lm2.11} that
\begin{equation}\label{c3.16}
\aligned
\sigma_\infty(c)\geq&\sigma(c)=\lim\limits_{n\to+\infty}I_\infty(\hat{u}_n)=I_\infty(\hat{u})+\lim\limits_{n\to+\infty}I_\infty(\hat{u}_n-\hat{u})\\
\geq&\sigma_\infty(\|\hat{u}\|_2^2)+\lim\limits_{n\to+\infty}\sigma_\infty(\|\hat{u}_n-\hat{u}\|_2^2)=\sigma_\infty(\|\hat{u}\|_2^2)+\sigma_\infty(c-\|\hat{u}\|_2^2).
\endaligned
\end{equation}
If $\|\hat{u}\|_2^2 < c$, then (\ref{c3.16}) and Lemma \ref{lm3.3} imply
$$\sigma_\infty(c) \geq\sigma_\infty(\|\hat{u}\|_2^2)+\sigma_\infty(c-\|\hat{u}\|_2^2) >\sigma_\infty(c),$$
which is impossible. This shows $\|\hat{u}\|_2^2 = c$. Then we have $\hat{u}_n \to\hat{u}$ in $L^q(\mathbb{R}^N,\mathbb{R})$
for $2 \leq q < 2^*$. From this, the weak lower semicontinuity of the norm and (\ref{c3.16}), we derive
$$\sigma(c) = \lim\limits_{n\to+\infty} I(\hat{u}_n)\geq I(\hat{u}) \geq\sigma(c),$$
which leads to $\sigma(c) = I(\hat{u})$. Hence, $\hat{u}$ is a minimizer of $\sigma(c)$ for any $c > 0$.
Case (ii): $\bar{u}\neq0$. Then $u_n \to \bar{u}$ in $L^q_{\text{loc}}(\mathbb{R}^N,\mathbb{R})$ for $1\leq q< 2^*$
and $u_n \to \bar{u}$ a.e. in $\mathbb{R}^N$. By Lemmas \ref{lm3.2} and \ref{lm2.11}, we have
\begin{equation}\label{c3.9}
\aligned
\sigma(c)=&\lim\limits_{n\to+\infty}I(u_n)=I(\bar{u})+\lim\limits_{n\to+\infty}I(u_n-\bar{u})\\
\geq&\sigma(\|\bar{u}\|_2^2)+\lim\limits_{n\to+\infty}\sigma(\|u_n-\bar{u}\|_2^2)=\sigma(\|\bar{u}\|_2^2)+\sigma(c-\|\bar{u}\|_2^2).
\endaligned
\end{equation}
If $\|\bar{u}\|_2^2 < c$, then (\ref{c3.9}) and Lemma \ref{lm3.3} imply
$$\sigma(c) \geq\sigma(\|\bar{u}\|_2^2)+\sigma(c-\|\bar{u}\|_2^2) >\sigma(c),$$
which is impossible. This shows $\|\bar{u}\|_2^2 = c$. Then we have $u_n \to\bar{u}$ in $L^q(\mathbb{R}^N,\mathbb{R})$
for $2 \leq q < 2^*$. From this, the weak lower semicontinuity of the norm and (\ref{c3.9}), we have
$$\sigma(c) = \lim\limits_{n\to+\infty} I(u_n)\geq I(\bar{u}) \geq\sigma(c),$$
which leads to $\sigma(c) = I(\bar{u})$. Hence, $\bar{u}$ is a minimizer of $\sigma(c)$ for any $c > 0$.
\end{proof}
{\it Proof of Theorem \ref{th1.1}.} For any $c > 0$, from Lemma \ref{lm3.5}, there exists $\bar{u}_c \in \mathcal{S}(c)$ such
that $I(\bar{u}_c) = \sigma(c)$. In view of the Lagrange multiplier theorem, there exists $\lambda_c \in\mathbb{R}$
such that
$$I'(\bar{u}_c) = \lambda_c\bar{u}_c.$$
Therefore, $(\bar{u}_{c},\lambda_c)$ is a solution of (\ref{1.1.0}).
$\Box$
\vskip4mm
{\subsection{ The proof of Theorem \ref{th1.1.5} }}
In this section, under condition $(A'_1)$ and $p\in(2, \bar{p})$, we prove Theorem \ref{th1.1.5} by using the following abstract variational principle \cite[Proposition 1.2]{bellazzini}.
\begin{proposition}\label{pr1.2}(\cite[Proposition 1.2]{bellazzini})
Let $\mathcal{H}$, $\mathcal{H}_1$ and $\mathcal{H}_2$ be three Hilbert spaces such that
$$\mathcal{H}\subset\mathcal{H}_1,\quad \mathcal{H}\subset\mathcal{H}_2$$
and
$$C_1\left(\|u\|^2_{\mathcal{H}_1}+\|u\|^2_{\mathcal{H}_2}\right)\leq\|u\|^2_{\mathcal{H}}\leq C_2\left(\|u\|^2_{\mathcal{H}_1}+\|u\|^2_{\mathcal{H}_2}\right)~~\forall u\in\mathcal{H}.$$
For given $c>0$, let $W, T: \mathcal{H}\mapsto \mathbb{R}$ such that:\\
(1) $T(0)=0$;\\
(2) $T$ is weakly continuous;\\
(3) $T(\nu u)\leq\nu^2 T(u)$ and $W(\nu u)\leq \nu^2 W(u)$, $\forall\nu\geq1,~u\in \mathcal{H}$;\\
(4) If $u_n\rightharpoonup u$ in $\mathcal{H}$ and $u_n\to u$ in $\mathcal{H}_2$, then $W(u_n)\to W(u)$;\\
(5) If $u_n\rightharpoonup u$ in $\mathcal{H}$, then $W(u_n-u)+W(u)=W(u_n)+o(1)$;\\
(6) $-\infty<\varsigma^{W+T}(c)<\varsigma^{W}(c)$, where
\begin{equation}\label{J1.8}
\aligned
&\varsigma^{W+T}(c):=\inf\limits_{u\in B_{\mathcal{H}_2}(c)\cap\mathcal{H}}\left(\frac{1}{2}\| u\|^2_{\mathcal{H}_1}+W(u)+T(u)\right),
\endaligned
\end{equation}
$$\varsigma^{W}(c):=\inf\limits_{u\in B_{\mathcal{H}_2}(c)\cap\mathcal{H}}\left(\frac{1}{2}\| u\|^2_{\mathcal{H}_1}+W(u)\right),$$
and
$$B_{\mathcal{H}_2}(c):=\{u\in\mathcal{H}_2: \|u\|_{\mathcal{H}_2}^2=c\};$$
(7) For every sequence $\{u_n\}\subset B_{\mathcal{H}_2}(c)\cap\mathcal{H}$ such that $\|u_n\|_{\mathcal{H}}\to\infty$, we have
$$\frac{1}{2}\|u_n\|_{\mathcal{H}_1}^2+W(u_n)+T(u_n)\to\infty~\text{as}~n\to\infty.$$
Then every minimizing sequence for (\ref{J1.8}), i.e.,
$$u_n\in B_{\mathcal{H}_2}(c)\cap\mathcal{H}~~\text{and}~~\frac{1}{2}\|u_n\|^2_{\mathcal{H}_1}+W(u_n)+T(u_n)\to \varsigma^{W+T}(c),$$
is compact in $\mathcal{H}$.
\end{proposition}
\begin{lemma}\label{J3.1} Assume that $A(x)$, $A_0$ and $p$ are as in Theorem \ref{th1.1.5}. Then there exists $c_0>0$ such that:
$$\sigma^{A(x),A(y)}(c)<\sigma^{\min\{A(x),A_0\},A(y)}(c)~~\forall~c>c_0,$$
where
$$\sigma^{A(x),A(y)}(c):=\inf\limits_{u\in \mathcal{S}(c)}\left[\frac{1}{2} \int|\nabla u|^{2} \mathrm{d} x-\frac{1}{2p}\int\int\frac{ A(x) |u(x)|^pA(y) |u(y)|^p}{|x-y|^\mu} \mathrm{d} x\mathrm{d} y\right]=\sigma(c)$$
and
$$\sigma^{\min\{A(x),A_0\},A(y)}(c):=\inf\limits_{u\in \mathcal{S}(c)}\left[\frac{1}{2} \int|\nabla u|^{2} \mathrm{d} x-\frac{1}{2p}\int\int\frac{ \min\{A(x),A_0\}|u(x)|^pA_0|u(y)|^p}{|x-y|^\mu} \mathrm{d} x\mathrm{d} y\right].$$
\end{lemma}
\begin{proof}
Since $A\in L^\infty(\mathbb{R}^N,\mathbb{R})$, it follows from the Lebesgue differentiation theorem that
$$\lim\limits_{\delta\to0}\delta^{-N}\int_{B_\delta(x_0)}|A(x)-A(x_0)|^{\frac{2N}{2N-\mu}}dx=0~~\text{for~almost~every~}x_0\in\mathbb{R}^N.$$
By condition $(A'_1)$ we have $\operatorname{meas}\{x\in\mathbb{R}^N: A(x)>A_0\}>0$, and we deduce that there exists $\tilde{x}\in\{x\in\mathbb{R}^N: A(x)>A_0\}$ such that
$$\lim\limits_{\delta\to0}\delta^{-N}\int_{B_\delta(\tilde{x})}|A(x)-A(\tilde{x})|^{\frac{2N}{2N-\mu}}dx=0.$$
For simplicity we can assume that $\tilde{x}\equiv0$, hence we have
\begin{equation}\label{J3.2}
\lim\limits_{\delta\to0}\delta^{-N}\int_{B_\delta(0)}|A(x)-A(0)|^{\frac{2N}{2N-\mu}}dx=0~\text{with}~A(0)>A_0.
\end{equation}
By Lemma \ref{lm3.5}, there exists a minimizer $u_0\in H^1(\mathbb{R}^N,\mathbb{R})$ for $\sigma^{A_0}(1)$. It is easy to check that
\begin{equation}\label{J3.3}
u_0^c:=u_0(x/c^{a})c^{-b}~\text{is~a~minimizer~for~}\sigma^{A_0}(c),
\end{equation}
where
$$a:=\frac{p-1}{N(p-2)+\mu-2},\quad b:=\frac{N+2-\mu}{2N(p-2)+2\mu-4}.$$
Notice that by $p<\bar{p}=2+\frac{2-\mu}{N}$, we have
$$a<0~~\text{and}~~b<0.$$
We claim that there is $c_0>0$ such that
\begin{equation}\label{J3.5}
\sigma^{A(x),A(y)}(c)<\sigma^{A_0, A(y)}(c)~\forall~c>c_0.
\end{equation}
On the other hand, $0\leq\min\{A(x),A_0\}\leq A_0$ implies that
\begin{equation}\label{J3.6}
\sigma^{A_0, A(y)}(c)\leq\sigma^{\min\{A(x),A_0\}, A(y)}(c).
\end{equation}
By combining (\ref{J3.5}) and (\ref{J3.6}) we get the desired result.
Next we prove (\ref{J3.5}). Due to (\ref{J3.3}) it is sufficient to prove the following inequality:
\begin{equation*}
\aligned
&\frac{1}{2}\|\nabla u_0^c\|_2^2-\frac{1}{2p}\int\int\frac{A(x)|u_0^c(x)|^pA(y)|u_0^c(y)|^p}{|x-y|^{\mu}}dxdy\\
<&\frac{1}{2}\|\nabla u_0^c\|_2^2-\frac{A_0}{2p}\int\int\frac{|u_0^c(x)|^pA(y)|u_0^c(y)|^p}{|x-y|^{\mu}}dxdy
\endaligned
\end{equation*}
or equivalently
\begin{equation}\label{J3.7}
\aligned
&I(c)+II(c):= \\
&\frac{A_0-A(0)}{2p}\int\int\frac{|u_0^c(x)|^p|u_0^c(y)|^p}{|x-y|^{\mu}}dxdy
+\frac{1}{2p}\int\int\frac{(A(0)-A(x))|u_0^c(x)|^pA(y)|u_0^c(y)|^p}{|x-y|^{\mu}}dxdy\\
<&0.
\endaligned
\end{equation}
By $A_0<A(0)$ we can fix $R_0>0$ such that
\begin{equation}\label{J3.8}
\aligned
&\frac{A_0-A(0)}{2p}\int\int\frac{|u_0(x)|^p|u_0(y)|^p}{|x-y|^{\mu}}dxdy \\
&+\frac{1}{2^{\frac{\mu}{2N}}p}\|A\|_\infty^{2-\frac{\mu}{2N}}\|u_0\|_{\frac{2Np}{2N-\mu}}^p\left( \int_{|x|\geq R_0 }
|u_0(x)|^{\frac{2Np}{2N-\mu}}dx \right)^{\frac{2N-\mu}{2N}}\\
=&-\varepsilon_0<0.
\endaligned
\end{equation}
By calculation, we get
$$I(c)=\frac{A_0-A(0)}{2p}c^{(2N-\mu)a-2pb}\int\int\frac{|u_0(x)|^p|u_0(y)|^p}{|x-y|^{\mu}}dxdy.$$
And by (\ref{J3.2}) and Hardy-Littlewood-Sobolev inequality we have
\begin{equation*}
\aligned
II(c)=&\frac{1}{2p}\int\int\frac{(A(0)-A(x))|u_0^c(x)|^pA(y)|u_0^c(y)|^p}{|x-y|^{\mu}}dxdy\\
\leq& \frac{\|A\|_\infty}{2p}\|u^c_0\|_{\frac{2Np}{2N-\mu}}^p \left(\int|A(0)-A(x)|^{\frac{2N}{2N-\mu}}|u_0^c(x)|^{\frac{2Np}{2N-\mu}}dx\right)^{\frac{2N-\mu}{2N}}
\\
\leq& \frac{\|A\|_\infty}{2p}c^{\frac{(2N-\mu)a-2pb}{2}}\|u_0\|_{\frac{2Np}{2N-\mu}}^p \\
&\cdot\left(2\|A\|_\infty c^{Na-\frac{2Np}{2N-\mu}b}\int_{|x|\geq R_0 }
|u_0(x)|^{\frac{2Np}{2N-\mu}}dx \right.\\
&\left.+c^{Na-pb}\|u_0\|_\infty c^{-Na}\int_{|x|\leq R_0 c^a}|A(0)-A(x)|^{\frac{2N}{2N-\mu}}dx\right)^{\frac{2N-\mu}{2N}}\\
=&\frac{\|A\|_\infty}{2p}\|u_0\|_{\frac{2Np}{2N-\mu}}^pc^{(2N-\mu)a-2pb}\left(2\|A\|_\infty \int_{|x|\geq R_0 }
|u_0(x)|^{\frac{2Np}{2N-\mu}}dx \right)^{\frac{2N-\mu}{2N}}\\
&+c^{(2N-\mu)a-2pb+\frac{\mu}{2N}pb}o(1),
\endaligned
\end{equation*}
where $\lim\limits_{c\to\infty}o(1)=0$.
Therefore, by (\ref{J3.8}) we have
\begin{equation*}
\aligned
I(c)+II(c)\leq&c^{(2N-\mu)a-2pb}\left[\frac{A_0-A(0)}{2p}\int\int\frac{|u_0(x)|^p|u_0(y)|^p}{|x-y|^{\mu}}dxdy \right.\\
&\left.+\frac{1}{2^{\frac{\mu}{2N}}p}\|A\|_\infty^{2-\frac{\mu}{2N}}\|u_0\|_{\frac{2Np}{2N-\mu}}^p\left( \int_{|x|\geq R_0 }
|u_0(x)|^{\frac{2Np}{2N-\mu}}dx \right)^{\frac{2N-\mu}{2N}}+c^{\frac{\mu}{2N}pb}o(1)\right]\\
&=c^{(2N-\mu)a-2pb}(-\varepsilon_0+o(1)),
\endaligned
\end{equation*}
which implies (\ref{J3.7}) for $c$ large enough, which in turn is equivalent to (\ref{J3.5}).
\end{proof}
{\it Proof of Theorem \ref{th1.1.5}.} It is sufficient to show that any minimizing sequence for $\sigma(c)=\sigma^{A(x),A(y)}(c)$ is compact in $H^1(\mathbb{R}^N,\mathbb{R})$. We use Proposition \ref{pr1.2} to prove this by choosing
\begin{equation*}
\aligned
&\mathcal{H}=H^1(\mathbb{R}^N,\mathbb{R}),\quad \mathcal{H}_1=\mathcal{D}^{1,2}(\mathbb{R}^N,\mathbb{R}),\quad \mathcal{H}_2=L^2(\mathbb{R}^N,\mathbb{R}),\\
&W(u)=-\frac{1}{2p}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}\frac{\min\{A(x),A_0\}|u(x)|^pA(y)|u(y)|^p}{|x-y|^\mu}dxdy,\\
&T(u)=-\frac{1}{2p}\int_{A(x)\geq A_0}\int_{y\in\mathbb{R}^N}\frac{(A(x)-A_0)|u(x)|^pA(y)|u(y)|^p}{|x-y|^\mu}dxdy.
\endaligned
\end{equation*}
Then
\begin{equation*}
\aligned
W(u)+T(u)=-\frac{1}{2p}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}\frac{A(x)|u(x)|^pA(y)|u(y)|^p}{|x-y|^{\mu}}dxdy
\endaligned
\end{equation*}
and $\varsigma^{W+T}(c)=\sigma(c)$.
It is easy to verify that the conditions (1), (3) in Proposition \ref{pr1.2} hold. The left hand side inequality in (6) follows from Lemma \ref{lm3.1}; The right hand side inequality in (6) follows from Lemma \ref{J3.1} provided that $c>c_0$,
where $c_0$ comes from (\ref{J3.5}). Since
$$p<\bar{p}\ \Longrightarrow\ 2>Np-2N+\mu,$$
then (\ref{c3.1}) implies (7). (5) follows from Lemma \ref{lm2.11}.
Next, we prove (2). Let $u_n\rightharpoonup u$ in $H^1(\mathbb{R}^N,\mathbb{R})$, then $\{u_n-u\}$ is bounded in $H^1(\mathbb{R}^N,\mathbb{R})$.
By $(A'_1)$, $\{x\in\mathbb{R}^N: A(x)\geq A_0\}$ is bounded in $\mathbb{R}^N$, and thus by Rellich Compactness Theorem
$$\int_{A(x)\geq A_0}|u_n(x)-u(x)|^{\frac{2Np}{2N-\mu}}dx\to0.$$
Then, by Lemma \ref{lm2.11} (Brezis-Lieb lemma of nonlocal version) and Hardy-Littlewood-Sobolev inequality, we have
\begin{equation*}
\aligned
|T(u_n)-T(u)|=&\frac{1}{2p}\left|\int_{A(x)\geq A_0}\int_{y\in\mathbb{R}^N}\frac{(A(x)-A_0)A(y)(|u_n(x)|^p|u_n(y)|^p-|u(x)|^p|u(y)|^p)}{|x-y|^\mu}dxdy\right|\\
\leq&\frac{\|A\|_\infty^2}{p}\left|\int_{A(x)\geq A_0}\int_{y\in\mathbb{R}^N}\frac{(|u_n(x)|^p|u_n(y)|^p-|u(x)|^p|u(y)|^p)}{|x-y|^\mu}dxdy\right|\\
=&\frac{\|A\|_\infty^2}{p}\left|\int_{A(x)\geq A_0}\int_{y\in\mathbb{R}^N}\frac{|u_n(x)-u(x)|^p|u_n(y)-u(y)|^p}{|x-y|^\mu}dxdy+o(1)\right|\\
\leq&C\left|\int_{A(x)\geq A_0}|u_n(x)-u(x)|^{\frac{2Np}{2N-\mu}}dx\right|^{\frac{2N-\mu}{2N}}\|u_n-u\|^p_{\frac{2Np}{2N-\mu}}+o(1)\\
&\to 0.
\endaligned
\end{equation*}
Thus $T$ is weakly continuous, i.e., (2) holds.
Finally we shall verify (4). Let $u_n\rightharpoonup u$ in $H^1(\mathbb{R}^N,\mathbb{R})$ and $u_n\to u$ in $L^2(\mathbb{R}^N,\mathbb{R})$. Then we have
$$u_n\to u~~\text{in}~L^q(\mathbb{R}^N,\mathbb{R})~~\forall 2\leq q<2^*.$$
Then, Lemma \ref{lm2.11} and Hardy-Littlewood-Sobolev inequality imply
\begin{equation*}
\aligned
|W(u_n)-W(u)|
\leq&\frac{\|A\|_\infty^2}{2p}\left|\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}\frac{(|u_n(x)|^p|u_n(y)|^p-|u(x)|^p|u(y)|^p)}{|x-y|^\mu}dxdy\right|\\
\leq&C\|u_n-u\|^{2p}_{\frac{2Np}{2N-\mu}}+o(1)\to 0.
\endaligned
\end{equation*}
Thus (4) holds.
$\Box$
\vskip4mm
{\section{ $L^2-$supercritical case }}
\setcounter{equation}{0}
\vskip4mm
We prove Theorem \ref{th1.2} in this section. Our method is derived from \cite{MR4081327}. $(A_1)$, $(A_3)$ and $(A_4)$ hold with $\bar{p}<p<2^*_\mu$.
Since $A(x) \equiv A_\infty$ satisfies $(A_1)$, $(A_3)$ and $(A_4)$, all the following conclusions on $I$ are also true for $I_\infty$.
\begin{lemma}\label{lm2.1} We have
\begin{equation}\label{c2.3}
\begin{array}{ll}
\psi(t,x):=&-2 t^{-\frac{Np-2N+\mu}{2}}[A(x)-A(t x)]\\
&+\frac{4(t^{-\frac{Np-2N+\mu}{2}}-1)}{Np-2N+\mu} \nabla A(x) \cdot x\geq 0,\quad~ \forall t>0, x\in \mathbb{R}^{N};
\end{array}
\end{equation}
\begin{equation}\label{c2.4}
t\mapsto A(t x)~\text{is~nonincreasing~on}~(0,\infty), \quad \forall x \in \mathbb{R}^{N};
\end{equation}
\begin{equation}\label{c2.5}
-\nabla A(x) \cdot x \geq 0,\quad\forall x \in \mathbb{R}^{N}, \text { and } -\nabla A(x) \cdot x \rightarrow 0, \quad \text { as }|x| \rightarrow \infty.
\end{equation}
\end{lemma}
\begin{proof}
First, for any $x\in \mathbb{R}^N$, by $(A_3)$, we have
\begin{equation*}
\aligned
\frac{d\psi(t, x)}{dt}
=&t^{-1-\frac{Np-2N+\mu}{2}}\big\{[(Np-2N+\mu)A(x)-2\nabla A(x)\cdot x]\\
&-[(Np-2N+\mu)A(tx)-2\nabla A(tx)\cdot (tx)]\big\}
\\
&\quad\quad \left\{
\begin{array}{ll}
\aligned
&\geq0,~~~t\geq1,\\
&\leq0,~~~0<t<1,
\endaligned
\end{array}
\right.
\endaligned
\end{equation*}
which implies that $\psi(t, x) \geq \psi(1, x) = 0$ for all $t > 0$ and $x\in\mathbb{R}^N$ , i.e., $(\ref{c2.3})$ holds.
Next, let $t \to+\infty$ in $(\ref{c2.3})$, we have $-\nabla A(x)\cdot x \geq 0$ for all $x\in\mathbb{R}^N$, which leads to $(\ref{c2.4})$.
Last, let $t = 1/2$ in $(\ref{c2.3})$, then one has
$$0\leq-\nabla A(x)\cdot x\leq -\frac{2^{\frac{Np-2N+\mu}{2}}(Np-2N+\mu)[A(x)-A(x/2)] }{2(2^{\frac{Np-2N+\mu}{2}}-1)}\to0,\quad \text{as}~|x|\to+\infty.$$
This shows $(\ref{c2.5})$ holds.
\end{proof}
\begin{lemma}\label{cr2.7} For $u \in \mathcal{M}(c)$,
$I(u)>I\left( u^{t}\right)$ for all $t\in(0,1)\cup(1,+\infty)$, where $u^t(x) = t^{\frac{N}{2}} u(tx)$.
\end{lemma}
\begin{proof}
By $p\in\left(\frac{2N-\mu+2}{N}, \frac{2N-\mu}{(N-2)_{+}}\right)$, $(A_1)$ and $(\ref{c2.5})$, for $u \in \mathcal{M}(c)$, we have
\begin{equation*}
\aligned
I(u)=&I(u)-\frac{1}{2}J(u) \\
=&\frac{1}{4p}\int\int\frac{ \left[(Np-2N+\mu-2)A(x)-2\nabla A(x)\cdot x\right]A(y)|u(x)|^p |u(y)|^p}{|x-y|^\mu}dxdy
>0.
\endaligned
\end{equation*}
Fix a $u \in \mathcal{M}(c)$, let
\begin{equation}\label{g}
\aligned
g(t,u):=&I(u)-I(u^t)
=\frac{1-t^2}{2}\|\nabla u\|_2^2-\frac{1}{2p}\int\int\frac{ A(x)|u(x)|^p A(y) |u(y)|^p}{|x-y|^\mu}dxdy\\
&+\frac{t^{Np-2N+\mu}}{2p}\int\int\frac{ A(t^{-1}x)|u(x)|^p A(t^{-1}y) |u(y)|^p}{|x-y|^\mu}dxdy.
\endaligned
\end{equation}
Then we have
\begin{equation}\label{2.14}
g(0,u)=I(u)>0,\quad g(1,u)=0,\quad g(+\infty,u)=+\infty.
\end{equation}
By $(\ref{I2})$, we have
\begin{equation*}
\aligned
\frac{dg(t,u)}{dt}=-t\left(\|\nabla u\|_2^2-h(t,u)\right),\quad \frac{dg(t,u)}{dt}|_{t=1}=-J(u)=0,
\endaligned
\end{equation*}
where
\begin{equation*}
\aligned
h(t,u)=&\frac{t^{Np-2N+\mu-2}}{2p} \cdot \\
&\int\int\frac{\left[(Np-2N+\mu)A(t^{-1}x)-2\left(\nabla A(t^{-1}x)\cdot (t^{-1}x)\right)\right] A(t^{-1}y)|u(x)|^p |u(y)|^p}{|x-y|^\mu}dxdy.
\endaligned
\end{equation*}
Using $(A_1)$, $(A_3)$ and Lemma \ref{lm2.1}, we have
\begin{equation}\label{2.15}
\aligned
&h(0,u)=0;\quad h(t,u)\to+\infty~\text{as}~t\to+\infty; \\
\text{the~function}~&t\mapsto h(t,u)~\text{is~strictly~increasing~on} ~(0,+\infty).
\endaligned
\end{equation}
Then by $(\ref{2.15})$, $t=1$ is the unique solution of equation $\frac{dg(t,u)}{dt}=0$. This together with $(\ref{2.14})$ implies the conclusion.
\end{proof}
\begin{lemma}\label{lm2.8} For any $u \in \mathcal{S}(c)$, there
exists a unique $t_u > 0$ such that $ u^{t_u} \in\mathcal{M}(c)$.
\end{lemma}
\begin{proof}
Let $u\in \mathcal{S}(c)$ be fixed and define a function $\zeta(t):= I(u^t)$ on $(0, \infty)$.
By $(\ref{J})$, we have
\begin{equation}\label{2.7.1}
\aligned
J(u^t)=&\|\nabla u\|_2^2-t^2h(t,u).
\endaligned
\end{equation}
Clearly, by $(\ref{I2})$ and $(\ref{2.7.1})$, we have
\begin{equation*}
\aligned
&\zeta'(t)=0
&\Longleftrightarrow J(u^t)=0~ \Longleftrightarrow~ u^t\in\mathcal{M}.
\endaligned
\end{equation*}
It is easy to verify that $\lim\limits_{t\to0}\zeta(t) = 0$,
$\zeta(t) > 0$ for $t > 0$ small and $\zeta(t) < 0$ for $t$ large. Therefore $\max\limits_{t\in[0,\infty)}\zeta(t)$ is achieved at
some $t_u > 0$ so that $\zeta'(t_u) = 0$ and $u^{t_u}\in\mathcal{M}$.
And we have from $(\ref{2.15})$ and $(\ref{2.7.1})$ that $t_u$ is unique for any $u\in \mathcal{S}(c)$.
\end{proof}
Combining Lemma \ref{cr2.7} and Lemma \ref{lm2.8}, we get the following result.
\begin{lemma}\label{lm2.10}
$$
\inf _{u \in \mathcal{M}(c)} I(u)=m(c)=\inf _{u \in \mathcal{S}(c)} \max _{t>0} I\left( u^{t}\right).
$$
\end{lemma}
\begin{lemma}\label{lm2.12} The function $c\mapsto m(c)$ is
nonincreasing on $(0, \infty)$. In particular, if $m(c)$ is achieved, then $m(c) > m(c')$ for
any $c'> c$.
\end{lemma}
\begin{proof}
For any $c_2 > c_1 > 0$, it follows that there exists $\{u_n\}\subset\mathcal{M}(c_1)$ such that
$$I(u_n)<m(c_1)+\frac{1}{n}.$$
Let $\xi := \sqrt{c_2/c_1} \in (1,+\infty)$ and $v_n(x) := \xi^{(2-N)/2}u_n(\xi^{-1}x)$. Then $\|v_n\|_2^2 = c_2$ and
$\|\nabla v_n\|_2 = \|\nabla u_n\|_2$. By Lemma \ref{lm2.8}, there exists $t_n > 0$ such that $v_n^{t_n}\in\mathcal{M}(c_2)$.
Then it follows from $(A_4)$, $(\ref{I2})$, and Lemma \ref{cr2.7} that
\begin{equation*}
\aligned
m\left(c_{2}\right) \leq& I\left(v_{n}^{t_{n}}\right)
=I\left(u_{n}^{t_{n}}\right)+\frac{t_{n}^{Np-2N+\mu}}{2p}\cdot \\
&\int\int\frac{\left[A\left(t_{n}^{-1} x\right)A\left(t_{n}^{-1} y\right) -\xi^{(2-N)p+2N-\mu} A\left(\xi t_{n}^{-1} x\right)A\left(\xi t_{n}^{-1} y\right)\right]|u_{n}(x)|^{p}|u_{n}(y)|^{p}}{|x-y|^\mu}dxdy \\
\leq& I\left(u_{n}^{t_{n}}\right)
\leq I\left(u_{n}\right)<m\left(c_{1}\right)+\frac{1}{n},
\endaligned
\end{equation*}
which shows that $m(c_2) \leq m(c_1)$ by letting $n \to+\infty$.
Next, we assume that $m(c)$ is achieved, i.e., there exists $\tilde{u}\in\mathcal{M}(c)$ such that $I(\tilde{u})=m(c)$. For any given $c' > c$, let $\tilde{\xi}= \sqrt{c'/c} \in (1,+\infty)$ and $\tilde{v}(x):= \tilde{\xi}^{(2-N)/2}
\tilde{u}(\tilde{\xi}^{-1}x)$.
Then $\|\tilde{v}\|_2^2 = c'$ and $\|\nabla\tilde{v}\|_2= \|\nabla\tilde{u}\|_2$. By Lemma \ref{lm2.8}, there exists $t_0 > 0$ such that
$\tilde{v}^{t_0} \in \mathcal{M}(c')$. Then it follows from $(A_4)$, $(\ref{I2})$, and Lemma \ref{cr2.7} that
\begin{equation*}
\aligned
m\left(c'\right) \leq& I\left(\tilde{v}^{t_{0}}\right)
=I\left(\tilde{u}^{t_{0}}\right)+\frac{t_{0}^{Np-2N+\mu}}{2p}\cdot \\
&\int\int\frac{\left[A\left(t_{0}^{-1} x\right)A\left(t_{0}^{-1} y\right) -\tilde{\xi}^{(2-N)p+2N-\mu} A\left(\tilde{\xi} t_{0}^{-1} x\right)A\left(\tilde{\xi} t_{0}^{-1} y\right)\right]|\tilde{u}(x)|^{p}|\tilde{u}(y)|^{p}}{|x-y|^\mu}dxdy \\
<& I\left(\tilde{u}^{t_{0}}\right)\leq
I\left(\tilde{u}\right)=m\left(c\right),
\endaligned
\end{equation*}
which shows that $m(c') < m(c)$.
\end{proof}
\begin{lemma}\label{lm2.13}
(i) There exists $\rho_0 > 0$ such that $\|\nabla u\|_2\geq \rho_0$, $\forall u\in\mathcal{M}(c)$; \\
(ii) $m(c) = \inf\limits_{u\in\mathcal{M}(c)}I(u) > 0$.
\end{lemma}
\begin{proof}
(i) For $u\in\mathcal{M}(c)$,
$$\|\nabla u\|_2^2=\frac{1}{2p}\int\int\frac{ \left[(Np-2N+\mu)A(x)-2\nabla A(x)\cdot x\right]A(y) |u(x)|^p |u(y)|^p}{|x-y|^\mu}dxdy.$$
By $(A_1)$, Lemma \ref{lm2.1}, and the Hardy--Littlewood--Sobolev inequality,
$$\|\nabla u\|_2^2\leq C\int\int\frac{ |u(x)|^p |u(y)|^p}{|x-y|^\mu} \mathrm{d} x\mathrm{d} y \leq C(N,\mu)\|u\|_{\frac{2Np}{2N-\mu}}^{2p}.$$
On the other hand, we have from Gagliardo-Nirenberg inequality that
\begin{equation*}
\aligned
\| u\|_s\leq C(N,s)\|\nabla u\|_2^{\frac{N(s-2)}{2s}}\|u\|_2^{\frac{2N-(N-2)s}{2s}},\quad\forall u\in H^1(\mathbb{R}^N,\mathbb{R}),\quad s\in\left[2,2^*\right).
\endaligned
\end{equation*}
Therefore,
\begin{equation}\label{cc}
\|\nabla u\|_2^{Np-2N+\mu-2}\geq C(N,p,\mu) \|u\|_2^{Np-2N+\mu-2p}=C(N,p,\mu)c^{\frac{Np-2N+\mu-2p}{2}}.
\end{equation}
Since $Np-2N+\mu>2$, there exists $$\rho_0=C(N,p,\mu)^{\frac{1}{Np-2N+\mu-2}}c^{\frac{Np-2N+\mu-2p}{2(Np-2N+\mu-2)}} > 0$$ such that $\|\nabla u\|_2\geq\rho_0$.
\vskip2mm
(ii) For $u\in\mathcal{M}(c)$, it follows from $-\left(\nabla A(x)\cdot x\right)A(y)\geq0$ that
\begin{equation}\label{2.10.ii}
\aligned
I(u)&=I(u)-\frac{1}{Np-2N+\mu}J(u)\\
&=\left(\frac{1}{2}-\frac{1}{Np-2N+\mu}\right)\|\nabla u\|_2^2-\frac{1}{(Np-2N+\mu)p}\int\int\frac{\left(\nabla A(x)\cdot x\right)A(y)|u(x)|^p |u(y)|^p}{|x-y|^\mu}dxdy \\
&\geq \left(\frac{1}{2}-\frac{1}{Np-2N+\mu}\right)\rho_0^2.
\endaligned
\end{equation}
Therefore $m(c) = \inf\limits_{u\in\mathcal{M}(c)}I(u) > 0$.
\end{proof}
By Lemma \ref{lm2.10}, we have
\begin{equation}\label{A2}
m(c) \leq m_\infty(c).
\end{equation}
With the help of (\ref{A2}), we can show the following lemma.
\begin{lemma}\label{lm2.15} $m(c)$ is achieved.
\end{lemma}
\begin{proof}
By Lemmas \ref{lm2.8} and \ref{lm2.13}, we have $\mathcal{M}(c)\neq\emptyset$ and $m(c)> 0$.
Let $\{u_n\}\subset \mathcal{M}(c)$ be such that $I(u_n)\to m(c)$. Since $J(u_n) = 0$, it follows from $(\ref{2.10.ii})$ that
\begin{equation*}
m(c)+o(1)=I(u_n)\geq \left(\frac{1}{2}-\frac{1}{Np-2N+\mu}\right)\|\nabla u_n\|_2^2.
\end{equation*}
This shows that $\{\|\nabla u_n\|_2\}$ is bounded. Passing to a subsequence, we have $$u_n\rightharpoonup \bar{u}\text{ in }
H^1(\mathbb{R}^N,\mathbb{R}),\quad
u_n \to\bar{u}\text{ in }L^s_{\text{loc}}(\mathbb{R}^N,\mathbb{R})\text{ for }2 \leq s < 2^*,\quad\text{and }u_n\to\bar{u} ~a.e.\text{ in }\mathbb{R}^N.$$
Case (i) $\bar{u}= 0$. Let $B_R(0)$ be the ball in $\mathbb{R}^N$ with the origin as its center and $R$ as its radius. By Lemma \ref{lm2.1} and the Hardy--Littlewood--Sobolev inequality, we have
\begin{equation}\label{1111}
\aligned
&\left|\int\int\frac{(\nabla A(x)\cdot x)A(y)|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy\right|\\
=&\left|\int\left(\int_{x\in B_R(0)}+\int_{x\in B^c_R(0)}\right)\frac{(\nabla A(x)\cdot x)A(y)|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy\right|\\
\leq&\left[\left(\int_{x\in B_R(0)}+\int_{x\in B^c_R(0)}\right)\left|\nabla A(x)\cdot x\right|^{\frac{2N}{2N-\mu}}|u_n(x)|^{\frac{2Np}{2N-\mu}}dx\right]^{\frac{2N-\mu}{2N}} \\
&\cdot\left[\int |A(y)|^{\frac{2N}{2N-\mu}}|u_n(y)|^{\frac{2Np}{2N-\mu}}dy\right]^{\frac{2N-\mu}{2N}}\\
\to&0,\quad \text{as}~R\to\infty,~n\to\infty.
\endaligned
\end{equation}
Similarly, by $\lim\limits_{|x|\to\infty}A(x)=A_\infty$, we have
\begin{equation}\label{2222}
\aligned
&\int\int\frac{(A_\infty^2-A(x)A(y))|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy \\
=&\int\int\frac{A_\infty(A_\infty-A(x))|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy+\int\int\frac{A(x)(A_\infty-A(y))|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy\\
=&\int\left(\int_{x\in B_R(0)}+\int_{x\in B^c_R(0)}\right)\frac{A_\infty(A_\infty-A(x))|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy\\
~~~~&+\int\left(\int_{y\in B_R(0)}+\int_{y\in B^c_R(0)}\right)\frac{A(x)(A_\infty-A(y))|u_n(x)|^p|u_n(y)|^p}{|x-y|^\mu}dxdy\\
\to&0,\quad \text{as}~R\to\infty,~n\to\infty.
\endaligned
\end{equation}
Therefore, as $n\to\infty$, it follows from (\ref{1111}) and (\ref{2222}) that
\begin{equation}\label{c2.26}
I_\infty(u_n) \to m(c),\quad J_\infty(u_n)\to 0,
\end{equation}
where
\begin{equation*}
\aligned
J_\infty(u_n)
=\frac{dI_\infty(u_n^t)}{dt}|_{t=1}
=\|\nabla u_n\|_2^2-\frac{Np-2N+\mu}{2p}\int\int\frac{A_\infty^2|u_n(x)|^p |u_n(y)|^p}{|x-y|^\mu}dxdy.
\endaligned
\end{equation*}
By Lemma \ref{lm2.13}-(i) and $(\ref{c2.26})$, we have
\begin{equation}\label{c2.27}
\rho_0^2\leq \|\nabla u_n\|_2^2=\frac{Np-2N+\mu}{2p}\int\int\frac{A_\infty^2|u_n(x)|^p |u_n(y)|^p}{|x-y|^\mu}dxdy+o(1).
\end{equation}
Using $(\ref{c2.27})$ and Lions' concentration compactness principle \cite{MR778970,MR778974}, we can easily prove that there exist $\delta> 0$ and $\{y_n\}\subset \mathbb{R}^N$
such that $\int_{B_1(y_n)}|u_n|^2dx > \frac{\delta}{2}.$ Let $\hat{u}_n(x) = u_n(x + y_n)$. Then $\|\hat{u}_n\| = \|u_n\|$,
$
\int_{B_1(0)}|\hat{u}_n|^2dx > \frac{\delta}{2},
$
and
\begin{equation}\label{2.11.3}
I_\infty(\hat{u}_n)\to m(c),~~J_\infty(\hat{u}_n)\to0.
\end{equation}
Therefore, there exists $\hat{u}\in H^1(\mathbb{R}^N,\mathbb{R})\setminus\{0\}$ such that, passing to a subsequence, as $n\to\infty$,
\begin{equation}\label{2.11.4}
\hat{u}_n\rightharpoonup \hat{u}~~\text{in}~H^1(\mathbb{R}^N,\mathbb{R}),\quad
\hat{u}_n\to \hat{u}~~\text{in}~L^q_{\text{loc}}(\mathbb{R}^N,\mathbb{R})~\text{for}~ q\in[1,2^*),\quad
\hat{u}_n\to \hat{u}~~\text{a.e.~on}~\mathbb{R}^N.
\end{equation}
Let $w_n = \hat{u}_n -\hat{u}$. Then $(\ref{2.11.4})$ and Lemma \ref{lm2.11} yield
\begin{equation}\label{2.11.5}
I_\infty(\hat{u}_n)=I_\infty(\hat{u})+I_\infty(w_n)+o(1),~~J_\infty(\hat{u}_n)=J_\infty(\hat{u})+J_\infty(w_n)+o(1).
\end{equation}
Set $$\Psi_{\infty}(u):=I_\infty(u)-\frac{1}{2}J_\infty(u)=\frac{Np-2N+\mu-2}{4p}\int\int\frac{A_\infty^2|u(x)|^p|u(y)|^p}{|x-y|^\mu}dxdy,\quad \forall u\in H^1(\mathbb{R}^N,\mathbb{R}).$$
Then by $(\ref{2.10.ii})$, $(\ref{2.11.3})$ and $(\ref{2.11.5})$, we have
\begin{equation}\label{2.11.6}
\aligned
m(c)-\Psi_{\infty}(\hat{u}) +o(1)=\Psi_{\infty}(w_n)
\endaligned
\end{equation}
and
\begin{equation}\label{2.11.7}
J_\infty(w_n)=-J_\infty(\hat{u})+o(1).
\end{equation}
By a standard argument \cite[Lemma 2.15]{MR4081327}, we have
\begin{equation}\label{2.11.8}
I_\infty(\hat{u})=m(c),~~J_\infty(\hat{u})=0.
\end{equation}
By Lemma \ref{lm2.8}, there exists $\tilde{t}> 0$ such that $\hat{u}^{\tilde{t}}\in\mathcal{M}(c)$, moreover, it
follows from $(A_1)$, $(\ref{2.11.8})$, and Lemma \ref{cr2.7} that
$$
m(c) \leq I\left(\hat{u}^{\tilde{t}}\right) \leq I_{\infty}\left( \hat{u}^{\tilde{t}}\right) \leq I_{\infty}(\hat{u})=m(c).
$$
This shows that $m(c)$ is achieved at $\hat{u}^{\tilde{t}}\in\mathcal{M}(c)$.
Case (ii) $\bar{u}\neq 0$. Let $v_n = u_n - \bar{u}$. Then $v_n\rightharpoonup0$ in $H^1(\mathbb{R}^N,\mathbb{R})$. By Lemma \ref{lm2.11}, we have
\begin{equation}\label{22.11.5}
I(u_n)=I(\bar{u})+I(v_n)+o(1),~~J(u_n)=J(\bar{u})+J(v_n)+o(1).
\end{equation}
For $u\in H^1(\mathbb{R}^N,\mathbb{R})$, set
\begin{equation}\label{A3}
\begin{array}{ll}
\Psi(u):&\displaystyle=I(u)-\frac{1}{2}J(u)
\\
&\displaystyle=\frac{1}{4p}\int\int\frac{\left[(Np-2N+\mu-2)A(x)-2\nabla A(x)\cdot x\right]A(y)|u(x)|^p |u(y)|^p}{|x-y|^\mu}dxdy.
\end{array}
\end{equation}
Then it follows from $(A_1)$ and Lemma \ref{lm2.1} that $\Psi(u) > 0$ for $u \in H^1(\mathbb{R}^N,\mathbb{R})\setminus\{0\}$.
Using the same argument in \cite[Lemma 2.15]{MR4081327}, we have
\begin{equation}\label{22.11.8}
I(\bar{u})=m(c),~~J(\bar{u})=0,~~\|\bar{u}\|_2^2=c.
\end{equation}
This completes the proof.
\end{proof}
\begin{lemma}\label{lm2.16} If $\bar{u}\in\mathcal{M}(c)$ and $I(\bar{u}) = m(c)$, then $\bar{u}$ is a critical point of $I|_{S(c)}$.
\end{lemma}
\begin{proof}
By a similar deformation argument in \cite[Lemma 2.16]{MR4081327}, we get the conclusion.
\end{proof}
{\it Proof of Theorem \ref{th1.2}.} For any $c > 0$, in view of Lemmas \ref{lm2.15} and \ref{lm2.16}, there exists
$\overline{u}_{c} \in \mathcal{M}(c)$ such that
$
I\left(\overline{u}_{c}\right)=m(c),\left.\quad I\right|_{\mathcal{S}(c)} ^{\prime}\left(\overline{u}_{c}\right)=0.
$
In view of the Lagrange multiplier theorem, there exists $\lambda_c\in\mathbb{R}$ such that
$
I^{\prime}\left(\overline{u}_{c}\right)=\lambda_{c} \overline{u}_{c}.
$
Therefore, $(\overline{u}_{c},\lambda_c)$ is a solution of (\ref{1.1.0}).
$\Box$
\vskip4mm
\end{document}
|
\begin{document}
\title{ \LARGE \bf High-order maximum principles for the stability analysis of positive bilinear control~systems}
\thispagestyle{empty}
\pagestyle{empty}
\begin{abstract}
We consider a continuous-time positive bilinear control system~(PBCS), i.e.
a bilinear control system with
Metzler matrices.
The positive orthant is an invariant set of such a system, and
the corresponding transition matrix~$C(t)$ is
entrywise nonnegative for all time~$t\geq 0$.
Motivated by the stability analysis
of positive linear switched systems~(PLSSs) under arbitrary switching laws, we fix
a final time~$T>0$
and define a control as
optimal if it maximizes the spectral radius of~$C(T)$.
A recent paper~\cite{lior_SIAM} developed
a first-order necessary condition for optimality
in the form of a maximum
principle~(MP). In this paper, we derive higher-order necessary conditions for optimality for
both singular and bang-bang controls.
Our approach is based on combining results on the second-order derivative of the spectral radius of a nonnegative
matrix with the generalized Legendre-Clebsch condition and
the Agrachev-Gamkrelidze second-order optimality condition.
\end{abstract}
\begin{IEEEkeywords}
Positive switched systems, stability under arbitrary switching laws, variational approach,
high-order maximum principles, Perron-Frobenius theory.
\end{IEEEkeywords}
\section{Introduction}\label{sec:intro}
Consider the continuous-time linear switched system
\begin{align}\label{eq:a0a1}
\dot{x}(t)& =A_{\sigma (t)}x(t), \nonumber \\
x(0)& =x_{0},
\end{align}
where~$x:\mathbb{R}_{+}\rightarrow \mathbb{R}^{n}$ is the state vector, and~$
\sigma :\mathbb{R}_{+}\rightarrow \{0,1\}$ is a piecewise constant function
referred to as the \emph{switching signal}. This models a system that can
switch between the two linear subsystems
\begin{equation*}
\dot{x}=A_{0}x\;\text{ and }\;\dot{x}=A_{1}x.
\end{equation*}
Recall that~\eqref{eq:a0a1} is said to be \emph{globally uniformly asymptotically stable}~(GUAS) if there exists a class~$\mathcal{K}\mathcal{L}$ function\footnote{A continuous function $\alpha :[0,\infty)\rightarrow [ 0,\infty )$
belongs to the class $\mathcal{K}$ if it is strictly increasing and
$\alpha (0)=0$. A continuous function $\beta :[0,\infty)\times [ 0,\infty
)\rightarrow [ 0,\infty )$ belongs to the class $\mathcal{K}\mathcal{L}
$ if for each fixed $s$, $\beta (\cdot ,s)$ belongs to $\mathcal{K}$, and
for each fixed $r>0
$, the mapping $\beta (r,\cdot )$ is decreasing and $\beta
(r,s)\rightarrow 0$ as $s\rightarrow \infty $.}~$\beta $ such that for any
initial condition~$x_0 \in \mathbb{R}^{n}$ and \emph{any} switching law~$
\sigma $, the corresponding solution of~\eqref{eq:a0a1} satisfies
\begin{equation*}
|x(t)|\leq \beta (|x_0|,t),\text{ for all }t\geq 0.
\end{equation*}
This implies in particular that
\begin{equation}
\lim_{t\rightarrow \infty }x(t)=0,\quad \text{ for all }\sigma
\text{ and all }x_0 \in \mathbb{R}^{n}, \label{eq:ctz}
\end{equation}
and for linear switched systems,~\eqref{eq:ctz} is in fact equivalent to GUAS (see,
e.g.,~\cite{angeli-sontag-positive-2009}).
Switched systems and, in particular, their stability analysis
are attracting considerable interest in the last two decades;
see e.g. the survey papers~\cite{shorten,branicky_98,decarloetal,libsur99} and the monographs~\cite{liberzon_book,morse-book,Jungers,johansson,sun_ge_book, sun_ge_2}.
It is well-known that a
necessary (but not sufficient) condition for GUAS of~\eqref{eq:a0a1} is the following.
\begin{assumption} \label{a:nec}
The matrix~$kA_{0}+(1-k)A_{1}$ is
Hurwitz for all~$k\in \lbrack 0,1]$.
\end{assumption}
Recall that a linear system
\be\label{eq:posys}
\dot{x}=Ax,
\ee
with~$A\in \R^{n\times n}$,
is called \emph{positive} if the positive orthant
\begin{equation*}
\mathbb{R}_{+}^{n} :=\{x\in \mathbb{R}^{n}\mid x_{i}\geq 0,\;i=1,\dots, n\}
\end{equation*}
is an invariant set of the dynamics, i.e.,~$x(0)\in \mathbb{R}_{+}^{n}$
implies that~$x(t)\in \mathbb{R}_{+}^{n}$ for all~$t\geq 0$.
Positive systems play an important role in
systems and control theory because in many physical systems the
state-variables represent quantities that can never attain negative values
(e.g. population sizes,
probabilities, concentrations, buffer loads)~\cite{farina2000,berman87,posi_sys89}.
A necessary and
sufficient condition for~\eqref{eq:posys} to be positive
is that $A$ is a \emph{Metzler matrix}, that
is, $a_{ij} \geq 0 $ for all~$i \not = j$. If~$A$ is Metzler
then~$\exp (At)$ is (entrywise) nonnegative
for all~$t\geq 0$. By the
Perron--Frobenius theory, the spectral radius of~$\exp (At)$ (i.e., the eigenvalue with maximal
absolute value) is real and nonnegative, and since~$\exp(At)$ is non-singular, it is in fact positive.
If both~$A_{0}$ and~$A_{1}$ are Metzler and~$x(0)\in \mathbb{R}_{+}^{n}$
then~\eqref{eq:a0a1} is called a \emph{positive linear switched system}
~(PLSS).
Mason and Shorten~\cite{mason-shorten03}, and independently David Angeli,
posed the following.
\begin{conj}\label{conj:shaorten}
\label{conj} If~\eqref{eq:a0a1} is a PLSS, then Assumption~\ref{a:nec}
provides a \emph{sufficient} condition for GUAS.
\end{conj}
Had this conjecture been true, it would have implied that
determining GUAS for a PLSS is relatively simple.
(See~\cite{Gurvits_Olshevsky} for analysis of the computational
complexity of determining whether any matrix in a convex set of matrices is Hurwitz.)
Gurvits, Shorten, and Mason~\cite{gurvits-shorten-mason07} proved that Conjecture~\ref{conj:shaorten}
is in general false (see also~\cite{gurvits-cdc03}), but
that it does hold when~$n=2$ (even when the number of
subsystems is arbitrary). Their proof in the planar case is based on showing
that the PLSS admits a common quadratic Lyapunov function~(CQLF). (For more on
the analysis of switched systems using CQLFs, see~\cite
{branicky_98,shorten,copos,forna,ron_margaliot}.) Margaliot and Branicky~\cite{mar-bra-full}
derived a
reachability--with--nice--controls--type result for planar bilinear control
systems, and showed that the proof of Conjecture~\ref{conj} when~$n=2$
follows as a special case. Fainshil, Margaliot, and Chigansky~\cite{lior} showed
that Conjecture~\ref{conj} is false already for the case~$n=3$.
In general, it seems that as far as the GUAS problem is concerned,
analyzing PLSSs is not simpler than analyzing linear switched
systems.
There is a rich literature on \emph{sufficient} conditions for GUAS, see, e.g., \cite{branicky_98,decarloetal,liberzon_book,libsur99,sun_ge_book}.
A more challenging problem is to determine a \emph{necessary and sufficient} condition for GUAS.
What makes this
problem difficult is that the set of all possible switching laws is
huge, so exhaustively checking the solution for each switching law is
impossible.
A natural idea is to try and characterize a ``most destabilizing''
switching law~$\sigma ^{\ast }$ of the switched system, and
then analyze the behavior of the corresponding trajectory~$x^{\ast }$. If~$
x^{\ast }$ converges to the origin, then so does any trajectory of the
switched system and this establishes GUAS.
This idea was pioneered by E. S.
Pyatnitskiy~\cite{pyat70,pyat71},
who studied the celebrated
\emph{absolute stability problem}~(ASP).
This
variational approach was further developed by several scholars including
N. E. Barabanov
and L. B. Rapoport, and proved to be highly successful; see the
survey papers~\cite{mar-simple,bar-cdc,rap}, the related work in~\cite{Boscain2008Nondiagonalizable_case,Boscain2009stability_conditions},
and
the recent extensions to the stability analysis of
{discrete--time} linear switched systems in~\cite{monovich1,tal2}.
A first attempt to extend the variational approach to the stability
analysis of PLSSs was taken
in~\cite{mar-bra-posi} using
the classical Pontryagin maximum principle~(PMP).
Recently, Fainshil and Margaliot~\cite{lior_SIAM} developed an alternative
approach
that combines the Perron-Frobenius theory of nonnegative matrices with
the standard needle variation used in the PMP.
The goal of this paper is to derive stronger, higher-order necessary conditions for optimality.
We thus begin by reviewing the first-order MP in~\cite{lior_SIAM}.
\subsection{Stability analysis of PLSSs: a Variational Approach}
The variational approach to the stability analysis of a linear switched system includes several steps.
The first step
is relaxing~\eqref{eq:a0a1} to the \emph{bilinear control system}~(BCS)
\begin{align}\label{eq:pscon}
\dot{x}& =(A+uB)x,\quad u\in \mathcal{U}, \\
x(0)& =x_{0}\nonumber,
\end{align}
where~$A:=(A_0+A_1)/2$, $B:=( A_1-A_0) /2$, and~$\mathcal U$ is the set of measurable controls taking values in~$[-1,1]$. Note that for~$u(t) \equiv -1$ [$u(t) \equiv 1$],
Eq.~\eqref{eq:pscon} yields~$\dot{x}=A_0 x$ [$\dot{x}=A_1 x$], i.e.,
trajectories of the~BCS corresponding to
piecewise constant bang-bang controls are
also trajectories of the original switched system.
The BCS~\eqref{eq:pscon} is said to be \emph{globally asymptotically stable}~(GAS) if~$\lim_{t \to \infty} x(t)=0$ for all~$x_0\in\R^n$ and \emph{all}~$u \in \mathcal{U}$.
Since every trajectory of the switched system~\eqref{eq:a0a1} is also a trajectory of~\eqref{eq:pscon},
GAS of~\eqref{eq:pscon} implies GUAS of the linear switched system.
It is not difficult to show that the converse implication also holds, so the BCS is GAS
if and only if the linear switched system is GUAS.
Thus, the GUAS problem for the switched linear system~\eqref{eq:a0a1} is equivalent to the GAS
problem for the BCS~\eqref{eq:pscon}.
From here on we assume that the switched system is positive,
i.e.~$A+kB$ is Metzler for all~$k\in[-1,1]$. For the BCS,
this implies that if~$
x_{0}\in \mathbb{R}_{+}^{n}$, then~$x(t)\in \mathbb{R}_{+}^{n}$
for all $u\in \mathcal{U}$\ and all~$t\geq 0$.
Thus~\eqref{eq:pscon} becomes a \emph{positive bilinear control system}~(PBCS).
For~$0\leq a \leq b \leq T$, and~$u \in \mathcal U$,
let~$C(b,a,u)$ denote the solution at time~$t=b$
of the matrix differential equation
\begin{align} \label{eq:trans}
\frac{d}{dt}C(t,a,u)& =(A+Bu(t))C(t,a,u ), \notag
\\
C(a ,a,u )& =I.
\end{align}
It is straightforward to verify that the solution of~\eqref{eq:pscon}
satisfies~$
x(b)=C(b,a,u)x(a)$
for all~$u \in \mathcal U$
and all~$0\leq a \leq b \leq T$. In other words,~$C(b,a,u)$ is the \emph{transition matrix}
from time~$a$ to time~$b$ of~\eqref{eq:pscon} corresponding to the
control~$u$. To simplify the notation, we will sometimes omit the dependence
on~$u$ and just write~$C( b,a)$.
When the initial time is~$a=0$ we write~\eqref{eq:trans} as
\begin{align} \label{eq:transzero}
\dot{C}(t)& =(A+Bu(t))C(t), \notag
\\
C(0 )& =I.
\end{align}
For a PBCS,~$C(t,u)$ is a non-negative matrix for all~$t\geq 0$ and all~$u\in \mathcal{U}$. Since it
is also
non-singular, the spectral radius~$\rho
(C(t,u))$ is a real and positive eigenvalue of~$C(t,u)$, called the Perron root.
If this eigenvalue is simple then the corresponding eigenvector~$v \in \R^n_+$, called the Perron eigenvector,
is unique (up to multiplication by a scalar).
The next step in the variational approach is to relate~$\rho
(C(t,u))$ to GAS of the PBCS.
Define the \emph{generalized spectral radius}
of the PBCS~\eqref{eq:pscon} by
\[
\rho(A,B):=\limsup_{t \to \infty} \rho_t(A,B),
\]
where
\begin{equation}\label{eq:rhot}
\rho_t(A,B):= \max_{u \in \mathcal U} (\rho(C(t,u)))^{1/t} .
\ee
Note that the maximum here is well-defined, as the reachable set of~\eqref{eq:transzero}
corresponding to~$\mathcal{U}$ is compact~\cite{filippov-paper}.
In fact, this is why we consider a bilinear control system with controls in~$ \mathcal U$
rather than the original linear switched system with piecewise constant switching laws.
The next result relates the GAS of the PBCS to~$\rho(A,B)$.
\begin{Theorem} \label{thm:starho}
The PBCS~\eqref{eq:pscon} is GAS if and only if \[\rho(A,B)<1.\]
\end{Theorem}
Thm.~\ref{thm:starho} already appeared in~\cite{lior_SIAM}, but without a proof.
For the sake of completeness we include its proof in the Appendix.
\begin{Remark}
It follows from~\eqref{eq:rhot} and Thm.~\ref{thm:starho}
that if~$\rho(C(T ,u)) \geq 1$ for some~$T>0$ and~$u \in \mathcal U$,
then the~PBCS is \emph{not} GAS.
Indeed,
for any integer~$k>0$, define~$\bar{u}:[0,kT] \to [-1,1]$ via the periodic extension of~$u$,
and let~$\bar{C}(t)$ denote the corresponding solution of~\eqref{eq:transzero} at time~$t$. Then
\[
\rho( \bar{C}( kT ) )=(\rho(C(T)))^k ,
\]
so~\eqref{eq:rhot} yields
\begin{align*}
\rho_{kT}(A,B)&\geq (\rho( \bar{C}(kT)))^{1/(kT)}\\&=(\rho(C(T )))^{1/T}\\& \geq 1,
\end{align*}
and this implies that~$\rho(A,B) \geq 1$.~\qed
\end{Remark}
Thm.~\ref{thm:starho} motivates
the following optimal control problem.
\begin{Problem}
\label{prob:rho} Consider the PBCS~\eqref{eq:transzero}. Fix
an arbitrary final time~$T>0$. Find a control~$u^{\ast }\in \mathcal{U}$ that \emph{maximizes}~$\rho (C(T ,u)) $.
\end{Problem}
The main result in~\cite{lior_SIAM} is a first-order necessary condition for optimality. Let~$A'$
denote the transpose of the matrix~$A$.
\begin{Theorem}\label{thm:mprho}\cite{lior_SIAM}
Consider the PBCS~\eqref{eq:transzero}. Suppose that~$u^*\in \mathcal{U}$ is an optimal control for
Problem~\ref{prob:rho}. Let~$C^*(t) $ denote the
corresponding solution of~\eqref{eq:transzero} at time~$t$, and let~$\rho ^{\ast }:=\rho
(C^*(T))$. Suppose that~$\rho ^{\ast }$ is a \emph{simple} eigenvalue of~$C^*(T)$.
Let~$v^{\ast }\in \mathbb{R}_{+}^{n}$ [$w^{\ast }\in \mathbb{R}_{+}^{n}$]
be an eigenvector of~$C^*(T)$ [$(C^*(T))'$] corresponding
to~$\rho^*$, normalized such that~$(w^*)'v^*=1$.
Let~$q:[0,T]\to \mathbb{R}_{+}^{n}$ be the solution of
\begin{align}\label{Qeq}
\dot{q}& =-(A+Bu^{\ast })'q, \\
q(T)& =w^{\ast }, \notag
\end{align}
and let~$p:[0,T]\to \mathbb{R}_{+}^{n}$ be the solution of
\begin{align}
\dot{p}& =(A+Bu^{\ast })p, \label{Peq} \\
p(0)& =v^{\ast }. \notag
\end{align}
Define the \emph{switching function}~$m:[0,T]\to \mathbb{R}$ by
\begin{equation}
m(t):=q'(t)Bp(t). \label{SwitchingFunction}
\end{equation}
Then for almost all $t\in \left[ 0,T\right]$,
\begin{equation}\label{eq:UStar}
u^{\ast }(t)=
\begin{cases}
1, & m(t)>0, \\
-1, & m(t)<0.
\end{cases}
\end{equation}
\end{Theorem}
This MP has some special properties.
\begin{Remark}
First, note that~\eqref{Qeq} implies that
\[
q'(0)= q'(t)C^*(t), \quad \text{ for all } t \in[0,T].
\]
In particular, substituting~$t=T$ yields
\begin{align*}
q'(0)&=q'(T) C^*(T) \\
&=(w^*)'C^*(T) \\
&=\rho^* (w^*)' ,
\end{align*}
as~$w^*$ is an eigenvector of~$(C^*(T))'$ corresponding to the eigenvalue~$\rho^*$.
Since scaling~$q$ by a positive constant has no effect on the sign of~$m$, this means
that
the final condition~$q(T)=w^*$ in~\eqref{Qeq} can be replaced by
the initial condition~$q(0)= w^* $. This leads to
an MP in the form of a \emph{one-point} boundary value problem
(with the unknown~$v^*,w^*$ as the initial conditions at time~$0$).~\qed
\end{Remark}
\begin{Remark} Note that
\begin{align}\label{eq:misperiodic}
m(T)&=q'(T) B p(T) \nonumber \\
&=(w^*)' B C^*(T) p(0)\nonumber \\
&=(w^*)' B \rho^* v^* \nonumber \\
&=q'(0) B p(0)\nonumber \\
&=m(0).
\end{align}
Thus, the switching function is ``periodic'' in the sense that~$m(T)=m(0)$.~\qed
\end{Remark}
One difficulty in applying Theorem~\ref{thm:mprho}
is that both~$v^*$ and~$w^*$ are unknown.
There are cases where this difficulty may be alleviated somewhat
because~$w^*$ can be expressed in terms of~$v^*$.
The next example demonstrates this.
\begin{Example}\label{exa:w_fiunc_v}
Consider an optimal bang-bang
control in the form
\[
u^*(t)=\begin{cases}
1,& t \in (0,t_1),\\
-1,& t \in (t_1,T),\end{cases}
\]
where~$0<t_1<T$.
The corresponding transition matrix is
\[
C^*(T)=\exp((A-B)\tau_2)\exp((A+B)\tau_1),
\]
where~$\tau_1:=t_1-0$ and~$\tau_2:=T-t_1$.
Thus,~$v^*$ and~$w^*$ satisfy
\begin{align*}
\exp((A-B)\tau_2)\exp((A+B)\tau_1)v^*&= \rho^* v^*,
\end{align*}
and
\begin{align}\label{eq:wmulle}
\exp((A+B)'\tau_1)\exp((A-B)'\tau_2)w^*&= \rho^* w^*.
\end{align}
Suppose that~$A$ and~$B$ are symmetric matrices. Then~\eqref{eq:wmulle} becomes
\[
\exp((A+B) \tau_1)\exp((A-B) \tau_2)w^* = \rho^* w^*,
\]
and multiplying this on the left by~$\exp((A-B) \tau_2)$ yields
\[
C^*(T) \exp((A-B) \tau_2)w^* = \rho^* \exp((A-B) \tau_2) w^*.
\]
Since the Perron eigenvector of~$C^*(T)$ is unique (up to multiplication by a constant) this means that
\[
\exp((A-B) \tau_2)w^* = r v^*,
\]
for some~$r >0$.~\qed
\end{Example}
The MP in Theorem~\ref{thm:mprho} is a necessary, but not sufficient, condition for optimality and it is possible of course
that
a
control satisfying this MP is not an optimal control.
The next example demonstrates this.
\begin{Example}\label{exa:mp2}
Consider a PBCS satisfying
the following properties:
\begin{itemize}
\item The matrix $A $ is symmetric. Its
maximal eigenvalue~$\mu$ is simple with corresponding eigenvector~$z$, and
\be\label{eq:vpbv}
z ' B z=0.
\ee
\item The matrices~$A-B$ and~$A+B$ are Metzler;
\item $\rho(A+B)>\rho(A)=\mu$.
\end{itemize}
(A specific example is~$n=2$,
$A=\begin{bmatrix}
2.2 & 1.6 \\ 1.6 & -0.2
\end{bmatrix}$
and
$B=\begin{bmatrix}
-1.1 & 0.2 \\
0.95 & 2.1
\end{bmatrix}
$.
Indeed, here~$\mu=3$, $z = \begin{bmatrix}2&1\end{bmatrix} '$ and it is straightforward to verify that all
the properties above hold.)
Consider the possibility that the singular control~$u (t) \equiv 0$ is optimal.
Then
\[
\rho(u):=\rho(\exp(AT))=\exp(\mu T).
\]
Since~$A$ is symmetric,
the corresponding right and left eigenvector is~$z$, so in the MP $p(0)=q(T)=z$.
Thus,~\eqref{Peq} and~\eqref{Qeq} yield
\begin{align*}
p(t)&= \exp(At)z\\
& =\exp(\mu t) z,
\end{align*}
and
\begin{align*}
q(t)&= \exp(A'(T-t))z\\
& = \exp(\mu(T-t)) z.
\end{align*}
Substituting this in~\eqref{SwitchingFunction} yields
\begin{align*}
m(t)&=\exp(\mu T)z' B z \equiv 0.
\end{align*}
Thus,~$u (t)\equiv 0$ (vacuously) satisfies Thm.~\ref{thm:mprho}.
However, since~$\rho(A+B)>\rho(A)$ the control~$\tilde u(t)\equiv 1$ yields
\[
\rho(\tilde u):=\rho(\exp((A+B)T))>\rho(u),
\]
so clearly~$u(t)\equiv 0$ is \emph{not} an optimal control.~\qed
\end{Example}
The reason that~$u(t)\equiv 0$ in Example~\ref{exa:mp2}
cannot be ruled out is that Thm.~\ref{thm:mprho} is a \emph{first-order} MP.
More specifically, its derivation is based on the following idea. Suppose that~$u$
is a candidate for an optimal control.
Introduce a new control~$\tilde{u}:[0,T] \to [-1,1]$ by adding a needle variation to~$u $, i.e.
\[
\tilde{u}(t):=\begin{cases} a , &t \in [\tau,\tau+\epsilon),\\
u (t),& \text{otherwise},\end{cases}
\]
where~$a \in[-1,1]$,~$\tau \in [0,T)$ is a Lebesgue point of~$u $,
and~$\epsilon>0$ is sufficiently small, and analyze
the difference~$C(T,\tilde{u})-C(T,u)$ to first-order in~$\epsilon$.
For~$u (t) \equiv 0$,
\[
C(T, \tilde{u})=\exp(A (T-\tau-\epsilon ) ) \exp( (A+a B) \epsilon) \exp(A \tau),
\]
so
\begin{align*}
\left. \frac{d}{d \epsilon} C(T,\tilde{u})\right |_{\epsilon=0} & = a \exp(A (T-\tau ) ) B
\exp(A \tau) .
\end{align*}
Combining this with
known results on the derivative of a simple eigenvalue of a matrix (see, e.g.~\cite[Chapter~6]{mat_ana_sec_ed}) yields
\begin{align}\label{eq:oeps}
\rho(C&(T, \tilde{u})) = \rho(C(T,u )) +\epsilon a w' \exp(A (T-\tau ) ) B
\exp(A \tau)v +o(\epsilon) .
\end{align}
If~$a w' \exp(A (T-\tau ) ) B \exp(A \tau)v >0$ then~$\rho(C(T,\tilde{u})) > \rho(C(T,u ))$
for all sufficiently small~$\epsilon>0$ and thus~$u $ is not optimal. However,
in Example~\ref{exa:mp2} the term multiplying~$\epsilon$ in~\eqref{eq:oeps} is zero
for all~$ a$, $\tau$, and $T$, so a first-order analysis cannot rule out the
possibility that~$u $ is optimal.
Summarizing, Example~\ref{exa:mp2} suggests that there is a need for a higher-order
MP, i.e.,
an MP that takes into account higher-order terms in the Taylor expansion
of~$\rho(C(T,\tilde{u})) - \rho(C(T,u ))$ with respect to~$\epsilon$,
and can thus be used
to rule out the optimality of a larger set of controls.
In the next section, we
apply the generalized Legendre-Clebsch condition to derive
a high-order necessary condition for a singular control to be optimal.
We also
combine known results on the second-order derivative of the Perron root~\cite{Deutsch19841}
and the
Agrachev-Gamkrelidze second-order variation
for bang-bang controls (see, e.g.,~\cite{agrachev-sigalotti}) to derive a \emph{second-order} MP for
bang-bang controls.
The proofs of these results are given in Section~\ref{sec:proofs}.
\section{Main results}
Our first result is a high-order
necessary condition
for singular optimal controls for Problem~\ref{prob:rho}. Without loss of generality (see~\cite{hermes78}),
we assume that the singular control is~$u^*(t) \equiv 0$.
Let~$[P,Q]:=QP-PQ$ denote the Lie-bracket
of the matrices~$P,Q \in \R^{n \times n}$.
\subsection{High-order MP for singular controls}
\begin{Theorem}\label{thm:secsing}
Consider the PBCS~\eqref{eq:pscon}. Suppose that the conditions
of Thm.~\ref{thm:mprho} hold, and
that~$u^*(t) \equiv 0$ is an optimal control.
Then
\begin{equation}\label{eq:abb}
(w^*)'[B,[B,A]] v^* \leq 0.
\end{equation}
\end{Theorem}
\begin{Example}\label{exa:sing_sec}
Consider the specific PBCS with~$n=2$ given in Example~\ref{exa:mp2}.
In this case,
\[
[B,[B,A]] =\begin{bmatrix} 6.8& 18.4\\ 21.4& -6.8\end{bmatrix},
\]
and~$v^*=w^* = \begin{bmatrix}2&1\end{bmatrix} '/\sqrt{5}$,
so
\[
(w^*)'[B,[B,A]] v^* = 20.
\]
It follows from~\eqref{eq:abb} that~$u^*(t)\equiv 0$ is \emph{not} an optimal control.
Note that we were not able to derive this conclusion using the first-order MP
in Thm.~\ref{thm:mprho}.~\qed
\end{Example}
\subsection{Second-order MP for bang-bang controls}
In this section, we derive an Agrachev-Gamkrelidze-type second-order MP for optimal bang-bang controls for Problem~\ref{prob:rho}.
Note that for an optimal bang-bang~$u^*$ we have
\[
C^*(T)=\exp((A+B)\tau_k) \dots\exp((A+B)\tau_2) \exp((A-B)\tau_1) ,
\]
with~$\tau_i\geq 0$ and~$\sum_{i=1}^k \tau_i=T$. Any cyclic shift of~$C^*(T)$, e.g.,
\[
\exp((A-B)\tau_1) \exp((A+B)\tau_k) \dots\exp((A+B)\tau_2)
\]
also corresponds to an optimal control
(as a product of matrices and its cyclic shift have the same spectral radius).
This means that we can always assume that~$t_0:=0$ is a switching point of~$u^*$, and then~\eqref{eq:misperiodic}
implies that~$T$ is also a switching point of~$u^*$.
Let~$ \mathcal P^k$ denote the set of all vectors~$\alpha=\begin{bmatrix} \alpha_0 & \alpha_1&\dots& \alpha_k\end{bmatrix}'\in\ \R^{k+1}$
satisfying
\begin{equation}\label{eq:defpk}
\alpha_1 + \dots + \alpha_k = 0 .
\ee
We can now state the main result in this section.
\begin{Theorem}\label{thm:mainhere}
Suppose that~$u^*$ is an optimal control for Problem~\ref{prob:rho},
that the conditions of Thm.~\ref{thm:mprho} hold, and that the switching function~\eqref{SwitchingFunction} admits
a finite number of zeros at~$ t_0 < t_1 < \dots < t_k $, with~$t_0=0$,~$t_k= T$,
so that~$u^*(t)=r$ for~$t \in (0,t_1)$, $u^*(t)=-r$ for~$t \in ( t_1,t_2)$,
$u^*(t)=r$ for~$t \in ( t_2,t_3)$, and so on,
with~$r \in \{-1,1\}$. Denote~$P:=A+rB$,~$Q:=A-rB$,
and~$\tau_i:=t_i-t_{i-1}$.
Define matrices~$H_i \in \R^{n\times n}$, $i=1,\dots,k $, by
\begin{align}\label{eq:defhis}
H_1&:=P,\\
H_2&:=Q,\nonumber\\
H_3&:=\exp(-\tau_2 Q) P \exp(\tau_2 Q),\nonumber\\
H_4&:=\exp(-\tau_2 Q) \exp(-\tau_3 P)Q \exp(\tau_3 P) \exp(\tau_2 Q),\nonumber\\
H_5&:=\exp(-\tau_2 Q) \exp(-\tau_3 P) \exp(-\tau_4 Q) P \exp(\tau_4 Q) \exp(\tau_3 P) \exp(\tau_2 Q)\nonumber,\\
&\vdots\nonumber
\end{align}
Then
\begin{equation}\label{eq:firsth1}
q'(t_1) \sum_{i=1}^{k } \alpha_i H_i p(t_1)=0, \quad \text{for all } \alpha \in \mathcal P^{k }.
\ee
Furthermore,
\[
r_k(\alpha):=q'(t_1) \sum_{1\leq i < j \leq k } \alpha_i \alpha_j [H_i,H_j] p(t_1)
\]
satisfies
\begin{equation}\label{eq:secoopt}
r_k(\alpha) \leq 0, \quad \text{for all } \alpha \in \mathcal Q^{k },
\ee
where
\be\label{eq:defpkpk}
\mathcal Q^{k } :=\{ \alpha \in \mathcal P^{k } : \sum_{i=1}^{k } \alpha_i H_i p(t_1) =0 \}.
\ee
\end{Theorem}
We refer to the control~$u^*$ defined above as a control with~$k$ bang arcs. As will be shown in the proof, condition~\eqref{eq:firsth1} is a first-order condition (that can also be derived using the first-order MP).
Condition~\eqref{eq:secoopt} however is a second-order condition, and it is meaningful for values~$\alpha$
that make a certain first-order variation vanish, i.e. that belong to~$\mathcal Q^k$.
Note that the conditions in Thm.~\ref{thm:mainhere} are given in terms of~$p(t_1)$ and~$q(t_1)$. It is possible of course
to state them in terms of~$p(t_0)=v^*$ and~$q(t_0)= \rho^* w^*$, but this leads to
slightly more cumbersome expressions.
The next example demonstrates the calculations for a control with two bang arcs.
\begin{Example}\label{exa:1switch}
Consider an optimal control in the form
\[
u^*(t)=\begin{cases}
1,& t \in (0,t_1),\\
-1,& t \in (t_1,T),\end{cases}
\]
where~$0<t_1<T$.
In this case,~\eqref{eq:firsth1} becomes
\[
q'(t_1) (\alpha_1 (A+B) + \alpha_2 (A-B)) p(t_1)=0, \quad \text{for all } \alpha \in \mathcal P^{2 },
\]
and the definition of~$\mathcal P^{2 }$ yields
\[
\alpha_1 q'(t_1) ( (A+B) - (A-B)) p(t_1)=0, \quad \text{for all } \alpha_1 \in \R.
\]
Of course, this is just the conclusion that we can get from the first-order MP, as at the switching point~$t_1$ we must have
\[
0=m(t_1)=q'(t_1) B p(t_1).
\]
The second-order term is
\begin{align*}
r_2(\alpha) &=\alpha_1 \alpha_2 q'(t_1) [H_1,H_2] p(t_1)\\
&=- \alpha_1^2 q'(t_1) [A+B,A-B] p(t_1)\\
&= 2\alpha_1^2 q'(t_1) [ A,B ] p(t_1),
\end{align*}
so~\eqref{eq:secoopt} becomes
\be\label{eq:r2eq}
r_2(\alpha) \leq 0, \quad \text{for all } \alpha \in \mathcal Q^{2 },
\ee
where
\[
\mathcal Q^{2 } =\{ \alpha_1 \in \R : \alpha_1 B p(t_1) =0 \}.
\]
Again, this provides information that can also be derived from the first-order MP, as the fact that~$m(t_1^-)>0$ and~$m(t_1^+)<0$ implies that
\[
\dot m(t_1) \leq 0,
\]
and differentiating~\eqref{SwitchingFunction} yields
\[
\dot m(t_1) = q'(t_1)[A,B] p(t_1) .
\]
Thus,~$q'(t_1)[A,B] p(t_1) \leq 0$, so~\eqref{eq:r2eq} actually holds for all~$\alpha \in \mathcal P^2$.~\qed
\end{Example}
However, for a control with more than two bang arcs the second-order condition does
provide new information.
The next simple example demonstrates this.
\begin{Example}\label{exa:2dsimpl}
Consider the PBCS~\eqref{eq:transzero} with
\begin{align*}
A=\begin{bmatrix} -5/2 & 3/2 \\ 3 & -5/2\end{bmatrix},
\quad
B=\begin{bmatrix} 3/2 & -1/2 \\ 1 & -3/2\end{bmatrix}.
\end{align*}
Note that~$A+kB$ is Metzler for all~$k\in[-1,1]$. Consider the control
\be\label{eq:opcande}
u (t)=\begin{cases}
1,& t \in (0,t_1) \cup (t_2,t_3), \\
-1,& t \in (t_1,t_2) \cup (t_3,T),
\end{cases}
\ee
with~$t_1=1$, $t_2=2$, $t_3=3$, and~$T=4$.
The corresponding transition
matrix is
\[
C(T)=\exp(A-B) \exp(A+B)\exp(A-B) \exp(A+B).
\]
Let~$s:=\left( 9+32\exp( 5 )+9\exp( 10 ) \right)^{1/2}$.
The spectral radius of~$C(T)$ is
\[
\rho=\left ( \frac{ 9+7\exp(5)+9\exp(10)+3s(\exp(5)-1) }{ 25\exp(10) } \right)^2 ,
\]
and it is a simple eigenvalue.
The Perron right and left eigenvectors
of~$C(T)$ are
\[
v=\begin{bmatrix}
\exp(5)-1+s & 2+8\exp(5)
\end{bmatrix}'
\]
and
\[
w=\begin{bmatrix}
\exp(5)-1 + s & 4+\exp(5)
\end{bmatrix}'.
\]
Calculating the switching function~$m$
defined in~\eqref{SwitchingFunction} yields
the behavior depicted in Fig.~\ref{fig:m4s}.
Note that~$m(t)>0$ for $t \in (0,t_1) \cup (t_2,t_3)$,
and~$m(t)<0$ for~$t \in (t_1,t_2) \cup (t_3,T)$, so the control~$u$
satisfies the first-order~MP.
We now show that the second-order MP
implies that~$u$ is \emph{not} an optimal control.
Eq.~\eqref{eq:defhis} yields
\begin{align*}
H_1& =A+B,\\
H_2& =A- B,\nonumber\\
H_3& =\exp(-(A-B)) (A+B) \exp( A-B ),\nonumber\\
H_4& =\exp(-(A-B)) \exp(-(A+B))(A-B) \exp(A+B) \exp(A-B).
\end{align*}
Note that
\begin{align}\label{eq:simh3}
H_3& =\exp(-(A-B)) (A-B+2B) \exp( A-B )\nonumber\\
&=A-B+ 2\exp(-(A-B)) B \exp( A-B ).
\end{align}
\begin{comment}
Similarly,
\begin{align*}
H_4& = \exp(-(A-B)) \exp(-(A+B))(A+B-2B) \exp(A+B) \exp(A-B)\\
&= \exp(-(A-B)) (A+B) \exp(A-B)+\exp(-(A-B)) \exp(-(A+B))( -2B) \exp(A+B) \exp(A-B) \\
&= \exp(-(A-B)) (A-B+2B) \exp(A-B)+\exp(-(A-B)) \exp(-(A+B))( -2B) \exp(A+B) \exp(A-B) \\
&= A-B + 2\exp(-(A-B)) B \exp(A-B)-2\exp(-(A-B)) \exp(-(A+B))B \exp(A+B) \exp(A-B) .
\end{align*}
\end{comment}
Our goal is to find~$\bar \alpha \in \mathcal Q^4$ such that~$r_4(\bar \alpha)>0$. Indeed, this will imply that~$u$ is not optimal.
It turns out that we can find such an~$\bar \alpha$ satisfying~$\bar \alpha_1=1$
and~$\bar \alpha_4=0$. Since~$\sum_{i=1}^4 \bar \alpha_i$ must be zero,
$\bar \alpha_3=-1-\bar \alpha_2$. Then
\begin{align*}
\sum_{i=1}^4\bar \alpha_i H_i & = A+B +\bar \alpha_2(A-B)-( 1+\bar \alpha_2) (A-B+ 2\exp(-(A-B)) B \exp( A-B ))\\
& =2 B -2(1+\bar \alpha_2) \exp(-(A-B)) B \exp( A-B ),
\end{align*}
so
\begin{align*}
\sum_{i=1}^4 \bar \alpha_i H_i p(t_1)
& =2\left ( B - (1+\bar \alpha_2) \exp(-(A-B)) B \exp( A-B )\right )\exp(A+B)v\\
& =2 B \exp(A+B)v -2\sqrt{\rho} (1+\bar \alpha_2) \exp(-(A-B)) B
v.
\end{align*}
A tedious but straightforward calculation shows that
\[
B\exp(A+B)v=\exp(-5)\exp(-(A-B))Bv,
\]
so~$\sum_{i=1}^4 \bar \alpha_i H_i p(t_1) =0$
for
\[
\bar \alpha_2= ( \sqrt{\rho} \exp(5) )^{-1} -1 .
\]
Summarizing,~$\bar \alpha=\begin{bmatrix}\bar \alpha_0& 1& ( \sqrt{\rho} \exp(5) )^{-1}-1 & -( \sqrt{\rho} \exp(5) )^{-1} & 0 \end{bmatrix}' \in \mathcal Q^4$.
The second-order term is
\begin{align*}
r_4(\bar \alpha) &= q'(t_1) \sum_{1\leq i < j \leq 4 } \bar \alpha_i \bar \alpha_j [H_i,H_j] p(t_1)\\
&= q'(t_1) \left( \bar \alpha_2 [H_1,H_2]
+ \bar \alpha_3 [H_1,H_3]
+\bar \alpha_2 \bar \alpha_3 [H_2,H_3]
\right) p(t_1) ,
\end{align*}
and a calculation yields
\[
r_4(\bar \alpha)=\frac{1050 \exp(5) \left( \exp(5) -1\right) \left(12
\left(s -3\right)+\exp(5)
\left(-67+s+\exp(5)\left(67+36
\exp(5)+12 s\right)\right)\right)}{\left(4+\exp(5) \right)
\left(1+4 \exp(5) \right) \left(9-3 s+\exp(5) \left(7+9 \exp(5)
+3 s\right)\right)^2}.
\]
Clearly,~$r_4(\bar \alpha)>0$, so the second-order MP implies that~$u$ in~\eqref{eq:opcande}
is not an optimal control. The reason that~$u$ here
satisfies the conditions of the first-order MP is that it actually \emph{minimizes}
the spectral radius at time~$T=4$.
Thus, the second-order MP plays here a similar role to the second-derivative of a function:
it allows one to distinguish between a maximum point and a minimum point.~\qed
\end{Example}
\begin{figure}
\caption{Switching function $m(t)$ in Example~\ref{exa:2dsimpl}.}
\label{fig:m4s}
\end{figure}
\section{Proofs}\label{sec:proofs}
\subsection{Proof of Thm.~\ref{thm:secsing}}
Assume that~$u^*(t)\equiv 0$ is an optimal control.
The corresponding solution of~\eqref{eq:transzero} is~$C^*(T)=\exp(AT)$.
For~$\epsilon>0$, consider the control
\be\label{eq:tildeueps}
\tilde{u}(t):= \begin{cases}
0 , & t \in [0, T-4\epsilon^{1/3}), \\
-1 , & t \in [T-4\epsilon^{1/3} ,T- 3 \epsilon^{1/3}), \\
1 , & t \in [T- 3 \epsilon^{1/3} ,T- \epsilon^{1/3}), \\
-1 , & t \in [T- \epsilon^{1/3} , T). \\
\end{cases}
\ee
Then
\begin{align*}
C(T,\tilde{u})&= \exp((A-B)\epsilon^{1/3})\exp((A+B)2 \epsilon^{1/3}) \exp((A-B)\epsilon^{1/3})\exp(A (T-4\epsilon^{1/3}))\\
&= \exp((A-B)\epsilon^{1/3})\exp((A+B)2 \epsilon^{1/3}) \exp((A-B)\epsilon^{1/3})\exp( -4\epsilon^{1/3} A) C^*(T),
\end{align*}
and it follows from the computation in~\cite[p. 719]{hermes78} (see also~\cite{krener}) that
\be\label{eq:e13}
C(T,\tilde{u})= \exp(\frac{2\epsilon}{3} [B,[B,A]] )C^*(T)+o(\epsilon).
\ee
Note that this implies that any result
derived using~$\tilde{u}$ will be a high-order MP, as
the width of the needle variations in~\eqref{eq:tildeueps} is of order~$\epsilon^{1/3}$
yet the perturbation in~$C(T,\tilde{u})$ with respect to~$C^*(T)$
is of order~$\epsilon$.
By~\eqref{eq:e13},
\[
\frac{d}{d \epsilon} C(T,\tilde{u}) |_{\epsilon=0} = (2/3) [B,[B,A]] C^*(T) ,
\]
so
\begin{align*}
\rho(C(T,\tilde{u})&)-\rho(C^*(T))\\& = (2\epsilon/3) (w^*)' [B,[B,A]] C^*(T) v^* +o(\epsilon)\\
& = (2\epsilon/3) (w^*)' [B,[B,A]] \rho(C^*(T))v^* +o(\epsilon).
\end{align*}
If~$(w^*)' [B,[B,A]] v^* > 0$ then~$\rho(C(T,\tilde{u})) > \rho(C^*(T))$ for all sufficiently
small~$\epsilon>0$, and this contradicts the optimality of~$u^*$.
This proves~\eqref{eq:abb}.~\qed
\subsection{Proof of Theorem~\ref{thm:mainhere}}
The proof is based on introducing a new control defined by
a
perturbation of the switching times~$ t_0,\dots,t_k$ to
$\tilde{t}_0 :=t_0+s \alpha_0$, $\tilde{t}_1: =t_1+s
(\alpha_0+\alpha_1)$,
\dots,
$\tilde{t}_{k }: =t_{k }+s
(\alpha_0+\alpha_1+ \dots+ \alpha_{k })$.
Here, $s\in \R$ and~$ \alpha \in
\mathcal P^{k }$.
Define~$ \tilde{u}(t;s, \alpha)$ by
$\tilde{u}(t)=r$ for $t\in (\tilde{t}_0,\tilde{t}_1)$, $\tilde{u}
(t)=-r$ for $t\in ( \tilde{t}_1,\tilde{t}_2)$, and so on.
Note that~\eqref{eq:defpk} implies that
the time length of the perturbed control is
\[
\tilde{t}_k-\tilde{t}_0= t_{k }+s
(\alpha_1+ \dots + \alpha_{k })-t_0 = t_k-t_0 =T.
\]
Denote the corresponding transition matrix
by~$\tilde{ C}(t)=\tilde{ C}(t;s, \alpha)$.
Note also that~$\tilde{u}(\cdot;0,\alpha)=u^*(\cdot)$ for any~$\alpha$, so~$\tilde{C}(\cdot;0,\alpha)=C^*(\cdot)$.
Our goal is to derive an expression
for the difference~$ e(s,\alpha):=\rho(\tilde{C}(T;s,\alpha))-\rho(C^*(T))$
in the form
\be\label{eq:deffsecorder}
e(s,\alpha) =s z_1(\alpha) +\frac{s^2}{2} z_2(\alpha) +o(s^2),
\ee
where~$o(s^2) $ denotes a function~$f $ that
satisfies~$\lim_{s\to 0} \frac{f(s)}{s^2}=0$.
Suppose for a moment that~$z_1(\alpha)>0$ [$z_1(\alpha)<0$] for some~$\alpha \in \mathcal P^{k }$.
Then for any sufficiently small~$s>0$ [$s<0$],~\eqref{eq:deffsecorder} implies that
$\rho(C (T;s,\alpha) ) > \rho(C^*(T))$. This contradicts the optimality of~$u^*$, so
\begin{equation}\label{eq:pisz}
z_1(\alpha)=0,\quad \text{for all } \alpha \in \mathcal P^{k }.
\ee
Thus,~$ e(s,\alpha)=\frac{s^2}{2} z_2(\alpha) +o(s^2),$
and a similar argument implies that the second-order term must satisfy~$z_2(\alpha)\leq 0$.
As we will see below, these conditions lead to the first- and second-order optimality conditions~\eqref{eq:firsth1} and~\eqref{eq:secoopt}.
The calculation of the terms~$z_1$ and~$z_2$ in~\eqref{eq:deffsecorder} requires two steps.
The first is to derive an expression for the first- and second-order derivative of~$\tilde{C}(T;s,\alpha)$ with respect to~$s$.
This is based on the Agrachev-Gamkrelidze second-order variation for bang-bang controls~\cite{agrachev-sigalotti} (see also~\cite{ratmansky,yoav}).
The second step
is to derive an expression for the first- and second-order
derivatives of the spectral radius of a matrix with respect to perturbations of the matrix entries.
This follows the approach in~\cite{Deutsch19841}.
\subsection{First- and second-order derivatives of the transition matrix}
From here on we
consider
the case where~$k$ is even. (The derivations in the case where~$k$ is odd are similar.)
Thus,
\begin{equation}\label{eq:xt}
\tilde{ C}(T;s,\alpha)=
\exp( \tilde{\tau}_{k} Q) \exp (\tilde{\tau}_{k-1} P)
\dots
\exp (\tilde{\tau}_2 Q ) \exp (\tilde{\tau}_1 P) ,
\ee
where~$\tilde{\tau}_i: =\tilde{t}_i-\tilde{t}_{i-1}=\tau_i+s
\alpha_i$. Let~$\dot{ \tilde{C}}: =\frac{d}{ds} \tilde{C} $.
\begin{Proposition}\label{prop:ijuas}
The first- and second-order derivatives of~$ \tilde{C}(T;s,\alpha )$ with respect to~$s$ satisfy
\begin{align} \label{eq:ghis}
\tilde{H} \dot{ \tilde{C}} &= \sum_{i=1}^{k }\alpha_i \tilde{G}_i, \nonumber \\
\tilde{H} \ddot{ \tilde{ C}}&=
- \dot{\tilde{H}} \dot{ \tilde{ C}} + \sum_{1\leq i< j \leq k } \alpha_i \alpha_j [ \tilde{G}_i, \tilde{G}_j],
\end{align}
where
\begin{align}\label{eq:defgis}
\tilde{G}_1&:= \exp(-\tilde{\tau}_1 P)P\exp(\tilde{\tau}_1 P)=P,\nonumber\\
\tilde{G}_2&:= \exp(-\tilde{\tau}_1 P) Q \exp(\tilde{\tau}_1 P),\nonumber\\
\tilde{G}_3&:= \exp(-\tilde{\tau}_1 P) \exp(-\tilde{\tau}_2 Q) P \exp( \tilde{\tau}_2 Q) \exp(\tilde{\tau}_1 P) ,\\
&\vdots\nonumber
\end{align}
and
$\tilde{H} = \tilde{H}(s, \alpha) := \exp(-\tilde{\tau}_1 P) \exp(-\tilde{\tau}_2 Q)
\dots \exp(-\tilde{\tau}_{k } Q) $.
\end{Proposition}
{\sl Proof.}
Rewrite~\eqref{eq:xt} as
\[
I = \exp(-\tilde{\tau}_1 P) \exp(-\tilde{\tau}_2 Q) \dots
\exp(-\tilde{\tau}_{k-1} P)
\exp(-\tilde{\tau}_{k } Q)
\tilde{ C} .
\]
Differentiating both sides with respect to~$s$
and rearranging terms yields
\begin{align} \label{eq:dds}
\tilde{H} \dot{ \tilde{C}} & =
\alpha_1 P \tilde{C}_0 \\
& +\alpha_2 \exp(-\tilde{\tau}_1 P)
Q \tilde{ C}_1
\nonumber \\
& +\alpha_3 \exp(-\tilde{\tau}_1 P) \exp(-\tilde{\tau}_2 Q)
P \tilde{ C}_2
\nonumber \\
& \; \vdots \nonumber \\
& + \alpha_{k } \exp(-\tilde{\tau}_1 P) \exp(-\tilde{\tau}_2 Q )
\dots \exp(-\tilde{\tau}_{k-1} P)
Q \tilde{ C}_{k-1} , \nonumber
\end{align}
where~$\tilde{ C}_0 : =I$, $\tilde{ C}_1 : =\tilde{C}( \tilde{\tau}_1)$, $\tilde{ C}_2=\tilde{C}(\tilde{\tau}_1+\tilde{\tau}_2)$, and so on.
This yields the first equation in~\eqref{eq:ghis}.
Differentiating~\eqref{eq:dds} with respect to~$s$
yields
\begin{align*}
\dot{\tilde{H}} \dot{ \tilde{ C}} + \tilde{H}
\ddot{ \tilde{ C}} =
\sum_{i=1}^{k-1} \sum_{j=i+1} ^k \alpha_i \alpha_{j } [ \tilde{G}_i, \tilde{G}_{j }],
\end{align*}
and this completes the proof of Prop.~\ref{prop:ijuas}.
\qed
The next step is to determine the first- and second-order derivatives of~$
\tilde{\rho}(s,\alpha)= \rho(\tilde{C} (T;s,\alpha))$ with respect to~$s$.
\subsection{First- and second-order derivatives of the spectral radius}
Let~$\tilde{v}=\tilde{v}(s,\alpha)$, $\tilde{w}=\tilde{w}(s,\alpha)$ denote a nonnegative right and a
left eigenvector of~$ \tilde{C} (T)$
corresponding to the eigenvalue~$\tilde{\rho}$, and
normalized so that~$ \tilde{w}'\tilde{v}=1$. Note that
since~$\rho^*$ is simple,
the spectral radius of~$\tilde C(T)$
will also be simple for all~$|s|$ sufficiently small.
For a matrix~$D$,
let~$ {D}^ \#$ denote the Drazin inverse of~$D$.
\begin{Proposition}\label{prop:ehyn}
The first- and second-order derivatives of~$\tilde{\rho}$ with respect to~$s$ satisfy
\begin{align}\label{eq:rhofp}
\dot{\tilde{\rho}} |_{s=0}&= (w^*)'\left( \dot{\tilde{C}} |_{s=0}\right) v^*,\\
\ddot{\tilde{\rho}}|_{s=0} & = (w^*)' \left( \ddot{\tilde{C}} |_{s=0} \right )
v^* \nonumber\\&+ 2 (w^*)' \left(\dot{\tilde{C}} |_{s=0} \right) (\rho^*I - C^*)^ \# \left(\dot{\tilde{C}}|_{s=0} \right) v^* .\nonumber
\end{align}
\end{Proposition}
{\sl Proof.} Differentiating the equation
$
\tilde{C} \tilde{v}=\tilde{\rho} \tilde{v}
$
with respect to~$s$ yields
\be\label{eq:firstdertemp}
\dot{ \tilde{C}}{\tilde{v}} + \tilde{C} \dot{\tilde{v} } =\dot{\tilde{\rho}} \tilde{v} + \tilde{\rho} \dot{\tilde{v}} .
\ee
Multiplying on the left by~$\tilde{w}'$ and using the fact that~$\tilde{w}' \tilde{C}=
\tilde{\rho}\tilde{w}'$ yields
\[
\tilde{ w}' \dot{\tilde{C}} \tilde{v} +\tilde{ \rho}
\tilde{w}' \dot{\tilde{v} }=\dot{\tilde{\rho}} + \tilde{\rho} \tilde{ w}' \dot{\tilde{v}} ,
\]
so
\be\label{eq:der1}
\dot{\tilde{\rho}}= \tilde{ w}' \dot{\tilde{C}} \tilde{v}.
\ee
This proves~\eqref{eq:rhofp}.
To calculate the second-order derivative, differentiate~\eqref{eq:firstdertemp} with respect to~$s$.
This yields
\[
\ddot{\tilde{C}} \tilde{v} + 2 \dot{\tilde{C}} \dot{\tilde{v} } +\tilde{ C}\ddot{\tilde{v }} = \ddot{\tilde{\rho}} \tilde{ v} +2\dot{\tilde{\rho}} \dot{\tilde{v}}+\tilde{ \rho} \ddot{\tilde{v}} .
\]
Multiplying on the left by~$\tilde{w}'$ yields
\[
\tilde{w}' \ddot{\tilde{C}} \tilde{v }+ 2 \tilde{w}' \dot{\tilde{C}} \dot{\tilde{v} } +\tilde{\rho}\tilde{ w}' \ddot{\tilde{v} } = \ddot{\tilde{\rho}} +2\dot{\tilde{\rho}} \tilde{w}' \dot{\tilde{v}}+
\tilde{\rho} \tilde{ w}' \ddot{\tilde{v}} ,
\]
so
\begin{align} \label{eq:tempder2}
\ddot{\tilde{\rho}} & = \tilde{ w}' \ddot{\tilde{C}}\tilde{ v}
+ 2 \tilde{w}' \dot{\tilde{C}} \dot{\tilde{v} } -2\dot{\tilde{\rho}}\tilde{w}' \dot{\tilde{v}} \nonumber\\
& = \tilde{w}' \ddot{\tilde{C}}\tilde{ v}
+ 2 \tilde{w}' \dot{\tilde{C}} \dot{\tilde{v} } -2\left( \tilde{w}' \dot{\tilde{C}}
\tilde{v} \right) \tilde{w}' \dot{\tilde{v}}\nonumber \\
& = \tilde{w}' \ddot{\tilde{C}}\tilde{ v}
+ 2 \tilde{w}' \dot{\tilde{C}} \left( I-\tilde{v}\tilde{w}' \right ) \dot{\tilde{v}},
\end{align}
where the second equation follows from~\eqref{eq:der1}.
To simplify this expression, let~$\tilde{D}:=\tilde{\rho} I-\tilde{C}$. It follows from~\eqref{eq:firstdertemp} that
\be\label{eq:somlp}
\tilde{D} \dot{\tilde{v}}= \dot{\tilde{C}} \tilde{v} - \dot{\tilde{\rho}} \tilde{v} .
\ee
Since~$\rho^*$ is a simple eigenvalue of~$C^*$,
$\tilde{\rho}$ is a simple eigenvalue of~$\tilde{C}$ for all~$|s|$ sufficiently small, so
zero is a \emph{simple} eigenvalue of~$\tilde{D}$. Hence, there exists a unique generalized inverse~$\tilde{D}^ \#$ of~$\tilde{D}$ satisfying
\begin{equation}\label{eq:geninv}
\tilde{D}\tilde{D}^ \# =\tilde{D}^ \# \tilde{D} , \;\;
\tilde{D} \tilde{D}^ \# \tilde{D}= \tilde{D}, \;\;
\tilde{D}^ \# \tilde{D} \tilde{D}^ \#=\tilde{D}^ \#.
\ee
Multiplying~\eqref{eq:somlp} on the left by~$\tilde{D}^ \# $
and using the fact that~$\tilde{D}^ \# \tilde{v} =0$ and~\eqref{eq:geninv} yields
\[
\tilde{D} \tilde{D}^ \# \dot{\tilde{v}}=
\tilde{D}^ \# \dot{\tilde{C}} \tilde{ v} .
\]
It is not difficult to show that~$I- \tilde{D} \tilde{D}^ \# =\tilde{v} \tilde{w}'$,
so
\[
(I-\tilde{v}\tilde{w}') \dot{\tilde{v}}= \tilde{D}^ \# \dot{\tilde{C}} \tilde{v} .
\]
Multiplying this on the left by~$\tilde{w}'\dot{\tilde{C}}$ yields
\[
\tilde{w}'\dot{\tilde{C}} (I-\tilde{v}\tilde{w}') \dot{\tilde{v}}=
\tilde{w}'\dot{\tilde{C}} \tilde{D}^ \# \dot{\tilde{C}} \tilde{ v} ,
\]
and substituting this in~\eqref{eq:tempder2} yields~$\ddot{\tilde{\rho}} = \tilde{ w}' \ddot{\tilde{C}} \tilde{v} + 2 \tilde{ w}'\dot{\tilde{C}} \tilde{D}^ \# \dot{\tilde{C}}\tilde{ v}$.
Setting~$s =0$ completes the proof of Prop.~\ref{prop:ehyn}.~\qed
We can now prove Thm.~\ref{thm:mainhere}. Combining
\eqref{eq:deffsecorder}, \eqref{eq:ghis}, and~\eqref{eq:rhofp} yields
\[
z_1(\alpha)= \sum_{i=1}^{k } \alpha_i (w^*)'(H(0,\alpha) )^{-1}G_i v^*,
\]
where~$G_i:=\tilde{G}_i |_{s=0}$. It follows from~\eqref{eq:defgis} and~\eqref{eq:firsth1} that~$G_i= \exp(-\tau_1 P)H_i\exp(\tau_1 P)$.
The definition of~$q$ in~\eqref{Qeq} implies that
\be\label{eq:wth}
(w^*)'= q'(T)=q'(0)H(0,\alpha),
\ee
so
\begin{align*}
z_1(\alpha)&= \sum_{i=1}^{k } \alpha_i q'(0) G_i p(0)\\
&= \sum_{i=1}^{k } \alpha_i q'(t_1) H_i p(t_1).
\end{align*}
Combining this with~\eqref{eq:pisz} proves~\eqref{eq:firsth1}. Note that the proof so far used only first-order derivatives with respect to~$s$.
To prove~\eqref{eq:secoopt}, fix an arbitrary~$\alpha \in \mathcal Q^{k }$. Then
by~\eqref{eq:defpkpk},~$ \sum_{i=1}^{k } \alpha_i H_i p(t_1) =0$
and this implies that~$ \sum_{i=1}^{k } \alpha_i G_i p(0)=0 $, so Prop.~\ref{prop:ijuas} yields
$
\tilde{ H}(0,\alpha)\left( \dot{\tilde {C}} |_{s=0}\right) v^* = 0,
$
i.e.,
\begin{equation} \label{eq:notosub}
\left( \dot{\tilde {C}} |_{s=0}\right) v^* = 0.
\ee
Substituting this in~\eqref{eq:ghis} yields
\begin{align*}
\tilde{H} (0,\alpha)\left( \ddot{ \tilde{ C}}|_{s=0}\right) v^*&=
\sum_{1\leq i< j \leq k } \alpha_i \alpha_j [ {G}_i, G _j]v^*,
\end{align*}
and multiplying on the left by~$q'(0)$ gives
\[
(w^*)'\left( \ddot{ \tilde{ C}}|_{s=0}\right) v^* =
q'(0) \sum_{1\leq i< j \leq k } \alpha_i \alpha_j [ {G}_i, G _j]v^*.
\]
On the other hand, substituting~\eqref{eq:notosub} in~\eqref{eq:rhofp} yields
\begin{align*}
\ddot{\tilde{\rho}} |_{s=0} & = (w^*)' \left( \ddot{\tilde{C}} |_{s=0} \right )
v^* ,
\end{align*}
so
\begin{align*}
z_2(\alpha)& = q'(0) \sum_{1\leq i< j \leq k } \alpha_i \alpha_j [ {G}_i, G _j]p(0)\\
& = q'(t_1) \sum_{1\leq i< j \leq k } \alpha_i \alpha_j [ H_i, H _j]p(t_1)
\end{align*}
and this proves~\eqref{eq:secoopt}.
This completes the proof of Thm.~\ref{thm:mainhere}.~\qed
\section{Conclusions}
The GUAS problem for switched systems is difficult because of the huge number
of possible switching laws. This led to the variational approach that
is based on trying to characterize a ``most destabilizing'' switching law.
For a PLSS, every possible switching law generates
a positive transition matrix and the problem of finding the
``most destabilizing'' switching law is equivalent to
finding the switching law that maximizes the spectral radius
of the transition matrix.
In the relaxed version of a PLSS, i.e.
a PBCS, this yields a well-defined optimal control problem, namely,
for a fixed~$T>0$, find a control~$u$
that maximizes the spectral radius
of the transition matrix at time~$T$. A first-order necessary condition
for optimality
has been derived in~\cite{lior_SIAM}.
In this paper, we derived a high-order
necessary optimality condition
for a singular control and for a bang-bang control. We demonstrated these
conditions using simple examples.
We are currently trying to use these high-order conditions
to derive new
theoretical results on the structure of the optimal control in specific problems.
The main technical difficulty is that in general the Perron right and left
eigenvectors~$v^*$ and~$w^*$ that appear in the optimality conditions
are complicated functions of the entries of the transition matrix.
\section{Appendix}
\emph{Proof of Thm.~\ref{thm:starho}.}
Pick~$t\geq 0$. Let $\Sigma^t:=\{C(t,u) : u \in \mathcal{U}\}$, i.e., the set of all possible transition matrices at time~$t$.
The definition of~$\mathcal U$ implies that~$\Sigma^t$ is compact.
Note that~\eqref{eq:rhot} yields
\[
\rho(A,B)=\limsup_{t\to \infty }\rho_t(A,B)=\limsup_{t\to \infty } \max_{M \in \Sigma^t} (\rho(M))^{1/t}.
\]
Pick a multiplicative matrix norm~$||\cdot||:\R^{n\times n} \to \R_+$.
By the
\emph{Joint Spectral Radius Theorem} (see, e.g.,~\cite[Ch.~2]{Jungers}),
\begin{align*}
\rho(A,B)
&=\limsup_{t \rightarrow \infty} \{ ||M||^{1/t} : M \in \Sigma^t \}\\
&=\limsup_{t \rightarrow \infty} \{ ||C(t,u)||^{1/t} : u \in \mathcal U \}.
\end{align*}
If~$\rho (A,B) <1$
then
\[
\limsup_{t \rightarrow \infty} \{ ||C(t,u)|| : u \in \mathcal U \} =0,
\]
so~$\lim_{t \rightarrow \infty} C(t,u) = 0$ for all~$u \in \mathcal U$,
and this implies GAS of the PBCS~\eqref{eq:pscon}.
Now suppose that~$\rho(A,B) \geqslant 1$.
By~\cite[p.~22, Corollary~1.2]{Jungers},
there exists a sequence~$R_1,R_2,\dots$ in~$\Sigma^t$ such that
\[
\lim_{k\to\infty} || R_k\dots R_1 ||^{1/k} \geq 1.
\]
In other words, there exists a sequence of controls~$u_i \in \mathcal U$
such that
\[
\lim_{k\to\infty} || C(t,u_k) \dots C(t,u_1) ||^{1/k} \geq 1.
\]
Note that~$C(t,u_k) \dots C(t,u_1)$ is the transition matrix at time~$kt$
for the control obtained by concatenating all the~$u_i$s.
Thus, the PBCS is not GAS. This completes the proof.~\qed
\end{document}
|
\begin{document}
\sloppy
\newenvironment{proo}{\begin{trivlist} \item{\sc {Proof.}}}
{
$\square$ \end{trivlist}}
\long\def\symbolfootnote[#1]#2{\begingroup
\def\thefootnote{\fnsymbol{footnote}}\footnote[#1]{#2}\endgroup}
\title{Deformation theory of Lie bialgebra properads}
\author{Sergei~Merkulov}
\address{Sergei~Merkulov: Mathematics Research Unit, Luxembourg University, Grand Duchy of Luxembourg }
\email{[email protected]}
\author{Thomas~Willwacher}
\address{Thomas~Willwacher: Institute of Mathematics, University of Zurich, Zurich, Switzerland}
\email{[email protected]}
\begin{abstract}
We compute the homotopy derivations of the properads governing even and odd Lie bialgebras as well as involutive Lie bialgebras.
The answer may be expressed in terms of the Kontsevich graph complexes.
In particular, this shows that the Grothendieck-Teichm\"uller group acts faithfully (and essentially transitively) on the completions of the properads governing even Lie bialgebras and involutive Lie bialgebras, up to homotopy. This shows also that by contrast to the even case the properad governing odd Lie bialgebras admits precisely one non-trivial automorphism --- the standard rescaling automorphism,
and that it has precisely one non-trivial deformation which we describe explicitly.
\end{abstract}
\maketitle
{\large
\section{\bf Introduction}
}
\label{sec:introduction}
\subsection{Deformation theory of Lie bialgebras and graph complexes} An {\em even}\, Lie bialgebra is a vector space which carries both a Lie algebra and a Lie coalgebra structure of the same ${\mathbb Z}_2$ parity, satisfying a certain compatibility relation. Lie bialgebras were introduced by Drinfeld in \cite{D1} in the context of the
theory of Yang-Baxter equations. They have since seen numerous applications, in particular in the theory
of Hopf algebra deformations of universal enveloping algebras, cf. \cite{ES} and
references therein.
If the composition of the cobracket and bracket of a Lie bialgebra is zero, the Lie bialgebra is called \emph{involutive}.
This additional condition is satisfied in many interesting examples studied in homological algebra, string topology, symplectic field theory, Lagrangian Floer theory of higher genus, and the theory of cohomology groups $H({\mathcal M}_{g,n})$ of moduli spaces of algebraic curves with labelings of punctures {\em skewsymmetrized} \cite{Ch,CFL,MW,Tu, Sch}.
{\em Odd}\, Lie bialgebras are, by definition, the ones in which Lie brackets and cobrackets have opposite ${\mathbb Z}_2$
parities. They have seen applications in Poisson geometry \cite{Me1}, deformation quantization
of Poisson structures
\cite{Me2} and, most surprisingly, in the theory of cohomology groups $H({\mathcal M}_{g,n})$ of moduli spaces of algebraic curves with labelings of punctures {\em symmetrized}\, \cite{MW}.
We study in this paper the deformation theory of Lie bialgebras in both the even and odd cases.
Let $\mathcal{L}\mathit{ieb}_{c,d}$ denote the properad governing Lie bialgebras with Lie brackets in degree $1-c$
and Lie cobrackets in degree $1-d$ so that the case $c+d\in 2{\mathbb Z}$ corresponds to even Lie bialgebras, and the case $c+d\in 2{\mathbb Z}+1$ to odd ones. The involutivity condition is non-trivial only in the even case, and we denote by $\mathcal{L}\mathit{ieb}^\diamond_{c,d}$ (with $c+d\in 2{\mathbb Z}$ by default) the properad governing involutive Lie bialgebras.
The main purpose of this note is to finish the study of the homotopy derivations of the properads $\mathcal{L}\mathit{ieb}_{c,d}$ and $\mathcal{L}\mathit{ieb}^\diamond_{c,d}$ initiated in \cite{CMW} where a minimal resolution $\mathcal{H}\mathit{olieb}^\diamond_{c,d}$ of
$\mathcal{L}\mathit{ieb}^\diamond_{c,d}$ was constructed; minimal resolutions $\mathcal{H}\mathit{olieb}_{c,d}$ of $\mathcal{L}\mathit{ieb}_{c,d}$ have been found
earlier in \cite{Ko,MaVo} for $c+d\in 2{\mathbb Z}$ and in \cite{Me1,Me2} for $c+d\in 2{\mathbb Z} +1$.
Informally speaking, one question we want to answer is which universal deformations of any (involutive) Lie bialgebra structure one can construct, using only the bracket and cobracket operations.
The answers may be formulated in terms of the cohomology of the oriented graph complex $\mathsf{GC}^{or}_d$,
whose elements are linear combinations of isomorphism classes of directed graphs with no closed paths of directed edges.
More concretely, in \cite{CMW} the authors describe maps of dg Lie algebras
$$
F \colon \mathsf{GC}_{c+d+1}^{or}\to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{c,d}) \ \ \ \ , \ \ \ \
F^\diamond \colon \mathsf{GC}_{c+d+1}^{or}[[\hbar]]\to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\,\diamond}_{c,d}),
$$
where the symbol\ $\widehat{\ \ \ \ \ \ }$\ stands for the genus completion, $\hbar$ for a formal variable of homological degree $c+d$, and $\mathrm{Der}({\mathcal P})$ for the dg Lie algebra of (continuous) derivations of a dg properad ${\mathcal P}$.
Precise definitions of all the properads and the graph complexes and in particular the dg Lie algebra structures on $\mathsf{GC}^{or}_{c+d+1}$ will be recalled in \S 2 below. Explicit formulae for the morphisms $F$ and $F^\diamond$ as well as for the dg Lie algebra structures in $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{c,d})$ and $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\,\diamond}_{c,d})$ are given in \S 3.
\subsection{Main Theorems}
The main results of the present note are the following two Theorems which are proven in \S 4.
\subsubsection{\bf Theorem} \label{thm:Fqiso}
{\em For any $c,d\in {\mathbb Z}$ the map
$$
F \colon \mathsf{GC}_{c+d+1}^{or}\to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{c,d})
$$
is a quasi-isomorphism, up to one class in $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{c,d})$
represented by the series
$\displaystyle
\sum_{m,n}(m+n-2)
\overbrace{
\underbrace{
\begin{array}{c}\resizebox{6mm}{!} {\xy
(0,4.5)*+{...},
(0,-4.5)*+{...},
(0,0)*{\bullet}="o",
(-5,5)*{}="1",
(-3,5)*{}="2",
(3,5)*{}="3",
(5,5)*{}="4",
(-3,-5)*{}="5",
(3,-5)*{}="6",
(5,-5)*{}="7",
(-5,-5)*{}="8",
\ar @{-} "o";"1" <0pt>
\ar @{-} "o";"2" <0pt>
\ar @{-} "o";"3" <0pt>
\ar @{-} "o";"4" <0pt>
\ar @{-} "o";"5" <0pt>
\ar @{-} "o";"6" <0pt>
\ar @{-} "o";"7" <0pt>
\ar @{-} "o";"8" <0pt>
\endxy}\end{array}
}_{n\times}
}^{m\times}.
$
}
\subsubsection{\bf Theorem}\label{thm:Fhbarqiso}
{\em For any $c,d\in {\mathbb Z}$ with $c+d\in 2{\mathbb Z}$ the map
$$
F^\diamond \colon \mathsf{GC}_{c+d+1}^{or}[[\hbar]]\to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\,\diamond}_{c,d})
$$
is a quasi-isomorphism, up to classes $T{\mathbb K}[[\hbar]]\in \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\,\diamond}_{c,d})$, generated over ${\mathbb K}[[\hbar]]$ by the series
\[
T=
\sum_{m,n,p}(m+n+2p-2) \hbar^{p}
\overbrace{
\underbrace{
\begin{array}{c}\resizebox{8mm}{!} { \xy
(0,4.5)*+{...},
(0,-4.5)*+{...},
(0,0)*+{_p}*\cir{}="o",
(-5,5)*{}="1",
(-3,5)*{}="2",
(3,5)*{}="3",
(5,5)*{}="4",
(-3,-5)*{}="5",
(3,-5)*{}="6",
(5,-5)*{}="7",
(-5,-5)*{}="8",
\ar @{-} "o";"1" <0pt>
\ar @{-} "o";"2" <0pt>
\ar @{-} "o";"3" <0pt>
\ar @{-} "o";"4" <0pt>
\ar @{-} "o";"5" <0pt>
\ar @{-} "o";"6" <0pt>
\ar @{-} "o";"7" <0pt>
\ar @{-} "o";"8" <0pt>
\endxy}\end{array}
}_{n\times}
}^{m\times}.
\]
}
\subsection{Some applications} The most important cases for applications correspond to (i) the case $c=d=1$ where one deals
with the classical (i.e.\ the ones in which with all the generators have homological degree zero) Lie bialgebra properads $\mathcal{L}\mathit{ieb}:=\mathcal{L}\mathit{ieb}_{1,1}$ and $\mathcal{L}\mathit{ieb}^\diamond:=\mathcal{L}\mathit{ieb}^\diamond_{1,1}$, and the case (ii) $c=0$, $d=1$ where one gets a properad $\mathcal{L}\mathit{ieb}_{odd}:= \mathcal{L}\mathit{ieb}_{0,1}$ of odd Lie bialgebras which has the property \cite{Me1,Me2} that the representations of its minimal resolution $\mathcal{H}\mathit{olieb}_{odd}$ in a graded
vector space $V$ are in 1-1 correspondence with formal graded Poisson structures $\pi \in {\mathcal T}_{poly}^{\geq 1}(V)$ on $V$
which vanish at $0\in V$.
As has been shown in \cite{Wi1,Wi2} the cohomology of the oriented graph complexes $H(\mathsf{GC}^{or}_{d+1})$ may be identified with the cohomology of the ``plain" graph complexes\footnote{The superscript $2$ in the symbol $\mathsf{GC}_d^2$ means that we consider graphs with at least bivalent vertices, while the symbol $\mathsf{GC}_d$ is reserved traditionally for a complex of graphs with at least trivalent vertices.} $\mathsf{GC}_{d}^2$ introduced
(for $d=2$) by Kontsevich in \cite{Kon}. The latter complexes have been studied in \cite{Wi1} where it was proven, in particular, that there is an isomorphism
of Lie algebras,
$$
H^0(\mathsf{GC}_{2}^2)= {\mathfrak g}{\mathfrak r}{\mathfrak t}_1,
$$
where ${\mathfrak g}{\mathfrak r}{\mathfrak t}_1$ is the Lie algebra of the Grothendieck-Teichm\"uller group $GRT_1$ introduced by Drinfeld in
\cite{D2}. Then, according to \cite{Wi1,Wi2} and, respectively, \cite{CMW}, one concludes that
$$
H^0(\mathsf{GC}^{or}_{3})={\mathfrak g}{\mathfrak r}{\mathfrak t}_1 \ \ \mbox{and}\ \ \ H^0({\mathsf G}{\mathsf C}_3^{or}[[\hbar]])={\mathfrak g}{\mathfrak r}{\mathfrak t}_1
$$
and hence obtains (as corollaries to the Main Theorems {\ref{thm:Fqiso}} and {\ref{thm:Fhbarqiso}} above) the faithful actions of the group $GRT_1$ on the completed properads $\widehat{\mathcal{H}\mathit{olieb}}$ and $\widehat{\mathcal{H}\mathit{olieb}}^{\,\diamond}$
and hence on their representations (the precise meaning of representation of a {\em completed}\, properad is given in \S {\ref{3: subsect on GCor_3 to Der(LieB)}} below).
It is a folklore conjecture that the cohomology group $H^1(\mathsf{GC}_{2}^2)=H^1(\mathsf{GC}^{or}_{3})$ vanishes; if proven, the Main Theorems would imply that the properads $\mathcal{H}\mathit{olieb}$ and $\mathcal{H}\mathit{olieb}^\diamond$ and their completions
are {\em rigid}, i.e.\ admit no nontrivial deformations.
By contrast, one has $H^0(\mathsf{GC}^{or}_2)=0$, $H^1(\mathsf{GC}^{or}_2)={\mathbb K}$ and $H^2(\mathsf{GC}^{or}_2)={\mathbb K}$. Then the Main Theorem {\ref{thm:Fqiso}} says
that the properad $\mathcal{H}\mathit{olieb}_{odd}$ and its genus completion admits precisely one homotopy non-trivial automorphism -- the standard rescaling automorphism, and that its completed
version $\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ has precisely {one non-trivial} deformation which we describe explicitly in \S {\ref{4: subsec on applications}}. This unique deformed version of $\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ leads to the notion of {\em quantizable Poisson structures} \cite{MW3, KMW}, the ones whose deformation quantization is a trivial procedure not requiring a choice of an associator as in the case of deformation quantization of ordinary Poisson structures.
\subsection{Some notation}
The set $\{1,2, \ldots, n\}$ is abbreviated to $[n]$; its group of automorphisms is
denoted by ${\mathbb S}_n$;
the trivial one-dimensional representation of
${\mathbb S}_n$ is denoted by ${\mbox{1 \hskip -8pt 1}}_n$, while its one dimensional sign representation is
denoted by ${\mathit s \mathit g\mathit n}_n$.
The cardinality of a finite set $A$ is denoted by $\# A$.
We work throughout in the category of ${\mathbb Z}$-graded vector spaces over a field ${\mathbb K}$
of characteristic zero.
If $V=\oplus_{i\in {\mathbb Z}} V^i$ is a graded vector space, then
$V[k]$ stands for the graded vector space with $V[k]^i:=V^{i+k}$
and $s^k$ for the associated isomorphism $V\rightarrow V[k]$; for $v\in V^i$ we set $|v|:=i$.
For a pair of graded vector spaces $V_1$ and $V_2$, the symbol ${\mathrm H\mathrm o\mathrm m}_i(V_1,V_2)$ stands
for the space of homogeneous linear maps of degree $i$, and
${\mathrm H\mathrm o\mathrm m}(V_1,V_2):=\bigoplus_{i\in {\mathbb Z}}{\mathrm H\mathrm o\mathrm m}_i(V_1,V_2)$; for example, $s^k\in {\mathrm H\mathrm o\mathrm m}_{-k}(V,V[k])$.
For a
properad ${\mathcal P}$ we denote by ${\mathcal P}\{k\}$ the properad which is uniquely defined by
the following property:
for any graded vector space $V$ a representation
of ${\mathcal P}\{k\}$ in $V$ is identical to a representation of ${\mathcal P}$ in $V[k]$.
The degree shifted operad of Lie algebras ${\mathcal L} \mathit{ie}\{d\}$ is denoted by ${\mathcal L} ie_{d+1}$,
while its minimal resolution by ${\mathcal H} \mathit{olie}_{d+1}$; representations of ${\mathcal L} ie_{d+1}$ are vector spaces equipped with Lie brackets of degree $-d$.
For a right (resp., left) module $V$ over a group $G$ we denote by $V_G$ (resp.\
$_G\hspace{-0.5mm}V$)
the ${\mathbb K}$-vector space of coinvariants:
$V/\{g(v) - v\ |\ v\in V, g\in G\}$ and by $V^G$ (resp.\ $^GV$) the subspace
of invariants: $\{Å{\mathcal O}orall g\in G\ :\ g(v)=v,\ v\in V\}$. If $G$ is finite, then these
spaces are canonically isomorphic as $char({\mathbb K})=0$.
For a vector space $V$ and a formal parameter $\hbar$ we denote by $V[[\hbar]]$ the topological vector space
of formal power series in $\hbar$ with coefficients in $V$, and by $\hbar^k V[[\hbar]]$ a vector space
of $V[[\hbar]]$ spanned by series of the form $\hbar^k f$ for some $f\in V[[\hbar]]$.
\subsection{Remark} A part of this paper contains an extended version of an appendix in the preprint
\cite{CMW} which was removed from the final version of loc.\ cit.\ following the recommendation of a referee.
{\large
\section{\bf Properads of Lie bialgebras and graph complexes}\label{sec:preliminaries}
}
\subsection{Lie $n$-bialgebras} A {\em Lie n-bialgebra}\, is a graded vector space $V$
equipped with linear maps,
$$
\vartriangle: V\rightarrow V\wedge V \ \ \ \mbox{and}\ \ \ [\ , \ ]: \wedge^2 (V[n])
\rightarrow V[n],
$$
such that the first operation $\vartriangle$ makes $V$ into a Lie coalgebra, the second operation
$[\ ,\ ]$ makes $V[n]$ into a Lie algebra, and the compatibility condition
$$
\vartriangle [a, b] = \sum a_1\otimes [a_2, b] + [a,
b_1]\otimes b_2 - (-1)^{(|a|+n)(|b|+n)}( [b, a_1]\otimes a_2
+ b_1\otimes [b_2, a]),
$$
holds for any $a,b\in V$ with $\vartriangle a=:\sum a_1\otimes a_2$, $\vartriangle b=:\sum
b_1\otimes b_2$.
The case $n=0$ gives us the ordinary definition of Lie bialgebra \cite{D1}.
The case $n=1$ is of interest because minimal resolutions of Lie 1-bialgebras control
local Poisson geometry \cite{Me1,Me2}. For $n$ even
it makes sense to introduce an {\em involutive Lie
$n$-bialgebra}\, as a Lie $n$-bialgebra $(V, [\ ,\ ], \vartriangle)$ such that the
composition map
$$
\begin{array}{ccccc}
V & \stackrel{\vartriangle}{\longrightarrow} & \Lambda^2V & \stackrel{[\ ,\ ]}{\longrightarrow} & V[-n]\\
a & \longrightarrow & \sum a_1\otimes a_2 &\longrightarrow & [a_1,a_2]
\end{array}
$$
vanishes (for odd $n$ this condition is trivial for symmetry reasons).
\subsection{Properads of (involutive) Lie bialgebras.} Let ${\mathcal L} ieb_{n}$
(resp.\ ${\mathcal L} ieb^\diamond_{n}$) denote the properad of (resp.\ involutive) Lie $n$-bialgebras.
Let us consider their degree shifted versions,
\[
{\mathcal L} ieb_{c,d} = {\mathcal L} ieb_{c+d-2}\{1-c\}, \ \ \ \ {\mathcal L} ieb_{c,d}^\diamond = {\mathcal L} ieb^\diamond_{c+d-2}\{1-c\}
\]
in which the cobracket generator has degree $1-c$, and the bracket generator degree $1-d$. It is worth emphasizing that
the symbol ${\mathcal L} ieb_{c,d}^\diamond$ tacitly assumes that $c+d\in 2{\mathbb Z}$, i.e.\ that the numbers $c$ and $d$ have the same parity. Let us describe these properads and their minimal resolutions explicitly.
By definition, $\mathcal{L}\mathit{ieb}cd$ is a quadratic properad given as the quotient,
$$
\mathcal{L}\mathit{ieb}_{c,d}:={\mathcal F} ree\langle E\rangle/\langle{\mathcal R}\rangle,
$$
of the free properad generated by an ${\mathbb S}$-bimodule $E=\{E(m,n)\}_{m,n\geq 1}$ with
all $E(m,n)=0$ except
$$
E(2,1):={\mbox{1 \hskip -8pt 1}}_1\otimes {\mathit s \mathit g\mathit n}_2^{c}[c-1]=\mbox{span}\left\langle
\begin{array}{c}\begin{xy}
<0mm,-0.55mm>*{};<0mm,-2.5mm>*{}**@{-},
<0.5mm,0.5mm>*{};<2.2mm,2.2mm>*{}**@{-},
<-0.48mm,0.48mm>*{};<-2.2mm,2.2mm>*{}**@{-},
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0.5mm,0.5mm>*{};<2.7mm,2.8mm>*{^{_2}}**@{},
<-0.48mm,0.48mm>*{};<-2.7mm,2.8mm>*{^{_1}}**@{},
\end{xy}\end{array}
=(-1)^{c}
\begin{array}{c}\begin{xy}
<0mm,-0.55mm>*{};<0mm,-2.5mm>*{}**@{-},
<0.5mm,0.5mm>*{};<2.2mm,2.2mm>*{}**@{-},
<-0.48mm,0.48mm>*{};<-2.2mm,2.2mm>*{}**@{-},
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0.5mm,0.5mm>*{};<2.7mm,2.8mm>*{^{_1}}**@{},
<-0.48mm,0.48mm>*{};<-2.7mm,2.8mm>*{^{_2}}**@{},
\end{xy}\end{array}
\right\rangle
$$
$$
E(1,2):= {\mathit s \mathit g\mathit n}_2^{d}\otimes {\mbox{1 \hskip -8pt 1}}_1[d-1]=\mbox{span}\left\langle
\begin{array}{c}\begin{xy}
<0mm,0.66mm>*{};<0mm,3mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<2.2mm,-2.2mm>*{}**@{-},
<-0.35mm,-0.35mm>*{};<-2.2mm,-2.2mm>*{}**@{-},
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0.39mm,-0.39mm>*{};<2.9mm,-4mm>*{^{_2}}**@{},
<-0.35mm,-0.35mm>*{};<-2.8mm,-4mm>*{^{_1}}**@{},
\end{xy}\end{array}
=(-1)^{d}
\begin{array}{c}\begin{xy}
<0mm,0.66mm>*{};<0mm,3mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<2.2mm,-2.2mm>*{}**@{-},
<-0.35mm,-0.35mm>*{};<-2.2mm,-2.2mm>*{}**@{-},
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0.39mm,-0.39mm>*{};<2.9mm,-4mm>*{^{_1}}**@{},
<-0.35mm,-0.35mm>*{};<-2.8mm,-4mm>*{^{_2}}**@{},
\end{xy}\end{array}
\right\rangle
$$
by the ideal generated by the following elements
\begin{equation}\label{R for LieB}
{\mathcal R}:\left\{
\begin{array}{c}
\begin{array}{c}\resizebox{7mm}{!}{
\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0mm,-0.49mm>*{};<0mm,-3.0mm>*{}**@{-},
<0.49mm,0.49mm>*{};<1.9mm,1.9mm>*{}**@{-},
<-0.5mm,0.5mm>*{};<-1.9mm,1.9mm>*{}**@{-},
<-2.3mm,2.3mm>*{\circ};<-2.3mm,2.3mm>*{}**@{},
<-1.8mm,2.8mm>*{};<0mm,4.9mm>*{}**@{-},
<-2.8mm,2.9mm>*{};<-4.6mm,4.9mm>*{}**@{-},
<0.49mm,0.49mm>*{};<2.7mm,2.3mm>*{^3}**@{},
<-1.8mm,2.8mm>*{};<0.4mm,5.3mm>*{^2}**@{},
<-2.8mm,2.9mm>*{};<-5.1mm,5.3mm>*{^1}**@{},
\end{xy}}\end{array}
+
\begin{array}{c}\resizebox{7mm}{!}{\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0mm,-0.49mm>*{};<0mm,-3.0mm>*{}**@{-},
<0.49mm,0.49mm>*{};<1.9mm,1.9mm>*{}**@{-},
<-0.5mm,0.5mm>*{};<-1.9mm,1.9mm>*{}**@{-},
<-2.3mm,2.3mm>*{\circ};<-2.3mm,2.3mm>*{}**@{},
<-1.8mm,2.8mm>*{};<0mm,4.9mm>*{}**@{-},
<-2.8mm,2.9mm>*{};<-4.6mm,4.9mm>*{}**@{-},
<0.49mm,0.49mm>*{};<2.7mm,2.3mm>*{^2}**@{},
<-1.8mm,2.8mm>*{};<0.4mm,5.3mm>*{^1}**@{},
<-2.8mm,2.9mm>*{};<-5.1mm,5.3mm>*{^3}**@{},
\end{xy}}\end{array}
+
\begin{array}{c}\resizebox{7mm}{!}{\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0mm,-0.49mm>*{};<0mm,-3.0mm>*{}**@{-},
<0.49mm,0.49mm>*{};<1.9mm,1.9mm>*{}**@{-},
<-0.5mm,0.5mm>*{};<-1.9mm,1.9mm>*{}**@{-},
<-2.3mm,2.3mm>*{\circ};<-2.3mm,2.3mm>*{}**@{},
<-1.8mm,2.8mm>*{};<0mm,4.9mm>*{}**@{-},
<-2.8mm,2.9mm>*{};<-4.6mm,4.9mm>*{}**@{-},
<0.49mm,0.49mm>*{};<2.7mm,2.3mm>*{^1}**@{},
<-1.8mm,2.8mm>*{};<0.4mm,5.3mm>*{^3}**@{},
<-2.8mm,2.9mm>*{};<-5.1mm,5.3mm>*{^2}**@{},
\end{xy}}\end{array}
\ \ , \ \
\begin{array}{c}\resizebox{8.4mm}{!}{ \begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0mm,0.69mm>*{};<0mm,3.0mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<2.4mm,-2.4mm>*{}**@{-},
<-0.35mm,-0.35mm>*{};<-1.9mm,-1.9mm>*{}**@{-},
<-2.4mm,-2.4mm>*{\circ};<-2.4mm,-2.4mm>*{}**@{},
<-2.0mm,-2.8mm>*{};<0mm,-4.9mm>*{}**@{-},
<-2.8mm,-2.9mm>*{};<-4.7mm,-4.9mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<3.3mm,-4.0mm>*{^3}**@{},
<-2.0mm,-2.8mm>*{};<0.5mm,-6.7mm>*{^2}**@{},
<-2.8mm,-2.9mm>*{};<-5.2mm,-6.7mm>*{^1}**@{},
\end{xy}}\end{array}
+
\begin{array}{c}\resizebox{8.4mm}{!}{ \begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0mm,0.69mm>*{};<0mm,3.0mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<2.4mm,-2.4mm>*{}**@{-},
<-0.35mm,-0.35mm>*{};<-1.9mm,-1.9mm>*{}**@{-},
<-2.4mm,-2.4mm>*{\circ};<-2.4mm,-2.4mm>*{}**@{},
<-2.0mm,-2.8mm>*{};<0mm,-4.9mm>*{}**@{-},
<-2.8mm,-2.9mm>*{};<-4.7mm,-4.9mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<3.3mm,-4.0mm>*{^2}**@{},
<-2.0mm,-2.8mm>*{};<0.5mm,-6.7mm>*{^1}**@{},
<-2.8mm,-2.9mm>*{};<-5.2mm,-6.7mm>*{^3}**@{},
\end{xy}}\end{array}
+
\begin{array}{c}\resizebox{8.4mm}{!}{ \begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0mm,0.69mm>*{};<0mm,3.0mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<2.4mm,-2.4mm>*{}**@{-},
<-0.35mm,-0.35mm>*{};<-1.9mm,-1.9mm>*{}**@{-},
<-2.4mm,-2.4mm>*{\circ};<-2.4mm,-2.4mm>*{}**@{},
<-2.0mm,-2.8mm>*{};<0mm,-4.9mm>*{}**@{-},
<-2.8mm,-2.9mm>*{};<-4.7mm,-4.9mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<3.3mm,-4.0mm>*{^1}**@{},
<-2.0mm,-2.8mm>*{};<0.5mm,-6.7mm>*{^3}**@{},
<-2.8mm,-2.9mm>*{};<-5.2mm,-6.7mm>*{^2}**@{},
\end{xy}}\end{array}
\\
\betagin{array}{c}\resizebox{5mm}{!}{\betagin{xy}
<0mm,2.47mm>*{};<0mm,0.12mm>*{}**@{-},
<0.5mm,3.5mm>*{};<2.2mm,5.2mm>*{}**@{-},
<-0.48mm,3.48mm>*{};<-2.2mm,5.2mm>*{}**@{-},
<0mm,3mm>*{\circ};<0mm,3mm>*{}**@{},
<0mm,-0.8mm>*{\circ};<0mm,-0.8mm>*{}**@{},
<-0.39mm,-1.2mm>*{};<-2.2mm,-3.5mm>*{}**@{-},
<0.39mm,-1.2mm>*{};<2.2mm,-3.5mm>*{}**@{-},
<0.5mm,3.5mm>*{};<2.8mm,5.7mm>*{^2}**@{},
<-0.48mm,3.48mm>*{};<-2.8mm,5.7mm>*{^1}**@{},
<0mm,-0.8mm>*{};<-2.7mm,-5.2mm>*{^1}**@{},
<0mm,-0.8mm>*{};<2.7mm,-5.2mm>*{^2}**@{},
\end{xy}}\end{array}
-
\betagin{array}{c}\resizebox{7mm}{!}{\betagin{xy}
<0mm,-1.3mm>*{};<0mm,-3.5mm>*{}**@{-},
<0.38mm,-0.2mm>*{};<2.0mm,2.0mm>*{}**@{-},
<-0.38mm,-0.2mm>*{};<-2.2mm,2.2mm>*{}**@{-},
<0mm,-0.8mm>*{\circ};<0mm,0.8mm>*{}**@{},
<2.4mm,2.4mm>*{\circ};<2.4mm,2.4mm>*{}**@{},
<2.77mm,2.0mm>*{};<4.4mm,-0.8mm>*{}**@{-},
<2.4mm,3mm>*{};<2.4mm,5.2mm>*{}**@{-},
<0mm,-1.3mm>*{};<0mm,-5.3mm>*{^1}**@{},
<2.5mm,2.3mm>*{};<5.1mm,-2.6mm>*{^2}**@{},
<2.4mm,2.5mm>*{};<2.4mm,5.7mm>*{^2}**@{},
<-0.38mm,-0.2mm>*{};<-2.8mm,2.5mm>*{^1}**@{},
\end{xy}}\end{array}
- (-1)^{d}
\betagin{array}{c}\resizebox{7mm}{!}{\betagin{xy}
<0mm,-1.3mm>*{};<0mm,-3.5mm>*{}**@{-},
<0.38mm,-0.2mm>*{};<2.0mm,2.0mm>*{}**@{-},
<-0.38mm,-0.2mm>*{};<-2.2mm,2.2mm>*{}**@{-},
<0mm,-0.8mm>*{\circ};<0mm,0.8mm>*{}**@{},
<2.4mm,2.4mm>*{\circ};<2.4mm,2.4mm>*{}**@{},
<2.77mm,2.0mm>*{};<4.4mm,-0.8mm>*{}**@{-},
<2.4mm,3mm>*{};<2.4mm,5.2mm>*{}**@{-},
<0mm,-1.3mm>*{};<0mm,-5.3mm>*{^2}**@{},
<2.5mm,2.3mm>*{};<5.1mm,-2.6mm>*{^1}**@{},
<2.4mm,2.5mm>*{};<2.4mm,5.7mm>*{^2}**@{},
<-0.38mm,-0.2mm>*{};<-2.8mm,2.5mm>*{^1}**@{},
\end{xy}}\end{array}
- (-1)^{d+c}
\betagin{array}{c}\resizebox{7mm}{!}{\betagin{xy}
<0mm,-1.3mm>*{};<0mm,-3.5mm>*{}**@{-},
<0.38mm,-0.2mm>*{};<2.0mm,2.0mm>*{}**@{-},
<-0.38mm,-0.2mm>*{};<-2.2mm,2.2mm>*{}**@{-},
<0mm,-0.8mm>*{\circ};<0mm,0.8mm>*{}**@{},
<2.4mm,2.4mm>*{\circ};<2.4mm,2.4mm>*{}**@{},
<2.77mm,2.0mm>*{};<4.4mm,-0.8mm>*{}**@{-},
<2.4mm,3mm>*{};<2.4mm,5.2mm>*{}**@{-},
<0mm,-1.3mm>*{};<0mm,-5.3mm>*{^2}**@{},
<2.5mm,2.3mm>*{};<5.1mm,-2.6mm>*{^1}**@{},
<2.4mm,2.5mm>*{};<2.4mm,5.7mm>*{^1}**@{},
<-0.38mm,-0.2mm>*{};<-2.8mm,2.5mm>*{^2}**@{},
\end{xy}}\end{array}
- (-1)^{c}
\betagin{array}{c}\resizebox{7mm}{!}{\betagin{xy}
<0mm,-1.3mm>*{};<0mm,-3.5mm>*{}**@{-},
<0.38mm,-0.2mm>*{};<2.0mm,2.0mm>*{}**@{-},
<-0.38mm,-0.2mm>*{};<-2.2mm,2.2mm>*{}**@{-},
<0mm,-0.8mm>*{\circ};<0mm,0.8mm>*{}**@{},
<2.4mm,2.4mm>*{\circ};<2.4mm,2.4mm>*{}**@{},
<2.77mm,2.0mm>*{};<4.4mm,-0.8mm>*{}**@{-},
<2.4mm,3mm>*{};<2.4mm,5.2mm>*{}**@{-},
<0mm,-1.3mm>*{};<0mm,-5.3mm>*{^1}**@{},
<2.5mm,2.3mm>*{};<5.1mm,-2.6mm>*{^2}**@{},
<2.4mm,2.5mm>*{};<2.4mm,5.7mm>*{^1}**@{},
<-0.38mm,-0.2mm>*{};<-2.8mm,2.5mm>*{^2}**@{},
\end{xy}}\end{array}
\end{array}
\bar{i}ght.
\end{equation}
Similarly, $\mathcal{L}\mathit{ieb}^\diamondcd$ (with $c+d\in 2{\mathbb Z}$ by default) is a quadratic properad
$
{\mathcal F} ree\lambdangle E{\bar{a}}ngle/\lambdangle{\mathcal R}_\diamond{\bar{a}}ngle
$
generated by the same ${\mathbb S}$-bimodule $E$ modulo the relations
$$
{\mathcal R}_\diamond:= {\mathcal R} \ \bigsqcup
\betagin{array}{c}\resizebox{4mm}{!}
{\xy
(0,0)*{\circ}="a",
(0,6)*{\circ}="b",
(3,3)*{}="c",
(-3,3)*{}="d",
(0,9)*{}="b'",
(0,-3)*{}="a'",
\ar@{-} "a";"c" <0pt>
\ar @{-} "a";"d" <0pt>
\ar @{-} "a";"a'" <0pt>
\ar @{-} "b";"c" <0pt>
\ar @{-} "b";"d" <0pt>
\ar @{-} "b";"b'" <0pt>
\endxy}
\end{array}
$$
It is clear from the association
$
\varepsilontriangle \leftrightarrow
\betagin{xy}
<0mm,-0.55mm>*{};<0mm,-2.5mm>*{}**@{-},
<0.5mm,0.5mm>*{};<2.2mm,2.2mm>*{}**@{-},
<-0.48mm,0.48mm>*{};<-2.2mm,2.2mm>*{}**@{-},
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
\end{xy}$,
$
[\ , \ ] \leftrightarrow
\betagin{xy}
<0mm,0.66mm>*{};<0mm,3mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<2.2mm,-2.2mm>*{}**@{-},
<-0.35mm,-0.35mm>*{};<-2.2mm,-2.2mm>*{}**@{-},
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
\end{xy}
$
that there is a one-to-one correspondence between representations of $\mathcal{L}\mathit{ieb}cd$ (resp.,
$\mathcal{L}\mathit{ieb}^\diamondcd$) in a finite dimensional space $V$ and (resp., involutive) Lie $(c+d-2)$-bialgebra
structures in $V[c-1]$.
The minimal resolution $\mathcal{H}\mathit{olieb}_{c,d}$ of the properad $\mathcal{L}\mathit{ieb}cd$ was constructed in \cite{Ko,MaVo} for $d+c\in 2{\mathbb Z}$ and in
\cite{Me1,Me2} for $d+c\in 2{\mathbb Z}+1$. It
is generated by the following (skew)symmetric corollas of degree $1 +c(1-m)+d(1-n)$
\betagin{equation}\lambdabel{2: symmetries of HoLiebcd corollas}
\betagin{array}{c}\resizebox{17mm}{!}{\betagin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-10.5mm,5.9mm>*{^{\sigma(1)}}**@{},
<0mm,0mm>*{};<-4mm,5.9mm>*{^{\sigma(2)}}**@{},
<0mm,0mm>*{};<10.0mm,5.9mm>*{^{\sigma(m)}}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-10.5mm,-6.9mm>*{^{{\mathbf a}u(1)}}**@{},
<0mm,0mm>*{};<-4mm,-6.9mm>*{^{{\mathbf a}u(2)}}**@{},
<0mm,0mm>*{};<10.0mm,-6.9mm>*{^{{\mathbf a}u(n)}}**@{},
\end{xy}}\end{array}
=(-1)^{c|\sigma|+d|{\mathbf a}u|}
\betagin{array}{c}\resizebox{14mm}{!}{\betagin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}}\end{array} \ \ {\mathcal O}orall \sigma\in {\mathbb S}_m, {\mathcal O}orall{\mathbf a}u\in {\mathbb S}_n
\end{equation}
and has the differential
given on the generators by
\betagin{equation}\lambdabel{LBk_infty}
\delta
\betagin{array}{c}\resizebox{14mm}{!}{\betagin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}}\end{array}
\ \ = \ \
\sum_{[1,\ldots,m]=I_1\sqcup I_2\atop
{|I_1|\geq 0, |I_2|\geq 1}}
\sum_{[1,\ldots,n]=J_1\sqcup J_2\atop
{|J_1|\geq 1, |J_2|\geq 1}
}\hspace{0mm}
{\partial}m
\betagin{array}{c}\resizebox{22mm}{!}{ \betagin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<0mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<12.4mm,4.8mm>*{}**@{-},
<0mm,0mm>*{};<-2mm,7mm>*{\overbrace{\ \ \ \ \ \ \ \ \ \ \ \ }}**@{},
<0mm,0mm>*{};<-2mm,9mm>*{^{I_1}}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<0mm,-7mm>*{\underbrace{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \
}}**@{},
<0mm,0mm>*{};<0mm,-10.6mm>*{_{J_1}}**@{},
<13mm,5mm>*{};<13mm,5mm>*{\circ}**@{},
<12.6mm,5.44mm>*{};<5mm,10mm>*{}**@{-},
<12.6mm,5.7mm>*{};<8.5mm,10mm>*{}**@{-},
<13mm,5mm>*{};<13mm,10mm>*{\ldots}**@{},
<13.4mm,5.7mm>*{};<16.5mm,10mm>*{}**@{-},
<13.6mm,5.44mm>*{};<20mm,10mm>*{}**@{-},
<13mm,5mm>*{};<13mm,12mm>*{\overbrace{\ \ \ \ \ \ \ \ \ \ \ \ \ \ }}**@{},
<13mm,5mm>*{};<13mm,14mm>*{^{I_2}}**@{},
<12.4mm,4.3mm>*{};<8mm,0mm>*{}**@{-},
<12.6mm,4.3mm>*{};<12mm,0mm>*{\ldots}**@{},
<13.4mm,4.5mm>*{};<16.5mm,0mm>*{}**@{-},
<13.6mm,4.8mm>*{};<20mm,0mm>*{}**@{-},
<13mm,5mm>*{};<14.3mm,-2mm>*{\underbrace{\ \ \ \ \ \ \ \ \ \ \ }}**@{},
<13mm,5mm>*{};<14.3mm,-4.5mm>*{_{J_2}}**@{},
\end{xy}}\end{array}
\end{equation}
where the signs on the r.h.s.\ are uniquely fixed for $c+d\in 2{\mathbb Z}$ by the fact that they are all equal to $+1$ if $c$ and $d$ are even integers, and for $c+d\in 2{\mathbb Z}+1$ the signs are given explicitly in
\cite{Me1}.
The minimal resolution $\mathcal{H}\mathit{olieb}_{c,d}^\diamond$ of the properad $\mathcal{L}\mathit{ieb}^\diamondcd$ was constructed in \cite{CMW}. It is a free properad generated
by the following (skew)symmetric corollas of degree $1+c(1-m-a)+d(1-n-a)$
\betagin{equation}\lambdabel{equ:LoBgenerators}
\betagin{array}{c}\resizebox{16mm}{!}{\xy
(-9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,-6)*{\ldots};
(-10,-8)*{_1};
(-6,-8)*{_2};
(10,-8)*{_n};
(-9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,6)*{\ldots};
(-10,8)*{_1};
(-6,8)*{_2};
(10,8)*{_m};
\endxy}\end{array}
=(-1)^{(d+1)(|\sigma|+|{\mathbf a}u|)}
\betagin{array}{c}\resizebox{20mm}{!}{\xy
(-9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,-6)*{\ldots};
(-12,-8)*{_{{\mathbf a}u(1)}};
(-6,-8)*{_{{\mathbf a}u(2)}};
(12,-8)*{_{{\mathbf a}u(n)}};
(-9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,6)*{\ldots};
(-12,8)*{_{\sigma(1)}};
(-6,8)*{_{\sigma(2)}};
(12,8)*{_{\sigma(m)}};
\endxy}\end{array}\ \ \ {\mathcal O}orall \sigma\in {\mathbb S}_m, {\mathcal O}orall {\mathbf a}u\in {\mathbb S}_n,
\end{equation}
where $m+n+ a\geq 3$, $m\geq 1$, $n\geq 1$, $a\geq 0$. The differential in
$\mathcal{H}\mathit{olieb}_{c,d}^\diamond$ is given on the generators by
\betagin{equation}\lambdabel{2: d on Lie inv infty}
\delta
\betagin{array}{c}\resizebox{16mm}{!}{\xy
(-9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,-6)*{\ldots};
(-10,-8)*{_1};
(-6,-8)*{_2};
(10,-8)*{_n};
(-9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,6)*{\ldots};
(-10,8)*{_1};
(-6,8)*{_2};
(10,8)*{_m};
\endxy}\end{array}
=
\sum_{l\geq 1}\sum_{a=b+c+l-1}\sum_{[m]=I_1\sqcup I_2\atop
[n]=J_1\sqcup J_2} {\partial}m
\betagin{array}{c}
\betagin{array}{c}\resizebox{21mm}{!}{\xy
(0,0)*+{b}*\cir{}="b",
(10,10)*+{c}*\cir{}="c",
(-9,6)*{}="1",
(-7,6)*{}="2",
(-2,6)*{}="3",
(-3.5,5)*{...},
(-4,-6)*{}="-1",
(-2,-6)*{}="-2",
(4,-6)*{}="-3",
(1,-5)*{...},
(0,-8)*{\underbrace{\ \ \ \ \ \ \ \ }},
(0,-11)*{_{J_1}},
(-6,8)*{\overbrace{ \ \ \ \ \ \ }},
(-6,11)*{_{I_1}},
(6,16)*{}="1'",
(8,16)*{}="2'",
(14,16)*{}="3'",
(11,15)*{...},
(11,6)*{}="-1'",
(16,6)*{}="-2'",
(18,6)*{}="-3'",
(13.5,6)*{...},
(15,4)*{\underbrace{\ \ \ \ \ \ \ }},
(15,1)*{_{J_2}},
(10,18)*{\overbrace{ \ \ \ \ \ \ \ \ }},
(10,21)*{_{I_2}},
(0,2)*-{};(8.0,10.0)*-{}
**\crv{(0,10)};
(0.5,1.8)*-{};(8.5,9.0)*-{}
**\crv{(0.4,7)};
(1.5,0.5)*-{};(9.1,8.5)*-{}
**\crv{(5,1)};
(1.7,0.0)*-{};(9.5,8.6)*-{}
**\crv{(6,-1)};
(5,5)*+{...};
\ar @{-} "b";"1" <0pt>
\ar @{-} "b";"2" <0pt>
\ar @{-} "b";"3" <0pt>
\ar @{-} "b";"-1" <0pt>
\ar @{-} "b";"-2" <0pt>
\ar @{-} "b";"-3" <0pt>
\ar @{-} "c";"1'" <0pt>
\ar @{-} "c";"2'" <0pt>
\ar @{-} "c";"3'" <0pt>
\ar @{-} "c";"-1'" <0pt>
\ar @{-} "c";"-2'" <0pt>
\ar @{-} "c";"-3'" <0pt>
\endxy}\end{array}
\end{array}
\end{equation}
where the summation parameter $l$ counts the number of internal edges connecting the two vertices
on the r.h.s., and the signs are fixed by the fact that they are all equal to $+1$ when $c$ and $d$ are
odd integers.
Our purpose in this paper is to relate deformation complexes of all the properads
considered above to various graph complexes whose cohomology is partially computed, and whose relations with the Grothendieck-Teichm\"uller Lie algebra are well-understood.
\subsection{Complete variants}\lambdabel{sec:completions}
Note that the defining relations for the properads $\mathcal{L}\mathit{ieb}cd$ and $\mathcal{L}\mathit{ieb}^\diamondcd$ do not mix composition diagrams of different loop orders. It follows that the mentioned properads are all graded by the loop order (here also called genus) of composition diagrams.
In particular, fixing the arity, the operations are finite linear combinations (not series) of composites of generators.
For some applications, including in particular the integration of derivations to automorphisms, it is more convenient to consider the completed versions with respect to the genus grading, $\widehat{\mathcal{L}\mathit{ieb}}cd$ and $\widehat{\mathcal{L}\mathit{ieb}}^\diamondcd$. Concretely, the operations of fixed arity in the complete versions of our properads are given by infinite series (instead of just linear combinations) of composites of generators.
Similarly, the resolutions $\mathcal{H}\mathit{olieb}_{c,d}$ and $\mathcal{H}\mathit{olieb}_{c,d}^\diamond$ receive a grading by the loop order (or genus), and we may also consider the completed versions (with respect to this grading) $\widehat{\mathcal{H}\mathit{olieb}}cd$ and $\widehat{\mathcal{H}\mathit{olieb}}^\diamondcd$.
Here it should be noted that with respect to the genus grading the generator \eqref{equ:LoBgenerators} must be considered as living in degree $a$ to make this grading consistent with the definition of differential \eqref{2: d on Lie inv infty}.
\subsection{Directed graph complexes} A {\em graph}\, $\Gamma$ is a 1-dimensional $CW$ complex whose 0-cells are called {\em vertices}\, and 1-cells are called {\em edges}. The set of vertices of $\Gamma$ is denoted by $V(\Gamma)$ and the set of edges by $E(\Gamma)$. A graph $\Gamma$ is called {\em directed}\, if each edge $e\in E(\Gamma)$ comes equipped with an orientation or, plainly speaking,
with a choice of a direction.
Let $G_{n,l}$ be the set of directed graphs $\Gamma$ with $n$ vertices and $l$ edges such that
some bijections $V(\Gamma)\bar{i}ghtarrow [n]$ and $E(\Gamma)\bar{i}ghtarrow [l]$ are fixed, i.e.\ every edge and every vertex of $\Gamma$ has a fixed numerical label. There is
a natural right action of the group ${\mathbb S}_n \times {\mathbb S}_l$ on the set $G_{n,l}$ with ${\mathbb S}_n$ acting by relabeling the vertices and ${\mathbb S}_l$ by relabeling the
edges.
For each fixed integer $d$, a collection of ${\mathbb S}_n$-modules,
$$
{\mathcal D}{\mathcal G} ra_{d}=\left\{{\mathcal D}{\mathcal G} ra_d(n):= {\partial}rod_{l\geq 0} {\mathbb K} \lambdangle G_{n,l}{\bar{a}}ngle \otimes_{ {\mathbb S}_l} {\mathit s \mathit g\mathit n}_l^{\otimes |d-1|} [l(d-1)] \bar{i}ght\}_{n\geq 1}
$$
is an operad with respect to the following operadic composition,
$$
\betagin{array}{rccc}
\circ_i: & {\mathcal D}{\mathcal G} ra_d(n) \times {\mathcal D}{\mathcal G} ra_d(m) &\longrightarrow & {\mathcal D}{\mathcal G} ra_d(m+n-1), \ \ {\mathcal O}orall\ i\in [n]\\
& (\Gamma_1, \Gamma_2) &\longrightarrow & \Gamma_1\circ_i \Gamma_2,
\end{array}
$$
where $\Gamma_1\circ_i \Gamma_2$ is defined by substituting the graph $\Gamma_2$ into the $i$-labeled vertex $v_i$ of $\Gamma_1$ and taking a sum over re-attachments of dangling edges (attached before to $v_i$) to vertices of $\Gamma_2$
in all possible ways.
For any operad ${\mathcal P}=\{{\mathcal P}(n)\}_{n\geq 1}$ in the category of graded vector spaces,
the linear map
$$
\betagin{array}{rccc}
[\ ,\ ]:& {\mathsf P} \otimes {\mathsf P} & \longrightarrow & {\mathsf P}\\
& (a\in {\mathcal P}(n), b\in {\mathcal P}(m)) & \longrightarrow &
[a, b]:= \sum_{i=1}^n a\circ_i b - (-1)^{|a||b|}\sum_{i=1}^m b\circ_i a\ \in {\mathcal P}(m+n-1)
\end{array}
$$
makes a graded vector space
$
{\mathsf P}:= {\partial}rod_{n\geq 1}{\mathcal P}(n)$
into a Lie algebra \cite{KM}; moreover, these brackets induce a Lie algebra structure on the subspace
of invariants
$
{\mathsf P}^{\mathbb S}:= {\partial}rod_{n\geq 1}{\mathcal P}(n)^{{\mathbb S}_n}$. In particular,
the graded vector space
$$
\mathsf{dfGC}_{d}:= {\partial}rod_{n\geq 1} {\mathcal D}{\mathcal G} ra_{d}(n)^{{\mathbb S}_n}[d(1-n)]
$$
is a Lie algebra with respect to the above Lie brackets, and as such it can be identified
with the deformation complex ${\mathsf D\mathsf e\mathsf f }({\mathcal L} ie_d\stackrel{0}{\bar{i}ghtarrow} {\mathcal D}{\mathcal G} ra_{d})$ of a zero morphism. Hence non-trivial Maurer-Cartan elements of $(\mathsf{dfGC}_{d}, [\ ,\ ])$ give us non-trivial morphisms of operads
$$
f:{\mathcal L} ie_d {\longrightarrow} {\mathcal D}{\mathcal G} ra_{d}.
$$
One such non-trivial morphism $f$ is given explicitly on the generator of ${\mathcal L} ie_{d}$ by \cite{Wi1}
\betagin{equation}\lambdabel{2: map from Lie to dgra}
f \left(\betagin{array}{c}\betagin{xy}
<0mm,0.66mm>*{};<0mm,3mm>*{}**@{-},
<0.39mm,-0.39mm>*{};<2.2mm,-2.2mm>*{}**@{-},
<-0.35mm,-0.35mm>*{};<-2.2mm,-2.2mm>*{}**@{-},
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<0.39mm,-0.39mm>*{};<2.9mm,-4mm>*{^{_2}}**@{},
<-0.35mm,-0.35mm>*{};<-2.8mm,-4mm>*{^{_1}}**@{},
\end{xy}\end{array}\bar{i}ght)=
\betagin{array}{c}\resizebox{6.3mm}{!}{\xy
(0,1)*+{_1}*\cir{}="b",
(8,1)*+{_2}*\cir{}="c",
\ar @{->} "b";"c" <0pt>
\endxy}
\end{array} - (-1)^d
\betagin{array}{c}\resizebox{7mm}{!}{\xy
(0,1)*+{_2}*\cir{}="b",
(8,1)*+{_1}*\cir{}="c",
\ar @{->} "b";"c" <0pt>
\endxy}
\end{array}=:\xy
(0,0)*{\bulletllet}="a",
(5,0)*{\bullet}="b",
\ar @{->} "a";"b" <0pt>
\endxy
\end{equation}
Note that elements of $\mathsf{dfGC}_{d}$ can be identified with graphs from ${\mathcal D}{\mathcal G} ra_d$ whose vertices' labels are symmetrized (for $d$ even) or skew-symmetrized (for $d$ odd) so that in pictures we can forget about labels of vertices and denote them by unlabelled black bullets as in the formula above. Note also that graphs from $\mathsf{dfGC}_{d}$ come equipped with an orientation, $or$, which is a choice of ordering of edges (for $d$ even) or a choice of ordering of vertices (for $d$ odd) up to an even permutation in both cases. Thus every graph $\Gamma\in \mathsf{dfGC}_{d}$ has at most two different orientations, $or$ and $or^{opp}$, and one has
the standard relation, $(\Gamma, or)=-(\Gamma, or^{opp})$; as usual, the data $(\Gamma, or)$ is abbreviated to $\Gamma$ (with some choice of orientation implicitly assumed). Note that the homological degree of a graph $\Gamma$ from $\mathsf{dfGC}_{d}$ is given by
$
|\Gamma|=d(\# V(\Gamma) -1) + (1-d) \# E(\Gamma).
$
The above morphism (\ref{2: map from Lie to dgra}) makes
$(\mathsf{dfGC}_{d}, [\ ,\ ])$ into a {\em differential}\, Lie algebra with the differential
$$
\delta:= [\xy
(0,0)*{\bulletllet}="a",
(5,0)*{\bullet}="b",
\ar @{->} "a";"b" <0pt>
\endxy ,\ ].
$$
This dg Lie algebra contains a dg subalgebra $\mathsf{dGC}_{d}$ spanned by connected graphs
with at least bivalent vertices.
It was proven in \cite{Wi1} that
$$
H^\bullet(\mathsf{dfGC}_{d})= H^\bullet(\mathsf{dGC}_{d})
$$
so that there is no loss of generality of working with $\mathsf{dGC}_{d}$ instead of
$\mathsf{dfGC}_{d}$. Moreover, one has an isomorphism of Lie algebras \cite{Wi1},
$$
H^0(\mathsf{dGC}_{d})={\mathfrak g}{\mathfrak r}{\mathfrak t}_1,
$$
where ${\mathfrak g}{\mathfrak r}{\mathfrak t}_1$ is the Lie algebra of the Grothendieck-Teichm\"uller group $GRT_1$ introduced by Drinfeld in the context of deformation quantization of Lie bialgebras. Nowadays, this group plays an important role in many other areas of mathematics (e.g.\ in knot theory, in deformation quantization
of Poisson manifolds, and in the classification theory of solutions of Kashiwara-Vergne problem, see \cite{Fu} for a review, and many references cited there).
\subsubsection{\bf Remark} Often one considers instead of ${\mathcal D}{\mathcal G} ra_d$ an operad ${\mathcal G} ra_d=\{{\mathcal G} ra_d(n)\}$ defined by
$$
{\mathcal G} ra_d(n):= {\partial}rod_{l\geq 0} {\mathbb K} \lambdangle G_{n,l}{\bar{a}}ngle \otimes_{ {\mathbb S}_l \ltimes
({\mathbb S}_2)^l} {\mathit s \mathit g\mathit n}_l^{|d|}\otimes {\mathit s \mathit g\mathit n}_2^{\otimes l|d-1|} [l(d-1)]
$$
where the group $({\mathbb S}_2)^l$ acts on graphs from $G_{n,l}$ by flipping directions of the edges.
Then, arguing as above, one arrives at the graph complex
$$
\mathsf{fGC}_d:={\mathsf D\mathsf e\mathsf f }({\mathcal L} ie_d\stackrel{f}{\bar{i}ghtarrow} {\mathcal G} ra_{d})
$$
of {\em undirected}\, graphs. It contains three important dg Lie subalgebras:
(i) $\mathsf{fcGC}_d \subset \mathsf{fGC}_d$ which is spanned by connected graphs, (ii) $\mathsf{GC}_d^2 \subset \mathsf{fcGC}_d$ which is spanned by graphs with at least bivalent vertices, and (iii) $\mathsf{GC}_d \subset \mathsf{GC}_d^2$ which is spanned by graphs with at least trivalent vertices.
It was shown in \cite{Ko1,Wi1} that the cohomology of these subalgebras (which determine completely the cohomology of the full graph complex $\mathsf{fGC}_d$) are related to each other as follows
$$
H^\bullet(\mathsf{fGC}_{d}) = H^\bullet(\mathsf{GC}_{d}^2) = H^\bullet(\mathsf{GC}_{d})\ \oplus\ \bigoplus_{j\geq 1\atop j\equiv 2d+1 \mod 4} {\mathbb K}[d-j],
$$
where the summand $ {\mathbb K}[d-j]$ is generated by the loop-type graph with $j$ bivalent vertices.
It was proven in \cite{Wi1} that the complex of directed graphs and the complex of undirected
graphs have the same cohomology,
$$
H^\bullet(\mathsf{dGC}_{d})=H^\bullet(\mathsf{GC}_{d}^2).
$$
In the present context it is more suitable to work with the directed complex $\mathsf{dGC}_{d}$
rather than with $\mathsf{GC}_{d}^2$.
\subsection{Oriented graph complexes} A graph $\Gamma$ from the operad ${\mathcal D}{\mathcal G} ra_d$
is called {\em oriented}\, if it contains no {\em wheels}, that is, directed paths of edges
forming a closed circle. The subspace
${\mathcal G} ra_d^{or}\subset {\mathcal D} {\mathcal G} ra_d$ spanned by oriented graphs is a suboperad. For example,
$$
\betagin{array}{c}
\xy
(0,-1.5)*{_{_2}},
(6.2,-1.5)*{_{_1}},
(3,6)*{^{^3}},
(0,0)*{\bulletllet}="a",
(6,0)*{\bullet}="b",
(3,5)*{\bullet}="c",
\ar @{->} "a";"b" <0pt>
\ar @{->} "a";"c" <0pt>
\ar @{<-} "c";"b" <0pt>
\endxy\end{array}
\in {\mathcal G} ra_d^{or} \ \ \ \ \mbox{ but}\ \ \ \
\betagin{array}{c}
\xy
(0,-1.5)*{_{_2}},
(6.2,-1.5)*{_{_1}},
(3,6)*{^{^3}},
(0,0)*{\bulletllet}="a",
(6,0)*{\bullet}="b",
(3,5)*{\bullet}="c",
\ar @{->} "a";"b" <0pt>
\ar @{<-} "a";"c" <0pt>
\ar @{->} "c";"b" <0pt>
\endxy\end{array}
{\noindent}t\in {\mathcal G} ra_d^{or}(3).
$$
The morphism (\ref{2: map from Lie to dgra}) factors through the inclusion ${\mathcal G} ra_d\subset
{\mathcal D}{\mathcal G} ra_d$ so that one can consider a graph complex
$$
\mathsf{fGC}^{or}_d:={\mathsf D\mathsf e\mathsf f }\left({\mathcal L} ie_d \stackrel{f}{\bar{i}ghtarrow} {\mathcal G} ra_d^{or}\bar{i}ght)
$$
and its subcomplex $\mathsf{GC}or_d$ spanned by connected graphs with at least bivalent vertices
and with no bivalent vertices of the form $\xy
(0,0)*{}="a",
(4,0)*{\bullet}="b",
(8,0)*{}="c",
\ar @{->} "a";"b" <0pt>
\ar @{->} "b";"c" <0pt>
\endxy$. This subcomplex determines the cohomology of the full graph complex,
$H^\bullet(\mathsf{fGC}^{or}_d)=\odot^\bullet (H^\bullet(\mathsf{GC}or_d))$.
It was proven in \cite{Wi2} that
$$
H^\bullet(\mathsf{GC}or_{d+1})=H^\bullet(\mathsf{dGC}_d)=H^\bullet(\mathsf{GC}_d^2).
$$
In particular, one has a remarkable isomorphism of Lie algebras,
$
H^0(\mathsf{GC}or_3)={\mathfrak g}{\mathfrak r}{\mathfrak t}_1$. Moreover $H^i(\mathsf{GC}or_3)=0$ for $i\leq -2$ and $H^{-1}(\mathsf{GC}or_3)$
is a 1-dimensional space generated by the graph $
\betagin{array}{c}\resizebox{4mm}{!}{ \xy
\ar@/^0.6pc/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\ar@/^{-0.6pc}/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\endxy}\end{array}
$.
Consider next the Lie algebra $(\mathsf{GC}or_3[[\hbar]], [\ ,\ ])$, where $\mathsf{GC}or_3[[\hbar]]$ is the topological vector space spanned by formal power series in a formal parameter $\hbar$ of homological degree $2$, and $[\ ,\ ]$ are the Lie brackets obtained from the standard ones in
$\mathsf{GC}or_d$ by the continuous extension. It was shown in \cite{CMW} that the formal power series
\betagin{equation}\lambdabel{2: Phi_hbar MC element}
{\mathbb P}hi_\hbar:= \sum_{k=1}^\infty \hbar^{k-1} \underbrace{
\betagin{array}{c}\resizebox{6mm}{!} {\xy
(0,0)*{...},
\ar@/^1pc/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\ar@/^{-1pc}/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\ar@/^0.6pc/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\ar@/^{-0.6pc}/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\endxy}
\end{array}}_{k\ \mathrm{edges}}
\end{equation}
is a Maurer-Cartan element in the Lie algebra $(\mathsf{fGC}_3^{or}[[\hbar]], [\ ,\ ])$ and hence makes the latter into a {\em differential}\, Lie algebra with the differential
$$
\delta_\hbar=[{\mathbb P}hi_\hbar,\ ].
$$
It was proven in \cite{CMW} that
$H^0( \mathsf{GC}_3^{or}[[\hbar]], \delta_\hbar)\simeq
H^0(\mathsf{GC}_3^{or},\delta)\simeq \mathfrak{grt}_1$ as Lie algebras. Moreover, $H^i( \mathsf{GC}_3^{or}[[\hbar]], \delta_\hbar)=0$
for all $i\leq -2$ and $H^{-1}( \mathsf{GC}_3^{or}[[\hbar]], \delta_\hbar)$ is a 1-dimensional vector space generated by the formal power series
$
\sum_{k=2}^\infty (k-1)\hbar^{k-2}\underbrace{
\betagin{array}{c}\resizebox{6mm}{!} {\xy
(0,0)*{...},
\ar@/^1pc/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\ar@/^{-1pc}/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\ar@/^0.6pc/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\ar@/^{-0.6pc}/(0,-5)*{\bulletllet};(0,5)*{\bulletllet}
\endxy}
\end{array}}_{k\ \mathrm{edges}}
$
Sometimes we do not show in our pictures directions of edges of oriented graphs
by assuming tacitly that the flow goes from the bottom to the top (as in the case of properads).
{\Large
\section{\bf Deformation complexes of properads\\ and directed graph complexes}
}
\subsection{Deformation complexes of properads}
\lambdabel{2 sec:defcomplexes}
For ${\mathcal C}$ a coaugmented co(pr)operad, we will denote by $\Omegaega({\mathcal C})$ its cobar construction.
Concretely, $\Omegaega({\mathcal C})={\mathcal F} ree\lambdangle\overline {\mathcal C}[-1]{\bar{a}}ngle$ as a graded (pr)operad, where $\overline {\mathcal C}$ is the cokernel of the coaugmentation and ${\mathcal F} ree\lambdangle\dots{\bar{a}}ngle$ denotes the free (pr)operad generated by an ${\mathbb S}$-(bi)module.
We will often use complexes of derivations of (pr)operads and deformation complexes of (pr)operad maps.
For a map of properads $f: \Omegaega({\mathcal C}){\to} {\mathcal P}$, we will denote by
\betagin{equation}\lambdabel{equ:Defdefi}
{\mathsf D\mathsf e\mathsf f }( \Omegaega({\mathcal C})\stackrel{f}{\to} {\mathcal P} )\cong {\partial}rod_{m,n} {\mathrm H\mathrm o\mathrm m}_{{\mathbb S}_m\times {\mathbb S}_n}({\mathcal C}(m,n), {\mathcal P}(m,n))
\end{equation}
the associated convolution complex. It has a natural structure of a dg Lie algebra \cite{MV} controlling deformations of the morphism $f$.
We will also consider the Lie algebra $\mathrm{Der}({\mathcal P})$ of derivations of the properad ${\mathcal P}$; in fact,
we will use a minor variation of the standard definition (given, e.g., in \cite{Ta}) defined as follows. Let ${\mathcal P}^+$ be the free properad generated by ${\mathcal P}$ and one other operation
$\betagin{xy}
<0mm,-0.55mm>*{};<0mm,-3mm>*{}**@{-},
<0mm,0.5mm>*{};<0mm,3mm>*{}**@{-},
<0mm,0mm>*{\bulletllet};<0mm,0mm>*{}**@{},
\end{xy}$ of arity $(1,1)$ and of cohomological degree $+1$. On ${\mathcal P}^+$ we define a differential $\delta^+$ by setting its value on the new generator by
$$
\delta^+ \betagin{xy}
<0mm,-0.55mm>*{};<0mm,-3mm>*{}**@{-},
<0mm,0.5mm>*{};<0mm,3mm>*{}**@{-},
<0mm,0mm>*{\bulletllet};<0mm,0mm>*{}**@{},
\end{xy} := \betagin{xy}
<0mm,0mm>*{};<0mm,-3mm>*{}**@{-},
<0mm,0mm>*{};<0mm,6mm>*{}**@{-},
<0mm,0mm>*{\bulletllet};
<0mm,3mm>*{\bulletllet};
\end{xy}
$$
and on any other element $a\in {\mathcal P}(m,n)$ (which we identify pictorially with the $(m,n)$-corolla
whose vertex is decorated with $a$) by the formula
$$
\delta^+
\betagin{xy}
<0mm,0mm>*{\bulletllet};<0mm,0mm>*{}**@{},
<0mm,0mm>*{};<-8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0mm,0mm>*{};<4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<0mm,0mm>*{};<-8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0mm,0mm>*{};<4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}:= \delta
\betagin{xy}
<0mm,0mm>*{\bulletllet};<0mm,0mm>*{}**@{},
<0mm,0mm>*{};<-8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0mm,0mm>*{};<4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<0mm,0mm>*{};<-8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0mm,0mm>*{};<4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}
+
\overset{m-1}{\underset{i=0}{\sum}}
\betagin{xy}
<0mm,0mm>*{\bulletllet};<0mm,0mm>*{}**@{},
<0mm,0mm>*{};<-8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-3.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-6mm,5mm>*{..}**@{},
<0mm,0mm>*{};<0mm,5mm>*{}**@{-},
<0mm,5mm>*{\bulletllet};
<0mm,5mm>*{};<0mm,8mm>*{}**@{-},
<0mm,5mm>*{};<0mm,9mm>*{^{i\hspace{-0.2mm}+\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<3.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<6mm,5mm>*{..}**@{},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-4mm,5.5mm>*{^i}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<0mm,0mm>*{};<-8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0mm,0mm>*{};<4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}
- (-1)^{|a|}
\overset{n-1}{\underset{i=0}{\sum}}
\betagin{xy}
<0mm,0mm>*{\bulletllet};<0mm,0mm>*{}**@{},
<0mm,0mm>*{};<-8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-3.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-6mm,-5mm>*{..}**@{},
<0mm,0mm>*{};<0mm,-5mm>*{}**@{-},
<0mm,-5mm>*{\bulletllet};
<0mm,-5mm>*{};<0mm,-8mm>*{}**@{-},
<0mm,-5mm>*{};<0mm,-10mm>*{^{i\hspace{-0.2mm}+\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<3.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<6mm,-5mm>*{..}**@{},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-4mm,-6.9mm>*{^i}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
<0mm,0mm>*{};<-8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0mm,0mm>*{};<4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
\end{xy}.
$$
where $\delta$ is the original differential in ${\mathcal P}$.
The dg properad $({\mathcal P}^+, \delta^+)$ is uniquely characterized by the property: there is a 1-1 correspondence between representations
$$
\rho: {\mathcal P}^+ \longrightarrow {\mathcal E} nd_V
$$
of $({\mathcal P}^+, \delta^+)$ in a dg vector space $(V,d)$, and representations of ${\mathcal P}$ in the same space $V$
but equipped with a deformed differential $d+D$, where $D:=\rho(\begin{xy}
<0mm,-0.55mm>*{};<0mm,-3mm>*{}**@{-},
<0mm,0.5mm>*{};<0mm,3mm>*{}**@{-},
<0mm,0mm>*{\bullet};<0mm,0mm>*{}**@{},
\end{xy})$.
Clearly any ${\mathcal P}$-algebra is a ${\mathcal P}^+$-algebra by letting $D$ act trivially, so that we have a properad map ${\mathcal P}^+\to {\mathcal P}$.
Now, slightly abusively, we define $\mathrm{Der}({\mathcal P})$ as the complex of derivations of ${\mathcal P}^+$ preserving the map ${\mathcal P}^+\to {\mathcal P}$. Concretely, in all relevant cases ${\mathcal P}=\Omega({\mathcal C})$ is the cobar construction of a coaugmented coproperad ${\mathcal C}$. The definition is then made such that $\mathrm{Der}({\mathcal P})[-1]$ is identified with \eqref{equ:Defdefi} as a complex. On the other hand, if we were using ordinary derivations we would have to modify \eqref{equ:Defdefi} by replacing ${\mathcal C}$ by the cokernel of the coaugmentation $\overline{{\mathcal C}}$ on the right-hand side, thus complicating statements of several results.
We assure the reader that this modification is minor and made for technical reasons in the cases we consider, and results about our $\mathrm{Der}({\mathcal P})$ can be easily transcribed into results about the ordinary derivations if necessary.
Note however that $\mathrm{Der}({\mathcal P})$ carries a natural Lie bracket through the commutator.
The deformation complex of a (wheeled) properad ${\mathcal P}$ is by definition the dg Lie algebra $\mathrm{Der}(\tilde {\mathcal P})$ of derivations of a cofibrant resolution $\tilde {\mathcal P}\stackrel{\sim}{\to}{\mathcal P}$. It may be identified as a complex with the deformation complex of the identity map $\tilde {\mathcal P}\to \tilde {\mathcal P}$ (which controls deformations of ${\mathcal P}$-algebras) up to a degree shift:
\[
\mathrm{Der}(\tilde {\mathcal P}) \cong {\mathsf D\mathsf e\mathsf f }(\tilde {\mathcal P}\to \tilde {\mathcal P})[1].
\]
Note however that both $\mathrm{Der}({\mathcal P})$ and ${\mathsf D\mathsf e\mathsf f }(\tilde {\mathcal P}\to \tilde {\mathcal P})$ have natural dg Lie (or ${\mathcal L} ie_\infty$) algebra structures that are \emph{not} preserved by the above map. Furthermore, there is a quasi-isomorphism of dg Lie algebras
\begin{equation}\label{equ:Defsimpl}
{\mathsf D\mathsf e\mathsf f }(\tilde {\mathcal P}\to \tilde {\mathcal P})\to {\mathsf D\mathsf e\mathsf f }(\tilde {\mathcal P}\to {\mathcal P})
\end{equation}
The zeroth cohomology $H^0(\mathrm{Der}(\tilde {\mathcal P}))$ is of particular importance. It is a differential graded Lie algebra whose elements act on the space of
$\tilde {\mathcal P}$-algebra structures on any vector space. We shall see that in the examples we are interested in this dg Lie algebra is very rich, and that it acts non-trivially in general.
Using the explicit structure of the minimal resolutions of the properads $\mathcal{L}\mathit{ieb}cd$ and $\mathcal{L}\mathit{ieb}^\diamondcd$ (see \S 2.2 above) we can write down explicit models for the deformation complexes,
\begin{align*}
\mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d}) &=
\prod_{m,n\geq 1} \left(\mathcal{H}\mathit{olieb}_{c,d}(m,n) \otimes {\mathit s \mathit g\mathit n}_m^{\otimes |c|}\otimes {\mathit s \mathit g\mathit n}_n^{\otimes |d|}\right)^{{\mathbb S}_m\times {\mathbb S}_n}[1+c(1-m)+d(1-n)]
\\
\mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d}^\diamond) &= \prod_{m,n\geq 1} \left(\mathcal{H}\mathit{olieb}_{c,d}^\diamond(m,n)\otimes {\mathit s \mathit g\mathit n}_m^{\otimes |c|}\otimes {\mathit s \mathit g\mathit n}_n^{\otimes |d|} \right)^{{\mathbb S}_m\times {\mathbb S}_n}[[\hbar]] [1+c(1-m)+d(1-n)]
\end{align*}
Here $\hbar$ is a formal variable of degree $c+d$.
Each of the models on the right has a natural combinatorial interpretation as a graph complex.
For example, $\mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d})$ may be interpreted as a complex of directed graphs which have incoming and outgoing legs but have no closed paths of directed edges, for example
$$
\Gamma= \resizebox{15mm}{!}{ \xy
(0,0)*{\bullet}="d1",
(10,0)*{\bullet}="d2",
(-5,-5)*{}="dl",
(5,-5)*{}="dc",
(15,-5)*{}="dr",
(0,10)*{\bullet}="u1",
(10,10)*{\bullet}="u2",
(5,15)*{}="uc",
(15,15)*{}="ur",
(0,15)*{}="ul",
\ar @{<-} "d1";"d2" <0pt>
\ar @{<-} "d1";"dl" <0pt>
\ar @{<-} "d1";"dc" <0pt>
\ar @{<-} "d2";"dc" <0pt>
\ar @{<-} "d2";"dr" <0pt>
\ar @{<-} "u1";"d1" <0pt>
\ar @{<-} "u1";"d2" <0pt>
\ar @{<-} "u2";"d2" <0pt>
\ar @{<-} "u2";"d1" <0pt>
\ar @{<-} "uc";"u2" <0pt>
\ar @{<-} "ur";"u2" <0pt>
\ar @{<-} "ul";"u1" <0pt>
\endxy} \in \mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d})
$$
The value of the differential $\delta$ on an element $\Gamma\in \mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d})$ is obtained by splitting vertices of $\Gamma$ and by attaching new corollas at each single external leg of $\Gamma$,
$$
\delta \Gamma =
\delta_{\mathcal{H}\mathit{olieb}_{c,d}}\Gamma
\pm
\sum\begin{array}{c}
\resizebox{9mm}{!}{ \xy
(0,0)*+{\Gamma}="Ga",
(-5,5)*{\bullet}="0",
(-8,2)*{}="-1",
(-8,8)*{}="1",
(-5,8)*{}="2",
(-2,8)*{}="3",
\ar @{-} "0";"Ga" <0pt>
\ar @{-} "0";"-1" <0pt>
\ar @{-} "0";"1" <0pt>
\ar @{-} "0";"2" <0pt>
\ar @{-} "0";"3" <0pt>
\endxy}\end{array}
\pm
\sum\begin{array}{c}
\resizebox{9mm}{!}{ \xy
(0,0)*+{\Gamma}="Ga",
(-5,-5)*{\bullet}="0",
(-8,-2)*{}="-1",
(-8,-8)*{}="1",
(-5,-2)*{}="2",
(-2,-8)*{}="3",
\ar @{-} "0";"Ga" <0pt>
\ar @{-} "0";"-1" <0pt>
\ar @{-} "0";"1" <0pt>
\ar @{-} "0";"2" <0pt>
\ar @{-} "0";"3" <0pt>
\endxy}\end{array}
$$
Here $\delta_{\mathcal{H}\mathit{olieb}_{c,d}}$ acts on the vertices of $\Gamma$ by formula (\ref{LBk_infty}).
Similarly, $\mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d}^\diamond)$ may be interpreted as a complex of $\hbar$-power series of graphs with weighted vertices, for example,
$$
\Gamma= \resizebox{15mm}{!}{ \xy
(0,0)*+{_3}*\cir{}="d1",
(10,0)*+{_2}*\cir{}="d2",
(-5,-5)*{}="dl",
(5,-5)*{}="dc",
(15,-5)*{}="dr",
(0,10)*+{_0}*\cir{}="u1",
(10,10)*+{_3}*\cir{}="u2",
(5,15)*{}="uc",
(15,15)*{}="ur",
(0,15)*{}="ul",
\ar @{<-} "d1";"d2" <0pt>
\ar @{<-} "d1";"dl" <0pt>
\ar @{<-} "d1";"dc" <0pt>
\ar @{<-} "d2";"dc" <0pt>
\ar @{<-} "d2";"dr" <0pt>
\ar @{<-} "u1";"d1" <0pt>
\ar @{<-} "u1";"d2" <0pt>
\ar @{<-} "u2";"d2" <0pt>
\ar @{<-} "u2";"d1" <0pt>
\ar @{<-} "uc";"u2" <0pt>
\ar @{<-} "ur";"u2" <0pt>
\ar @{<-} "ul";"u1" <0pt>
\endxy}
\in \mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d}^\diamond).
$$
The value of the differential $\delta$ on an element $\Gamma\in \mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d}^\diamond)$ consists of three summands,
$$
\delta \Gamma=
\delta_{\mathcal{H}\mathit{olieb}_{c,d}^\diamond}\Gamma
\pm
\sum \hbar^{p+k-1}\begin{array}{c}
\resizebox{10mm}{!}{ \xy
(-3,4)*+{_k},
(0,0)*+{\Gamma}="Ga",
(-7,7)*+{_p}*\cir{}="0",
(-10,4)*{}="-1",
(-10,11)*{}="1",
(-7,11)*{}="2",
(-4,11)*{}="3",
(-5.3,6.8)*-{};(-0.2,1.5)*-{}
**\crv{(0.4,5)};
(-6.9,5.1)*-{};(-1.4,0.3)*-{}
**\crv{(-5,-1.5)};
(-6.0,5.4)*-{};(-1.4,0.7)*-{}
**\crv{(-4,-0.5)};
\ar @{-} "0";"-1" <0pt>
\ar @{-} "0";"1" <0pt>
\ar @{-} "0";"2" <0pt>
\ar @{-} "0";"3" <0pt>
\endxy}\end{array}
\pm
\sum \hbar^{p+k-1}\begin{array}{c}
\resizebox{10mm}{!}{ \xy
(-3,-4)*+{_k},
(0,0)*+{\Gamma}="Ga",
(-7,-7)*+{_p}*\cir{}="0",
(-10,-4)*{}="-1",
(-10,-11)*{}="1",
(-7,-11)*{}="2",
(-4,-11)*{}="3",
(-5.3,-6.8)*-{};(-0.2,-1.5)*-{}
**\crv{(0.4,-5)};
(-6.9,-5.1)*-{};(-1.4,-0.3)*-{}
**\crv{(-5,1.5)};
(-6.0,-5.4)*-{};(-1.4,-0.7)*-{}
**\crv{(-4,-0.5)};
\ar @{-} "0";"-1" <0pt>
\ar @{-} "0";"1" <0pt>
\ar @{-} "0";"2" <0pt>
\ar @{-} "0";"3" <0pt>
\endxy}\end{array}
$$
where the first summand comes from the action of the $\mathcal{H}\mathit{olieb}_{c,d}^\diamond$-differential
on internal vertices of $\Gamma$ by formula (\ref{2: d on Lie inv infty}), and in
the two terms on the right one sums over all ways of attaching a new vertex to some subset of the incoming or outgoing legs ($k$ many), and sums over all possible decorations $p$ of the added vertex, with an appropriate power of $\hbar$ as prefactor. Note that the power of $\hbar$ counts the number of loops added to the graph, if we count a vertex decorated by $p$ as contributing $p$ loops.
The Lie bracket is combinatorially obtained by inserting graphs into vertices of another.
The cohomology of all these graph complexes is hard to compute. We may however simplify the computation of three of the above four complexes by using formula (\ref{equ:Defsimpl}) and equivalently study instead the following much ``smaller" complexes,
\begin{align*}
{\mathsf D\mathsf e\mathsf f }(\mathcal{H}\mathit{olieb}_{c,d}\rightarrow \mathcal{L}\mathit{ieb}cd) &=
\prod_{m,n\geq 1} \left(\mathcal{L}\mathit{ieb}cd(m,n) \otimes {\mathit s \mathit g\mathit n}_m^{\otimes |c|}\otimes {\mathit s \mathit g\mathit n}_n^{\otimes |d|}\right)^{{\mathbb S}_m\times {\mathbb S}_n}[c(1-m)+d(1-n)]
\\
{\mathsf D\mathsf e\mathsf f }(\mathcal{H}\mathit{olieb}_{c,d}^\diamond\rightarrow \mathcal{L}\mathit{ieb}^\diamondcd) &= \prod_{m,n\geq 1} \left(\mathcal{L}\mathit{ieb}^\diamondcd(m,n)\otimes {\mathit s \mathit g\mathit n}_m^{\otimes |c|}\otimes {\mathit s \mathit g\mathit n}_n^{\otimes |d|} \right)^{{\mathbb S}_m\times {\mathbb S}_n} [[\hbar]][c(1-m)+d(1-n)]
\end{align*}
Note however that in passing from $\mathrm{Der}(\dots)$ to the (quasi-isomorphic) simpler complexes ${\mathsf D\mathsf e\mathsf f }(\dots)$ above we lose the dg Lie algebra structure, or rather there is a different Lie algebra structure on the above complexes.
The above complexes may again be interpreted as graph complexes. For example ${\mathsf D\mathsf e\mathsf f }(\mathcal{H}\mathit{olieb}_{c,d}\to \mathcal{L}\mathit{ieb}cd)$ consists of oriented trivalent graphs with incoming and outgoing legs, modulo the Jacobi and Drinfeld five term relations. The differential is obtained by attaching a trivalent vertex at one external leg in all possible ways.
\subsection{Complete variants}
It is more convenient for our purposes to consider (genus-)completed versions of the deformation complexes of the previous subsection. In particular, the genus filtration endows the properads $\widehat{\mathcal{H}\mathit{olieb}}cd$ and $\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd$ with complete topologies, and we define the complexes of \emph{continuous} derivations $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ and $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd)$.
More concretely, the difference of the complete and incomplete versions is as follows:
\begin{itemize}
\item Elements of $\mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d})$ of a fixed cohomological degree can be identified with possibly infinite series of graphs. However, in each fixed arity, the series must be a finite linear combination.
On the other hand, elements of $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ in a fixed degree are simply all series of graphs.
\item Similarly, elements of $\mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d}^\diamond)$ may be understood as power series in $\hbar$ with coefficients in the
series of graphs with the same finiteness condition as before.
On the other hand elements of $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd)$ are power series in $\hbar$ with coefficients arbitrary series of graphs.
\end{itemize}
Below we will always work with the complete versions of our properads and deformation/derivation complexes.
\subsection{\bf A map from the graph complex $\mathsf{GC}or_{c+d+1}$ to $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$}\label{3: subsect on GCor_3 to Der(LieB)} A derivation $D$ of any free properad ${\mathcal P}$
is uniquely determined by its values, $D(e)\in {\mathcal P}$, on the generators $e$ of ${\mathcal P}$.
There is a natural right action of the dg Lie algebra $\mathsf{fGC}^{or}_{c+d+1}$ on the genus completed dg properad $\widehat{\mathcal{H}\mathit{olieb}}cd$ by properadic derivations (cf.\ \cite{CMW}), i.e.\ there is a canonical morphism
of dg Lie algebras,
\begin{equation}\label{2: Morhism F from GC_3^or}
\begin{array}{rccc}
F\colon & \mathsf{fGC}^{or}_{c+d+1} &\to & \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)\\
& \Gamma & \to & F(\Gamma)
\end{array}
\end{equation}
with values of the derivation $F(\Gamma)$
on the generators of the (genus) completed properad $\widehat{\mathcal{H}\mathit{olieb}}cd$
given explicitly by
\begin{equation} \label{equ:def GC action 1}
\left(\begin{array}{c}\resizebox{12mm}{!}{\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}}\end{array}\right)\cdot F(\Gamma)
=
\sum_{s:[n]\rightarrow V(\Gamma)\atop \hat{s}:[m]\rightarrow V(\Gamma)} \begin{array}{c}\resizebox{9mm}{!} {\xy
(-6,7)*{^1},
(-3,7)*{^2},
(2.5,7)*{},
(7,7)*{^m},
(-3,-8)*{_2},
(3,-6)*{},
(7,-8)*{_n},
(-6,-8)*{_1},
(0,4.5)*+{...},
(0,-4.5)*+{...},
(0,0)*+{\Gamma}="o",
(-6,6)*{}="1",
(-3,6)*{}="2",
(3,6)*{}="3",
(6,6)*{}="4",
(-3,-6)*{}="5",
(3,-6)*{}="6",
(6,-6)*{}="7",
(-6,-6)*{}="8",
\ar @{-} "o";"1" <0pt>
\ar @{-} "o";"2" <0pt>
\ar @{-} "o";"3" <0pt>
\ar @{-} "o";"4" <0pt>
\ar @{-} "o";"5" <0pt>
\ar @{-} "o";"6" <0pt>
\ar @{-} "o";"7" <0pt>
\ar @{-} "o";"8" <0pt>
\endxy}\end{array}
\end{equation}
where the sum is taken over all ways of attaching the incoming and outgoing legs to the graph $\Gamma$, and we set to zero every resulting graph if it contains a vertex with valency $<3$ or
with no incoming or no outgoing edges.
Let us first check that the map $F$ has degree zero. If $\Gamma\in \mathsf{fGC}_{c+d+1}^{or}$ has $p$ vertices and $l$ edges, then $|\Gamma|=(c+d+1)(p-1) - (c+d)l$ so that the total degree of the l.h.s.\ in
(\ref{equ:def GC action 1}) equals
$$
(c+d+1)(p-1) - (c+d)l + 1 +c(1-m)+d(1-n)= (c+d+1)p - (c+d)l-cm-dn.
$$
On the other hand, each summand on the r.h.s.\ gives us a graph in $\widehat{\mathcal{H}\mathit{olieb}}cd$
with $p$ vertices, and each vertex $v$ has $|v|_{out} + \# \hat{s}^{-1}(v)$ output edges and $|v|_{in}+ \# {s}^{-1}(v)$ input edges,
where $|v|_{in}$ (resp.\ $|v|_{out}$) counts the number of inputs (resp.\ outputs) of $v$ in $\Gamma$. The degree of such a graph is equal to
\begin{align*}
\sum_{v\in V(\Gamma)}
\left(1+c(1-|v|_{out} - \# \hat{s}^{-1}(v)) + d(1-|v|_{in}- \# s^{-1}(v))\right)&=p +c(p-l-m) +d(p-l-n)\\
&= (c+d+1)p- (c+d)l -cm-dn
\end{align*}
as required. There is an implicit rule of signs in-built into formula (\ref{equ:def GC action 1}) which is completely analogous to the one defined in \S 7 of \cite{MaVo}.
Consider for a moment $\widehat{\mathcal{H}\mathit{olieb}}cd$ as a {\em non-differential} \, (completed) free properad, and let $\mathrm{Der}_{non-d}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ stand for its Lie algebra of derivations
which can be identified with the vector space
$$
\prod_{m,n} {\mathrm H\mathrm o\mathrm m}_{{\mathbb S}_m^{op}\times {\mathbb S}_n}\left({\mathbb K}\langle {\mathfrak C_n^m} \rangle,\widehat{\mathcal{H}\mathit{olieb}}cd(m,n)\right)=\prod_{m,n\geq 1} \left(\widehat{\mathcal{H}\mathit{olieb}}cd(m,n) \otimes {\mathit s \mathit g\mathit n}_m^{\otimes |c|}\otimes {\mathit s \mathit g\mathit n}_n^{\otimes |d|}\right)^{{\mathbb S}_m\times {\mathbb S}_n}[c(1-m)+d(1-n)]
$$
as any derivation is uniquely determined by its values on the generators
$$
{\mathfrak C_n^m}:=\begin{array}{c}\resizebox{12mm}{!}{\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}}\end{array}
$$
which can be chosen arbitrarily.
It is immediate from the definition of the Lie algebra structure $[\ ,\ ]$ in $\mathsf{GC}or_{c+d+1}$
that the map
$$
F: \mathsf{fGC}_{c+d+1}^{or} \longrightarrow \mathrm{Der}_{non-d}(\widehat{\mathcal{H}\mathit{olieb}}cd)
$$
given by the formula (\ref{equ:def GC action 1})
respects the Lie brackets,
$$
\left(\mathfrak C_n^m \cdot F(\Gamma_1)\right)\cdot F(\Gamma_2) - (-1)^{|\Gamma_1||\Gamma_2|}
\left(\mathfrak C_n^m \cdot F(\Gamma_2)\right)\cdot F(\Gamma_1) = \mathfrak C_n^m \cdot F([\Gamma_1,\Gamma_2])
$$
for any $\Gamma_1,\Gamma_2\in \mathsf{GC}or_{c+d+1}$. This result implies that any Maurer-Cartan element $\Gamma$ in the Lie algebra
$(\mathsf{fGC}^{or}_{c+d+1}, [\ ,\ ])$ gives rise to a continuous differential
$$
d_\Gamma: \mathfrak C_n^m \longrightarrow \mathfrak C_n^m \cdot F(\Gamma)
$$
in the properad $\widehat{\mathcal{H}\mathit{olieb}}cd$. A remarkable (and almost obvious) fact is that the Maurer-Cartan element
$$
\Gamma= \xy
(0,-3)*{\bullet}="a",
(0,3)*{\bullet}="b",
\ar @{->} "a";"b" <0pt> \endxy
$$
induces the standard differential (\ref{LBk_infty}) in $\widehat{\mathcal{H}\mathit{olieb}}cd$. This implies that the morphism (\ref{2: Morhism F from GC_3^or}) induces (by changing the right action into a left action via a standard sign factor) a map of {\em dg}\, Lie algebras
\begin{equation}\label{2: F final from GC_3^or to DerLieBi}
F\colon \mathsf{GC}^{or}_{c+d+1}\to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)\,
\end{equation}
which is proven below to be a quasi-isomorphism (up to one class).
\subsubsection{\bf Remark}
Interpreting the right hand side in (\ref{2: F final from GC_3^or to DerLieBi}) as a graph complex itself (see section {\S \ref{2 sec:defcomplexes}}), we see that the map $F$ sends a graph $\Gamma\in \mathsf{GC}_{c+d+1}^{or}$ to the series of graphs
\[
\sum_{m,n\geq 1}
\sum_{s:[n]\rightarrow V(\Gamma)\atop \hat{s}:[m]\rightarrow V(\Gamma)}
\overbrace{
\underbrace{\begin{array}{c}\resizebox{10mm}{!} { \xy
(0,4.5)*+{...},
(0,-4.5)*+{...},
(0,0)*+{\Gamma}="o",
(-5,6)*{}="1",
(-3,6)*{}="2",
(3,6)*{}="3",
(5,6)*{}="4",
(-3,-6)*{}="5",
(3,-6)*{}="6",
(5,-6)*{}="7",
(-5,-6)*{}="8",
\ar @{-} "o";"1" <0pt>
\ar @{-} "o";"2" <0pt>
\ar @{-} "o";"3" <0pt>
\ar @{-} "o";"4" <0pt>
\ar @{-} "o";"5" <0pt>
\ar @{-} "o";"6" <0pt>
\ar @{-} "o";"7" <0pt>
\ar @{-} "o";"8" <0pt>
\endxy}\end{array}
}_{n\times}
}^{m\times}
\]
where the second summation symbol has exactly the same meaning as in (\ref{equ:def GC action 1}).
\subsection{\bf A map from the graph complex $\mathsf{GC}or_{c+d+1}[[\hbar]]$ to {$\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}_{c,d}^\diamond})$}}
There is a natural right action of the {\em non-differential}\, Lie algebra $(\mathsf{GC}_{c+d+1}^{or}[[\hbar]], [\ ,\ ])$ on the {\em non-differential}\,
free properad $\mathcal{H}\mathit{olieb}_{c,d}^\diamond$ by continuous derivations, that is, there is a continuous morphism of
topological Lie algebras,
$$
F^\diamond: \left( \mathsf{fGC}^{or}_{c+d+1}[[\hbar]], [\ ,\ ]\right) \longrightarrow \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd)
$$
For any monomial $\hbar^k\Gamma\in \mathsf{GC}_{c+d+1}^{or}[[\hbar]]$ the value of the associated derivation $F^\diamond(\hbar^k \Gamma)$
on the generators of $\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd$
is given, by definition, by
\begin{equation} \label{equ:def GC action 2}
\left(
\begin{array}{c}\resizebox{14mm}{!}{\xy
(-9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,-6)*{\ldots};
(-10,-8)*{_1};
(-6,-8)*{_2};
(10,-8)*{_n};
(-9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,6)*{\ldots};
(-10,8)*{_1};
(-6,8)*{_2};
(10,8)*{_m};
\endxy}\end{array}
\right)\cdot F^\diamond(\hbar^k\Gamma)
:=
\left\{\begin{array}{ll} \displaystyle
\sum_{s:[n]\rightarrow V(\Gamma)\atop \hat{s}:[m]\rightarrow V(\Gamma)}
\sum_{a=k+\sum_{v\in V(\Gamma)} a_v\atop a_v\geq 0}
\begin{array}{c}\resizebox{9mm}{!} {\xy
(-6,7)*{^1},
(-3,7)*{^2},
(2.5,7)*{},
(7,7)*{^m},
(-3,-8)*{_2},
(3,-6)*{},
(7,-8)*{_n},
(-6,-8)*{_1},
(0,4.5)*+{...},
(0,-4.5)*+{...},
(0,0)*+{\Gamma}="o",
(-6,6)*{}="1",
(-3,6)*{}="2",
(3,6)*{}="3",
(6,6)*{}="4",
(-3,-6)*{}="5",
(3,-6)*{}="6",
(6,-6)*{}="7",
(-6,-6)*{}="8",
\ar @{-} "o";"1" <0pt>
\ar @{-} "o";"2" <0pt>
\ar @{-} "o";"3" <0pt>
\ar @{-} "o";"4" <0pt>
\ar @{-} "o";"5" <0pt>
\ar @{-} "o";"6" <0pt>
\ar @{-} "o";"7" <0pt>
\ar @{-} "o";"8" <0pt>
\endxy}\end{array}
& \ \ \ \ \ \mbox{if}\ \ k\leq a\\
0 & \ \ \ \ \ \mbox{if}\ \ k>a,
\end{array}
\right.
\end{equation}
where the first sum is taken over all ways to attach $m$ output legs and $n$ input legs to the vertices
of the graph $\Gamma$, and the second sum is taken over all ways to decorate the vertices of $\Gamma$ with non-negative integers $a_1,\ldots,a_{\# V(\Gamma)}$ such that they sum to $a-k$; moreover, we set a graph on the r.h.s.\ to zero if there is at least one vertex $v$ with the number $n_v$ of incoming edges equal to zero, or the number $m_v$ of outgoing edges equal to zero, or if the condition $n_v+m_v+a_v\geq 3$ is violated. There is an implicit rule of signs in formula
(\ref{equ:def GC action 2}) which is identical to the one in the subsection above.
It is easy to check that $F^\diamond$ has degree zero and respects Lie brackets. Therefore, it sends any
Maurer-Cartan element in $(\mathsf{fGC}^{or}_{c+d+1}[[\hbar]], [\ ,\ ])$ into a differential in the free prop
$\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd$. It is again almost immediate to see that the differential induced by the
Maurer-Cartan element (\ref{2: Phi_hbar MC element}) is precisely the one given in (\ref{2: d on Lie inv infty}), i.e.\ the one which makes $\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd$ into a minimal resolution of $\widehat{\mathcal{L}\mathit{ieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd$.
Therefore we conclude that there is a morphism of {\em dg}\, Lie algebras
\begin{equation}\label{3: F from GCor[[hbar]] to Der LoBinfty}
F^\diamond: \left(\mathsf{GC}or_{c+d+1}[[\hbar]], \delta_\hbar=[\Phi_\hbar,\ ]\right) \longrightarrow \left(\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd), d:=[\delta,\ ]\right).
\end{equation}
We shall prove below that this map is almost a quasi-isomorphism.
\subsubsection{\bf Remark}
Interpreting $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^{\Ba{c}
_{\hspace{-2mm}\diamond} \Ea}cd)$ as a graph complex (see section {\S \ref{2 sec:defcomplexes}}), we can reformulate the map $F^\diamond$ as the one which sends a monomial $\hbar^k\Gamma\in \mathsf{GC}_{c+d+1}^{or}[[\hbar]]$ to the series of graphs
\[
\sum_{m,n\geq 1}
\sum_{s:[n]\rightarrow V(\Gamma)\atop \hat{s}:[m]\rightarrow V(\Gamma)}
\sum_{a=k+\sum_{v\in V(\Gamma)} a_v\atop a_v\geq 0}
\overbrace{
\underbrace{\begin{array}{c}\resizebox{10mm}{!} { \xy
(0,4.5)*+{...},
(0,-4.5)*+{...},
(0,0)*+{\Gamma}="o",
(-5,6)*{}="1",
(-3,6)*{}="2",
(3,6)*{}="3",
(5,6)*{}="4",
(-3,-6)*{}="5",
(3,-6)*{}="6",
(5,-6)*{}="7",
(-5,-6)*{}="8",
\ar @{-} "o";"1" <0pt>
\ar @{-} "o";"2" <0pt>
\ar @{-} "o";"3" <0pt>
\ar @{-} "o";"4" <0pt>
\ar @{-} "o";"5" <0pt>
\ar @{-} "o";"6" <0pt>
\ar @{-} "o";"7" <0pt>
\ar @{-} "o";"8" <0pt>
\endxy}\end{array}
}_{n\times}
}^{m\times}
\]
where the second and third summation symbols have exactly the same meaning as in (\ref{equ:def GC action 2}). We shall use this fact below.
{
\Large
\section{\bf Computations of the cohomology of deformation complexes}
}
In this section we compute the cohomology of several of the deformation complexes, show Theorems {\ref{thm:Fqiso}} and {\ref{thm:Fhbarqiso}}, and discuss their concrete applications.
\subsection{The proof of Theorem {\ref{thm:Fqiso}}} \lambdabel{app:defproof1}
Let us recall the definition of the graph complex $\widehat{\GC}^{or}_d$ from \cite[section 3.3]{Wi2}.
The elements of $\widehat{\GC}^{or}_d$ are ${\mathbb K}$-linear series in directed acyclic graphs with outgoing legs such that all vertices are at least bivalent, and such that there are no bivalent vertices with one incoming and one outgoing edge.\footnote{The last condition is again not present in \cite{Wi2}, but it does not change the cohomology.} We set to zero graphs containing vertices without outgoing edges. Here is an example graph:
$$
\begin{array}{c}\resizebox{6mm}{!} {\xy
(0,0)*{\bullet}="o",
(-5,6)*{}="d1",
(-2,6)*{}="d2",
(2,6)*{}="d3",
(5,6)*{}="d4",
\ar @{->} "o";"d1" <0pt>
\ar @{->} "o";"d2" <0pt>
\ar @{->} "o";"d3" <0pt>
\ar @{->} "o";"d4" <0pt>
\ar@/^0.6pc/(0,-8)*{\bullet};(0,0)*{\bullet}
\ar@/^{-0.6pc}/(0,-8)*{\bullet};(0,0)*{\bullet}
\endxy}
\end{array}
.$$
The degrees are computed just as for graphs occurring in $\mathsf{GC}or_d$, with the external legs considered to be of degree 0. For the description of the differential we refer the reader to \cite[section 3.3]{Wi2}.
There is a map $\Psi: \mathsf{GC}or_d\to \widehat{\GC}^{or}_d$ sending a graph $\Gamma$ to the linear combination
\begin{equation}\label{equ:hairymap}
\Gamma \mapsto
\sum_{j=1}^\infty
\overbrace{
\begin{array}{c}\resizebox{9mm}{!} {\xy
(0,5.2)*+{...},
(0,0)*+{\Gamma}="o",
(-3,7)*{}="5",
(3,7)*{}="6",
(5,7)*{}="7",
(-5,7)*{}="8",
\ar @{->} "o";"5" <0pt>
\ar @{->} "o";"6" <0pt>
\ar @{->} "o";"7" <0pt>
\ar @{->} "o";"8" <0pt>
\endxy}\end{array}
}^{j\times}
\end{equation}
where the picture on the right means that one should sum over all ways of connecting $j$ outgoing edges to the graph $\Gamma$. Graphs for which there remain vertices with no outgoing edge are identified with $0$.
The following proposition has been shown in loc. cit.
\subsubsection{\bf Proposition}[Proposition 3 of \cite{Wi2}]\label{prop:GChGC}
{\em The map $\Psi: \mathsf{GC}or_d \to \widehat{\GC}^{or}_d$ is a quasi-isomorphism up to the class in $H(\widehat{\GC}^{or}_d)$ represented by the graph cocycle
\begin{equation}
\label{equ:singleclass}
\sum_{j\geq 2}
(j-1)
\overbrace{
\begin{array}{c}\resizebox{9mm}{!} {\xy
(0,5.2)*+{...},
(0,0)*{\bullet}="o",
(-3,7)*{}="5",
(3,7)*{}="6",
(5,7)*{}="7",
(-5,7)*{}="8",
\ar @{->} "o";"5" <0pt>
\ar @{->} "o";"6" <0pt>
\ar @{->} "o";"7" <0pt>
\ar @{->} "o";"8" <0pt>
\endxy}\end{array}
}^{j\times}.
\end{equation}
}
There is a map $G:\widehat{\GC}^{or}_{c+d+1} \to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ sending a graph $\overbrace{
\begin{array}{c}\resizebox{9mm}{!} {\xy
(0,5.2)*+{...},
(0,0)*+{\Gamma}="o",
(-3,7)*{}="5",
(3,7)*{}="6",
(5,7)*{}="7",
(-5,7)*{}="8",
\ar @{->} "o";"5" <0pt>
\ar @{->} "o";"6" <0pt>
\ar @{->} "o";"7" <0pt>
\ar @{->} "o";"8" <0pt>
\endxy}\end{array}}^{m\times} \in \widehat{\GC}^{or}_{c+d+1}$ to the series
\begin{equation}
G(\Gamma)=
\sum_{n}
\overbrace{
\underbrace{ \begin{array}{c}\resizebox{9mm}{!} {\xy
(0,4.9)*+{...},
(0,-4.9)*+{...},
(0,0)*+{\Gamma}="o",
(-5,6)*{}="1",
(-3,6)*{}="2",
(3,6)*{}="3",
(5,6)*{}="4",
(-3,-6)*{}="5",
(3,-6)*{}="6",
(5,-6)*{}="7",
(-5,-6)*{}="8",
\ar @{->} "o";"1" <0pt>
\ar @{->} "o";"2" <0pt>
\ar @{->} "o";"3" <0pt>
\ar @{->} "o";"4" <0pt>
\ar @{<-} "o";"5" <0pt>
\ar @{<-} "o";"6" <0pt>
\ar @{<-} "o";"7" <0pt>
\ar @{<-} "o";"8" <0pt>
\endxy}\end{array}
}_{n\times}
}^{m\times}
\end{equation}
The map $F : \mathsf{GC}_{c+d+1}^{or}\to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ from Theorem {\ref{thm:Fqiso}} factors through the map $G$ above, i.~e., it can be written as the composition
\[
\mathsf{GC}_{c+d+1}^{or}\stackrel{\Psi}{\to} \widehat{\GC}^{or}_{c+d+1} \stackrel{G}{\to} \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd).
\]
In view of Proposition {\ref{prop:GChGC}}, Theorem {\ref{thm:Fqiso}} hence follows immediately from the following result.
\subsubsection{\bf Proposition}\label{prop:Gqiso}
{\em The map $G:\widehat{\GC}^{or}_{c+d+1} \to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ is a quasi-isomorphism.
}
\begin{proof}
For a graph in $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ we will call its \emph{skeleton} the graph obtained in the following way:
\begin{enumerate}
\item Remove all input legs and recursively remove all valence 1 vertices created.
\item Remove valence 2 vertices with one incoming and one outgoing edge and connect the two edges.
\end{enumerate}
An example of a graph and its skeleton is the following
\begin{align*}
\text{graph: }&
\begin{array}{c}
\resizebox{17mm}{!}
{ \xy
(0,-10)*{\bullet}="u",
(-5,-5)*{\bullet}="L",
(5,-5)*{\bullet}="R",
(0,0)*{\bullet}="d",
(-5,-15)*{}="u1",
(5,-15)*{}="u2",
(0,5)*{}="d1",
(-10,0)*{\bullet}="b",
(-10,-5)*{\bullet}="a",
(-15,-10)*{}="a1",
(-5,-10)*{}="a2",
(-15,5)*{}="b1",
\ar @{->} "u1";"u" <0pt>
\ar @{->} "u2";"u" <0pt>
\ar @{->} "u";"L" <0pt>
\ar @{->} "u";"R" <0pt>
\ar @{->} "L";"d" <0pt>
\ar @{->} "R";"d" <0pt>
\ar @{->} "d";"d1" <0pt>
\ar @{->} "L";"b" <0pt>
\ar @{->} "a1";"a" <0pt>
\ar @{->} "a2";"a" <0pt>
\ar @{->} "b";"b1" <0pt>
\ar @{->} "a";"b" <0pt>
\endxy}
\end{array}
&
\text{skeleton: }&
\begin{array}{c}
\resizebox{17mm}{!}
{ \xy
(0,-10)*{\bullet}="u",
(-5,-5)*{\bullet}="L",
(5,-5)*{\bullet}="R",
(0,0)*{\bullet}="d",
(-5,-15)*{}="u1",
(5,-15)*{}="u2",
(0,5)*{}="d1",
(-15,-10)*{}="a1",
(-5,-10)*{}="a2",
(-15,5)*{}="b1",
\ar @{->} "u";"L" <0pt>
\ar @{->} "u";"R" <0pt>
\ar @{->} "L";"d" <0pt>
\ar @{->} "R";"d" <0pt>
\ar @{->} "d";"d1" <0pt>
\ar @{->} "L";"b1" <0pt>
\endxy}
\end{array}
\end{align*}
We put a filtration on $\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ by the total number of vertices in the skeleton. Let $\mathrm{gr}\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$ be the associated graded.
Note that for elements in the image of some graph $\Gamma\in \widehat{\GC}^{or}_{c+d+1}$ under $G$ the skeleton is just the graph $\Gamma$, and hence there is a map of complexes $\widehat{\GC}^{or}_{c+d+1} \to \mathrm{gr}\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd)$, where we consider the left hand side with zero differential.
We claim that the induced map $\widehat{\GC}^{or}_{c+d+1} \to H(\mathrm{gr}\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}cd))$ is an isomorphism. From this claim the Proposition follows immediately by a standard spectral sequence argument.
The differential on $\mathrm{gr}\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{c,d})$ does not change the skeleton. Hence the complex $\mathrm{gr}\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{c,d})$ splits into a direct product of complexes, say $\tilde C_\gamma$, one for each skeleton $\gamma$
\[
\mathrm{gr}\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{c,d}) = \prod_\gamma \tilde C_\gamma .
\]
Furthermore, each skeleton represents an isomorphism class of graphs, and we may write
\[
\tilde C_\gamma = C_{\tilde \gamma}^{\mathrm{Aut}_\gamma}
\]
where $C_{\tilde \gamma}$ is an appropriately defined complex for one representative $\tilde \gamma$ of the isomorphism class $\gamma$ and $\mathrm{Aut}_\gamma$ is the automorphism group associated to the skeleton. In other words, the $\tilde \gamma$ now has distinguishable vertices and edges.
More concretely, the complex $C_{\tilde \gamma}$ is the complex of ${\mathbb K}$-linear series of graphs obtained from $\tilde \gamma$ by
\begin{enumerate}
\item Adding some bivalent vertices with one input and one output on edges. We call these vertices ``edge vertices''.
\item Attaching input forests at the vertices, such that all vertices are at least trivalent and have at least one input and one output.
We call the forest attached to a vertex the forest of that vertex.
\end{enumerate}
An example is the following:
\[
\begin{array}{c}
\resizebox{17mm}{!}
{ \xy
(0,-10)*{\bullet}="u",
(-5,-5)*{\bullet}="L",
(5,-5)*{\bullet}="R",
(0,0)*{\bullet}="d",
(-5,-15)*{}="u1",
(5,-15)*{}="u2",
(0,5)*{}="d1",
(-15,-10)*{}="a1",
(-5,-10)*{}="a2",
(-15,5)*{}="b1",
\ar @{->} "u";"L" <0pt>
\ar @{->} "u";"R" <0pt>
\ar @{->} "L";"d" <0pt>
\ar @{->} "R";"d" <0pt>
\ar @{->} "d";"d1" <0pt>
\ar @{->} "L";"b1" <0pt>
\endxy}
\end{array}
\ \ \stackrel{\text{add edge vertices}}{\longrightarrow} \ \
\begin{array}{c}
\resizebox{17mm}{!}
{ \xy
(0,-10)*{\bullet}="u",
(-5,-5)*{\bullet}="L",
(5,-5)*{\bullet}="R",
(0,0)*{\bullet}="d",
(-5,-15)*{}="u1",
(5,-15)*{}="u2",
(0,5)*{}="d1",
(-10,0)*{\bullet}="b",
(-15,-10)*{}="a1",
(-5,-10)*{}="a2",
(-15,5)*{}="b1",
\ar @{->} "u";"L" <0pt>
\ar @{->} "u";"R" <0pt>
\ar @{->} "L";"d" <0pt>
\ar @{->} "R";"d" <0pt>
\ar @{->} "d";"d1" <0pt>
\ar @{->} "L";"b" <0pt>
\ar @{->} "b";"b1" <0pt>
\endxy}
\end{array}
\ \ \stackrel{\text{add input forests}}{\longrightarrow}\ \
\begin{array}{c}
\resizebox{17mm}{!}
{ \xy
(0,-10)*{\bullet}="u",
(-5,-5)*{\bullet}="L",
(5,-5)*{\bullet}="R",
(0,0)*{\bullet}="d",
(-5,-15)*{}="u1",
(5,-15)*{}="u2",
(0,5)*{}="d1",
(-10,0)*{\bullet}="b",
(-10,-5)*{\bullet}="a",
(-15,-10)*{}="a1",
(-5,-10)*{}="a2",
(-15,5)*{}="b1",
\ar @{->} "u1";"u" <0pt>
\ar @{->} "u2";"u" <0pt>
\ar @{->} "u";"L" <0pt>
\ar @{->} "u";"R" <0pt>
\ar @{->} "L";"d" <0pt>
\ar @{->} "R";"d" <0pt>
\ar @{->} "d";"d1" <0pt>
\ar @{->} "L";"b" <0pt>
\ar @{->} "a1";"a" <0pt>
\ar @{->} "a2";"a" <0pt>
\ar @{->} "b";"b1" <0pt>
\ar @{->} "a";"b" <0pt>
\endxy}
\end{array}
\]
We next put another filtration on $C_{\tilde \gamma}$ by the number of edge vertices added in the first step above and consider the associated graded $\mathrm{gr} C_{\tilde \gamma}$.
Note that the differential of $\mathrm{gr} C_{\tilde \gamma}$ acts on each of the forests attached to the vertices separately and hence the complex splits into a (completed) tensor product of complexes, one for each such vertex. Let us call the complex made from the possible forests at the vertex $v$ the forest complex at that vertex.
By the same argument showing that the cohomology of a free ${\mathcal L}ie_\infty$ algebra generated by a single generator is two-dimensional, we find that the
forest complex at $v$ has either one- or two-dimensional cohomology. If the vertex $v$ has no incoming edge then the cohomology of the forest complex is one-dimensional, the class being represented by the forests
\[
\sum_{j\geq 1}
\underbrace{
\begin{array}{c}\resizebox{9mm}{!} {\xy
(0,-4.5)*+{...},
(0,2)*{_v},
(0,0)*{\bullet}="o",
(-5,-5)*{}="1",
(-3,-5)*{}="2",
(3,-5)*{}="3",
(5,-5)*{}="4",
\ar @{<-} "o";"1" <0pt>
\ar @{<-} "o";"2" <0pt>
\ar @{<-} "o";"3" <0pt>
\ar @{<-} "o";"4" <0pt>
\endxy}\end{array}
}_{j \times} \,.
\]
If the vertex $v$ already has an incoming edge, then there is one additional class obtained by not adding any input forest.
Hence we find that $H(\mathrm{gr} C_{\tilde \gamma})$ is spanned by graphs obtained from $\tilde \gamma$ as follows:
\begin{enumerate}
\item Add some bivalent vertices with one input and one output on edges.
\item For each vertex that is either not at least trivalent or does not have an incoming edge, sum over all ways of attaching incoming legs at that vertex.
\item For at least trivalent vertices with an incoming edge, there is a choice of either not adding anything at that vertex, or summing over all ways of attaching incoming legs at that vertex. Let us call the vertices for which the first choice is made bald vertices and the others hairy.
\end{enumerate}
Let us look at the next page in the spectral sequence associated to our filtration on $C_{\tilde \gamma}$.
The differential creates one edge vertex by either splitting an existing edge vertex or by splitting a skeleton vertex. Again, the complex splits into a product of complexes, one for each edge of $\tilde \gamma$. For each such edge we have to consider 3 cases separately:
\begin{enumerate}
\item Both endpoints in $\tilde \gamma$ are hairy.
\item Both endpoints in $\tilde \gamma$ are bald.
\item One endpoint is hairy and one is bald.
\end{enumerate}
We leave it to the reader to check that:
\begin{enumerate}
\item In the first case the cohomology is one-dimensional, represented by a single edge without edge vertices.
\item In the third case the cohomology vanishes.
\end{enumerate}
Since there is necessarily at least one hairy vertex in the graph, the second assertion implies that if there is a bald vertex as well, the resulting complex is acyclic.
Hence all vertices must be hairy. By the first assertion the cohomology is one-dimensional for each skeleton.
One easily checks that the representative is exactly the image of the skeleton considered as element in $\widehat{\GC}^{or}_{c+d+1}$. Hence the proposition follows.
\end{proof}
\subsubsection{\bf Remark}\label{rem:alternative Fqiso proof}
There is also an alternative way of computing the cohomology of the deformation complex $\mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d})$ (and of its completion).
Namely, by Koszulness of $\mathcal{L}\mathit{ieb}_{c,d}$ this complex is quasi-isomorphic to $\mathsf{Def}(\mathcal{H}\mathit{olieb}_{c,d}\to \mathcal{L}\mathit{ieb}_{c,d})[1]$.
It is well known that the prop governing Lie bialgebras $\mathsf{LieBi}P$ may be written as
\[
\mathsf{LieBi}P(n,m) \cong \bigoplus_N {\mathcal L}ieP(n,N) \otimes_{{\mathbb S}_N} {\mathcal L}ieCP(N,m)
\]
using the props governing Lie algebras and Lie coalgebras. Interpreting elements of the above prop as linear combinations of directed acyclic graphs, the sub-properad $\mathsf{LieBi}$ may be obtained as that formed by the connected such graphs.
It is hence an easy exercise to check that $\mathsf{Def}(\mathcal{H}\mathit{olieb}_{c,d} \to \mathcal{L}\mathit{ieb}_{c,d})[1]$ is identical to the complex $\mathsf{Def}(\mathrm{hoe}_{c+d} \to e_{c+d})_{\rm conn}[1]$ from \cite{Wi1}, up to unimportant completion issues.
The cohomology of the latter complex has been computed in loc. cit. to be
\[
H(\mathsf{GC}_{c+d}) \oplus \bigoplus_{j\geq 1\atop j\equiv 2(c+d)+1 \bmod 4} {\mathbb K}[c+d-j] \oplus {\mathbb K} \, .
\]
Using the main result of \cite{Wi2} this agrees with the cohomology as computed by Theorem {\ref{thm:Fqiso}}. Conversely, the above proof of Theorem {\ref{thm:Fqiso}} together with this remark yields an alternative proof of the main result of \cite{Wi2}.
\subsection{The proof of Theorem {\ref{thm:Fhbarqiso}}}\label{app:defproof2}
Let us next consider Theorem {\ref{thm:Fhbarqiso}}, whose proof will be a close analog of that of Theorem {\ref{thm:Fqiso}} in the previous subsection.
There is a natural differential graded Lie algebra structure on $\widehat{\GC}^{or}_{c+d+1}$ such that the map $\Psi: \mathsf{GC}^{or}_{c+d+1}\to \widehat{\GC}^{or}_{c+d+1}$ from the previous section is a map of Lie algebras. The map $\Psi$ extends $\hbar$-linearly to a map of graded Lie algebras
$$
\Psi_\hbar: \mathsf{GC}^{or}_{c+d+1}[[\hbar]]\to \widehat{\GC}^{or}_{c+d+1}[[\hbar]].
$$
The Maurer-Cartan element $\Phi_\hbar\in \mathsf{GC}^{or}_{c+d+1}[[\hbar]]$ from (\ref{2: Phi_hbar MC element}) is sent to a Maurer-Cartan element $\hat \Phi_\hbar:=\Psi_\hbar(\Phi_\hbar) \in \widehat{\GC}^{or}_{c+d+1}[[\hbar]]$. We endow $\widehat{\GC}^{or}_{c+d+1}[[\hbar]]$ with the differential
\[
d_\hbar \Gamma = [\hat \Phi_\hbar, \Gamma].
\]
In particular it follows that we have a map of differential graded Lie algebras
\[
\Psi_\hbar: (\mathsf{GC}^{or}_{c+d+1}[[\hbar]], d_\hbar)\to (\widehat{\GC}^{or}_{c+d+1}[[\hbar]], d_\hbar).
\]
The map $F_\hbar$ from Theorem {\ref{thm:Fhbarqiso}} factors through $\widehat{\GC}^{or}_{c+d+1}[[\hbar]]$:
\[
\mathsf{GC}_{c+d+1}^{or}[[\hbar]]\stackrel{\Psi_\hbar}{\longrightarrow} \widehat{\GC}^{or}_{c+d+1}[[\hbar]] \stackrel{G_\hbar}{\longrightarrow} \mathrm{Der}(\mathcal{L}\mathit{ieb}^\diamond_{c,d}).
\]
The second map $G_\hbar: \widehat{\GC}^{or}_{c+d+1}[[\hbar]]\to \mathrm{Der}(\mathcal{H}\mathit{olieb}_{c,d})$ sends $\hbar^N\Gamma$, for $\Gamma\in \widehat{\GC}^{or}_{c+d+1}$, to
\[
\sum_{j\geq 1}\sum_{k=0}^N \hbar^{N-k}
\begin{array}{c} \xy
(0,-5.5)*+{...},
(0,0)*+{_{\Gamma_k}}*\cir{}="o",
(-5,-7)*{}="1",
(-3,-7)*{}="2",
(3,-7)*{}="3",
(5,-7)*{}="4",
(-3,5)*{}="5",
(3,5)*{}="6",
(5,5)*{}="7",
(-5,5)*{}="8",
\ar @{<-} "o";"1" <0pt>
\ar @{<-} "o";"2" <0pt>
\ar @{<-} "o";"3" <0pt>
\ar @{<-} "o";"4" <0pt>
\endxy
\end{array}
\]
where we again sum over all ways of attaching the incoming legs, setting to zero graphs with vertices without incoming edges. Furthermore, $\Gamma_k$ is the linear combination of graphs obtained by summing over all ways of assigning weights to the vertices of $\Gamma$ such that the total weight is $k$.
We have the following two results, from which Theorem {\ref{thm:Fhbarqiso}} immediately follows.
\subsubsection{\bf Proposition}
{\em The map $\Psi_\hbar: (\mathsf{GC}_{c+d+1}^{or}[[\hbar]],d_\hbar) \to (\widehat{\GC}^{or}_{c+d+1}[[\hbar]],d_\hbar)$ is a quasi-isomorphism up to the classes $T{\mathbb K}[[\hbar]]\subset \widehat{\GC}^{or}_{c+d+1}[[\hbar]]$ where
\[
T=
\sum_{m,p}
(m+2p-2)
\overbrace{
\xy
(0,4.5)*+{...},
(0,0)*+{_p}*\cir{}="o",
(-5,-5)*{}="1",
(-3,-5)*{}="2",
(3,-5)*{}="3",
(5,-5)*{}="4",
(-3,5)*{}="5",
(3,5)*{}="6",
(5,5)*{}="7",
(-5,5)*{}="8",
\ar @{->} "o";"5" <0pt>
\ar @{->} "o";"6" <0pt>
\ar @{->} "o";"7" <0pt>
\ar @{->} "o";"8" <0pt>
\endxy
}^{m\times}.
\]
}
\begin{proof}[Proof sketch]
Take filtrations on $\mathsf{GC}^{or}_{c+d+1}[[\hbar]]$ and $\widehat{\GC}^{or}_{c+d+1}[[\hbar]]$ by the power of $\hbar$.
The differential on the associated graded spaces is the $\hbar$-linear extension of the differentials on $\mathsf{GC}^{or}_{c+d+1}$ and $\widehat{\GC}^{or}_{c+d+1}$. Hence by Proposition {\ref{prop:GChGC}} the map $\Psi_\hbar$ is a quasi-isomorphism on the level of the associated graded spaces, up to the classes above. The result follows by a standard spectral sequence argument, noting that the above element $T$ is indeed $d_\hbar$-closed.
\end{proof}
\subsubsection{\bf Proposition}\label{prop:Ghbarqiso}
{\em The map $G_\hbar: \widehat{\GC}^{or}_{c+d+1}[[\hbar]]\to \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond_{c,d})$ is a quasi-isomorphism.
}
\begin{proof}[Proof sketch]
Take filtrations on $\widehat{\GC}^{or}_{c+d+1}[[\hbar]]$ and
\[
\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond_{c,d})\cong
\prod_{m,n\geq 1} \left(\widehat{\mathcal{L}\mathit{ieb}}^\diamond_{c,d}(m,n)\otimes {\mathit s \mathit g\mathit n}_m^{\otimes |c|}\otimes {\mathit s \mathit g\mathit n}_n^{\otimes |d|} \right)^{{\mathbb S}_m\times {\mathbb S}_n} [[\hbar]][c(1-m)+d(1-n)]
\]
by genus and by powers of $\hbar$.
Then we claim that the induced map on the associated graded complexes $\mathrm{gr} G_\hbar: \mathrm{gr}\widehat{\GC}^{or}_{c+d+1}[[\hbar]]\to \mathrm{gr}\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond_{c,d})$ is a quasi-isomorphism, thus showing the proposition by a standard spectral sequence argument.
To show the claim, we proceed analogously to the proof of Proposition {\ref{prop:Gqiso}}. Let us go through the proof again and highlight only the differences.
The skeleton of a graph is defined as before, except that one also forgets the weights of all vertices.
The complex $\mathrm{gr} \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond_{c,d})$ splits into a product of subcomplexes that we again call $\tilde C_\gamma$, one for each skeleton $\gamma$. Again
\[
\tilde C_\gamma = C_{\tilde \gamma}^{\mathrm{Aut}_\gamma}
\]
for some representative $\tilde \gamma$ of the isomorphism class $\gamma$. Hence it again suffices to compute the cohomology of $C_{\tilde \gamma}$.
Graphs contributing are again obtained by adding edge vertices and input forests, except that now all vertices are also assigned an arbitrary weight.
Again we take a filtration on the number of edge vertices, which leaves us with the task of computing
the cohomology of a complex associated to one forest attached to a vertex $v$. We find that representatives of cohomology classes are either:
\begin{itemize}
\item Vertex $v$ with any weight and no attached forest. Let us call such a $v$ again bald.
\item Vertex $v$ with weight $0$ and input legs attached in all possible ways; let us call such a $v$ again hairy.
\end{itemize}
The differential on the second page of the spectral sequence again adds one edge vertex, which however can have a non-zero weight now, and if it has a non-zero weight it may be bald.
We may introduce another filtration by the number of non-hairy edge vertices. The differential on the associated graded creates one hairy edge vertex.
The resulting complex is a tensor product of complexes, one for each edge. The complexes associated to each edge again can have three different types: (i) both endpoints in the skeleton are hairy, (ii) both are bald or (iii) one is hairy, one is bald. Again one checks that in case (iii) the complex is acyclic and in case (i) one-dimensional, the cohomology class represented by a single edge.
Hence, since at least one vertex must be hairy, all vertices must be. Hence we recover at this stage the image of $\widehat{\GC}^{or}_{c+d+1}[[\hbar]]$ and are done.
\end{proof}
\subsection{Some applications}\label{4: subsec on applications} As remarked in the Introduction,
the most interesting cases for applications are ``classical" properads
$$
\mathcal{L}\mathit{ieb}:=\mathcal{L}\mathit{ieb}_{1,1}, \ \ \ \mathcal{L}\mathit{ieb}^\diamond:=\mathcal{L}\mathit{ieb}^\diamond_{1,1}, \ \ \ \ \mathcal{L}\mathit{ieb}_{odd}:= \mathcal{L}\mathit{ieb}_{0,1}.
$$
In the even case the Lie and co-Lie generators of these Lie bialgebra properads have homological degree zero; in the odd case the co-Lie generator has degree 1 and the Lie generator degree zero.
In all three cases the associated dg Lie algebras
$$
\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}})\ \ , \ \ \ \ \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond) \ \ , \ \ \ \ \mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{odd})
$$
are generated by graphs $\Gamma$ with $\# V(\Gamma) + \# E(\Gamma)\geq 1$ so that all the three
are {\em positively}\, graded dg Lie algebras with respect to this parameter, and it makes sense to talk about the {\em groups}
$$
\exp\left(\mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}})\right)\ \ , \ \ \ \ \exp\left(\mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}}^\diamond)\right) \ \ , \ \ \ \ \exp\left(\mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}}_{odd})\right)
$$
which can be identified with, respectively, the degree zero subalgebras $\mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}})$, $\mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}}^\diamond)$, $\mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}}_{odd})$ equipped with the standard Baker-Campbell-Hausdorff multiplication.
The subsets of co-cycles
$$
Z^0(\widehat{\mathcal{H}\mathit{olieb}})\subset \mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}}), \ \ Z^0(\widehat{\mathcal{H}\mathit{olieb}}^\diamond)\subset \mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}}^\diamond), \ \
Z^0(\widehat{\mathcal{H}\mathit{olieb}}_{odd})\subset \mathrm{Der}^0(\widehat{\mathcal{H}\mathit{olieb}}_{odd})
$$
are precisely the automorphism groups of the completed properads,
$$
\mathrm{Aut}(\widehat{\mathcal{H}\mathit{olieb}})\ \ , \ \ \ \ \mathrm{Aut}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond) \ \ , \ \ \ \ \mathrm{Aut}(\widehat{\mathcal{H}\mathit{olieb}}_{odd}),
$$
with the zero elements corresponding to the identity automorphisms.
We say that two automorphisms $f,g\in \mathrm{Aut}(...)$ from the list above are homotopy equivalent, $f\sim g$, if they differ
by a coboundary, $f-g=dh$, as elements in the corresponding dg Lie algebra $\mathrm{Der}(...)$, i.e.
if $f$ and $g$ define the same cohomology classes in $H^0(\mathrm{Der}(...))$. The subset of automorphisms homotopy equivalent to the identity automorphism (i.e.\ to zero in $\mathrm{Der}(...)$) is a normal subgroup
in $\mathrm{Aut}(...)$, and the quotient by this normal subgroup is called the {\em group of homotopy non-trivial}\, automorphisms and is denoted, respectively, by
\begin{equation}\label{4: three auto groups}
\mathbf{Aut}(\widehat{\mathcal{H}\mathit{olieb}})\ \ , \ \ \ \ {\mathbf{Aut}}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond) \ \ , \ \ \ \ \mathbf{Aut}(\widehat{\mathcal{H}\mathit{olieb}}_{odd}).
\end{equation}
Then we have the following three corollaries to the Main Theorems.
\subsubsection{\bf Proposition} (i) {\em The group $\mathbf{Aut}(\widehat{\mathcal{H}\mathit{olieb}})$ is equal to the Grothendieck-Teichm\"uller group $GRT=GRT_1\ltimes {\mathbb K}^*$ with the subgroup ${\mathbb K}^*$
acting on $\widehat{\mathcal{H}\mathit{olieb}}$ by the following rescaling transformations of the generators},
\begin{equation}\label{4: rescalings of the generators}
\begin{array}{c}\resizebox{12mm}{!}{\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}}\end{array}
\longrightarrow \lambda^{m+n-2}
\begin{array}{c}\resizebox{12mm}{!}{\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}}\end{array}
\ \ \ \ \ \ \ \ \ \forall\ \lambda\in {\mathbb K}^*.
\end{equation}
(ii) {\em The group ${\mathbf{Aut}}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond)$ is equal to $GRT=GRT_1\ltimes {\mathbb K}^*$ with the subgroup ${\mathbb K}^*$ acting on $\widehat{\mathcal{H}\mathit{olieb}}^\diamond$ by rescaling transformations},
$$
\begin{array}{c}\resizebox{12mm}{!}{\xy
(-9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,-6)*{\ldots};
(-10,-8)*{_1};
(-6,-8)*{_2};
(10,-8)*{_n};
(-9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,6)*{\ldots};
(-10,8)*{_1};
(-6,8)*{_2};
(10,8)*{_m};
\endxy}\end{array}
\longrightarrow
\lambda^{m+n+a-2}
\begin{array}{c}\resizebox{12mm}{!}{\xy
(-9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,-6)*{\ldots};
(-10,-8)*{_1};
(-6,-8)*{_2};
(10,-8)*{_n};
(-9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,6)*{\ldots};
(-10,8)*{_1};
(-6,8)*{_2};
(10,8)*{_m};
\endxy}\end{array}
$$
(iii) {\em The group ${\mathbf{Aut}}(\widehat{\mathcal{H}\mathit{olieb}}_{odd})$ is equal to ${\mathbb K}^*$ which acts on $\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ by rescaling transformations of the generators as in (\ref{4: rescalings of the generators})}.
\begin{proof} All three groups in (\ref{4: three auto groups}) can be identified as sets with, respectively, the zero-th cohomology groups
$$
H^0(\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}))\ \ , \ \ \ \ H^0(\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}^\diamond)) \ \ , \ \ \ \ H^0(\mathrm{Der}(\widehat{\mathcal{H}\mathit{olieb}}_{odd}))
$$
which in turn, by the Main Theorems, can be identified with the zero-th cohomology groups
$$
H^0(\mathsf{GC}^{or}_3, \delta) \oplus {\mathbb K} \ , \ \ \ H^0(\mathsf{GC}^{or}_3[[\hbar]], \delta_\hbar)\oplus {\mathbb K} \ , \ \ \ H^0(\mathsf{GC}^{or}_2, \delta)\oplus {\mathbb K},
$$
which in turn, according to \cite{Wi1}, \cite{CMW} and, respectively, \cite{Wi2}, are equal as Lie algebras to
$$
{\mathfrak g}{\mathfrak r}{\mathfrak t}_1 \oplus {\mathbb K}, \ \ \ \ \ \ \ {\mathfrak g}{\mathfrak r}{\mathfrak t}_1 \oplus {\mathbb K},\ \ \ \mbox{and, respectively,} \ \ \ \ {\mathbb K}.
$$
This proves all the claims.
\end{proof}
This Proposition implies a highly non-trivial action of the Grothendieck--Teichm\"uller group $GRT_1$
on the completed properads $\widehat{\mathcal{H}\mathit{olieb}}$ and $\widehat{\mathcal{H}\mathit{olieb}}^\diamond$ and hence on their representations.
However one must be careful when talking about representations of the {\em completed}\, free properads. We introduce a topology on the properads $\widehat{\mathcal{H}\mathit{olieb}}/\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ (respectively, on $\widehat{\mathcal{H}\mathit{olieb}}^\diamond$) as the one induced by the genus filtration (respectively, by the filtration defined by the parameter ``genus + total $a$-weight", cf.\ \cite{CMW}). If $W$ is a topological vector space, then by a representation of $\widehat{\mathcal{H}\mathit{olieb}}/\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ (respectively, $\widehat{\mathcal{H}\mathit{olieb}}^\diamond$) in $W$ we mean a {\em continuous}\, map of topological properads
$$
\rho: \widehat{\mathcal{H}\mathit{olieb}}/\widehat{\mathcal{H}\mathit{olieb}}_{odd} \longrightarrow {\mathcal E}nd_W, \ \ \ \ \ \rho^\diamond:\widehat{\mathcal{H}\mathit{olieb}}^\diamond \longrightarrow {\mathcal E}nd_W.
$$
Let us describe a sufficiently large class of such representations. Consider an arbitrary dg space $V$, and let $\hbar$ be a formal parameter of degree zero. The vector space
$W:=V[[\hbar]]$ can be equipped with the standard $\hbar$-adic topology. Then maps $\rho$ and $\rho^\diamond$ satisfying on the generators the condition
$$
\rho\left(
\begin{array}{c}\resizebox{12mm}{!}{\begin{xy}
<0mm,0mm>*{\circ};<0mm,0mm>*{}**@{},
<-0.6mm,0.44mm>*{};<-8mm,5mm>*{}**@{-},
<-0.4mm,0.7mm>*{};<-4.5mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,5mm>*{\ldots}**@{},
<0.4mm,0.7mm>*{};<4.5mm,5mm>*{}**@{-},
<0.6mm,0.44mm>*{};<8mm,5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,5.5mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,5.5mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,5.5mm>*{^{m\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,5.5mm>*{^m}**@{},
<-0.6mm,-0.44mm>*{};<-8mm,-5mm>*{}**@{-},
<-0.4mm,-0.7mm>*{};<-4.5mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-1mm,-5mm>*{\ldots}**@{},
<0.4mm,-0.7mm>*{};<4.5mm,-5mm>*{}**@{-},
<0.6mm,-0.44mm>*{};<8mm,-5mm>*{}**@{-},
<0mm,0mm>*{};<-8.5mm,-6.9mm>*{^1}**@{},
<0mm,0mm>*{};<-5mm,-6.9mm>*{^2}**@{},
<0mm,0mm>*{};<4.5mm,-6.9mm>*{^{n\hspace{-0.5mm}-\hspace{-0.5mm}1}}**@{},
<0mm,0mm>*{};<9.0mm,-6.9mm>*{^n}**@{},
\end{xy}}\end{array}
\right)\in \hbar^{m+n-2}\mathrm{Hom}(V^{\otimes n}, V^{\otimes m})[[\hbar]], \ \ \ \ \ \ \ \
\rho^\diamond\left(
\begin{array}{c}\resizebox{12mm}{!}{\xy
(-9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,-6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,-6)*{\ldots};
(-10,-8)*{_1};
(-6,-8)*{_2};
(10,-8)*{_n};
(-9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(-5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(9,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(5,6)*{};
(0,0)*+{a}*\cir{}
**\dir{-};
(0,6)*{\ldots};
(-10,8)*{_1};
(-6,8)*{_2};
(10,8)*{_m};
\endxy}\end{array}
\right)\in \hbar^{m+n+a-2}\mathrm{Hom}(V^{\otimes n}, V^{\otimes m})[[\hbar]]
$$
define a\, {\em continuous}\, representation of the properads $\widehat{\mathcal{H}\mathit{olieb}}/\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ and, respectively, $\widehat{\mathcal{H}\mathit{olieb}}^\diamond$ in the topological space $V[[\hbar]]$.
\subsubsection{\bf On the unique non-trivial deformation of $\widehat{\mathcal{H}\mathit{olieb}}_{odd}$}
It was proven in \cite{Wi2} that the cohomology group $H^1(\mathsf{GC}^{or}_2)$ is one-dimensional and is spanned by the following graph
$$
\Upsilon_4:=\begin{array}{c}\xy
(0,0)*{\bullet}="1",
(-7,16)*{\bullet}="2",
(7,16)*{\bullet}="3",
(0,10)*{\bullet}="4",
\ar @{<-} "2";"4" <0pt>
\ar @{<-} "3";"4" <0pt>
\ar @{<-} "4";"1" <0pt>
\ar @{<-} "2";"1" <0pt>
\ar @{<-} "3";"1" <0pt>
\endxy\end{array}
+
2
\begin{array}{c}\xy
(0,0)*{\bullet}="1",
(-6,6)*{\bullet}="2",
(6,10)*{\bullet}="3",
(0,16)*{\bullet}="4",
\ar @{<-} "4";"3" <0pt>
\ar @{<-} "4";"2" <0pt>
\ar @{<-} "3";"2" <0pt>
\ar @{<-} "2";"1" <0pt>
\ar @{<-} "3";"1" <0pt>
\endxy\end{array}
+
\begin{array}{c}\xy
(0,16)*{\bullet}="1",
(-7,0)*{\bullet}="2",
(7,0)*{\bullet}="3",
(0,6)*{\bullet}="4",
\ar @{->} "2";"4" <0pt>
\ar @{->} "3";"4" <0pt>
\ar @{->} "4";"1" <0pt>
\ar @{->} "2";"1" <0pt>
\ar @{->} "3";"1" <0pt>
\endxy\end{array}.
$$
Moreover $H^2(\mathsf{GC}^{or}_2)={\mathbb K}$ and is spanned by a graph with four vertices. This means that one can construct by induction a new Maurer-Cartan element in the Lie algebra $\mathsf{GC}^{or}_2$ (the integer subscript in the summand $\Upsilon_n$ stands for the number of vertices of the graphs)
$$
\Upsilon_{KS}= \xy
(0,0)*{\bullet}="a",
(6,0)*{\bullet}="b",
\ar @{->} "a";"b" <0pt>
\endxy + \Upsilon_4
+ \Upsilon_6 + \Upsilon_8 + \ldots
$$
as all obstructions have more than $7$ vertices and hence do not hit the unique cohomology class
in $H^2(\mathsf{GC}^{or}_2)$. Up to gauge equivalence, this new Maurer-Cartan element $\Upsilon_{KS}$ is the {\em only}\, non-trivial deformation of the standard Maurer-Cartan element $\xy
(0,0)*{\bullet}="a",
(6,0)*{\bullet}="b",
\ar @{->} "a";"b" <0pt>
\endxy$.
We call this element the {\em Kontsevich-Shoikhet}\, element as it was first found by Boris Shoikhet in \cite{Sh} with a reference to an important contribution by Maxim Kontsevich via an informal communication.
By Main theorem {\ref{thm:Fqiso}}, the Maurer-Cartan element $\Upsilon_{KS}$ equips the completed
non-differential properad $\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ with a new differential denoted by
$\delta_{KS}$. If continuous representations of $\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ equipped with the standard differential $\delta$ (originating from $\xy
(0,0)*{\bullet}="a",
(6,0)*{\bullet}="b",
\ar @{->} "a";"b" <0pt>
\endxy$) in a topological vector space $V[[\hbar]]$ can be identified with ordinary formal Poisson structures $\pi\in {\mathcal T}_{poly}(V)[[\hbar]]$, the continuous representations of $\widehat{\mathcal{H}\mathit{olieb}}_{odd}$ equipped with the new differential $\delta_{KS}$ give us a notion of {\em quantizable Poisson structure}\, $\pi^{quant}\in {\mathcal T}_{poly}(V)[[\hbar]]$ (this notion can be globalized from a vector space $V$ to an arbitrary manifold $M$). It was proven in \cite{MW3} that for {\em finite}-dimensional vector spaces
$V$ (or manifolds $M$), there is a one-to-one correspondence between ordinary Poisson structures and quantizable ones, but this correspondence
$$
\left\{ \mbox{Ordinary Poisson structures}\ \pi\ \mbox{on}\ M \right\}
\stackrel{1:1}{\leftrightarrow}
\left\{ \mbox{Quantizable Poisson structures}\ \pi^{quant} \ \mbox{on}\ M\right\}
$$
is highly non-trivial and depends on a choice of Drinfeld associator. Moreover, quantizable
Poisson structures can be deformation quantized in a trivial (and essentially unique) perturbative way \cite{MW3} so that all the subtleties of the deformation quantization are hidden in the above correspondence.
\def$'${$'$}
\begin{thebibliography}{10}
\bibitem[CMW]{CMW} R.\ Campos, S.\ Merkulov and T.\ Willwacher, {\em The Frobenius properad is Koszul},
arXiv:1402.4048. To appear in Duke Math.\ J.
\bibitem[C]{Ch} M.\ Chas, {\em Combinatorial Lie bialgebras of curves on surfaces},
Topology {\bf 43} (2004), no. 3, 543--568.
\bibitem[CFL]{CFL} K.\ Cieliebak, K.\ Fukaya and J.\ Latschev, {\em Homological algebra related to surfaces with boundary}, arXiv:1508.02741 (2015)
\bibitem[CS]{ChSu} M.\ Chas and D.\ Sullivan, {\em Closed string operators in topology leading to Lie bialgebras and higher string algebra}, in: { The legacy of Niels Henrik Abel}, pp.\ 771--784, Springer, Berlin, 2004.
\bibitem[D1]{D1}
V.\ Drinfeld,
{\em Hamiltonian structures on Lie groups, Lie bialgebras and the geometric
meaning of the classical Yang-Baxter equations}, Soviet Math. Dokl. {\bf 27} (1983) 68--71.
\bibitem[D2]{D2}
V. Drinfeld, {\em On quasitriangular quasi-Hopf algebras and a group closely connected
with $Gal(\bar{Q}/Q)$}, Leningrad Math. J. {\bf 2}, No.\ 4 (1991), 829--860.
\bibitem[ES]{ES} P.\ Etingof and O.\ Schiffmann,
Lectures on Quantum Groups, International Press, 2002.
\bibitem[F]{Fu} H.\ Furusho, {\em Four Groups Related to Associators}, preprint arXiv:1108.3389 (2011).
\bibitem[KM]{KM} M.\ Kapranov and Yu.I.\ Manin, {\em Modules and Morita theorem for operads}. Amer. J. Math. {\bf 123} (2001), no. 5, 811-838.
\bibitem[KMW]{KMW}
A.\ Khoroshkin, S.\ Merkulov and T.\ Willwacher, {\em On quantizable odd Lie bialgebras}, preprint arXiv:1512.04710 (2015).
\bibitem[Ko1]{Ko1} M.\ Kontsevich, {\em Formal (non)commutative symplectic geometry}. In Proceedings of the I. M. Gelfand seminar
1990-1992, pages 173-188. Birkhauser, 1993.
\bibitem[Ko2]{Kon} M.\ Kontsevich, {\em Formality Conjecture}, In: D. Sternheimer et al. (eds.),
Deformation Theory and Symplectic
Geometry, Kluwer 1997, 139-156.
\bibitem[Ko3]{Ko} M.\ Kontsevich, a letter to Martin Markl, November 2002.
\bibitem[MaVo]{MaVo} M.\ Markl and A.A.\ Voronov,
{\em PROPped up graph cohomology}, in: Algebra, arithmetic, and geometry: in honor of Yu. I. Manin. Vol. II,
Progr. Math., 270, Birkh\"auser Boston, Inc., Boston, MA (2009) pp. 249--281.
\bibitem[M1]{Me1} S.A.\ Merkulov, {\em Prop profile of Poisson geometry},
Commun.\ Math.\ Phys. {\bf 262} (2006),
117-135.
\bibitem[M2]{Me2} S.A.\ Merkulov,
{\em Graph complexes with
loops and wheels}, in:
``Algebra, Arithmetic
and Geometry - Manin Festschrift" (eds. Yu.\ Tschinkel and Yu.\ Zarhin),
Progress in Mathematics, Birkha\"user (2010), pp. 311-354.
\bibitem[MV]{MV} S.A.\ Merkulov and B.\ Vallette,
{\em Deformation theory of representations of prop(erad)s I \& II},
Journal f\"ur die reine und angewandte Mathematik (Crelle) {\bf 634}, 51--106,
\& {\bf 636}, 123--174 (2009).
\bibitem[MW1]{MW1} S.\ Merkulov and T.\ Willwacher, {\em Grothendieck-Teichm\"uller and Batalin-Vilkovisky}, Lett.\ Math.\ Phys. {\bf 104} (2014) No.\ 5, 625-634.
\bibitem[MW2]{MW} S.A. Merkulov and T.\ Willwacher, {\em Props of ribbon graphs, involutive Lie bialgebras and moduli spaces of curves}, preprint arXiv:1511.07808 (2015) 51pp.
\bibitem[MW3]{MW3} S.A. Merkulov and T.\ Willwacher, {\em On explicit quantization
of Lie bialgebras}, to appear.
\bibitem[Sc]{Sch} T.\ Schedler, {\em A Hopf algebra quantizing a necklace Lie algebra
canonically associated
to a quiver}. Intern. Math. Res. Notices (2005), 725--760.
\bibitem[Sh]{Sh} B.\ Shoikhet, {\em An $L_\infty$ algebra structure on polyvector fields
}, preprint arXiv:0805.3363, (2008).
\bibitem[Ta]{Ta} D.\ Tamarkin, {\em Action of the Grothendieck-Teichmueller group on the operad of Gerstenhaber algebras}, preprint math/0202039 (2002).
\bibitem[Tu]{Tu} V.G.\ Turaev, {\em Skein quantization of Poisson algebras of loops on
surfaces},
Ann. Sci. Ecole Norm. Sup. (4) 24, no. 6, (1991) 635--704.
\bibitem[W1]{Wi1} T.\ Willwacher, {\em M.\ Kontsevich's graph complex and the
Grothendieck-Teichm\"uller Lie algebra},
Invent. Math. 200 (2015), no. 3, 671--760.
\bibitem[W2]{Wi2} T.\ Willwacher, {\em The oriented graph complexes},
Comm. Math. Phys. 334 (2015), no. 3, 1649--1666.
\end{thebibliography}
\end{document}
|
\begin{document}
\title{Convergence analysis of a family of 14-node brick elements
\thanks{This project is supported by
NNSFC (Nos. 11301053,61033012,19201004,11271060,61272371), ``the Fundamental
Research Funds for the Central Universities''. Also partially supported by NRF of Korea (Nos. 2011-0000344, F01-2009-000-10122-0).}}
\author[a]{Zhaoliang Meng\thanks{Corresponding author: [email protected]}}
\author[a,b]{Zhongxuan Luo}
\author[c,d]{Dongwoo Sheen}
\author[c]{Sihwan Kim}
\affil[a]{\it \small School of Mathematical Sciences,Dalian
University of Technology, Dalian, 116024, China}
\affil[b]{\it School of Software,Dalian
University of Technology, Dalian, 116620, China}
\affil[c]{\it
Department of Mathematics, Seoul National University,
Seoul 151-747, Korea.}
\affil[d]{\it
Interdisciplinary Program in
Computational Sciences \& Technology, Seoul National University,
Seoul 151-747, Korea.}
\maketitle
\begin{abstract}
In this paper, we will give \cor{a} convergence analysis for a family of
14-node elements which was proposed by I. M.
Smith and D. J. Kidger[Int. J. Numer. Meth. Engng., 35:1263--1275, 1992]. The 14 DOFs are
taken as the values at the eight vertices and the six
face-centroids. For second-order elliptic problem\cor{s}, we will show that
among all the Smith-Kidger 14-node elements,
Type 1, Type 2 and \cor{Type 5 elements provide
optimal-order convergent solutions while Type 6 element gives one-order lower convergent solutions.
Motivated by our proof, we also find that the order of convergence of
the Type 6 14-node nonconforming element improves to be optimal if
we change the DOFs into the values at the eight vertices and
the integration values on the six faces. We also show that Type 1, Type 2 and
Type 5 keep the optimal-order convergence if the integral DOFs on the six
faces are adopted.}
\\[6pt]
\textbf{Keywords:}
Nonconforming element; Brick element; 14-node element;
Second-order elliptic problem; Smith-Kidger element
\end{abstract}
\section{Introduction}
\cor{Among many three-dimensional brick elements, there have been well-known
simplest conforming elements such as the trilinear element, the 27-node element and
serendipity elements. For the} nonconforming case,
Rannacher-Turek \cite{rannacher-turek} presented the rotated trilinear
elements with the two types of 6 DOFs:
the face-centroid values type and the face integrals type.
Douglas-Santos-Sheen-Ye \cite{dssy-nc-ell} then
modified the element of Rannacher-Turek such that the face-centroid values
type and the face integrals type are identical, that is, the element fulfills
the mean value property ``the face-centroid value = the face average
integral''. Later Park-Sheen
presented a $P_1$-nonconforming finite element on cubic meshes which
has only 4 DOFs \cite{parksheen-p1quad}. Wilson
also defined a linear-order nonconforming brick element \cite{ciar,
wilson-brick} with 11 DOFs whose polynomial space consists of trilinear
polynomials plus $\{ 1-x_1^2, 1-x_2^2, 1-x_3^2\}$ on ${\hat{\bK}}=[-1,1]^3$
(see \cite[Page 217, Remark 4.2.3]{ciar}).
All these three dimensional elements are of $O(h)$ convergence rate in energy
norm.
In the direction of obtaining higher-order convergent nonconforming elements,
Smith and Kidger \cite{smith-kidger-14node} successfully developed
three-dimensional brick elements of 14 DOFs by adding additional polynomials
to $P_2$. They investigated six
most possible 14 DOFs elements systematically considering the Pascal
pyramid, and concluded that their Type 1 (as well as Type 2) and Type
6 elements are successful ones. The additional polynomial space
for Type 1 element is the span of the
four nonsymmetric cubic polynomials $\{x_1x_2x_3, x_1^2x_2, x_2^2x_3, x_3^2x_1\}$
while that for Type 6 element is the span of
$\{x_1x_2x_3, x_1x_2^2x_3^2,$ $x_1^2x_2x_3^2,
x_1^2x_2^2x_3 \}.$ Only recently a new nonconforming brick
element of 14 DOFs with quadratic and cubic convergence in the
energy and $L_2$ norms, respectively, is introduced by
Meng, Sheen, Luo, and Kim
\cite{2013-Meng-p-}, which has the same type of DOFs but has only
cubic polynomials added to $P_2$. And then, the authors compared
these 14-node elements numerically, see \cite{2013-Kim-p-}.
Numerical tests \cor{show} that at least for second-order elliptic
problems Meng-Sheen-Luo-Kim and some of Smith-Kidger elements are
convergent with optimal order or with lower order.
\cor{A convergence} analysis for Meng-Sheen-Luo-Kim element was \cor{reported} in
\cite{2013-Meng-p-} and is fairly easy because it satisfies the
patch test of Irons \cite{1977-Irons-p557}, which implies that a
successful $P_k$-nonconforming element needs to satisfy that on each
interface the jump of adjacent polynomials be orthogonal to
$P_{k-1}$ polynomials on the interface. Unfortunately, it was found
in mathematics that the patch test is neither necessary nor
sufficient, see \cite{2002-Shi-p221} and the \cor{references} therein. As
\cor{shown} in this paper, the Smith-Kidger elements can only pass \cor{a} lower
order patch test or can not pass it, but give optimal \cor{order} convergence
from our numerical results or lower convergence order. Thus,
the convergence analysis for Smith-Kidger element \cor{seems to be} quite different
and complex. For the convergence of the nonconforming element which
\cor{fail to} pass the patch test, \cor{see the works of} Stummel, Shi, {\it etc.}
\cite{1979-Stummel-p449-471,1987-Shi-p391-405,1984-Shi-p1233-1246,2000-Shi-p97-102}.
In this paper, we will \cor{provide a} convergence analysis for Smith-Kidger \cor{elements}
for second-order elliptic \cor{problems}. We show that although the patch test
\cor{fails}, Type 1, 2 and 5 Smith-Kidger \cor{elements are of} optimal
convergence order, \cor{while} Type 6 element loses one order of accuracy.
Furthermore, we also present a new brick element with the same DOFs, which is also
convergent \cor{in optimal orders}. Finally, if the
value at the eight vertices and the integration \cor{values over} six faces are taken as the
DOFs, then we can show that Type 1, 2, 5, and 6 elements and the proposed new element
can get optimal convergence order, which implies that Type 6 element
improves one order of accuracy.
The paper is organized as follows. In \cor{Section} 2, we will introduce
Smith-Kidger {elements} and give the basis functions firstly. In
\cor{Section} 3, we define \cor{an} interpolation operator and present
\cor{our} convergence analysis for Type 1 Smith-Kidger element. In \cor{Section} 4,
we will analyze the other elements and present the corresponding
error estimates very briefly. In \cor{Section} 5, a new 14-node brick
element is proposed. Finally, in \cor{Section} 6, we conclude our
results.
\section{The quadratic nonconforming brick elements}
Let ${\hat{\bK}}=[-1,1]^3$ and denote the vertices and {face-centroids} by
$V_j,1\leq j\leq 8,$ and $M_k,1\leq k\leq 6,$ respectively (see Fig.~\ref{fig:cube}).
\begin{figure}
\caption{$V_j$ denotes the vertices, $j=1,2,\ldots,8$, and $M_k$ denotes
the \cor{face-centroids}.}
\label{fig:cube}
\end{figure}
Smith and Kidger \cite{smith-kidger-14node} defined the following six 14-node elements:
\begin{subeqnarray}
{\hat{\bP}}_{SK}^{(1)} &=& P_2({\hat{\bK}})\oplus\mathrm{Span}\{\hat{x}_1\hat{x}_2\hat{x}_3, \hat{x}_1^2\hat{x}_2, \hat{x}_2^2\hat{x}_3, \hat{x}_3^2\hat{x}_1\}, \\
{\hat{\bP}}_{SK}^{(2)} &=& P_2({\hat{\bK}})\oplus\mathrm{Span}\{\hat{x}_1\hat{x}_2\hat{x}_3, \hat{x}_1\hat{x}_2^2, \hat{x}_2\hat{x}_3^2, \hat{x}_3\hat{x}_1^2\}, \\
{\hat{\bP}}_{SK}^{(3)} &=& P_2({\hat{\bK}})\oplus\mathrm{Span}\{\hat{x}_1\hat{x}_2\hat{x}_3, \hat{x}_1^3, \hat{x}_2^3, \hat{x}_3^3\}, \\
{\hat{\bP}}_{SK}^{(4)} &=& P_2({\hat{\bK}})\oplus\mathrm{Span}\{\hat{x}_1\hat{x}_2\hat{x}_3, \hat{x}_1^2\hat{x}_2\hat{x}_3, \hat{x}_1\hat{x}_2^2\hat{x}_3, \hat{x}_1\hat{x}_2\hat{x}_3^2\}, \\
{\hat{\bP}}_{SK}^{(5)} &=& P_2({\hat{\bK}})\oplus\mathrm{Span}\{\hat{x}_1\hat{x}_2\hat{x}_3, \hat{x}_1^2\hat{x}_2+\hat{x}_1\hat{x}_2^2, \hat{x}_2^2\hat{x}_3+\hat{x}_2\hat{x}_3^2, \hat{x}_3^2\hat{x}_1 + \hat{x}_3\hat{x}_1^2\},\\
{\hat{\bP}}_{SK}^{(6)} &=& P_2({\hat{\bK}})\oplus\mathrm{Span}\{\hat{x}_1\hat{x}_2\hat{x}_3, \hat{x}_1\hat{x}_2^2\hat{x}_3^2,
\hat{x}_1^2\hat{x}_2\hat{x}_3^2, \hat{x}_1^2\hat{x}_2^2 \hat{x}_3\},
\end{subeqnarray}
whose DOFs are the function values at the eight vertices and the six
face-centroids. They reported that Type 3 element fails and
is inadmissible. We remark that Type 4 element is also inadmissible since
$(\hat{x}_1^2-1)\hat{x}_2\hat{x}_3\in {\hat{\bP}}_{SK}^{(4)}$ vanishes at all these points. In \cite{2013-Kim-p-}, we observe that Type 1 (and 2)
and Type 5 elements give optimal convergence results both in $L^2$-
and $H^1$-norms at least for second-order elliptic problems,
while Type 6 element loses one order of accuracy in each norm.
In what follows, we will give \cor{an} error estimate for \cor{Type 1}
Smith-Kidger element \cor{in detail}; error estimates for the other
types can be obtained completely \cor{analogously} and thus are stated very briefly.
\cor{To begin with,
denote by $V_m, m = 1,\cdots, 8,$ the eight vertices
$(j,k,l), |j| = |k| = |l|= 1, j,k,l\in \mathbb Z,$
and by $M_m, m = 1,\cdots, 6,$ the six face-centroids
$(j,k,l), |j| + |k| + |l|= 1, j,k,l\in \mathbb Z.$
The basis functions corresponding to the eight vertices $V_j, j = 1,\cdots,8,$
are denoted by ${\hat\phi}^V_{V_j},$ and those corresponding to
the six face-centroids by ${\hat\phi}^F_{M_j}, j = 1, \cdots, 6.$}
\cor{To describe the brick elements in a uniform fashion, set
\begin{eqnarray}\label{eq:r}
\begin{split}
r_0(\hat{x}_1,\hat{x}_2,\hat{x}_3) = \hat{x}_1\hat{x}_2\hat{x}_3,~ r_1(\hat{x}_1,\hat{x}_2,\hat{x}_3) =\hat{x}_1\hat{x}_3^2
,\\
r_2(\hat{x}_1,\hat{x}_2,\hat{x}_3) =\hat{x}_2 \hat{x}_1^2,~r_3(\hat{x}_1,\hat{x}_2,\hat{x}_3) = \hat{x}_3 \hat{x}_2^2,
\end{split}
\end{eqnarray}
so that
\[
{\hat{\bP}}_{SK}^{(1)} = P_2({\hat{\bK}})\oplus\mathrm{Span}\{
r_0(\hat{x}_1,\hat{x}_2,\hat{x}_3),~ r_1(\hat{x}_1,\hat{x}_2,\hat{x}_3),~r_2(\hat{x}_1,\hat{x}_2,\hat{x}_3),~r_3(\hat{x}_1,\hat{x}_2,\hat{x}_3)\}.
\]
Then the basis functions for Type 1 Smith-Kidger elements are
given as follows:}
\noindent for $(j,k,l) = V_m, m = 1,\cdots, 8,$
\begin{eqnarray}\label{eq:phiV}
\begin{split}
&&{\hat\phi}^V_{(j,k,l)} (\hat{x}_1,\hat{x}_2,\hat{x}_3) =\frac1{16}\left[-1+\hat{x}_1^2+\hat{x}_2^2+\hat{x}_3^2\right]+\frac{1}{8}\left[jk\hat{x}_1\hat{x}_2+jl\hat{x}_1\hat{x}_3+kl\hat{x}_2\hat{x}_3\right.
\\
&&\qquad\left. +jklr_0(\hat{x}_1,\hat{x}_2,\hat{x}_3) +
r(\hat{x}_1,\hat{x}_2,\hat{x}_3) \right],
\end{split}
\end{eqnarray}
and, for $(j,k,l) =M_m,m=1,\cdots,6,$
\begin{eqnarray}\label{eq:phiF}
{\hat\phi}^F_{(j,k,l)}(\hat{x}_1,\hat{x}_2,\hat{x}_3
)=\frac{1}{4}+\frac{1}{2}\ell(\hat{x}_1,\hat{x}_2,\hat{x}_3) +\frac{1}{4}q(\hat{x}_1,\hat{x}_2,\hat{x}_3)
-\frac{1}{2} r(\hat{x}_1,\hat{x}_2,\hat{x}_3),
\end{eqnarray}
where the linear, quadratic, and the remaining higher-order terms are defined by
\begin{subeqnarray}\label{eq:ell-q-r}
&&\ell(\hat{x}_1,\hat{x}_2,\hat{x}_3)= j\hat{x}_1 + k\hat{x}_2 + l\hat{x}_3,\\
&& q(\hat{x}_1,\hat{x}_2,\hat{x}_3)
= -(\hat{x}_1^2+\hat{x}_2^2+\hat{x}_3^2)+ 2 (j\hat{x}_1^2 + k\hat{x}_2^2+ l\hat{x}_3^2),
\\
&&
r(\hat{x}_1,\hat{x}_2,\hat{x}_3)= jr_1(\hat{x}_1,\hat{x}_2,\hat{x}_3) + kr_2(\hat{x}_1,\hat{x}_2,\hat{x}_3) + lr_3(\hat{x}_1,\hat{x}_2,\hat{x}_3).
\end{subeqnarray}
Assume that $\Omega\subset \mathbb{R}^3$ is a parallelepiped domain with boundary
$\Gamma$. Let $(\mathscr{T}_h)_{h>0}$ be a regular family of \cor{triangulations} of $\Omega$ into
parallelepipeds ${\mathbf K}_j,j=1,2,\ldots,N_{\mathbf K}$, where $h=\max_{{\mathbf K}\in \mathscr{T}_h}h_{\mathbf K}$ with
$h_{\mathbf K}=\text{diam}({\mathbf K})$. For each ${\mathbf K}\in\mathscr{T}_h$, let $T_{\mathbf K}: {\hat{\bK}}\rightarrow
\mathbb R^3$ be an invertible affine mapping such that
\[
{\mathbf K} = T_{\mathbf K}({\hat{\bK}}),
\]
and set $\phi_{{\mathbf K}} = {\hat\phi}\circ T_{\mathbf K}^{-1}: {\mathbf K} \rightarrow \mathbb R$ for
all ${\hat\phi} \in {\hat{\bP}}_{SK}^{(1)}$, whose collection will be designated by
\[
\mathbb{P}_{\mathbf K} = \mathrm{Span}\{\phi_{{\mathbf K}}\mid\,{\hat\phi}\in{\hat{\bP}}_{SK}^{(1)} \}.
\]
Let $N_V$ and $N_F$ denote the numbers of vertices and faces, respectively. Then set
\begin{eqnarray*}
&& \mathscr{V}_h=\{V_1,V_2,\cdots,V_{N_V}:\quad \text{the set of all vertices of ${\mathbf K}\in\mathscr{T}_h$}\},\\
&& \mathscr{F}_h=\{F_1,F_2,\cdots,F_{N_F}:\quad \text{the set of all faces of
${\mathbf K}\in\mathscr{T}_h$}\},\\
&& \mathscr{F}_h^i=\{F_1,F_2,\cdots,F_{N_F^i}:\quad \text{the set of all interior faces of ${\mathbf K}\in\mathscr{T}_h$}\},\\
&& \mathscr{M}_h=\{M_1,M_2,\cdots,M_{N_F}:\quad \text{the set of all face-centroids
on $\mathscr{F}_h$}\},\\
&& \mathscr{F}_h^{(1)}=\{F\in\mathscr{F}_h:\quad \text{the set of all faces with outward
normal $(\pm 1,0,0)$}\},\\
&& \mathscr{F}_h^{(2)}=\{F\in\mathscr{F}_h:\quad \text{the set of all faces with outward
normal $(0,\pm 1,0)$}\},\\
&& \mathscr{F}_h^{(3)}=\{F\in\mathscr{F}_h:\quad \text{the set of all faces with outward
normal $(0,0,\pm 1)$}\}.
\end{eqnarray*}
Obviously we have $\mathscr{F}_h=\mathscr{F}_h^{(1)}\cup\mathscr{F}_h^{(2)}\cup\mathscr{F}_h^{(3)}$.
We consider the following nonconforming finite element spaces:
\begin{eqnarray*}
&& \mathscr{NC}^h=\{\phi: \Omega\rightarrow \mathbb{R} |~ \phi|_{\mathbf K}\in \mathbb{P}_{\mathbf K}\ \forall
{\mathbf K}\in\mathscr{T}_h, \phi \text{ is continuous
at all } V_j\in \mathscr{V}_h, M_k\in \mathscr{M}_h \},\\
&& \mathscr{NC}^h_0=\{\phi\in\mathscr{NC}^h |~ \phi(V_j)=0\,\,\forall V_j\in \mathscr{V}_h\cap \Gamma\text{ and }\phi(M_k)=0\,\, \forall
M_k\in \mathscr{M}_h\cap\Gamma\}.
\end{eqnarray*}
\section{The interpolation operator and convergence analysis}
In this section we will define an interpolation operator and analyze convergence in the case of
Dirichlet boundary value problems. The case of Neumann boundary value problem is quite similar and the
results will be omitted.
\subsection{The second order elliptic problem}
Denote by $(\cdot,\cdot)$ the $L^2(\Omega)$ inner product and \cor{by
$\left\langle\cdot,\cdot\right\rangle$}
the duality pairing between $H^{-1}(\Omega)$ and $H^1_0(\Omega)$, which
is an extension of the duality pairing between $L^2(\Omega)$ \cor{and itself}. By $\|\cdot\|_k$ and
$|\cdot|_k$ we adopt the standard notations for the norm and seminorm for the Sobolev spaces $H^k(\Omega)$.
Consider then the following Dirichlet boundary value problem:
\begin{subequations}\label{eq:elliptic}
\begin{eqnarray}
-\Delta u &=&f, \quad \Omega, \\
u&=&0,\quad \Gamma,
\end{eqnarray}
\end{subequations}
with $f\in H^1(\Omega)$.
We will further assume that the coefficients
are sufficiently smooth and that the elliptic problem
\eqref{eq:elliptic} has an $H^3(\Omega)$-regular solution such that
$\|u\|_{3} \le C \|f\|_1$.
The weak problem is then given as usual:
find $u\in H_0^1(\Omega)$ such that
\begin{equation}
a(u, v) = \left\langle f,v\right\rangle, \quad v\in H^1_0(\Omega),
\label{eq:weak}
\end{equation}
where $a: H^1_0(\Omega)\times H^1_0(\Omega) \rightarrow \mathbb{R}$ is the bilinear
form defined by $a(u,v) = (\nabla u,\nabla v)$ for all $u,v\in H^1_0(\Omega)$. The nonconforming \cor{Galerkin} method for Problem
\eqref{eq:elliptic} states as follows: find $u_h\in\mathscr{NC}^h_0$ such that
\begin{equation}
a_h(u_h, v_h) = \left\langle f, v_h\right\rangle,\quad v_h\in\mathscr{NC}^h_0,
\label{eq:solution}
\end{equation}
where
$$
a_h(u, v) =\sum_{{\mathbf K}\in \mathscr{T}_h}a_{\mathbf K}(u,v),
$$
with $a_{\mathbf K}$ being the restriction of $a$ to ${\mathbf K}$.
Notice that in order to have point values defined properly we need to recall
the following Sobolev embedding theorem
\begin{eqnarray*}
W^{m,p}(\Omega)\longrightarrow C^0(\bar{\Omega}),\ \text{if}\
\frac{1}{p}-\frac{m}{d}<0.
\end{eqnarray*}
Thus we should have $p>\frac{3}{m}$. For a given cube ${\mathbf K}\in \mathscr{T}_h$, define the local interpolation operator
$\Pi_{\mathbf K} : W^{1,p}({\mathbf K})\cap H^1_0(\Omega)\longrightarrow {\hat{\bP}}_{SK}^{(1)},~ p>3,$ by
$$
\Pi_{\mathbf K}\phi(V_i)=\phi(V_i),\quad \Pi_{\mathbf K}\phi(M_j)=\phi(M_j)
$$
for all vertices $V_i$ and face-centroids $M_j$ of ${\mathbf K}$.
The global interpolation operator $\Pi_h$: $W^{1,p}(\Omega)\cap H^1_0(\Omega)\rightarrow\mathscr{NC}^h_0$
is then defined through the local interpolation operator $\Pi_{\mathbf K}$ by $\Pi_h|_{\mathbf K}=\Pi_{\mathbf K}$ for all ${\mathbf K}\in\mathscr{T}_h$. Since $\Pi_h$
preserves $P_2$ for all ${\mathbf K}\in\mathscr{T}_h$, it follows from the Bramble-Hilbert Lemma that
\begin{eqnarray}\label{eq:hilbert}
\begin{split}
\sum_{{\mathbf K}\in\mathscr{T}_h}\|\phi-\Pi_h\phi\|_{0,{\mathbf K}}+h\sum_{{\mathbf K}\in\mathscr{T}_h}\|\phi-\Pi_h\phi\|_{1,{\mathbf K}}\le
Ch^k\|\phi\|_k,
\\
\phi\in W^{k,p}(\Omega)\cap H_0^1(\Omega),1\le k\le 3.
\end{split}
\end{eqnarray}
We now consider the energy-norm error estimate and first consider the
following Strang lemma \cite{strang-fix}.
\begin{lemma}
Let $u\in H^1(\Omega)$ and $u_h\in \mathscr{NC}^h_0$ be the solutions of Eq.
\eqref{eq:weak} and Eq. \eqref{eq:solution}, respectively.
Then
\begin{eqnarray}\label{eq:strang}
\|u-u_h\|_h\leq C\Big\{\inf_{v_h\in \mathscr{NC}^h_0}\|u-v_h\|_h+\sup_{w_h\in\mathscr{NC}^h_0}
\frac{|a_h(u,w_h)-\left\langle f,w_h\right\rangle|}{\|w_h\|_h}\Big\}.
\end{eqnarray}
\label{lem:strang}
\end{lemma}
Here, and in what follows, $\|\cdot\|_h$ denotes the usual broken energy norm
such that
$\|v\|_h = \sqrt{ a_h(v,v)}.$
Due to \eqref{eq:hilbert}, the first term in the right side
of \eqref{eq:strang} is bounded by
\begin{eqnarray}\label{eq:int_error}
\inf_{v_h\in \mathscr{NC}^h_0}\|u-v_h\|_h\leq \|u-\Pi_h u\|_h\leq
C h^s|u|_{s+1},~1<s\leq 2.
\end{eqnarray}
Denote by $f_{jk}$ the trace of $f\at{{\mathbf K}_j}$ on $F_{jk}=\partial{\mathbf K}_j\cap\partial{\mathbf K}_k$
if it is nonempty. Similarly,
the face $F_{jk}$ will designate the boundary of ${\mathbf K}_j$ common with that of ${\mathbf K}_k.$
Now let us bound the second term of the right side of \eqref{eq:strang} which
denotes the consistency error.
For a given cube ${\mathbf K}\in \mathscr{T}_h$, denote by $F_K^{x_1+}$ and
$F_K^{x_1-}$ the face
of ${\mathbf K}$ with outward normal $(1,0,0)$ and $(-1,0,0)$, respectively. Similarly,
we denote the other faces by
$F_K^{x_2+},F_K^{x_2-},F_K^{x_3+},$ and $F_K^{x_3-}$ so that
$\partial
{\mathbf K}=\{F_K^{x_1+},F_K^{x_1-},F_K^{x_2+},F_K^{x_2-},F_K^{x_3+},F_K^{x_3-}\}$. Thus,
integrating by parts elementwise, we have
\begin{eqnarray}
a_h(u,w_h)-\left\langle f,w_h\right\rangle&=&\sum_{{\mathbf K}\in\mathscr{T}_h}\left\langle\frac{\partial u}{\partial{\boldsymbol\nu}},w_h\right\rangle_{\partial {\mathbf K}} \nonumber \\
&=&
\sum_{{\mathbf K}\in\mathscr{T}_h}\int_{F_K^{x_1+}\cup F_K^{x_1-}}\frac{\partial u}{\partial{\boldsymbol\nu}} w_h~\mathrm{d}
s+\sum_{{\mathbf K}\in\mathscr{T}_h}\int_{F_K^{x_2+}\cup F_K^{x_2-}}\frac{\partial u}{\partial{\boldsymbol\nu}}
w_h~\mathrm{d} s \nonumber \\
&&+\sum_{{\mathbf K}\in\mathscr{T}_h}\int_{F_K^{x_3+}\cup F_K^{x_3-}}\frac{\partial u}{\partial{\boldsymbol\nu}}
w_h~\mathrm{d} s=: E_1+E_2+E_3, \label{eq:ExEyEz}
\end{eqnarray}
Before proceeding, we need the following lemma.
\begin{lemma}\label{lem:orth}
By $F_k$ denote the face containing the centroid $M_k$ and by
$V_j^{F_k},j=1,2,3,4,$ denote the vertices on the \cor{face} $F_k$. If
$p\in{\hat{\bP}}_{SK}^{(1)},$ $p(V_j^{F_k})=0,j=1,2,3,4,$ and $p(M_k)=0$, then
\begin{eqnarray}\label{eq:orth_property1}
\int_{F_k}p(x_1,x_2,x_3)\mathrm{d} s=0,\quad
k=1,2,\ldots,6.
\end{eqnarray}
\end{lemma}
\begin{proof}
Without loss of generality, we assume that $M_1=(1,0,0)$. In this case,
we have $p\in{\hat{\bP}}_{SK}^{(1)}$ and $p(1,\pm 1,\pm 1)= p(1,0,0)=0.$ We need to prove that
\begin{eqnarray}\label{eq:orth_property}
\int_{F_1}p(1,x_2,x_3)\,\mathrm{d} x_2\mathrm{d}x_3=0.
\end{eqnarray}
It follows from $p(1,\pm 1,\pm 1)=0$ that
\begin{eqnarray}
p(1,x_2,x_3)=l_1(x_2,x_3)(x_2^2-1)+l_2(x_2,x_3)(x_3^2-1), \quad l_j\in P_1(\mathbb R^2),~j=1,2.
\label{eq:orth}
\end{eqnarray}
Set
$$
l_j(x_2,x_3)=a_jx_2+b_jx_3+c_j,\quad j=1,2.
$$
Then $p(1,0,0)=0$ implies that $c_1+c_2=0,$ which reduces \eqref{eq:orth} to
\begin{eqnarray*}
p(1,x_2,x_3)=(a_1x_2+b_1x_3)(x_2^2-1)+(a_2x_2+b_2x_3)(x_3^2-1)+c_1(x_2^2-x_3^2).
\end{eqnarray*}
Since
\begin{equation}
{\hat{\bP}}_{SK}^{(1)}|_{x_1=1}=\mathrm{Span}\{1,x_2,x_3,x_2^2,x_2x_3,x_3^2,x_2^2x_3\},
\label{eq:spanx}
\end{equation}
invoking $p\in {\hat{\bP}}_{SK}^{(1)}$, we have
\begin{eqnarray*}
a_1=a_2=b_2=0,
\end{eqnarray*}
which leads to
\begin{eqnarray}\label{eq:poly_form}
p(1,x_2,x_3)=b_1x_3(x_2^2-1)+c_1(x_2^2-x_3^2).
\end{eqnarray}
It follows from \eqref{eq:poly_form} that
\eqref{eq:orth_property1} holds. This completes the proof.
\end{proof}
This lemma implies that Type 1 element can pass a lower-order patch
test (the test functions are in $P_0$, not $P_1$), which will lead to a
convergent solution for second-order elliptic problems, but the
convergence order is not optimal. To bound $E_1,E_2,E_3$, we also
need some interpolation operators.
\subsection{Some interpolation and projection operators}
For the reference element ${\hat{\bK}}=[-1,1]\times [-1,1]\times[-1,1]$, consider
the interpolation problem on the face of $F_{{\hat{\bK}}}^{x_1+}$: the interpolation
points are $(1,1,1)$, $(1,1,-1)$, $(1,-1,1)$, $(1,-1,-1)$, and $(1,0,0)$, which are the four
vertices and the centroid of $F_{{\hat{\bK}}}^{x_1+},$ with the interpolation space
${\hat Q}_1^*( F_{{\hat{\bK}}}^{x_1+} ),$ where
\begin{equation*}
{\hat Q}_1^*( F_{{\hat{\bK}}}^{x_1+} ):=\mathrm{Span}\{1,\hat{x}_2,\hat{x}_3,\hat{x}_2\hat{x}_3,\hat{x}_3^2\} \subset
{\hat{\bP}}_{SK}^{(1)}|_{\hat{x}_1=1}
\end{equation*}
is an enriched bilinear polynomial space on the face
$F_{{\hat{\bK}}}^{x_1+}$ (see \eqref{eq:spanx}).
The above interpolation problem has a solution by using the bubble function
\begin{eqnarray*}
b(\hat{x}_2,\hat{x}_3)=1-\hat{x}_3^2,
\end{eqnarray*}
and the standard bilinear interpolation basis functions
\begin{eqnarray*}
&& q_1(\hat{x}_2,\hat{x}_3) =\frac14 (1+\hat{x}_2)( 1 + \hat{x}_3),\quad
q_2(\hat{x}_2,\hat{x}_3)=\frac14(1 - \hat{x}_2)( 1+ \hat{x}_3) ,\\
&& q_3(\hat{x}_2,\hat{x}_3)= \frac14 (1 - \hat{x}_2)(1 - \hat{x}_3),\quad
q_4(\hat{x}_2,\hat{x}_3)= \frac14 (1+ \hat{x}_2)( 1- \hat{x}_3),\\
\end{eqnarray*}
as follows:
\begin{eqnarray*}
\hat{\varphi}_j = q_j - \frac14b,\, j =1,\cdots,4; \quad
\hat{\varphi}_5=b.
\end{eqnarray*}
Thus for a \cor{continuous} function $f$ defined on the face
$F_{{\hat{\bK}}}^{x_1+}$, the interpolation
polynomial is given by
\begin{eqnarray}\label{eq:hsIF}
\hat{\mathscr{I}}_F^{x_1+}f=f(1,1,1)\hat{\varphi}_1+f(1,-1,1)\hat{\varphi}_2+f(1,-1,-1)\hat{\varphi}_3+f(1,1,-1)\hat{\varphi}_4+f(1,0,0)\hat{\varphi}_5.
\end{eqnarray}
And then we can also define the interpolation operator on
the opposite face with the same space and denote it by
$\hat{\mathscr{I}}_F^{x_1-}$. Similarly,
define the interpolation operators on the other
faces of ${\hat{\bK}}$ with the corresponding spaces:
\begin{eqnarray*}
&&{\hat Q}_1^*( F_{{\hat{\bK}}}^{x_2} ):=\mathrm{Span}\{1,\hat{x}_3,\hat{x}_1,\hat{x}_3\hat{x}_1,\hat{x}_1^2\} \subset
{\hat{\bP}}_{SK}^{(1)}|_{\hat{x}_2=\pm 1},\\
&&{\hat Q}_1^*( F_{{\hat{\bK}}}^{x_3} ):=\mathrm{Span}\{1,\hat{x}_1,\hat{x}_2,\hat{x}_1\hat{x}_2,\hat{x}_2^2\} \subset
{\hat{\bP}}_{SK}^{(1)}|_{\hat{x}_3=\pm 1}
\end{eqnarray*}
and denote them by
$\hat{\mathscr{I}}_F^{x_2+},\hat{\mathscr{I}}_F^{x_2-},\hat{\mathscr{I}}_F^{x_3+},\hat{\mathscr{I}}_F^{x_3-}$, respectively.
For a given ${\mathbf K}\in\mathscr{T}_h$, we can define the interpolation operator
$\mathscr{I}_F^{x_i\pm}$ by $\mathscr{I}_F^{x_i\pm}=\hat{\mathscr{I}}_F^{x_i\pm}\circ
T_{\mathbf K}^{-1}$.
Notice that $\jump{\mathscr{I}_F^{x_i+}w_h}{F}= 0$ for all interior faces $F$ for every $w_h\in \mathscr{NC}^h_0.$
Moreover, the above interpolation operators
preserve linear polynomials on each surface as stated in the following lemma.
\begin{lemma}\label{lem:sIF}
$\mathscr{I}_F^{x_i\pm}$
map $w_h\in \mathscr{NC}^h_0$ such that their images across interior faces are continuous
for all interior faces $F$. Moreover, they
preserve bilinear polynomials on faces.
\end{lemma}
Moreover, the above interpolation has the following interesting property:
\begin{lemma}\label{lem:sIF-wh}
For all $w_h\in \mathscr{NC}^h_0$ and ${\mathbf K}\in\mathscr{T}_h,$
\begin{eqnarray}\label{eq:Fx}
w_h|_{F_K^{x_i+}}-\mathscr{I}_F^{x_i+}(w_h\at{{\mathbf K}})=w_h|_{F_K^{x_i-}}-\mathscr{I}_F^{x_i-}(w_h\at{{\mathbf K}}),
\quad i=1,2,3.
\end{eqnarray}
\end{lemma}
\begin{proof}
We only prove the case of $i=1$ in Eq. \eqref{eq:Fx}
which suffices to prove the statement on the reference element ${\hat{\bK}}$:
\begin{eqnarray} \label{eq:Fhx}
\hat{w}_h|_{\hat{F}_{{\hat{\bK}}}^{x_1+}}-\hat{\mathscr{I}}_{\hat{F}}^{x_1+}(\hat{w}_h)=\hat{w}_h|_{\hat{F}_{{\hat{\bK}}}^{x_1-}}-\hat{\mathscr{I}}_{\hat{F}}^{x_1-}(\hat{w}_h)\quad\forall
\hat{w}_h\in {\hat{\bP}}_{SK}^{(1)}.
\end{eqnarray}
Due to the interpolation property,
$\hat{w}_h|_{\hat{F}_{{\hat{\bK}}}^{x_1+}}-\hat{\mathscr{I}}_{\hat{F}}^{x_1+}(\hat{w}_h)=0$
for all $\hat{w}_h\in
{\hat Q}_1^*( F_{{\hat{\bK}}}^{x_1+}),$ and the same is true if
$x_1^+$ is replaced by $x_1^-.$
Thus, it suffices to show that \eqref{eq:Fx} holds for all $\hat{w}_h\in
{\hat{\bP}}_{SK}^{(1)}|_{\hat{x}_1=1} \setminus {\hat Q}_1^*(
F_{{\hat{\bK}}}^{x_1+}),$ which
is nothing but $\mathrm{Span}\{\hat{x}_2^2, \hat{x}_2^2 \hat{x}_3\}.$ Since both
$\hat{x}_2^2$ and $\hat{x}_2^2 \hat{x}_3$ are independent of $\hat{x}_1$, it is obvious that
\eqref{eq:Fhx} holds for each of them.
This proves the lemma.
\end{proof}
\begin{comment}
\cor{Set}
$RQ_1=\mathrm{Span}\{1,\hat{x}_1,\hat{x}_2,\hat{x}_3,\hat{x}_1^2-\hat{x}_2^2,\hat{x}_1^2-\hat{x}_3^2\}$
\cor{following \cite{rannacher-turek}.}
For the reference element ${\hat{\bK}}$, define an
interpolation operator $R_{{\hat{\bK}}}: C({\hat{\bK}})\rightarrow RQ_1$ by
\begin{eqnarray*}
R_{{\hat{\bK}}}f (\hat{M}_j)=f (\hat{M}_j), j=1,\ldots,6,
\text{ for all } f\in C({\hat{\bK}}).
\end{eqnarray*}
Obviously,
\begin{eqnarray*}
R_{{\hat{\bK}}}f =\sum_{j=1}^6f (\hat{M}_j){\hat\psi}_j,
\end{eqnarray*}
where
\begin{eqnarray*}
{\hat\psi}_j (\hat{x}_1,\hat{x}_2,\hat{x}_3)
= \begin{cases} \frac{1}{6} \left[1-(\hat{x}_1^2 + \hat{x}_2^2 + \hat{x}_3^2)\right] +\frac{1}{2}
(\hat{x}_j^2+\hat{x}_j),\quad & j = 1, 2, 3,\\
\frac{1}{6}\left[1- (\hat{x}_1^2 + \hat{x}_2^2 + \hat{x}_3^2)\right] +\frac{1}{2}
(\hat{x}_{7-j}^2-\hat{x}_{7-j}),\quad & j = 4, 5, 6.
\end{cases}
\end{eqnarray*}
Thus we have
\begin{eqnarray}
R_{{\hat{\bK}}}f|_{\hat{x}_1=1} &=&\cor{\sum_{j=1}^6}f (\hat{M}_j){\hat\psi}_j
(1,\hat{x}_2,\hat{x}_3) \nonumber \\
&=&\cor{\sum_{j=2}^5}f (\hat{M}_j){\hat\psi}_j (1,\hat{x}_2,\hat{x}_3)+f(\hat{M}_1)
\left(1-\frac{\hat{x}_2^2+\hat{x}_3^2}{6}\right)
-f(\hat{M}_6)\frac{\hat{x}_2^2+\hat{x}_3^2}{6} \nonumber \\
&=&
\Theta({\hat{\bK}},f,\hat{x}_2,\hat{x}_3)+f(\hat{M}_1).\label{eq:RK1}
\end{eqnarray}
\cor{Since} ${\hat\psi}_j(1,\hat{x}_2,\hat{x}_3)={\hat\psi}_j(-1,\hat{x}_2,\hat{x}_3)$ for
$j=2,3,4,5$, one gets
\begin{eqnarray}
R_{{\hat{\bK}}}f|_{\hat{x}_1=-1} &=&\sum_{j=1}^6f (\hat{M}_j){\hat\psi}_j
(\cor{-1},\hat{x}_2,\hat{x}_3) \nonumber \\
&=&\sum_{j=2}^5f (\hat{M}_j){\hat\psi}_j (1,\hat{x}_2,\hat{x}_3)-f(\hat{M}_1)
\frac{\hat{x}_2^2+\hat{x}_3^2}{6}
+f(\hat{M}_6)\left(1-\frac{\hat{x}_2^2+\hat{x}_3^2}{6} \right) \nonumber \\
&=&
\Theta({\hat{\bK}},f,\hat{x}_2,\hat{x}_3)+f(\hat{M}_6). \label{eq:RK2}
\end{eqnarray}
For any given ${\mathbf K}\in\mathscr{T}_h$, we can define the interpolation operator
$R_{{\mathbf K}}:=R_{{\hat{\bK}}}\cor{\circ} T_K^{-1}$. Denote by $M_K^{x+}$ and $M_K^{x-}$ the
centroids of the faces with outward normal $(1,0,0)$ and $(-1,0,0)$,
respectively.
From \eqref{eq:RK1} and \eqref{eq:RK2}, we have the following observation.
\begin{lemma}\label{lem:RbK}
For any $f\in C({\mathbf K})$, we have
\begin{eqnarray*}
R_{{\mathbf K}}f|_\cor{F_K^{x+}} -f(M_K^{x+})= R_{{\mathbf K}}f|_\cor{F_K^{x-}} - f(M_K^{x-})
=\Theta({\mathbf K},f,x_2,x_3).
\end{eqnarray*}
\end{lemma}
\end{comment}
Define
$RQ=\mathrm{Span}\{1,\hat{x}_1,\hat{x}_2,\hat{x}_3,\hat{x}_1^2-\hat{x}_2^2,\hat{x}_1^2-\hat{x}_3^2\}$.
For the reference element ${\hat{\bK}}$, let $R_{{\hat{\bK}}}: H^2({\hat{\bK}})\rightarrow RQ$ be an
interpolation operator defined by
\begin{eqnarray*}
R_{{\hat{\bK}}}{\hat\phi} (\hat{M}_j)={\hat\phi} (\hat{M}_j), j=1,\ldots,6
\end{eqnarray*}
for all ${\hat\phi}\in H^2({\hat{\bK}})$. It is exactly the so-called rotation element.
Obviously,
\begin{eqnarray*}
R_{{\hat{\bK}}}{\hat\phi} =\sum_{i=1}^6{\hat\phi} (\hat{M}_i){\hat\psi}_i (\hat{x}_1,\hat{x}_2,\hat{x}_3)
\end{eqnarray*}
where
\begin{eqnarray*}
\begin{cases}
{\hat\psi}_i=\frac{1}{6}\left(1+3\hat{x}_i+\sum_{1\leq j\leq
3,j\neq i} (\hat{x}_i^2-\hat{x}_j^2) \right),\\
{\hat\psi}_{7-i}=\frac{1}{6}\left(1-3\hat{x}_i+\sum_{1\leq j\leq
3,j\neq i} (\hat{x}_i^2-\hat{x}_j^2) \right),
\end{cases} i=1,2,3.
\end{eqnarray*}
It is easy to notice that for $i=1,2,\ldots,6$
\begin{eqnarray*}
&&{\hat\psi}_i\at{\hat{x}_j=1}={\hat\psi}_i\at{\hat{x}_j=-1}, \quad
\text{if $j\neq i$ and $j\neq 7-i$}
\end{eqnarray*}
and for $i=1,2,3$
\begin{eqnarray*}
&&{\hat\psi}_i\at{\hat{x}_i=1}={\hat\psi}_{7-i}\at{\hat{x}_i=-1}=1-\frac{1}{6}\sum_{1\leq j\leq
3,j\neq i}\hat{x}_j^2, \\
&&{\hat\psi}_i\at{\hat{x}_i=-1}={\hat\psi}_{7-i}\at{\hat{x}_i=1}=-\frac{1}{6}\sum_{1\leq j\leq
3,j\neq i}\hat{x}_j^2.
\end{eqnarray*}
Thus we have
\begin{eqnarray*}
R_{{\hat{\bK}}}{\hat\phi}|_{\hat{x}_j=1} &=&\sum_{i=1}^6{\hat\phi}
(\hat{M}_i){\hat\psi}_i\at{x_j=1}\\
&=&\sum_{1\leq i\leq 6,i\neq j,i\neq 7-j}{\hat\phi}
(\hat{M}_i){\hat\psi}_i\at{x_j=1}+{\hat\phi}(\hat{M}_j)
\left(1-\frac{1}{6}\sum_{1\leq i\leq
3,i\neq j}\hat{x}_i^2\right)\\
&&-{\hat\phi}(\hat{M}_{7-j})\left(\frac{1}{6}\sum_{1\leq i\leq
3,i\neq j}\hat{x}_i^2\right)\\
&:=&
\Theta({\hat{\bK}},{\hat\phi},\{\hat{x}_1,\hat{x}_2,\hat{x}_3\}\setminus\hat{x}_j)+{\hat\phi}(\hat{M}_j),
\end{eqnarray*}
and
\begin{eqnarray*}
R_{{\hat{\bK}}}{\hat\phi}|_{\hat{x}_j=-1} &=&\sum_{i=1}^6{\hat\phi}
(\hat{M}_i){\hat\psi}_i\at{x_j=-1} \\
&=&\sum_{1\leq i\leq 6,i\neq j,i\neq 7-j}{\hat\phi}
(\hat{M}_i){\hat\psi}_i\at{x_j=1}-{\hat\phi}(\hat{M}_j)
\left(\frac{1}{6}\sum_{1\leq i\leq
3,i\neq j}\hat{x}_i^2\right)\\
&&-{\hat\phi}(\hat{M}_{7-j})\left(1-\frac{1}{6}\sum_{1\leq i\leq
3,i\neq j}\hat{x}_i^2\right)\\
&=&
\Theta({\hat{\bK}},{\hat\phi},\{\hat{x}_1,\hat{x}_2,\hat{x}_3\}\setminus\hat{x}_j)+{\hat\phi}(\hat{M}_{7-j}),
\end{eqnarray*}
For any given ${\mathbf K}\in\mathscr{T}_h$, we can define the interpolation operator
$R_{{\mathbf K}}:=R_{{\hat{\bK}}}\circ T_K^{-1}$. Denote by $M_K^{x_j+}$
and $M_K^{x_j-}$ the
centroids of the faces $F_K^{x_j+}$ and $F_K^{x_j-}$,
respectively. Then for any $\phi\in H^2({\mathbf K})$, we have
\begin{eqnarray*}
&&R_{{\mathbf K}}\phi|_{F_K^{x_j+}}=\Theta({\mathbf K},\phi,\{x_1,x_2,x_3\}\setminus
x_j)+\phi(M_K^{x_j+}),\\
&&R_{{\mathbf K}}\phi|_{F_K^{x_j-}}=\Theta({\mathbf K},\phi,\{x_1,x_2,x_3\}\setminus
x_j)+\phi(M_K^{x_j-}).
\end{eqnarray*}
\subsection{The error estimates}
We now turn to bounding $|E_1| + |E_2| + |E_3|$ in \eqref{eq:ExEyEz}.
Below, we will give an estimate of $|E_1|$ in detail while
similar estimates of $|E_2|$ and $|E_3|$ will be omitted.
It is easy to notice that for any $F\in \partial
{\mathbf K}'\cap\partial{\mathbf K}''\cap \mathscr{F}_h^{(1)}\neq \emptyset $ and $w\in\mathscr{NC}^h_0$, we have
\begin{eqnarray*}
&&\int_F \nabla u \big(w|_{{\mathbf K}'}-w|_{{\mathbf K}''}\big)\mathrm{d} s\\
&=& \int_F \nabla u
\big((w|_{{\mathbf K}'}-\mathscr{I}_F^{x_1}(w))-(w|_{{\mathbf K}''}-\mathscr{I}_F^{x_1}(w))\big)\mathrm{d} s\\
&=& \int_F \big(\nabla u-M_F(\nabla u)\big)
\big((w|_{{\mathbf K}'}-\mathscr{I}_F^{x_1}(w))-(w|_{{\mathbf K}''}-\mathscr{I}_F^{x_1}(w))\big)\mathrm{d} s
\end{eqnarray*}
where $M_F(\nabla u)$ denotes the value of
$\nabla u$ at the centroid of $F$. The second equality holds
due to the orthogonality \eqref{eq:orth_property1}.
Hence we have
\begin{eqnarray*}
E_1 &=&\sum_{{\mathbf K}\in\mathscr{T}_h}\sum_{i=1}^3\int_{F_K^{x_1+}\cup
F_K^{x_1-}}\frac{\partial u}{\partial x_i}w\nu_i\mathrm{d} s\\
&=&
\sum_{{\mathbf K}\in\mathscr{T}_h}\sum_{i=1}^3\left(\int_{F_K^{x_1+}}\frac{\partial u}{\partial x_i}
(w-\mathscr{I}_F^{x_1+}(w))\nu_i\mathrm{d} s +\int_{F_K^{x_1-}}\frac{\partial u}{\partial x_i}
(w-\mathscr{I}_F^{x_1-}(w))\nu_i\mathrm{d} s\right)\\
&=&
\sum_{{\mathbf K}\in\mathscr{T}_h}\sum_{i=1}^3\left(\int_{F_K^{x_1+}}\left(\frac{\partial
u}{\partial x_i}-M_{F_K^{x_1+}}(\frac{\partial u}{\partial x_i})\right)
(w-\mathscr{I}_F^{x_1+}(w))\nu_i\mathrm{d} s\right.\\
&&\left.+\int_{F_K^{x_1-}}\left(\frac{\partial
u}{\partial x_i}-M_{F_K^{x_1-}}(\frac{\partial u}{\partial x_i})\right)
(w-\mathscr{I}_F^{x_1-}(w))\nu_i\mathrm{d} s\right),
\end{eqnarray*}
where ${\boldsymbol\nu}=(\nu_1,\nu_2,\nu_3)^T$ is the outward unit normal to
$F$.
Thus due to \eqref{eq:Fx}, we arrive at
\begin{eqnarray*}
E_1 &=&
\sum_{{\mathbf K}\in\mathscr{T}_h}\sum_{i=1}^3\left(\int_{F_K^{x_1+}}\left(\frac{\partial u}{\partial x_i}
-\Theta({\mathbf K},\frac{\partial u}{\partial
x_i},x_2,x_3)-M_{F_K^{x_1+}}(\frac{\partial u}{\partial x_i})\right)
(w-\mathscr{I}_F^{x_1+}(w))\nu_i\mathrm{d} s\right.\\
&&\left.+\int_{F_K^{x_1-}}\left(\frac{\partial u}{\partial x_i}
-\Theta({\mathbf K},\frac{\partial u}{\partial
x_i},x_2,x_3)-M_{F_K^{x_1-}}(\frac{\partial u}{\partial x_i})\right)
(w-\mathscr{I}_F^{x_1-}(w))\nu_i\mathrm{d} s\right)\\
&=& \sum _{{\mathbf K}\in\mathscr{T}_h}\sum_{i=1}^3\left(\int_{F_K^{x_1+}}\left(\frac{\partial u}{\partial x_i}
-R_{{\mathbf K}}\frac{\partial u}{\partial x_i}\right)
(w-\mathscr{I}_F^{x_1+}(w))\nu_i\mathrm{d} s\right.\\
&&\left.+\int_{F_K^{x_1-}}\left(\frac{\partial u}{\partial x_i}-R_{{\mathbf K}}\frac{\partial u}{\partial x_i}\right)
(w-\mathscr{I}_F^{x_1-}(w))\nu_i\mathrm{d} s\right).
\end{eqnarray*}
Since $R_{{\mathbf K}}$ and $\mathscr{I}_F^{x}$ preserve $P_1({\mathbf K})$ and
$P_1(F_K^{x_1})$,
respectively, it
follows from the trace theorem and the Cauchy--Schwarz inequality that
\begin{eqnarray*}
|E_1|\leq Ch^2||w||_h||u||_{H^3(\Omega)}.
\end{eqnarray*}
Similarly, we also have
\begin{eqnarray*}
|E_2|\leq Ch^2||w||_h||u||_{H^3(\Omega)},\ |E_3|\leq Ch^2||w||_h||u||_{H^3(\Omega)}.
\end{eqnarray*}
Hence
\begin{eqnarray*}
\sup_{w\in\mathscr{NC}^h_0}\frac{|a_h(u,w)-\langle f,w\rangle|}{\|w\|_h}=\sup_{w\in\mathscr{NC}^h_0}
\frac{|E_1+E_2+E_3|}{||w||_h}\leq Ch^2 ||u||_{H^3(\Omega)}.
\end{eqnarray*}
By collecting the above results, we get the following energy-norm error estimate.
\begin{thm}\label{thm:h1}
Let $u\in H^3(\Omega)\cap H_0^1(\Omega)$ and $u_h\in \mathscr{NC}^h_0$ satisfy
\eqref{eq:weak} and \eqref{eq:solution}, respectively. Then we have the energy norm
error estimate:
\begin{eqnarray*}
||u-u_h||_h\leq Ch^2||u||_3.
\end{eqnarray*}
\end{thm}
By \cor{a} standard Aubin-Nitsche duality argument, \cor{an} $L_2(\Omega)$-error
estimate can be easily obtained.
\begin{thm}\label{thm:l2}
Let $u\in H^{3}(\Omega)\cap H_0^1(\Omega)$ and $u_h\in\mathscr{NC}^h_0$ be the solution of
\eqref{eq:weak} and \eqref{eq:solution}, respectively. Then we have
\begin{eqnarray*}
\|u-u_h\|_{0}\leq Ch^{3} \|u\|_3.
\end{eqnarray*}
\end{thm}
\begin{proof}
Let $\eta_h = \Pi_h u - u_h\in \mathscr{NC}^h_0$ and consider the dual problem:
\begin{subeqnarray}
-\Delta \psi &=& \eta_h,\quad\Omega,\\
\psi &=& 0,\quad\partial\Omega.
\end{subeqnarray}
Since $\eta_h \in L^2(\Omega)$, the elliptic regularity implies that
$\|\psi\|_2 \le C \|\eta_h\|.$ By \eqref{eq:hilbert} and the triangle
inequality, it suffices to prove that
$\|\eta_h \|_h \le C h^2 \|u\|_3.$
First, we have from Theorem \ref{thm:h1} and \eqref{eq:hilbert} that
\begin{eqnarray}\label{eq:eta-tri}
\|\eta_h \|_h \le \|u-u_h\|_h + \| u - \Pi_h u\|_h \le C h^2 \|u\|_3.
\end{eqnarray}
Following the arguments in the derivation of
the energy estimate, we have
\begin{eqnarray*}
\|\eta_h\|^2 &=& -\sum_{{\mathbf K}\in\mathscr{T}_h} \left(\eta_h,\Delta
\psi\right)_{\mathbf K} \\
&=& \sum_{{\mathbf K}\in\mathscr{T}_h} \left( \nabla \eta_h, \nabla \psi\right)_{\mathbf K} -
\sum_{{\mathbf K}\in\mathscr{T}_h} \left\langle\eta_h, {\boldsymbol\nu}\cdot \nabla \psi\right\rangle_{\partial{\mathbf K}}\\
&=& a_h(\eta_h,\psi) -
\sum_{{\mathbf K}\in\mathscr{T}_h} \left\langle\eta_h - \mathscr{I}_F \eta_h , {\boldsymbol\nu}\cdot\nabla \psi
\right\rangle_{\partial{\mathbf K}}\\
&=& a_h(\eta_h,\psi) -
\sum_{{\mathbf K}\in\mathscr{T}_h} \left\langle\eta_h - \mathscr{I}_F \eta_h , {\boldsymbol\nu}\cdot\left(\nabla \psi
-M_{F}(\nabla \psi)\right)\right\rangle_{\partial{\mathbf K}},
\end{eqnarray*}
where $M_F(\nabla \psi)$ denotes the value of
$\nabla \psi$ at the centroid of $F$.
Hence, invoking the elliptic regularity and \eqref{eq:eta-tri},
\begin{eqnarray}
\|\eta_h\|^2 &\le&
|a_h(\eta_h,\psi)| +
\left[\sum_{{\mathbf K}\in\mathscr{T}_h} \left|\eta_h - \mathscr{I}_F
\eta_h\right|^2_{0,\partial{\mathbf K}}\right]^{\frac12}
\left[\sum_{{\mathbf K}\in\mathscr{T}_h} \left|{\boldsymbol\nu}\cdot\left(\nabla \psi -
M_F(\nabla \psi)\right)\right|^2_{0,\partial{\mathbf K}}\right]^{\frac12} \nonumber \\
&\le& |a_h(\eta_h,\psi)| +
C h^{\frac12} \|\eta_h\|_h h^{\frac12} \| \psi \|_2 \nonumber\\
&\le& |a_h(\eta_h,\psi)| + C h^3 \|u\|_3~ \|\eta_h\|.\label{eq:etah}
\end{eqnarray}
Thus it remains to bound $ |a_h(\eta_h,\psi)|.$ For this, write
\begin{eqnarray}\label{eq:eta-psi}
a_h(\eta_h,\psi) = a_h(\eta_h,\psi - \Pi_h\psi) + a_h(\Pi_h u - u, \Pi_h\psi) + a_h(u - u_h, \Pi_h\psi).
\end{eqnarray}
The first term in \eqref{eq:eta-psi} is bounded as follows:
\begin{eqnarray}
\nonumber
|a_h(\eta_h,\psi - \Pi_h\psi) | &\le& C \|\eta_h\|_h \|\psi-\Pi_h\psi\|_h \\ &\le&
C h^2 \|u\|_3 h\|\psi\|_2 \le C h^3 \|u\|_3 \|\eta_h\|.
\label{eq:eta-psi-1}
\end{eqnarray}
Since the second term in \eqref{eq:eta-psi} can be decomposed by
\begin{eqnarray*}
a_h(\Pi_h u - u, \Pi_h\psi) &=&
\sum_{{\mathbf K}\in\mathscr{T}_h}\left(\Pi_h u - u,-\Delta(\Pi_h \psi)\right)_{\mathbf K} \\
&&\qquad+ \sum_{{\mathbf K}\in\mathscr{T}_h} \left\langle\Pi_h u -
u,{\boldsymbol\nu}\cdot\nabla\Pi_h \psi\right\rangle_{\partial{\mathbf K}}\\
&=& \sum_{{\mathbf K}\in\mathscr{T}_h}\left(\Pi_h u - u,-\Delta\Pi_h \psi\right)_{\mathbf K} \\
&&\qquad+ \sum_{{\mathbf K}\in\mathscr{T}_h} \left\langle\Pi_h u -
u,{\boldsymbol\nu}\cdot\left(\nabla\Pi_h \psi -M_F\left(\nabla\Pi_h
\psi\right)\right)\right\rangle_{\partial{\mathbf K}},
\end{eqnarray*}
it can be estimated as follows:
\begin{eqnarray}\label{eq:eta-psi-2}
|a_h(\Pi_h u - u, \Pi_h\psi)| \le C h^3\| u\|_3 \|\psi\|_2
+ C h^{\frac32} \|u\|_3 h^{\frac12} \|\psi\|_2 \le C h^3\| u\|_3 \|\eta_h\|.
\end{eqnarray}
The third term in \eqref{eq:eta-psi} is bounded in the same fashion as
\begin{eqnarray}
|a_h(u - u_h, \Pi_h\psi ) | \le C h^3\| u\|_3 \|\eta_h\|.
\label{eq:eta-psi-3}
\end{eqnarray}
Collecting \eqref{eq:eta-psi-1}--\eqref{eq:eta-psi-3} and plugging into
\eqref{eq:eta-psi} combined with \eqref{eq:etah}, one sees that
the theorem follows by dividing both sides by $\|\eta_h\|.$
\end{proof}
\section{Error estimates of other Smith-Kidger elements}
In this section we claim that the approximation of the solutions of the second-order elliptic problem
with Type 2 and 5 Smith-Kidger elements is also convergent in
optimal order. In these cases, it is easy to check that the
orthogonality in Lemma \ref{lem:orth} holds. The difference in
the proof lies in the construction of the interpolation operator.
For the second type element, the interpolation of
$\mathscr{I}_F^{x_1}$,
$\mathscr{I}_F^{x_2}$, $\mathscr{I}_F^{x_3}$ should be $\mathrm{Span}\{1,x_2,x_3,x_2x_3,x_2^2\}$,
$\mathrm{Span}\{1,x_1,x_3,x_1x_3,x_3^2\}$ and $\mathrm{Span}\{1,x_1,x_2,x_1x_2,x_1^2\}$, respectively.
And for the fifth type element, the corresponding interpolation spaces
should be taken as $\mathrm{Span}\{1,x_2,x_3,x_2x_3,x_3^2+x_2^2\}$,
$\mathrm{Span}\{1,x_1,x_3,x_1x_3,x_1^2+x_3^2\}$ and $\mathrm{Span}\{1,x_1,x_2,x_1x_2,x_1^2+x_2^2\}$,
respectively.
For the Type 6 element, the orthogonality in Lemma \ref{lem:orth} does
not hold, but Eq. \eqref{eq:Fx} holds. Thus, we have
\begin{eqnarray*}
|E_1| &=& \left|\sum_{{\mathbf K}\in\mathscr{T}_h}\left(\int_{F_K^{x_1+}}\frac{\partial u}{\partial{\boldsymbol\nu}}
(w-\mathscr{I}_F^{x_1+}(w))\,\mathrm{d} s +\int_{F_K^{x_1-}}\frac{\partial u}{\partial{\boldsymbol\nu}}
(w-\mathscr{I}_F^{x_1-}(w))\,\mathrm{d} s\right)\right|\\
&=& \left|\sum_{{\mathbf K}\in\mathscr{T}_h}\left(\int_{F_K^{x_1+}}\left(\frac{\partial u}{\partial{\boldsymbol\nu}}-P_{{\mathbf K}}^0(\frac{\partial u}{\partial{\boldsymbol\nu}})\right)
(w-\mathscr{I}_F^{x_1+}(w))\,\mathrm{d} s\right.\right.\\
&&\left.\left.+\int_{F_K^{x_1-}}\left(\frac{\partial u}{\partial{\boldsymbol\nu}}-P_{{\mathbf K}}^0(\frac{\partial u}{\partial{\boldsymbol\nu}})\right)
(w-\mathscr{I}_F^{x_1-}(w))\,\mathrm{d} s\right)\right|\\
&\leq & Ch||u||_2||w||_h,
\end{eqnarray*}
where $$P_{{\mathbf K}}^0\left(\frac{\partial u}{\partial{\boldsymbol\nu}}\right)=\frac{1}{|{\mathbf K}|}\int_{{\mathbf K}}\frac{\partial u}{\partial{\boldsymbol\nu}} \,\mathrm{d} x,$$
and $|{\mathbf K}|=\int_{{\mathbf K}}\,\mathrm{d} x$.
By a similar derivation, we will get
\begin{thm}
Let $u\in H^2(\Omega)\cap H_0^1(\Omega)$ satisfy
\eqref{eq:weak} and $u_h$ be the solution of \eqref{eq:solution} with
the sixth type element. Then we have the energy norm
error estimate:
\begin{eqnarray*}
||u-u_h||_h\leq Ch||u||_2,\\
||u-u_h||_0\leq Ch^{2} \|u\|_2.
\end{eqnarray*}
\end{thm}
\section{A new 14-node brick element}
In this section, we present a new element with 14 nodes. The degrees
of freedom are the same as those in the Smith-Kidger element and
the Meng-Sheen-Luo-Kim element, but the shape function space is taken as
$P_2\oplus \mathrm{Span}\{\cor{x_1x_2x_3,} x_1(x_2^2+x_3^2),x_2(x_1^2+x_3^2),x_3(x_1^2+x_2^2)\}$.
\cor{
Denote the corresponding higher-degree polynomials to those in \eqref{eq:r} as
follows:
\begin{eqnarray}\label{eq:new-r}
\begin{split}
r_0(\hat{x}_1,\hat{x}_2,\hat{x}_3) = \hat{x}_1\hat{x}_2\hat{x}_3,~ r_1(\hat{x}_1,\hat{x}_2,\hat{x}_3) =
\frac12\hat{x}_1(\hat{x}_2^2+\hat{x}_3^2)
,\\
r_2(\hat{x}_1,\hat{x}_2,\hat{x}_3) =\frac12\hat{x}_2(\hat{x}_1^2+\hat{x}_3^2), ~r_3(\hat{x}_1,\hat{x}_2,\hat{x}_3) = \frac12\hat{x}_3(\hat{x}_1^2+\hat{x}_2^2).
\end{split}
\end{eqnarray}
Then, again equipped with the 8 vertex values plus 6 face integrals DOFs,
the basis functions corresponding to the vertices and face-integrals are given
exactly same as the formulae \eqref{eq:phiV}, \eqref{eq:phiF}, and \eqref{eq:ell-q-r}.
}
\cor{In order to analyze convergence}, we need to verify the orthogonality in
Lemma \ref{lem:orth} and Eq. \eqref{eq:Fhx}.
The orthogonality can be checked directly \cor{as in} the proof in Lemma
\ref{lem:orth}. \cor{In order to check} Eq. \eqref{eq:Fhx}, \cor{it is enough
to
define}
the corresponding interpolation spaces as $\mathrm{Span}\{1,x_2,x_3,x_2x_3,x_3^2+x_2^2\}$, $\mathrm{Span}\{1,x_1,x_3,x_1x_3,x_1^2+x_3^2\}$
and $\mathrm{Span}\{1,x_1,x_2,x_1x_2,x_1^2+x_2^2\}$, respectively. Thus, \cor{by following the
same argument as in the previous sections}, we also get optimal convergence
for the
second-order elliptic problems. That is, in this case, Theorems
\ref{thm:h1} and \ref{thm:l2} hold.
\section{Further remarks and conclusions}
In this paper, we have proved that for second-order elliptic
problems, the Smith-Kidger element of type 1, 2 and 5 can obtain
optimal convergence order both in energy norm and $L_2(\Omega)$ norm,
while the sixth type element loses one order of accuracy in each
norm. In the proof, the key points lie in that they have weak
orthogonality (Lemma \ref{lem:orth}) and satisfy Eq. \eqref{eq:Fhx}.
In \cite{2013-Meng-p-}, \cor{we} also proposed another kind of DOFs, that
is, the \cor{values} at the eight vertices and the integration values over the six
faces. Indeed, it is easy to check that Type 1, 2, 5 and the new
element presented in this paper \cor{give} optimal convergence \cor{orders}
for second-order elliptic problems. Besides, we can show that
\cor{if the face-centroid values DOFs are replaced by the face integrals DOFs,}
Type 6 \cor{element}
also \cor{are of optimal-order} convergence \cor{owing to} a weak
orthogonality, \cor{thus} improving one order accuracy.
\end{document}
|
\begin{document}
\maketitle
\begin{abstract}
Let $ 1\leq p< \infty$ and let $\psi\in L^{p}(\mathbb R^d)$. We study $p-$Riesz bases of quasi shift invariant spaces $V^p(\psi;Y)$.
\end{abstract}
\section{Introduction}
\label{sec:intro}
Let $ 1\leq p< \infty$ and let $\psi\in L^{p}(\mathbb R^d)$. We consider the shift invariant space
$
V^p(\psi)= \overline{\Span \{\tau_k \psi\}_{k\in\Z^d } },
$
where $\tau_{ s}f(x)=f(x+ s)$ is the translation and ``bar'' denotes the closure in $L^p(\mathbb R^d)$.
Shift-invariant spaces appear naturally in signal theory and in other branches of applied sciences.
In \cite{AST} \cite{GS} and in the recent preprint \cite{HL}
{\it quasi-shift invariant spaces} of functions are considered.
Given $ X = \{x_j\}_{j\in\Z^d} $, a countable and discrete\footnote{ A countable set $X\subset \mathbb R^d$ is {\it discrete} if for every $x_j\in X$ there exists $\delta_j>0$ such that $|x_j-x_k|_2>\delta_j$ for every $k\ne j$.} subset of $\mathbb R^d$ and a function
$\psi\in L^p(\mathbb R^d)$, we let
\begin{equation}\label{def-quasi}
V^p ( \psi;\,X) = \overline{ {\rm Span}\{\tau_{x_j}\psi \} } .\end{equation}
Thus, $V^p(\psi)=V^p(\psi; \Z^d)$.
Quasi-shift invariant spaces are also called {\it Spline-type spaces} in \cite{F}, \cite{FMR} \cite{FO}, \cite{Ro}.
Following \cite{AST}, \cite{CS},
we say that the translates $\{\tau_{x_j}\psi \}_{j\in\Z^d}$ form a p-Riesz basis in $V^p ( \psi;\,X) $ if there exist constants $A,\ B>0$ such that, for every finite set of coefficients $\vec d=\{d_j\} \subset\mathbb C $,
\begin{equation}\label{E-p-basis-2}
A \|\vec d\|_{\ell^p} \leq \| \sum_j d_j\tau_{ x_j} \psi \|_{p} \leq B\|\vec d\|_{\ell^p}.
\end{equation}
Here and throughout the paper, we have let $\|f\|_p =\left(\int_{\mathbb R^d} |f(x)|^p dx\right)^{\frac 1p}$ and $\|\vec c\|_{\ell^p}=(\sum_{j\in\Z^d} |c_j|^p)^{\frac 1p}$. If $x=(x_1, ...,\, x_d), \ y=(y_1, ...,\, y_d) \in\mathbb R^d$, we will often let $ x\cdot y= x_1y_1+...+x_dy_d $ and $|x |_2
=( x\cdot x )^{\frac 12}$. We will also let $|x|_\infty= \sup_{1\leq j\leq d}|x_j|$.
If \eqref{E-p-basis-2} holds, then it is possible to prove that
\begin{equation}\label{d-Vp} \footnote{ A proof of this identity was kindly provided to us by K. Hamm}
V^p(\psi; X)= \{ f= \sum_{k\in\Z^d}d_k \tau_{x_k}\psi(x), \ \vec d \in \ell^p\ \}
\end{equation}
and the sequence $\{d_k \}_{k\in\Z^d}$ is uniquely determined by $f$.
$p-$Riesz bases
allow a stable reconstruction of functions in $V^p(\psi; X)$; when $X=\Z^d$ and $\mathcal B=\{\tau_j\psi\}_{j\in\Z^d}$ is a $p-$Riesz basis of $V^p(\psi)$,
the coefficient $d_j$ in \eqref{d-Vp} can be expressed in an unique way in terms of the functions in the dual basis of $\mathcal B$. See \cite{S}, \cite{BR} and also \cite{AS} for explicit reconstruction formulas.
When $\psi$ has compact support, it is known (see e.g. \cite[Prop. 1.1]{AS}, \cite{JM}, \cite{R}) that $\mathcal B$ is a $p-$Riesz basis in $V^p(\psi)$ if and only if
$ \sum_{m\in\Z^d} |\hat\psi(y+m)|^2\ne 0 $ for every $y\in [-\frac 12, \frac 12)^d$ and every $m\in\Z^d$. See also Lemma \ref{L-eq-cond-V2} in Section 2.
We have denoted with $\hat\psi(y)=\int_{\mathbb R^d} e^{2\pi i x\cdot y}f(x)dx$ the Fourier transform of $\psi$.
The proof of the aforementioned result relies on the lattice structure of $\Z^d$ and on standard Fourier analysis technique and does not easily generalize to other sets of translations.
Let $\psi\in L^p(\mathbb R^d)$, $1\leq p< \infty$, and let $X=\{x_j\}_{j\in\Z^d}$ be a discrete set of $\mathbb R^d$. It is natural to consider the following problem:
\noindent
{\bf Problem 1.} {\it Let $\mathcal B_X=\{\tau_{x_j}\psi \}_{j\in\Z^d}$ be a $p-$Riesz basis for $V^p ( \psi; X ) $; can we find $\delta>0$ so that, for every $Y=\{y_j\}_{j\in\Z^d}\subset \mathbb R^d$ with $ \sup_j|y_j-x_j|_2 <\delta $, the set $\mathcal B_Y=\{\tau_{y_j}\psi \}_{j\in\Z^d}$ is a $p-$Riesz basis for $V^p ( \psi;\, X )$? }
This problem cannot be solved if $\psi$ has compact support. For example, let $\psi(x)=\rect(x)$ be the characteristic function of the interval $[-\frac 12, \frac 12)$ and let $X=\Z$; let $Y=\{y_n\}_{n\in\Z}$ be such that $y_0=\delta>0$
and $y_n=n$ when $n\ne 0$. All functions in $ V^p(\rect;\,Y)$ vanish in the interval $[-\frac 12, -\frac 12+\delta]$ and so $ V^p(\rect;\, Y)\ne V^p(\rect)$.
We prove in Section 3 that Problem 1 can be solved when $p=2$ and $\psi$ is {\it band-limited}, i.e., when the Fourier transform of $\psi$ has compact support. See also Section 5 for more remarks and comments on Problem 1.
We are concerned with the following problem:
\noindent
{\bf Problem 2.} {\it With the notation of Problem 1: let $\mathcal B_X $ be a $p-$Riesz basis for $V^p ( \psi;\, X ) $ and let $Y=\{y_n\}_{n\in\Z^d}$ that satisfies $\sup_n|y_n-x_n|_2<\delta$; is $\mathcal B_Y $ a $p-$Riesz basis for $V^p ( \psi;\, Y )$ whenever $\delta$ is sufficiently small?
}
It is proved in \cite{FMR} that Problem 2 always has a solution when $X$ is {\it relatively separated}, i.e., when $X= X_1\cup...\cup X_k$, with $X_j=\{x_{j,n}\}_{n\in\Z^d} $ and $\inf_{n\ne m} |x_{j,n}-x_{j,m}|_2 >0$.
In Section 2 we prove the following theorem.
\begin{theorem}\label{C-PW} Suppose that $\psi$ is in the Sobolev space $ W^{1,p}(\mathbb R^d) $, with $1<p<\infty$, and that $\{\tau_{x_j}\psi\}_{j\in\Z^d}$ is a $p-$Riesz basis of $V^p(\psi; X)$. For every $j\in\Z^d$ there exists $\delta_j>0$ such that $\{\tau_{y_j}\psi\}_{j\in\Z^d}$ is a $p-$Riesz basis of $V^p(\psi;\, Y)$ whenever $|x_j-y_j|_2<\delta_j$.
\end{theorem}
We recall that $W^{1,p}(D)$ is the space of $L^p(D)$ functions whose partial distributional derivatives are also in $L^p(D)$ and that
$W^{1,p}_0(D)$ is the closure of $ C^\infty_0(D)$ in $W^{1,p}(D)$.
When $X$ is not relatively separated the $\delta_j$'s in Theorem \ref{C-PW} may not have a positive lower bound, but we can still solve Problem 2 in the cases considered in Theorems \ref{T-non-bandlimited} and \ref{T-stab-Vpsi-bis} below.
\begin{theorem}\label{T-non-bandlimited} Assume that $\psi \in L^1(\mathbb R^d)\cap L^2(\mathbb R^d)$ satisfies
\begin{equation}\label{e-amalgam-psi}
0<c=\sum_{ k\in\Z^d} \inf_{x\in [0,1)^d} | \hat \psi(x+ k)| ^2< \sum_{k\in\Z^d} \sup_{x\in [0,1)^d} | \hat \psi(x+k)| ^2 =C<\infty.\end{equation}
Then, Problem 2 can be solved when $p=2$ and $\{e^{2\pi i x_n\cdot x}\}_{n\in\Z^d}$ is a Riesz basis in $L^2([0,1)^d)$.
\end{theorem}
We recall that the {\it amalgam space} $W(L^\infty,\, \ell^q)$ is the set of measurable functions $f:\mathbb R^d\to\mathbb C$ for which
$ ||f||_{W(L^\infty,\, \ell^q)}=\left(\sum_{n\in\Z^d} \sup_{x\in [0,1)^d} |f(x+n)|_2^q \right)^{\frac 1q}<\infty.$ The amalgam space $W(L^r,\, \ell^q)$ can be defined in a similar manner.
The assumption \eqref{e-amalgam-psi} implies that $\hat\psi$ is in the {\it amalgam space} $W(L^\infty,\, \ell^2)$.
From Theorem \ref{T-non-bandlimited} follows that if $\psi $ satisfies
\eqref{e-amalgam-psi}, then $ \{\tau_{y_k}\psi\}_{k\in\Z^d}$ is a $2-$Riesz basis of $V^2(\psi, Y)$ whenever $|k-y_k|_\infty<\frac 14$. See the remark after the proof of Theorem \ref{T-non-bandlimited} in Section 4.
Exponential Riesz bases in $L^2(0,1)$ are completely understood and classified \cite{Pavlov}. To the best of our knowledge, no such characterization exists for exponential bases on $L^2((0,1)^d)$ when $d>1$.
For our next theorem we consider $\psi$ in the Sobolev space $W^{1,p}_0(\mathbb R^d)$;
we denote with $\partial_j\psi=\frac{\partial\psi}{\partial x_j}$ the partial derivative (in distribution sense) of $\psi$ and we let $\nabla \psi= (\partial_1\psi,\, ...,\, \partial_d\psi)$ be the gradient of $\psi$. Let $Y=\{y_k\}_{k\in\Z^d}$ and $$L=\sup_{k\in\Z^d}|{ y_{k}}-k|_2<\infty.$$ We prove the following
\begin{theorem}\label{T-stab-Vpsi-bis}
Let $D= (a_1,\,b_1)\times...\times (a_d, \,b_d)$ be a bounded rectangle in $\mathbb R^d$. Let
$\psi\in W^{1,p}_0(D)$, with $1\leq p<\infty$, and let $\{\tau_k \psi \}_{k\in\Z^d}$ be a $p-$Riesz basis of $V^p(\psi)$ with frame constants $0<A\leq B<\infty$.
If \begin{equation}\label{e-1}
C=L\sum_{j=1}^d (1+[b_j-a_j +L])^{p-1} \|\partial_j\psi \|_p^p< A, \end{equation} the set $\{\tau_{y_k}\psi\}_{k\in\Z^d}$ is a $p-$Riesz basis of $V^p(\psi;\, Y )$ with constants $B+C$ and $A-C$.
\end{theorem}
The proofs of Theorems \ref{T-non-bandlimited} and \ref{T-stab-Vpsi-bis} are in Section 3.
Our Theorem \ref{T-stab-Vpsi-bis} can be compared to \cite[Theorem 3.5]{FMR}. In this theorem it is assumed that $|\nabla(\psi)|$ is in the amalgam space $W(L^\infty,\, \ell^1)$,
and that $\inf_{x\in [0,1)^d}\sum_{k\in\Z^d} |\nabla \psi(x+k)|_2 >0$.
In the aforementioned theorem is proved that $\{\tau_{y_k}\psi\}_{k\in\Z^d}$ is a Riesz basis of $V^2(\psi;\, Y )$
if $C'=L^2(1+2L)^{2d}||\nabla \psi||_{W(L^\infty,\, \ell^1)}^2<A$. Generalizations to functions for which $|\nabla(\psi)|$ is in the { amalgam space} $W(L^q,\, \ell^1)$, with $q>d$ are also possible (see Remark 3.2 in \cite{FMR}).
Our Theorem \ref{T-stab-Vpsi-bis} reduces to \cite[Theorem 3.5]{FMR} when $p>d$ and $\psi$ has compact support. For example,
when $\psi$ has support in $[0,1)^d$, the norm in $W(L^p, \ell^1)$ reduces to $||\nabla \psi ||_p $.
The constant $C$ in Theorem \ref{T-stab-Vpsi-bis} may be smaller than $C'$ defined above when the support of $\psi$ is small.
Theorem \ref{T-stab-Vpsi-bis} does not apply when $\psi=\rect$ or when $\psi$ is a step function.
For $J\ge 1$, we let $ {\cal S}_J =\left\{s(t)=\sum_{|j|\leq J} s_j \rect( t-j),\, s_j\in\mathbb R\right\}\, $. We let $p'=\frac{p}{p-1}$ be the dual exponent of $p$.
The following theorem is proved in Section 4.2.
{\begin{theorem}\label{T-step}
Assume that $g\in {\cal S}_J$ and that $ \{\tau_k g\}_{k\in\Z} $ is a $p-$Riesz basis for $V^p(g)$,
with frame constants $A$ and $B$. If
$$2^p L J \, \|g\|_{p'}^{p } <A\, ,$$
the sequence $\{\tau_{y_k}g\}_{k\in\Z} $ is a Riesz basis for $V^p(g; Y)$.
\end{theorem}
\noindent
{\it Acknowledgement.} We are grateful to the anonymous referee of this paper for her/his thorough reading of our manuscript and for providing suggestions that have improved the quality of our work.
We also wish to thank K. Hamm for providing a proof of the identity \eqref{d-Vp} for $p\ne 2$.
\section{ Preliminaries}
\subsection{Notation}
We denote with $\l f ,\, g \r =\int_{\mathbb R^d} f(x)\bar g(x)dx$ and $\|f\|_2= \sqrt{\l f, f\r}$ the standard inner product and norm in $L^2(\mathbb R^d)$. For a given $p\in\mathbb R^d$ and $\delta>0$, we let $B(p,\delta ) =\{x\in\mathbb R^d : | x-p|_2<\delta\}$.
We let $\rect(x)= \chi_{[-\frac 12, \frac 12)}(x)$ be the characteristic function of the interval $[-\frac 12, \frac 12)$ and
$\beta^{s }=\rect^{(s+1)}(x)=\rect*... *\rect(x) $ be the $s+1-$times iterated convolution of $\rect $. The function $\beta^{s}(x)$, a piecewise polynomial function of degree $s $, is a {\it B-spline} of order $s $.
See \cite{Sch2}, where the B-splines were first introduced, and \cite{PBP}, \cite{UAE} and the references cited there.
\subsection{$p-$ Riesz bases}
Recall that a Schauder basis in a separable Banach space $V$ is a linearly independent set $\mathcal B=\{v_j\}_{j\in\Z}$ such that $\overline{span(\mathcal B)}=V$ and
there exists a sequence of bounded linear functionals $f_j : V\to\mathbb C$ (the {\it functional coefficients of the basis}) such that
$x=\sum_j f_j(x) v_j$ for every $x\in V$.
Following \cite{CCS}, \cite{CS} and \cite{AST}, we say that $\mathcal B$ is a $p-$Riesz basis of $V$, with $1\leq p< \infty$, if $\overline{ \Span(\mathcal B) }=V$, if every series $\sum_n a_nv_n$ converges in $V$ when $\vec a=(a_n)_{n\in\Z}\in \ell^p$ and
if there exist constants $A,\ B>0$ such that, for every finite sequence of coefficients $\{d_j\}_{j\in\Z} \subset\mathbb C $, we have
$$
A \|\vec d\|_{\ell^p} \leq \| \sum_j d_jv_j \|_{p} \leq B\|\vec d\|_{\ell^p}.
$$
Every $p-$Riesz basis is a Schauder basis. As mentioned in the introduction, when $V=V^p(\psi)$ and $\psi$ has compact support,
the functional coefficients of the basis $\{\tau_k\psi\}_{k\in\Z}$ of $V^p(\psi)$ can be written in terms of the dual functions of the basis.
The following results are well known (see e.g. \cite[Prop. 1.1]{AS}, \cite{JM}, \cite{R}).
\begin{lemma}\label{L-eq-cond-V2} a) Let $\psi\in L^p_0(\mathbb R^d) $. The set $\mathcal B=\{\tau_k\psi\}_{k\in\Z^d}$ is a $p-$Riesz basis in $V^p(\psi)$ if and only if
\begin{equation}\label{cond-sum} \sum_{m \in\Z^d} |\hat \psi(y+m)|^2\ne 0 \quad \mbox{for every $y\in [-\frac 12, \frac 12)^d$}.\end{equation}
b) If $\psi \in W(L^\infty, \ell^1)$ is continuous and if $\mathcal B$ is Riesz basis in $V^2(\psi)$, then
$\mathcal B$ is a $p$-Riesz basis in $V^p(\psi)$ for every $1\leq p<\infty$.
\end{lemma}
\begin{proof} For the convenience of the reader we prove that if $ \psi \in L^2_0(\mathbb R^d)$,
$\mathcal B$ is a Riesz basis of $V^2(\psi)$ with constants $0< A\leq B<\infty$ if and only if the following inequality holds for every $ y\in Q=[-\frac 12, \frac 12)^d .$
\begin{equation}\label{e-pointwise-FT}
A\leq \sum_{m \in\Z^d} |\hat \psi(y+m)|^2 \leq B.
\end{equation}
We can verify (using e.g. the Poisson summation formula) that the function in \eqref{cond-sum} is continuous in $\overline Q$, and so \eqref{e-pointwise-FT} is equivalent to \eqref{cond-sum}.
Let $\{c_k\}_{k\in\Z^d}\subset\mathbb C $ be a finite set of coefficients such that $\sum_k|c_k|^2=1$.
The Fourier transform of $f=\sum_{k\in\Z^d}c_k\tau_k\psi$ is
$$
\hat f(y)=\hat\psi(y)\sum_{k\in\Z^d}c_k e^{2\pi i y\cdot k} = \hat\psi(y)M(y).
$$
and by Plancherel's theorem
$ \displaystyle
\|f\|_2^2= \|\hat f\|_2^2=\sum_{m \in\Z^d} \int_{m+[-\frac 12, \frac 12)^d } |\hat f(y)|^2 dy
=
\sum_{m \in\Z^d}\int_{Q } |\hat \psi(y+m)|^2 |M(y)|^2 dy =\int_{Q } |M(y)|^2 \sum_{m \in\Z^d} |\hat \psi(y+m)|^2 dy.
$
Let $g= \sum_{m \in\Z^d} |\hat \psi(y+m)|^2 $; if \eqref{e-pointwise-FT} holds, from $||f||_2^2= \int_Q |M(y)|^2 g(y)dy $ and $\int_{Q } |M(y)|^2dy =\sum_k |c_k|^2=1$ follows that $A\leq ||f||_2^2\leq B$.
Conversely, from
$A \sum_k |c_k|^2\leq \|f\|_2^2\leq B \sum_k |c_k|^2 $ and the above considerations, follows that
\begin{equation}\label{e3}
A \|M^2 \|_{L^1(Q )} \leq \int_{Q } |M(y)|^2 g(y)dy \leq B \|M^2 \|_{L^1(Q )} .
\end{equation}
Every non-negative $h\in L^1(Q )$ can be written as $ h= |M|^2$, with $M\in L^2(Q )$.
The dual of $L^1(Q )$ is $L^\infty(Q )$ and so $\|g\|_{L^\infty(Q )} = \sup_{\|h\|_{L^1(Q )}=1}\int_{Q } h(y) g(y)dy$. From \eqref{e3} it follows that
$ A\leq \|g\|_{L^\infty(Q )} \leq B
$
as required.
\end{proof}
We will use the following Paley-Wiener type result.
\begin{lemma}\label{L-PW} Let $X$, $Y\subset\mathbb R^d$ be countable and discrete. Suppose that $\{\tau_{x_j}\psi\}_{j\in\Z^d}$ is a $p-$Riesz basis of $V^p(\psi; X) $ with constants $A\leq B$.
If the inequality
$$\left\Vert\sum_j a_j(\tau_{x_j}\psi-\tau_ {y_j}\psi)\right\Vert_p^p\leq C\sum_n|a_n|^p
$$
holds for all finite sequences $\{a_n\}_{n\in\Z^d}\in\mathbb C$ with a constant $C<A$, the sequence
$\{\tau_{y_j}\psi\}_{j\in\Z^d}$ is a $p-$Riesz basis of $V^p(\psi;\, Y )$ with constants $A-C$ and $B+C$.
\end{lemma}
\begin{proof}
Assume that $\sum_n|a_n|^p =1$; we have:
$$
\left\Vert\sum_j a_j \tau_{y_j}\psi \right\Vert_p\leq \left\Vert\sum_j a_j(\tau_{x_j}\psi-\tau_ {y_j}\psi)\right\Vert_p+\left\Vert\sum_j a_j \tau_{x_j}\psi\right \Vert_p
\leq C+B
$$
and
$$
\left\Vert\sum_j a_j \tau_{y_j} \psi\right\Vert_p\ge \left\Vert\sum_j a_j \tau_{x_j} \psi\right\Vert_p-\left\Vert\sum_j a_j(\tau_{x_j}\psi-\tau_ {y_j}\psi)\right\Vert_p
\ge A-C.
$$
\end{proof}
\begin{proof}[Proof of Theorem \ref{C-PW} ]
Assume $p\in (1, \infty)$ and $\sum_j|a_j|^p=1$. Let $p'=\frac p{p-1}$ be the dual exponent of $p$ and let $\{\delta_j\}_{j\in\Z^d}$ be a sequence of positive constants such that
$\sum_j |\delta_j|^{p'} <\infty$.
We recall that, when $1<p<\infty$, a function $f\in L^p(\mathbb R^d)$ is in the Sobolev space $W^{1, p}(\mathbb R^d)$ if and only if there is a constant $c>0$ that depends on $f$ but not on $\delta$, such that
\begin{equation}\label{e-ineq-sob}
\omega_p(\delta, f)=\sup_{|t|<\delta} ||\tau_t f -f ||_p \leq c \delta
\end{equation}
for every $\delta>0$. Furthermore, one can choose $c=\|\nabla f\|_p $. See e.g. Prop. 9.3 in \cite{B}.
By \eqref{e-ineq-sob} and H\"older's inequality,
$$
\left\Vert\sum_j a_j (\tau_{x_j} \psi - \tau_{y_j}\psi)\right\Vert_p\leq \sum_j |a_j|\, \|\tau_{x_j} \psi - \tau_{y_j}\psi\|_p \leq c\sum_j |a_j|\, \delta_j
$$
$$\leq c \left( \sum_j |a_j|^p\right)^{\frac 1p}\left( \sum_j |\delta_j|^{p'}\right)^{\frac 1{p'}}=
c\left( \sum_j |\delta_j|^{p'}\right)^{\frac 1{p'}}.
$$
We can choose the $\delta_j$ so small that $ c\left( \sum_j |\delta_j|^{p'}\right)^{\frac 1{p'}} <A$ and use Lemma \ref{L-PW} to complete the proof.
\end{proof}
\section{Problem 1 $(p=2)$}
In this section we prove that Problem 1 can be solved when $p=2$ and $\hat\psi $ has compact support.
\begin{theorem}\label{T-band-lim} Let $\psi\in L^2(\mathbb R^d)$. Assume that $\hat\psi $ has compact support and that there exist constants $c,\ C>0$ such that $$c\leq\inf_{x\in \mathbb R^d}|\hat \psi(x)|\leq \sup_{x\in \mathbb R^d}|\hat \psi(x)|\leq C.$$
Let $\{\tau_{x_j}\psi\}_{j\in\Z^d}$ be a Riesz basis in $V^2(\psi, X)$.
There exists $\delta >0$ such that if $Y=\{y_j\}_{j\in\Z^d}\subset\mathbb R^d$ satisfies $\sup_j|x_j-y_j|<\delta $, then also $\{\tau_{y_j}\psi\}_{j\in\Z^d}$ is a Riesz basis of $V^2(\psi)$.
\end{theorem}
\begin{proof}
Let $D=\supp(\hat\psi)$.
When $p=2$, Plancherel theorem implies that the set $\{\tau_{x_j}\psi\}_{j\in\Z^d}$ is a Riesz basis in $V^2(\psi)$ if and only if the set $\{e^{2\pi i x_j\cdot x}\}_{j\in\Z^d} $ is a Riesz basis on $L^2(\mathbb R^d,\ \hat\psi\,dx)$.
Our assumptions on $\hat\psi$ imply that the norm on $ L^2(\mathbb R^d,\ \hat\psi\,dx)$ is equivalent to the norm on $ L^2(D )$ and that $\{e^{2\pi i x_j\cdot x}\}_{j\in\Z^d}$ is an exponential Riesz basis on $L^2(D)$.
Exponential Riesz bases on bounded domains of $\mathbb R^d$ are stable under small perturbations (see \cite{PW} and also Section 2.3 in \cite{KN}); we can find $\delta>0$ such that, if $Y=\{y_j\}_{j\in\Z^d}\subset\mathbb R^d$ satisfies $\sup_j|x_j-y_j|<\delta $, then also $\{e^{2\pi i y_j\cdot x}\}_{j\in\Z^d} $ is a Riesz basis on $L^2(D)$ and hence also in $L^2(\mathbb R^d,\ \hat\psi\,dx)$.
\end{proof}
\noindent
{\it Example.} Let $d=1$ and let $\psi=\sinc(x)=\frac{\sin(\pi x)}{\pi x}$; the Fourier transform of $\tau_k\psi(x)=\sinc (x-k)$ is $e^{2\pi i kx}\rect(x)=e^{2\pi i kx}\chi_{[-\frac 12, \frac 12)}(x)$, and so $V^2(\psi)$ is isometrically isomorphic to $ \overline{\Span\{e^{2\pi i jx}\rect(x)\}_{j\in\Z^d}} = L^2(-\frac 12, \frac 12)$.
By Kadec's theorem (\cite{K}, \cite{Y}) if $Y=\{y_n\}_{n\in\Z^d}\subset\mathbb R$ is such that $ \sup_n|y_n-n| \leq \delta < \frac 14$, the set $\{e^{2\pi i y_n x}\}_{n\in\Z^d}$ is still a Riesz basis of $L^2(-\frac 12,\frac 12)$ and therefore, the set $\{\sinc(x-y_n)\}_{n\in\Z^d}$ is a Riesz basis for $V^2(\sinc)$. Thus,
$V^2(\sinc; \, Y) =V^2(\sinc)$.
Things are not so clear when $p\ne 2$. For example, the trigonometric system $\mathcal B=\{e^{2\pi i n x}\}_{n\in\Z^d}$ is a Schauder basis in $L^p(-\frac 12, \frac 12)$ when $1<p<\infty$, but it is not a $p-$Riesz basis and the previous example cannot be generalized in an obvious way. Stability results for the Schauder basis $\mathcal B$ in $L^p(-\frac 12, \frac 12)$ are proved in
\cite{Russo} and in \cite{Sed16}.
\section{Problem 2}
In this section we prove Theorems \ref{T-non-bandlimited} and \ref{T-stab-Vpsi-bis}.
Let $X=\{x_n\}_{n\in\Z^d}$ and $\mathcal B =\{e^{2\pi i x\cdot x_n}\}_{n\in\Z^d}$. We first prove the following
\begin{lemma}\label{L-bases-amal}
Let $\psi\in L^2(\mathbb R^d)\cap L^1(\mathbb R^d)$ be as in \eqref{e-amalgam-psi}; if $\mathcal B$ is a Riesz basis in $L^2([0,1)^d)$ with constants $A_1$ and $B_1$, then $\{\tau_{x_n}\psi \}$ is a Riesz basis of $V^2(\psi, X)$ with constants $A=A_1c$ and $B=B_1 C$.
\end{lemma}
\begin{proof}
For $k\in\Z^d$, we let $c_k=\inf_{x\in (0,1]^d}| \hat \psi(x+ k)|^2$ and $C_k=\sup_{x\in (0,1]^d}| \hat \psi(x+ k)|^2$. Let $\{d_j\} $ be a finite set of complex coefficients such that $\sum_j |d_j|^2=1$. Since $\mathcal B$ is a Riesz basis in $L^2((0,1]^d)$, for every given $k\in\Z^d$ we have that
$$A_1\leq \left\Vert \sum_n d_n e^{-2\pi i x_n \cdot k} \, e^{2\pi i x_n \cdot y} \right\Vert_{L^2((0,1]^d)}^2\leq B_1
.$$
From this inequality it follows at once that
$$c_k A_1\leq \left\Vert \sum_n d_n e^{-2\pi i x_n \cdot k}e^{2\pi i x_n \cdot y} \hat\psi(.-k)\right\Vert_{L^2((0,1]^d)}^2\leq C_kB_1.
$$
With $c=\sum_{k\in\Z^d} c_k$ and $C=\sum_{k\in\Z^d} C_k = ||\psi||_{W(L^\infty,\, \ell^2)}^2$, we have
$$A_1 c\leq \sum_{k\in\Z^d}\left\Vert \sum_n d_n e^{ 2\pi i x_n \cdot (.-k)} \hat\psi(.-k) \right\Vert_{L^2((0,1]^d)}^2\leq B_1 C.
$$
In view of $\sum_{k\in\Z^d}\left\Vert g(.-k) \right\Vert_{L^2((0,1]^d)}= ||g||_2$, we obtain
$$A_1 c \leq \left\Vert \sum_n d_n e^{2\pi i x_n \cdot y} \hat\psi \right\Vert_2^2\leq B_1 C.
$$
By Plancherel's theorem, the latter is equivalent to $A_1 c \leq \left\Vert \sum_n d_n \tau_{x_n}\psi \right\Vert_2^2\leq B_1 C
$
and so $\{\tau_{x_k}\psi \}_{k\in\Z^d}$ is a Riesz basis of $V^2(\psi, X) $, as required. \end{proof}
\begin{proof}[Proof of Theorem \ref{T-non-bandlimited}] Let $\mathcal B =\{e^{2\pi i x\cdot x_n}\}_{n\in\Z^d}$ be a
Riesz basis in $L^2([0,1)^d)$; it is proved in \cite{PW} (see also Section 2.3 in \cite{KN}) that we can find $\delta>0$ such that, if $Y=\{y_j\}_{j\in\Z^d}\subset\mathbb R^d$ satisfies $\sup_j|x_j-y_j|_2<\delta $, then also $\{e^{2\pi i y_j\cdot x}\}_{j\in\Z^d} $ is a Riesz basis in $L^2([0,1)^d)$.
By Lemma \ref{L-bases-amal}, $\{\tau_{y_n}\psi \}$ is a Riesz basis of $V^2(\psi, Y) $.
\end{proof}
\noindent
{\it Remark.}
When $Y=\{y_k\}_{k\in\Z^d}$ is such that $\sup_{k\in\Z^d}|k-y_k|_\infty<\frac 14$, by the multi-dimensional generalization of Kadec's theorem proved in \cite{SZ} we have that $ \{e^{2\pi i y_j\cdot x}\}_{j\in\Z^d} $ is a Riesz basis in $L^2([0,1)^d)$ and
by Lemma \ref{L-bases-amal}, $\{\tau_{y_n}\psi \}_{n\in\Z^d}$ is a Riesz basis of $V^2(\psi, Y)$.
\subsection{Proof of Theorem \ref{T-stab-Vpsi-bis}}
In order to prove Theorem \ref{T-stab-Vpsi-bis} we need some preliminary result: first, we prove the following
\begin{lemma}\label{L-const-Vp} Let $(a,b)\subset\mathbb R$, with $a<b<\infty$, and let $1\leq p < \infty$. Let
$\psi\in L^p_0(a,b)$.
For every finite set of coefficients $\{\alpha_j\}\subset\mathbb C$, we have that
$$ \left\Vert\sum_k \alpha_k \tau_k\psi \right\Vert_p^p \leq \|\psi\|_p^p([b-a]+1)^{p-1} \sum_k |\alpha_k|^p
$$
where $[\ ]$ denotes the integer part.
\end{lemma}
\begin{proof}
For simplicity we let $a=0$. When $b\leq 1$ the supports of the $\tau_k\psi $'s are disjoint and
so $\|f\|_p^p=\left\Vert\sum_k \alpha_k \tau_k\psi \right\Vert_p^p =\|\psi\|_p^p\sum_k |\alpha_k|^p
$. When $b>1$ the supports of the $\tau_k\psi $ overlap, and there are at most $ [b]+1$ of such supports that intersect at each point. By the elementary inequality
$ \left(x_1+\dots+x_m\right)^p\leq m^{p-1} \left(x_1^p+\dots+x_m^p\right)$ (which is valid when the $x_j $ are non-negative) we have that
$$
|f(t)|^p = \left|\sum_{k} \alpha_k \tau_k\psi(t) \right|^p \leq ([b]+1)^{p-1} \sum_{k} |\alpha_k|^p|\tau_k\psi(t)|^p
$$
and so $\|f\|_p^p\leq ([b]+1)^{p-1} \|\psi\|_p^p\sum_k |\alpha_k|^p$
as required.
\end{proof}
Let $Y= \{y_k\}_{k\in\Z^d}$ be a discrete subset of $\mathbb R^d$.
Assume that $L=\sup_{k\in\Z^d}|{ y_{k}}-k|_2<\infty$. We prove the following
\begin{lemma}\label{L-stab-Vpsi}
Let $D=\prod_{j=1}^d [a_j,b_j] $ and let $\psi \in W^{1,p}_0(D)$. Then,
for every finite set of coefficients $\{\alpha_j\} \subset\mathbb C$ such that $\sum_k |\alpha_k|^p=1$, we have that
\begin{equation}\label{e11}\left\Vert\sum_k \alpha_k(\tau_k \psi -\tau_{y_k}\psi) \right\Vert_p^p \leq L\sum_{j=1}^d(1+[b_j-a_j +L])^{p-1} \|\partial_j\psi \|_p^p.
\end{equation}
\end{lemma}
\begin{proof} When $d=1$ and $D=(a,b)$, we prove that
\begin{equation}\label{e1}\left\Vert\sum_k \alpha_k(\tau_k \psi -\tau_{y_k}\psi )\right\Vert_p^p \leq L(1+[b-a +L])^{p-1} \|\psi'\|_p^p
\end{equation}
where $\psi'(t) $ denotes the distributional derivative of $\psi$.
Assume first that $y_k>k$.
Observing that
$
\psi (t+y_k)- \psi(t+k) =\int_k^{y_k}\psi'( t+x )dx
$ and that $ |k-y_k|\leq L$,
we have that
$$\left\Vert\sum_k \alpha_k(\tau_k \psi -\tau_{y_k}\psi) \right\Vert_p^p =\left\Vert\sum_k \alpha_k\int_{t+k}^{t+y_k}\psi'(x )dx \right\Vert_p^p $$
$$
\leq \left\Vert\sum_k|\alpha_k|\int_{t+ k}^{ t+k+L}|\psi'(x )|dx \right\Vert_p^p = \left\Vert\sum_k|\alpha_k|\tau_k g\right\Vert_p^p
$$
where we have let $g(t)=\int_{t }^{ t +L }|\psi'(x )|dx $.
It is easy to verify that $g(t)$ is supported in the interval $[a-L, b]$. Indeed,
$\psi' $ is supported in $[a,b]$ and so $g(t) \equiv 0$ whenever
$ t +[0,L]\cap[a,b]=\emptyset$. Thus, $g(t)\equiv 0$ when $t+L<a$ or $t >b$, or: $g(t)\equiv 0$ when $t\in \mathbb R-[a-L, b ]$, as required.
By Lemma \ref{L-const-Vp}
\begin{equation}\label{2}
\left\Vert\sum_k \alpha_k(\tau_k\psi -\tau_{y_k}\psi )\right\Vert_p^p\leq
\left\Vert\sum_k |\alpha_k| \tau_k g\right\Vert_p^p \leq (1+[b-a +L])^{p-1} \|g\|_p^p\, .
\end{equation}
We apply a change of variables and Minkowski's integral inequality; we gather
$$
\|g\|_p = \left\Vert \int_{t }^{ t +L}|\psi'(x )|dx\right\Vert_p =\left\Vert \int_{0 }^{ L}|\psi'(x+t )|dx\right\Vert_p
$$$$
\leq L \|\psi'\|_p
$$
which together with the inequality \eqref{2} concludes the proof of \eqref{e1}. When $y_k<k$ the proof is similar, but the function $g(t) $ defined above should be replaced by $g(t)=\int_{t-L }^{ t }|\psi'(x )|dx $, a function supported in the interval $[a, b+L]$.
When $d=2$ we can let $y_k=(y_{k,1}, y_{k,2})$ and $k=(k_1,\, k_2)$ and write
$$
\left\Vert\sum_k \alpha_k (\tau_k \psi- \tau_{y_k}\psi) \right\Vert_p $$$$\leq \left\Vert\sum_k \alpha_k(\tau_{(k_1, k_2)} \psi -\tau_{(y_{k, 1}, k_2) }\psi) \right\Vert_p + \left\Vert\sum_k \alpha_k(\tau_{(y_{k, 1}, k_2) }\psi -\tau_{(y_{k, 1}, y_{k,2}) } \psi) \right\Vert_p
$$
$$
= \left\Vert\sum_k \alpha_k(\tau_{ k_1 } \psi_1 -\tau_{ y_{k, 1} }\psi_1 ) \right\Vert_p +
\left\Vert\sum_k \alpha_k(\tau_{ k_2 }\psi_2 -\tau_{ y_{k,2} }\psi_2) \right\Vert_p
$$
where we have let $\psi_1= \tau_{(0,k_2)}\psi$ and $\psi_2= \tau_{(y_{k_1}, 0) }\psi$. The inequality \eqref{e1}, applied to $\psi_1$ and $\psi_2$, yields \eqref{e11}.
The case $d>2$ is similar.
\end{proof}
\begin{proof}[Proof of Theorem \ref{T-stab-Vpsi-bis}]
Follows from Lemmas \ref{L-PW} and \ref{L-stab-Vpsi}. \end{proof}
\subsection{ $\rect$ and step functions}
Since Sobolev spaces $W^{1,p}(\mathbb R)$ do not contain discontinuous functions, we cannot apply
Theorem \ref{T-stab-Vpsi-bis} when $\psi$ is a step function.
Let $\psi=\rect$;
it is immediate to verify that, for every $1\leq p<\infty$, the set $\{\tau_{j}\rect \}_{j\in\Z }$ is a $p-$Riesz basis of $ V^p(\rect)$ with frame constants $A=B=1$. Throughout this section we let $Y=\{y_k\}_{k\in\Z }\subset \mathbb R$, with $L=\sup_{k\in\Z^d} |y_k-k| $ and we assume $1\leq p<\infty$.
Lemma \ref{L-stab-Vo-Lp} below is an easy generalization of Lemma 10 in \cite{DV1}.
\begin{lemma}\label{L-stab-Vo-Lp}
Assume $0\leq L<1$. For every finite set of coefficients $\{\alpha_k\}_{k\in\Z }\subset\mathbb C$ we have that
\begin{equation}\label{e-cond-PW-p}\left\Vert\sum_k \alpha_k(\rect(t- k)-\rect(t- y_k))\right\Vert_p^p < 2^pL \sum_k |\alpha_k|^p.
\end{equation}
\end{lemma}
\begin{proof}
Assume $ \sum_k |\alpha_k|^p=1$.
Let
{\begin{equation}\label{e-sum-In}
f(t)=\sum_k \alpha_k\left(\rect(t- k)-\rect(t- y_k)\right)= \sum_k \alpha_k \chi_{I_k},
\end{equation}}
where $I_j $ denotes the support of $ \rect (t- j)-\rect (t- y_j)$.
When $ y_j\ne j$, $I_j$ is union of two intervals that we denote with $I_j^+$ and $I_j^-$. When $y_j>j$, we let
$$ I_j^-=( j-\frac 12, \ y_j-\frac 12),\quad I_j^+=( j+\frac 12, \ y_j+\frac 12).
$$
We use (improperly) the same notation to denote $I_{j}^{+}$ and $I_j^-$ also when $y_j<j$.
Since we have assumed $|y_h-h|\leq L<1$, for every given interval $J=I_h^{\pm}$ there is at most one other interval $I_k^{\pm}$ that overlaps with $J$; thus, for every $t\in\mathbb R$, the sum in \eqref{e-sum-In} has at most $2$ terms.
By the elementary inequality
$ \left(x_1+\dots+x_m\right)^p\leq m^{p-1} \left(x_1^p+\dots+x_m^p\right)$ we have that $|f(t)|^p \leq 2^{p-1} \sum_k |\alpha_k|^p \chi_{I_k}(t)$, and
$ \|f\|_p^p\leq 2^{p-1}\sup_k |I_k|= 2^{p-1}(2L) =2^p\,L$
and
the proof of the Lemma is concluded.
\end{proof}
Lemma \ref{L-stab-Vo-Lp} and Lemma \ref{L-PW} yield the following
\begin{theorem}\label{T-rect-X}
With the notation of Lemma \ref{L-stab-Vo-Lp}, the set $\{\tau_{y_k}\rect \}_{k\in\Z }$ is a $p-$Riesz basis in $V^p(\rect;\, Y)$ if $2^p L <1$.
\end{theorem}
\begin{corollary}\label{C-f*rect} Let $\psi_0\in L^1(\mathbb R)$ and let $\psi=\rect*\psi_0$. Suppose that $ \{\tau_k\psi \}_{k\in\Z }$ is a $p-$Riesz basis for $V^p(\psi)$.
For every finite set of coefficients $ \{ \alpha_{k} \}_{k\in\Z }\subset \mathbb C$ with $\sum_k|\alpha_k|^p=1$, we have that
$$
\left\Vert\sum_k \alpha_k(\psi(t- k)-\psi(t- y_k))\right\Vert_p^p < 2^p L \|\psi_0\|_1^p
$$
and the set $\{\psi(t-y_k)\}_{k\in\Z }$ is a $p-$Riesz basis for $V^p ( \psi; Y )$ whenever
\begin{equation}\label{eq:pv-p2}
2^pL\|\psi_0\|_1^p < A.\,
\end{equation}
\end{corollary}
\noindent
{\it Remark.} If $\hat\psi_0(x)\ne 0 $ for every $x\in\mathbb R$, then the set $ \{\tau_k\psi \}_{k\in\Z }$ is a $p-$Riesz basis for $V^p(\psi)$. Indeed, $ \sum_{k \in\Z } |\widehat \rect(x+k)|^2 =\sum_{k \in\Z } |\sinc(x+k)|^2\ne 0$ whenever $x\in [-\frac 12, \frac 12)$ and so also $\sum_{k \in\Z } |\hat\psi(x+k)|^2 = $ $\sum_{k \in\Z }|\hat\psi_0(x+k)\widehat{\rect}(x+k)|^2\ne 0$; by Lemma \ref{L-eq-cond-V2} the set $\{\tau_k\psi \}_{k\in\Z }$ is a $p-$Riesz basis for $V^p(\psi)$.
\begin{proof}[proof of Corollary \ref{C-f*rect}]
Let
$$ F (t) = \sum_ {k} \alpha_{ k}\left(\psi(t- k)-\psi(t- y_k)\right), \quad
f (y)=\sum_{k\in\Z } \, \alpha_{ k} \left(\rect( y- k )\!-\! \rect ( y- {y_{k}} )\right)$$
and we show that
$
\|F\|_p^p\leq 2^p L\|\psi_0\|_1^p .
$
We gather
\begin{align*}
F (t) &= \int_{-\infty}^\infty\!\!\! \psi_0(t-y) \! {{\sum_{k\in\Z }}\, \alpha_{ k} \left(\rect( y- k )\!-\! \rect ( y- {y_{k}} )\right)dy}
\\ &
= \psi_0 * f (t).
\end{align*}
Thus, by Young's inequality and Lemma \ref{L-stab-Vo-Lp},
$$
\|F\|_p^p\leq \|\psi_0\|_1 ^p\|f \|_p^p \leq 2^pL \|\psi_0\|_1 ^p
$$
and the proof of the corollary is concluded.
\end{proof}
Let $\beta_m(x)=\rect^{*(m+1)}$ be the B-spline of order $m>1$, i.e., the $(m+1)$-fold convolution of $\rect$ with itself. We recall that $\beta_m$ is supported in the interval $[-\frac {m+1}{2}, \frac {m+1}{2}]$ and $\beta_m(x) \in W^{1,p}(\mathbb R)$ whenever $m\ge 1$. It is easy to verify by induction on $m$ that
$\|\beta_m\|_p\leq 1$ and $\|\beta_m'\|_p\leq 2$.
It is known that $\{\tau_k\beta_m\} _{k\in\Z }$ is a Riesz basis of $V^2(\beta_m)$ whose Riesz constants $A(m)$ and $B(m) $ are explicitly evaluated in \cite{M}. See also \cite{SelRad16}.
By the observations after Lemma \ref{L-eq-cond-V2}, $\{\tau_k\beta_m\}_{k\in\Z }$ is a $p-$Riesz basis of $V^p(\beta_m)$ with constants $A_p(m)>0$ and $B_p(m)<\infty $.
We prove the following
\begin{corollary}\label{C-splines}
Assume that $L <2^{-p}A_p(m)$. Then, the set $\{ \tau_{y_k}\beta_m \}_{k\in\Z }$ is a $p-$Riesz basis of $V^p ( \beta_m, Y )$.
\end{corollary}
\begin{proof} We apply Corollary \ref{C-f*rect} with $\psi_0=\beta_{m-1}$.
\end{proof}
\noindent
{\it Remark.}
We could have used Theorem \ref{T-stab-Vpsi-bis} to prove Corollary \ref{C-splines}, but we would have obtained a smaller upper bound for $L$ (namely,
$ L <\frac{A_p(m)}{2(2+m)^{p-1}}$).
\begin{proof}[proof of Theorem \ref{T-step}]
Let $g(t)=\sum_{|j|\leq J} s_j\rect( t-j)$.
Let $\{\alpha_{k}\}_{k\in\Z } \subset\mathbb C$ be a finite set of coefficients such that $\sum_{k} |\alpha_{k}|^p=1$. Let
$$f(t) =\sum_{k} \alpha_{k}\, \left(g\left( t- k\right)-g\left( t- y_{k}\right) \right)\, .$$
As in previous theorems, we find conditions on $L$ for which $\|f\|_p^p<A$. We have
\begin{align*}
f(t) &=
\sum_{|j|\leq J} s_j \sum_{k} \alpha_{k} \,\left( \rect( t-j- k )-\rect( t-j- y_{k} ) \right)
\\ &= \sum_{|j|\leq J} s_j f_j(t). \end{align*}
By Minkowski and H\"{o}lder inequalities, and noting that $\sum_{|j|\leq J} |s_j |^q= \|g\|_q^q$, it follows that
\begin{align}\label{e-as2bis}
\|f\|_p \leq \sum_{|j|\leq J}|s_j | \|f_j\|_p&\leq \left( \sum_{|j|\leq J} |s_j |^{p'} \right)^{\frac 1{p'}}\left(\sum_{|j|\leq J}\|f_j\|_p^p\right)^{\frac 1p} \notag \\
&= \|g\|_{p'}\, \left(\sum_{|j|\leq J}\|f_j\|_p^p\right)^{\frac 1p}\, .
\end{align}
With the change of variables $ t-j=t'$ in the integral below, we obtain
$$
\int_\mathbb R |f_j(t)|^pdt= \int_\mathbb R\left|
\sum_{k} \alpha_{k} \left( \rect( t-j- k )-\rect( t-j- y_{k} ) \right)\right|^pdt
$$
$$
=
\int_\mathbb R\left|
\sum_{k} \alpha_{k} \left( \rect( t'- k )-\rect( t'- y_{k} ) \right)\right|^pdt'
$$
$$
=
\left\Vert\sum_k \alpha_k(\rect(t- k)-\rect(t- y_k))\right\Vert_p^p \, .
$$
From Lemma \ref{L-stab-Vo-Lp} it follows that the integral above is
$ \leq 2^p L $.
We gather: $\|f\|_p^p \leq 2^p L J \, \|g\|_{p'}^{p } $.
By assumption $2^p L J \, \|g\|_{p'}^{p }<A$, and by Lemma \ref{L-PW} Theorem \ref{T-step} follows.
\end{proof}
\section{Remarks and open problems}
We have discussed Problem 1 when $p=2$ and the Fourier transform of the window function $\psi$ has compact support. When $\psi$ is not band-limited, Plancherel's theorem implies that the set $\{\tau_{x_j}\psi\}_{j\in\Z^d}$ is a Riesz basis in $V^2(\psi, X)$ if and only if the set ${\mathcal V}= \{e^{2\pi i x_j\cdot x}|\hat\psi|\}_{j\in\Z^d} $ is a Riesz sequence in $L^2(\mathbb R^d)$, and hence a Riesz basis in $V=\overline{\mbox{Span}({\mathcal V})}$. By a theorem of Krein-Milman-Rutman (see e.g. \cite[Theorem 11]{Y}) for every $j\in\Z^d$ there exists $\epsilon_j>0$ such that every set of functions $\{g_j\}_{j\in\Z^d}\subset V$ is a Riesz basis of $V$ whenever $\|g_j-e^{2\pi i x_j\cdot x}|\hat\psi|\,\|_2<\epsilon_j$. We can find $\delta_j>0$ such that
$\|(e^{2\pi i x_j\cdot x} -e^{2\pi i y_j\cdot x})\hat\psi\|_2<\epsilon_j$ whenever $|x_j-y_j|_2<\delta_j$, but we do not know whether the $\delta_j$'s have a positive lower bound or not.
For functions $\psi$ in $L^p(\mathbb R^d) $ for every $p\in[1,\infty)$
it would be interesting to prove conditions that would ensure that a $q$-Riesz basis in $V^q(\psi, X)$ for some $q$ is automatically a $p$-Riesz basis in $V^p(\psi, X)$ for all $p$. Lemma \ref{L-eq-cond-V2} (b) shows that, for certain $\psi$, if the set $\{\tau_k\psi\}_{k\in\Z^d}$ is a 2-Riesz basis of $V^2(\psi)$, it is also a p-Riesz in $V^p(\psi)$ but the method of proof of this result does not generalize well to other sets of translations. Results in \cite{AldBaska} and \cite{ShinSun} may help generalize Lemma \ref{L-eq-cond-V2}.
It would also be interesting to define and investigate p-Riesz bases in quasi-shift invariant spaces $V^p(\psi, X)$ when $0<p<1$. Wavelets in $L^p$ with $0<p<1$ have been considered in \cite {GHT}. We feel that the results contained in Section 3 of \cite {GHT} may help the understanding of $V^p(\psi, X)$ when $0<p<1$.
\end{document}
|
\begin{document}
\title{Spatial Aggregation and Temporal Convolution Networks for Real-time Kriging}
\author{Yuankai Wu}
\email{[email protected]}
\orcid{0000-0003-4435-9413}
\affiliation{
\institution{McGill University}
\city{Montreal}
\state{Quebec}
\country{Canada}
\postcode{H3A 0C3}
}
\author{Dingyi Zhuang}
\email{[email protected]}
\affiliation{
\institution{McGill University}
\city{Montreal}
\state{Quebec}
\country{Canada}
\postcode{H3A 0C3}
}
\author{Mengying Lei}
\email{[email protected]}
\affiliation{
\institution{McGill University}
\city{Montreal}
\state{Quebec}
\country{Canada}
\postcode{H3A 0C3}
}
\author{Aurélie Labbe}
\email{[email protected]}
\affiliation{
\institution{HEC Montreal}
\city{Montreal}
\state{Quebec}
\country{Canada}
\postcode{H3T 2A7}
}
\author{Lijun Sun}
\email{[email protected]}
\authornotemark[*]
\affiliation{
\institution{McGill University}
\city{Montreal}
\state{Quebec}
\country{Canada}
\postcode{H3A 0C3}
}
\renewcommand{\shortauthors}{XXX and XXX, et al.}
\begin{abstract}
Spatiotemporal kriging is an important application in spatiotemporal data analysis, aiming to recover/interpolate signals for unsampled locations based on observed signals. The critical challenge for spatiotemporal kriging is how to effectively model and leverage the spatiotemporal dependencies within the data. Recently, graph neural networks (GNNs) have shown great promise for kriging. However, standard GNNs often require a carefully designed adjacency matrix and aggregation function for a specific problem. To address this issue, we present SATCN---Spatial Aggregation and Temporal Convolution Networks---to provide a universal and flexible solution to perform spatiotemporal kriging in various spatiotemporal datasets without the need for model specification. Specifically, we propose a novel spatial aggregation network (SAN) inspired by Principal Neighborhood Aggregation, which uses multiple aggregation functions to help one node gather diverse information from its neighbors. To simulate signals on unsampled sensors, a masking strategy that prevents the unsampled sensors from sending messages to their neighborhood is introduced to SAN. We capture temporal dependencies by the temporal convolutional networks, which allows our model to cope with data of diverse sizes. To make SATCN generalized to unseen nodes and even unseen graph structures, we employ an inductive strategy to train SATCN. We conduct extensive experiments on six real-world spatiotemporal datasets, including traffic speed, energy and temperature recordings. Our results demonstrate the superiority of SATCN over traditional and GNN-based kriging models.
\end{abstract}
\begin{CCSXML}
<ccs2012>
<concept>
<concept_desc>Information systems~Sensor networks</concept_desc>
<concept_significance>500</concept_significance>
</concept>
</ccs2012>
\end{CCSXML}
\ccsdesc[500]{Information systems~Spatial-temporal systems}
\ccsdesc[500]{Information systems~Sensor networks}
\keywords{Spatiotemporal data, Kriging, Graph Neural Networks}
\maketitle
\section{Introduction}
Spatiotemporal data is ubiquitous in a wide variety of domains, including traffic estimation/prediction, climate modeling, neuroscience, and earth sciences \citep{cressie2015statistics,banerjee2014hierarchical,atluri2018spatio}. In general, spatiotemporal data characterizes how a particular variable or a group of variables vary in space over time. For example, sea surface temperature data can help climate researchers to understand the effect of climate change and identify abnormal weather patterns; traffic speed data collected from a network of sensors can reveal the evolution of traffic congestion on a highway network over time. A unique property of spatiotemporal data is that the variables often show strong dependencies within/across both the spatial and the temporal dimensions. This property makes the data instances structurally correlated with each other. How to effectively model these dependencies and relations is a critical challenge in spatiotemporal data analysis.
This paper focuses on the \textit{spatiotemporal kriging} problem, of which the goal is to perform signal interpolation for unsampled locations given the signals from sampled locations during the same period. The most prevalent method for kriging is Gaussian Process (GP) regression \citep{williams2006gaussian}, which captures the correlation among all data points using a carefully specified kernel/covariance structure. For spatiotemporal problems, one can design an appropriate kernel structure to capture the correlations both within and across the spatial and temporal dimensions (see e.g., \citep{luttinen2012efficient}). However, GP is computationally very expensive in practice: the cost of GP models scales cubically with the number of data points, and learning accurate hyperparameters is very challenging for complex kernel structures with computational issues such as local optimum. An alternative for the large-scale dataset is the low-rank matrix/tensor completion \citep{bahadori2014fast,yu2016temporal,takeuchi2017autoregressive}. These models essentially impose a low-rank assumption to capture the global consistency in the data, and further include regularization structures to encode local consistency (e.g., autoregressive regularizer for temporal smoothness \citep{yu2016temporal} and Laplacian regularizer for spatial smoothness \citep{bahadori2014fast}). These models provide a powerful solution for missing data imputation when data is missing at random. However, matrix/tensor completion is essentially transductive \citep{zhang2019inductive}: for a new spatial location, we have to retrain the full model even with only minor changes in the spatial and temporal dimension \citep{wu2020inductive}. In addition, spatiotemporal kriging corresponds to a challenging whole-row missing scenario in a spatiotemporal matrix with zero global information, and thus model accuracy relies heavily on the specification of spatial Laplacian regularizer locally.
Recently, deep learning models have shed new light on spatiotemporal data analysis. In general, spatiotemporal data comes from readings on a sensor network and can be naturally modeled as time-varying signals on a spatial graph structure. Numerous neural network architectures for learning over graphs \citep{DBLP:conf/iclr/KipfW17, defferrard2016convolutional, xu2018powerful} have been proposed in recent years, and graph neural networks (GNN) have been widely applied in modeling spatiotemporal data. Although the spatiotemporal kriging problem can be considered a ``forecasting'' problem in space, most existing GNN-based studies only focus on the multivariate time series forecasting problem on a fixed spatial graph \citep{yu2018spatio, li2018diffusion, wu2019graph, dai2020hybrid}, which cannot be generalized to model unseen spatiotemporal locations with varying spatial graphs. Two recent studies have developed GNN-based models for spatiotemporal kriging: KCN \citep{appleby2020kriging} and IGNNK \citep{wu2020inductive}. \citet{appleby2020kriging} developed Kriging Convolutional Networks (KCN) as an extension of GNNs for kriging tasks. \citet{wu2020inductive} proposed Inductive Graph Neural Networks for Kriging (IGNNK). Both studies suggest that GNNs are promising tools for the real-time spatiotemporal kriging problem; however, two challenging issues remain to apply the models to diverse real-world applications. The first limitation is that all GNNs use a predefined rule to transform spatial information into an adjacency matrix (e.g., using Gaussian kernel as in \citep{wu2020inductive, appleby2020kriging}). The predefined rules for constructing the adjacency matrix play a crucial role in model performance, as the adjacency matrix determines how GNNs transform and aggregate information. 
In addition, the complex spatial dependencies make it difficult to specify an aggregation function of GNNs that can capture sufficient information of the target datasets \citep{corso2020principal}. To achieve high accuracy, the models require extensive fine-tuning on hyperparameters of the predefined rule and the type of aggregation function. Second, both existing GNN-based kriging models have not fully utilized temporal dependencies. For example, KCN \citep{appleby2020kriging} treats observation time as an additional feature for GNNs, and ignores the temporal modeling issue in spatiotemporal datasets. IGNNK, on the other hand, considers observations over a particular time period as features \citep{wu2020inductive}; as a result, it cannot handle inputs with different sizes of temporal windows.
To address the aforementioned limitations, we propose a general framework, called Spatial Aggregation and Temporal Convolution Networks (SATCN), for spatiotemporal kriging. We utilize temporal convolutional networks (TCN) to model the temporal dependencies and make our framework flexible on both spatial and temporal dimensions. To address the tuning issue in modeling spatial dependencies, we propose a novel Spatial Aggregation Network (SAN) structure inspired by Principal Neighborhood Aggregation (PNA)---a recent aggregation framework proposed by \citep{corso2020principal}. Instead of performing aggregation in a predefined adjacency matrix, each node in SAN aggregates the information of its $\mathcal{K}$-nearest neighbors together with the corresponding distance information. In addition, SAN allows for multiple different aggregators in a single layer. To provide SAN with generalization power for kriging tasks, we prevent those missing/unobserved nodes from sending messages to their neighbors. Finally, we train SATCN with the objective to reconstruct the full signals from all nodes. Our experiments on large-scale spatiotemporal datasets show that SATCN outperforms its deep learning and other conventional counterparts, suggesting that the proposed SATCN framework can better characterize spatiotemporal dependencies in diverse types of data. To summarize, the primary contributions of the paper are as follows:
\begin{itemize}
\item We design a node masking strategy for real-time kriging tasks. This universal masking strategy naturally adapts to all GNNs for spatiotemporal modeling with missing/corrupted data.
\item We leverage the temporal dependencies by temporal convolutional networks (TCN). With an inductive training strategy, our model can cope with data of diverse sizes on spatial and temporal dimensions.
\item We propose a spatial aggregation network (SAN)---an architecture combining multiple message aggregators with degree-scalers---to capture the complex spatial dependencies in large datasets.
\end{itemize}
\section{Related Work}
\subsection{Spatiotemporal Kriging}
Given a set of spatiotemporal data samples $\{x{\left(s^1, t^1\right)}, \ldots, x{\left(s^n, t^n\right)}\}$, the spatiotemporal kriging problem aims to learn a model (or function) $f$. Once the model is learned, it can be used to predict the value $x{(s^*, t^*)}$ at other spatiotemporal points $(s^*, t^*)$ \citep{cressie2015statistics}. Traditional kriging techniques were formulated as a weighted sum of the values of their known neighbors. For example, ordinary kriging uses the following
\begin{equation}
x{(s^*, t^*)} = \sum^N_{n =1} \lambda_n x{(s^n, t^n)},
\end{equation}
where $x{(s^n, t^n)}$ is the $n$-th known spatiotemporal point, $\lambda_n$ is the coefficient vector with constraint $\sum^N_{n = 1}\lambda_n = 1$. In spatiotemporal kriging, $\lambda_n$ is calculated based on a spatiotemporal variance (i.e., variogram) \citep{cressie1988spatial,sherman2011spatial}:
\begin{equation}
\gamma(h, u) = \frac{1}{2}{Var}\left[x{\left(s+h, t+u\right)} - x{(s, t)}\right],
\end{equation}
where $h$ and $u$ are the spatial lag vector and temporal lag, respectively.
Note that developing a parametric model for spatiotemporal variogram is not a trivial task given the unique spatial and temporal dependencies within data. Thus, applying spatiotemporal kriging to a certain spatiotemporal dataset is often involved with careful specification and learning of the variogram. This process is equivalent to fitting a Gaussian process regression model with a carefully specified kernel structure, as a function of both space and time \citep{cressie1988spatial,williams2006gaussian,luttinen2012efficient}.
\subsection{Graph Neural Networks}
Graph neural networks (GNNs) are proposed to aggregate information from graph structure. Based on the information gathering mechanism, GNNs can be categorized into spectral approaches and spatial approaches. The essential operator in spectral GNNs is the graph Laplacian, which defines graph convolutions as linear operators that diagonalize in the graph Laplacian operator \citep{mallat1999wavelet}. The generalized spectral GNN was first introduced in \cite{bruna2014spectral}. Then, \citet{defferrard2016convolutional} proposed to use Chebyshev polynomial filters on the eigenvalues to approximate the convolutional filters. Most of the state-of-the-art deep learning models for spatiotemporal data \citep{yu2018spatio, li2018diffusion,wu2019graph} are based on the concept of ChebNet. In \citep{wu2020inductive}, the authors proposed to train a spectral GNN for the inductive kriging task, demonstrating the effectiveness of GNNs for modeling spatial dependencies. However, in all spectral-based approaches, the learned networks are dependent on the Laplacian matrix. This brings two drawbacks to spectral-based approaches: 1) they are computationally expensive, as the information aggregation has to be performed on the whole graph; and 2) a GNN trained on a specific structure cannot be directly generalized to a graph with a different structure.
In contrast, spatial GNN approaches directly perform information aggregation on spatially close neighbors. In general, the commonalities between representative spatial GNNs can be abstracted as the following message passing mechanism \citep{gilmer2017neural}:
\begin{equation}
\begin{split}
a^l_{v} &= \operatorname{AGGREGATE}^{(l)}\left(\{x^l_u: u \in N(v) \}\right), \\
x^l_{v} &= \operatorname{COMBINE}^{(l)}\left(x^{l-1}_{v}, a^l_{v}\right),
\end{split}
\end{equation}
where $x^l_v$ is the feature vector of node $v$ at the $l$-th layer, $N(v)$ is a set of nodes adjacent to $v$, and $AGGREGATE$ and $COMBINE$ are parameterized functions. Here, $x^l_u$ is the message of node $u$ passing to its neighbors. Each node aggregates messages from their neighboring nodes to compute the next message. Spatial approaches have produced state-of-the-art results on several tasks \citep{dwivedi2020benchmarkgnns}, and demonstrate the inductive power to generalize the message passing mechanism to unseen nodes or even entirely new (sub)graphs \citep{hamilton2017inductive,velivckovic2018graph}. In \citep{appleby2020kriging}, spatial GNNs were applied to kriging task on a static graph. However, this work did not fully consider the temporal dependencies.
\section{Methodology}
\subsection{Problem Description}
Our work focuses on the same real-time spatiotemporal kriging task as in \citep{wu2020inductive} (see Figure \ref{Fig:2}). Let $[t_1, t_2] = \{t_1, t_1 + 1, \ldots, t_{2} - 1, t_2\}$ denote a set of time points. Suppose we have data from $n$ sensors during a historical period $[1, p]$ ($n = 8$ in Figure \ref{Fig:2}, corresponding to sensors $\{1,\ldots,8\}$). Note that we use three terms---sensor, location and node---interchangeably throughout this paper. We denote the available training data by a multivariate time series matrix $X \in \mathbb{R}^{n \times p}$. Our goal is to train a kriging model $\mathcal{S}(\cdot; \Lambda)$ based on $X$, with $\Lambda$ being the set of model parameters. Figure~\ref{Fig:2} also shows two test data examples with different sizes (in terms of the number of sensors and the number of time points---$k_1$ in sample \#1 and $k_2$ in sample \#2). It should be noted that in our setting the kriging tasks could vary over time (or for each test sample) given sensor availability: some sensors might not be functional, some sensors may retire, and new sensors can also be introduced. Taking the two test samples in Figure~\ref{Fig:2} as an example, our spatiotemporal kriging task is to estimate the signals $X^m_t \in \mathbb{R}^{n^m_t \times k_t}$ on $n^m_t$ unknown locations/sensors in orange (i.e., sensors \{9,10,11\} and \{9,11,12\}) based on observed data $X^o_t \in \mathbb{R}^{n^o_t \times k_t}$ (in green) from $n^o_t$ sensors. Given the variation in data availability, we prefer to have a model that is invariant to the sizes of the matrices $X^o_t$ and $X^m_t$.
\begin{figure}
\caption{Illustration of real-time kriging. Note that sensor availability (observed) for each test sample may vary: for example, test sample \#1 does not cover sensor \#8.}
\label{Fig:2}
\end{figure}
\begin{figure}
\caption{Overall architecture of SATCN. The color of the marks represents the reading value of the sensors.}
\label{Fig:1}
\end{figure}
\subsection{Spatial Aggregation and Temporal Convolution Networks (SATCN)}
We introduce SATCN---a novel deep learning architecture---as the spatiotemporal kriging model $\mathcal{S}(\cdot; \Lambda)$, where $\Lambda$ is the set of parameters. We design SATCN to generate the estimation of $\hat{X}_t^m$ based on input $X_t^o$ and additional information of the underlying graph, i.e., $\hat{X}_t^m = \mathcal{S}\left(X_t^o,\cdots;\Lambda\right)$. The proposed SATCN consists of four layers: two spatial aggregation networks (SANs) and two temporal convolutional networks (TCNs) in alternating order (see Figure~\ref{Fig:1}). The input size and output size of SATCN are ${n_t \times (k_t + u)}$ and ${n_t \times k_t}$, respectively, where $u$ means the size reduction as a result of TCN (e.g., if we use two TCNs with a width-$w_t$ kernel in SATCN, the size reduction will be $u = 2(w_t - 1)$). The spatial aggregation layer is built on a special graph neural network---principal neighborhood aggregation (PNA) \citep{corso2020principal}. Note that the input of SATCN also contains the unknown sensors on which we will perform kriging (see the \textit{masked spatial aggregation} layer in Figure~\ref{Fig:1}); however, we propose a masking strategy to forbid the unknown locations to send messages to their neighbors. We next introduce the details of SATCN.
\subsection{Training Sample and Adjacency Matrix Construction}
Our first step is to prepare training samples from the historical dataset $X$. The random sampling procedure is given in Algorithm~\ref{alg:A1}. The key idea is to randomly simulate $n_m$ unknown locations among the $n$ observed locations. As SATCN uses GNNs to capture the spatial dependencies, we construct a graph over locations/sensors for training. In such a graph, the $n_m$ unknown locations cannot pass messages to their neighbors. We define the adjacency matrix $A$ of this graph according to the following rule:
\begin{equation}
A_{ij} = \begin{cases}
1 - \frac{dist(s_i, s_j)}{d_{max}}, & \text{if } i \in \bar{\Omega}_{\mathcal{K}_j},\\
0, & \text{otherwise},
\end{cases}
\label{adj}
\end{equation}
where $\Omega$ is the set of unknown sensors with $|\Omega| = n_m$, $\bar{\Omega}_{\mathcal{K}_j}$ is the set of $k$-nearest neighbors for the $j$-th sensor in known set $\bar{\Omega}$, $dist(s_i, s_j)$ is the distance between the sensors $i$ and $j$, and $d_{max}$ is the maximum distance between any two sensors in the training data. In some applications, the missing locations are evolving with time (e.g., some observations from satellites are obscured by clouds). To deal with those cases, the adjacency matrix $A$ should be time-evolving, that is, the locations with missing data are always forbidden to send messages to their neighbors. We also set the values of masking locations to $0$, ensuring the model has no access to unknown observations; however, the set values have no impact on our model as the masking locations have been forbidden to send messages. Considering that SATCN contains two spatial aggregation layers, we expect the unknown sensors to also generate meaningful information after the aggregation in the first layer. Therefore, we define the adjacency matrix $\hat{A}$ of the subsequent SAN layer as:
\begin{equation}
\hat{A}_{ij} = \begin{cases}
1 - \frac{dist(s_i, s_j)}{d_{max}}, & \text{if } i \in {\mathcal{K}_j},\\
0, & \text{otherwise},
\end{cases}
\label{adj_}
\end{equation}
where ${\mathcal{K}_j}$ is the set of $k$-nearest neighbors for the $j$-th sensor.
\begin{algorithm}[t]
\caption{Training sample generation}
\label{alg:A1}
\begin{algorithmic}[1]
\REQUIRE Historical data $X$ from $n$ observed sensors/locations over period $[1,p]$ (size $n\times p$). \\ Parameters: window length $k$, SATCN temporal lag $u$, sample size $S$ in each iteration, and the maximum number of iterations $I_{\max}$. \\
\FOR {$\text{iteration} = 1:I_{\max}$}
\FOR {$\text{sample} = 1:S$}
\STATE Randomly choose a time point $j$ within range $\left[1+u, p-k\right]$. Let $J_{\text{sample}} = \left[j-u,j+k\right)$, $J^*_{\text{sample}} = \left[j,j+k\right)$.
\STATE Obtain submatrix signal $X_{\text{sample}} = X[:, J_{\text{sample}}]$ with size of $n \times (k + u)$, $X^*_{\text{sample}} = X[:, J^*_{\text{sample}}]$ with size of $n \times k$.
\STATE Generate a random set $\Omega_s$ with size $n_m$ (number of nodes selected as missing) with $n_m \leq n$.
\STATE Let $X_{\text{sample}}[\Omega_s, :] = 0$ to ensure the model has no access to unknown observations.
\STATE Construct adjacency matrix $A_{\text{sample}} \in \mathbb{R}^{n \times n}$ and $\hat{A}_{\text{sample}} \in \mathbb{R}^{n \times n}$ based on Eqs.~\eqref{adj} and \eqref{adj_}.
\ENDFOR
\STATE Use sets $\{X_{1:S}\}$, $\{X^*_{1:S}\}$, $\{A_{{1:S}}\}$, $\{\hat{A}_{{1:S}}\}$ to train SATCN.
\ENDFOR
\end{algorithmic}
\end{algorithm}
To train the model, we use all spatiotemporal points in $X^*_{1:S}$ as training labels. We use the mean squared error (MSE) term to measure the reconstruction loss between output $\mathcal{S}(X, A, \hat{A}; \Lambda)$ and true label $X^*_{1:S}$. The learning objective of SATCN is
\begin{equation}
\min_\Lambda \frac{1}{S} \sum^S_{s = 1} \|\mathcal{S}(X_s, A_s, \hat{A}_s; \Lambda) - X^*_s \|^2_F.
\label{loss}
\end{equation}
\subsection{Spatial Aggregation Network (SAN)}
Based on the constructed adjacency matrices $A_s$ and $\hat{A}_s$, the goal of performing spatial aggregation is to capture the distribution of messages that a sensor receives from its neighbors. Most existing approaches use a single aggregation method, with $mean$ and $weighted$ $sum$ being the most popular choices in spatiotemporal applications (see e.g., \citep{xu2018powerful, DBLP:conf/iclr/KipfW17}). However, the single aggregation method has two limitations: 1) in many cases, the single aggregation fails to recognize the small difference between different messages \citep{corso2020principal, dehmamy2019understanding}; and 2) the true distances to neighbor locations have a large impact on kriging, but existing GNNs do not fully consider the effect of distances. Therefore, for the spatiotemporal kriging task, it is necessary to introduce distance-sensitive aggregators in SATCN. Inspired by the recent work of principal neighborhood aggregation proposed by \citet{corso2020principal}, we leverage multiple aggregators on nodes and edges (links) to capture the spatial dependencies. Specifically, we introduce the following aggregators to SATCN:
\begin{itemize}
\item {\textbf{Mean:}} $\mu_i(x^l) = \frac{1}{k} \sum_{A^l_{ji} > 0} x^l_j$,
\item {\textbf{Weighted Mean:}} $\hat{\mu}_i(x^l) = \frac{\sum_{A^l_{ji} > 0} A^l_{ji} x^l_j}{\sum_{A^l_{ji} > 0} A^l_{ji}} $,
\item {\textbf{Softmax:}} $\text{softmax}_i(x^l) = \sum_{A^l_{ji} > 0} \frac{x^l_j \exp(x^l_j)}{\sum_{A^l_{ki} > 0}{\exp(x^l_k)}}$,
\item {\textbf{Softmin:}} $\text{softmin}_i(x^l) = -\text{softmax}_i(-x_i^l)$,
\item {\textbf{Standard deviation:}}
$\sigma_i(x^l)=\sqrt{RELU\left(\mu_i\left(\left({x^l}\right)^2\right) - \mu_i\left({x^l}\right)^2\right) + \epsilon}$,
\item {\textbf{Mean distance:}} $\mu^d_i(A^l) = \frac{1}{k} \sum_{A^l_{ji} > 0} A^l_{ji}$;
\item {\textbf{Standard distance deviation:}}
$\sigma^d_i(A^l)=\sqrt{RELU\left(\mu^d_i\left(\left({A^l_{:i}}\right)^2\right) - \mu^d_i\left({A^l_{:i}}\right)^2\right) + \epsilon}$,
\end{itemize}
where $x^l_j$ denotes the feature vector of node $j$ in the $l$-th layer, $A^l_{ji}$ denotes the distance from $j$ to $i$ given by the $l$-th layer adjacency matrix, $A^l_{ji}>0$ means that there exists a message passing flow from $j$ to $i$, and $\epsilon$ in the standard deviation is a small positive value to ensure $\sigma$ is differentiable.
{{Mean aggregation}} treats all neighbors of a node equally without considering the effect of distance. {{Weighted mean aggregation}} takes distance weights $A^l_{ji}$ into consideration. {{Softmax aggregation}} and {{Softmin aggregation}} give indirect measures for the maximum and minimum value of the received messages, which offer more generalization capability of GNN \citep{velivckovic2019neural}. The aggregations above are essentially the same as in \citep{corso2020principal}, capturing the variation of features that one node receives from its neighbors. In addition, we include {{Mean distance aggregation}} and {{Standard deviation distance aggregation}}, which characterize the distribution of spatial distances from a certain node to the neighboring nodes. To make GNNs better capture the effect of spatial distance, we suggest further adding linear and inverse linear scalers \citep{corso2020principal, xu2018powerful}:
\begin{equation}
\begin{split}
S_i^{linear}\left(agg_i\right) &= \frac{(\sum A^l_{:i}) agg_i}{deg}, \\ S_i^{inverse}(agg_i)& = \frac{deg \cdot agg_i}{\left(\sum A^l_{:i}\right)},
\end{split}
\end{equation}
where $deg$ is the average weighted degree of the adjacency matrix constructed by all training locations using the rule in Eq.~\eqref{adj_}.
We then combine the aggregators and scalers using the tensor product $\otimes$:
\begin{equation}
\bigoplus = \begin{bmatrix}
I \\
S^{linear} \\
S^{inverse}
\end{bmatrix} \otimes \begin{bmatrix}
\mu \\
\hat{\mu} \\
\text{softmax} \\
\text{softmin} \\
\sigma \\
\mu^d \\
\sigma^d
\end{bmatrix},
\end{equation}
where $I$ is an identity matrix, $\otimes$ is to multiply all scalers and aggregators together and then stack them on top of each other. We add weights and activation function to $\bigoplus x$ obtaining SAN:
\begin{equation}
X_{:t}^{l+1} = f^l\left(\Phi^l \bigoplus X_{:t}^l + b^l\right),
\end{equation}
where $X_{:t}^l \in \mathbb{R}^{n \times c_l}$ is the $l$-th layer output at $t$-th time point, $\Phi^l \in \mathbb{R}^{c_{l+1} \times n_{sc}n_{ag}c_l}$ is the $l$-th layer weights, $b^l \in \mathbb{R}^{c_{l+1}}$ is the bias, $n_{sc}$ is the number of scalers, $n_{ag}$ is the number of aggregators and $f^l$ is the activation function. For each time point $t$, the equal spatial aggregation operation with the same weights $\Phi^l$ and $b^l$ is imposed on $X^l_{:t} \in \mathbb{R}^{n \times c_l}$ in parallel. By this means, the SAN can
be generalized to 3D spatiotemporal variables, denoted as ``$\Phi^l \oplus_{S} X^l$''. For example, the input $X^1$ of SATCN has size $n \times (k + u)$, and $\bigoplus X_{:t}^1$ will result in an $n \times (k + u) \times 21$ tensor. Given $\Phi^1 \in \mathbb{R}^{c_2 \times 21}$, $\Phi^1 \oplus_{S} X^1$ will result in a tensor with size $n \times (k + u) \times c_2$. We illustrate the operation for the masked SAN (the first layer of SATCN) in Figure \ref{Fig:3}.
\begin{figure}
\caption{The masked spatial aggregation network. In this figure, an unknown star node colored in black is the nearest neighbor of the blue star node, but we prevent it from passing messages to the blue star. After the message aggregation and scaling in SAN, the blue star receives meaningful information. To visualize this process, we transform its star shape to the circle shape.}
\label{Fig:3}
\end{figure}
\subsection{Temporal Convolutional Network (TCN)}
To capture temporal dependencies, we take advantage of the
temporal gated convolution network (TGCN) proposed in \citep{yu2018spatio}. The advantages of TGCN on the kriging task include: 1) TGCN can deal with input sequences with varying length, allowing us to perform kriging for test samples with different numbers of time points; 2) TGCN is superior to recurrent neural networks (RNN) with fast training and lightweight architecture \citep{gehring2017convolutional}. As illustrated in Figure \ref{Fig:4}, a shared gated 1D convolution is applied on each node along the temporal dimension. A width-$w_l$ TGCN passes messages from $w_l$ neighbors to the target time point. Note that we do not use padding in our TGCN, and thus a width-$w_l$ TGCN will shorten the temporal length by $w_l - 1$. TGCN maps the input $X^l \in \mathbb{R}^{n \times k_l \times c_l}$ to a tensor
\begin{equation}
\left[P^l, Q^l\right] = \Gamma^l \ast X^l + b^l,
\end{equation}
where $\left[P^l, Q^l\right] \in \mathbb{R}^{n \times (k_l-w_l+1) \times 2c_{l+1}}$; $P^l$ and $Q^l$ are split in half, each with $c_{l+1}$ channels. $\ast$ is the shared 1D convolutional operation, and $\Gamma^l \in \mathbb{R}^{w_l \times c_l \times 2c_{l+1}}$ is the convolution kernel. A gated linear unit (GLU) with $P^l$ and $Q^l$ as inputs further adds nonlinearity to the layer's output: $P^l \odot \text{sigmoid}(Q^l) \in \mathbb{R}^{n \times (k_l-w_l+1) \times c_{l+1}}$. ``$\odot$'' represents the element-wise product. A benefit brought by TCN is that our model is not dependent on the length of the training/test time period. In our model, data of each time point is only related to other points within a receptive field determined by $w_l$. The length of the temporal window has no impact on our model, as time points beyond the receptive field cannot pass information to the target time point.
\begin{figure}
\caption{Illustration of a width-2 temporal gated convolution network (TGCN).}
\label{Fig:4}
\end{figure}
In SATCN, the first TGCN layer follows the masked SAN layer. We cannot use TGCN as the first layer for the kriging task: since the unknown nodes in the inputs contain no information, directly applying TGCN to them is meaningless. However, as the masked SAN aggregates neighboring information to the unknown nodes, TGCN can be applied to all the nodes in the outputs of the masked SAN.
\subsection{Output Layer}
As shown in Figure \ref{Fig:1}, the last TGCN layer maps the outputs of SAN into a feature map $X^l \in \mathbb{R}^{n \times k_l \times c_l}$. We apply a shared fully-connected layer to all spatiotemporal points in $X^l$ obtaining the final output $\hat{X}^* \in \mathbb{R}^{n \times k_l}$
\begin{equation}
\hat{X}^*_{s,t} = W^o X^l_{s, t,:} + b^o,
\end{equation}
where $W^o \in \mathbb{R}^{c_5 \times 1}$, $b^o \in \mathbb{R}^{1}$. $\hat{X}^* \in \mathbb{R}^{n \times k_l}$ is the final output of our model, and we use the loss function in Eq.~\eqref{loss} to train our model.
\section{Experiments}
In this section, we conduct experiments on several real-world spatiotemporal datasets to evaluate the performance of SATCN.
\subsection{Experiment Setup}
\subsubsection{Dataset Descriptions.}
We evaluate the performance of SATCN on six real-world datasets in diverse settings: (1) \textbf{METR-LA}\footnote{\url{https://github.com/liyaguang/DCRNN}} is a 5-min traffic speed dataset collected from 207 highway sensors in Los Angeles, from Mar 1, 2012 to Jun 30, 2012. (2) \textbf{NREL}\footnote{\url{https://www.nrel.gov/grid/solar-power-data.html}} contains 5-minutes solar power data collected from 137 photovoltaic power stations in Alabama in 2006. (3) \textbf{PeMS-Bay}\footnote{\url{https://github.com/liyaguang/DCRNN}} is another traffic speed dataset similar to METR-LA, collected from 325 sensors in the Bay Area from Jan 1, 2017 to May 13, 2017. (4) \textbf{NOAA}\footnote{\url{https://github.com/MengyangGu/GPPCA}} records the monthly global air and marine temperature anomalies from Jan 1999 to Dec 2018 with a $5^{\circ}\times 5^{\circ}$ latitude-longitude resolution. As an anomaly dataset, \textbf{NOAA} does not satisfy the low-rank assumption of matrix/tensor factorization based kriging method. (5) \textbf{MODIS}\footnote{\url{https://modis.gsfc.nasa.gov/data/}} consists of daily land surface temperatures measured by the Terra platform on the MODIS satellite with 3255 down-sampled grids from Jan 1, 2019 to Jan 16, 2021. It is automatically collected by \textit{MODIStsp} package in \textit{R}. This dataset poses a challenging task as it contains 39.6\% missing data. (6) \textbf{USHCN}\footnote{\url{https://www.ncdc.noaa.gov/ushcn/introduction}} consists of monthly precipitation of 1218 stations in the US from 1899 to 2019. The temporal resolutions of METR-LA, NREL and PeMS-Bay are all 5-min. With regards to spatial distance, we compute the pairwise haversine distance matrices for NREL, NOAA, MODIS and USHCN; both METR-LA and PeMS-Bay use travel distance to determine the reachability among sensors in the transportation network. For datasets except USHCN, we use the first 70\% (in time) and approximately 75\% locations/sensors (in space) to train the models. 
We perform spatiotemporal kriging on the remaining 25\% locations/sensors over the last 30\% temporal points. We use USHCN to evaluate model performance under sparse observations and insufficient training data: we use the first 50\% (in time) and approximately 30\% locations/sensors (in space) to train the models, and perform spatiotemporal kriging on the remaining 70\% locations/sensors over the last 50\% temporal points. The detailed description of each dataset is given in the supplemental material.
\subsubsection{Baseline Models.}
We choose both traditional kriging models and state-of-the-art deep learning models as our baseline models. The group of traditional models includes: (1) \textbf{kNN}: K-nearest neighbors, which estimates the signal on unknown sensors by taking the average values of \textit{K} nearest sensors in the network. (2) \textbf{OKriging}: ordinary kriging \citep{cressie1988spatial}, which corresponds to a Gaussian process regression model with a global mean parameter. We implement OKriging with the \textit{autoKrige} function in \textit{R} package \textit{automap}. The OKriging method only uses spatial dependencies. We tried to implement a spatiotemporal kriging baseline via the \textit{R} package \textit{gstat}. However, learning a proper spatiotemporal variogram is very challenging given the large size of the datasets. Thus, we did not include a spatiotemporal kriging baseline in this work. (3) \textbf{GLTL}: Greedy Low-rank Tensor Learning, a transductive tensor factorization model for spatiotemporal co-kriging \citep{bahadori2014fast}. GLTL can handle the co-kriging problem with multiple variables (e.g., $location\times time \times variable$). We reduce GLTL into a matrix version, as our task only involves one variable for all the datasets. In addition, we compare SATCN with the following state-of-the-art deep-learning approaches: (4) \textbf{IGNNK}: Inductive Graph Neural Network for kriging \citep{wu2020inductive}, an inductive model that combines dynamic subgraph sampling techniques and a spectral GNN \citep{li2018diffusion} for the spatiotemporal kriging task.
We use a Gaussian kernel to construct the adjacency matrix for GNN as in \citep{wu2020inductive}:
\begin{equation}
W_{ij} = \exp\left(-\left(\frac{\text{dist}\left(v_i , v_j\right)}{\sigma}\right)^2\right),
\end{equation}
where $W_{ij}$ stands for adjacency or closeness between sensors $v_i$ and $v_j$, $\text{dist}\left(v_i , v_j\right)$ is the distance between $v_i$ and $v_j$, and $\sigma$ is a normalization parameter. This adjacency matrix can be considered a squared-exponential (SE) kernel (Gaussian process) with $\sigma/\sqrt{2}$ as the lengthscale parameter. Different from \citep{wu2020inductive} which chooses $\sigma$ empirically, we first build a Gaussian process regression model based on the training data from one time point and estimate the lengthscale hyperparameter $l$, and then define $\sigma=\sqrt{2}l$. We find that this procedure improves the performance of IGNNK substantially compared with \citep{wu2020inductive}. (5) \textbf{KCN-Sage}: In \citep{appleby2020kriging}, several GNN structures are proposed for kriging tasks, and we use KCN-Sage based on GraphSAGE \citep{hamilton2017inductive} as it achieves the best performance in \citep{appleby2020kriging}. The original KCN models cannot be directly applied under the inductive settings. To adapt KCNs to our experiments, we use Eq.~\eqref{adj_} to construct the adjacency matrices of KCN-Sage and Algorithm~\ref{alg:A1} to train the model.
\subsubsection{Evaluation Metrics}
We measure model performance using the following three metrics:
\begin{equation}
\begin{split}
\text{RMSE} &= \sqrt{ \frac{1}{|N|} \sum_{i\in N} (x_i - \hat{x}_{i})^2}, \\ \text{MAE}&=\frac{1}{|N|} \sum_{i\in N} \left|x_i - \hat{x}_{i}\right|, \\
R^2 &= 1 - \frac{\sum_{i\in N} (x_i - \hat{x}_{i})^2}{\sum_{i\in N} (x_i - \bar{x} )^2},
\end{split}
\end{equation}
where $x_i$ and $\hat{x}_{i}$ are the true value and estimation, respectively, and $\bar{x}$ is the mean value of the data.
\subsubsection{Hyperparameters of SATCN} All SATCNs are implemented with a four-layer (SAN-TCN-SAN-TCN) structure. Note that deeper structures with dense or residual connections can be further explored in future work. For PeMS-Bay and METR-LA, feature sizes in the four layers are $[21, 64, 16, 8]$, respectively. For other datasets, the numbers of features are $[21, 48, 16, 8]$. In the first masked SAN layer, we use all the seven aggregators, while in the second SAN layer, we only use five by removing the mean distance and standard deviation distance aggregators. The number of neighbors $\mathcal{K}$ has a significant impact on the performance of SAN. For MODIS, the number of neighbors $\mathcal{K}$ is set to 4. For other datasets, $\mathcal{K}$ is set to 3. The detailed implementation of baseline algorithms is provided in the supplementary material. The window length $k$ in Algorithm~\ref{alg:A1} is set to 6 for all datasets.
\begin{table*}[!ht]
\caption{Kriging performance comparison of different models on six datasets.}
\label{tab:comparison}
\centering
\begin{tabular}{crrrrrrrrr}
\toprule
\multirow{2}{*}{} & \multicolumn{3}{c}{METR-LA} & \multicolumn{3}{c}{NREL} & \multicolumn{3}{c}{PeMS-Bay} \\
\cmidrule{2-10}
Model & RMSE & MAE & $R^2$ & RMSE & MAE & $R^2$ & RMSE & MAE & $R^2$ \\
\midrule
SATCN & \textbf{8.884} & \textbf{5.618} & \textbf{0.851} &\textbf{2.772} & \textbf{1.748} &\textbf{0.936} &\textbf{6.969} & \textbf{3.995} &\textbf{0.471} \\
IGNNK & {9.048} & {5.941} & {0.827} & {2.808} & {1.765} & {0.934} &8.951 &4.684 &0.123 \\
KCN-SAGE & 9.704 & 6.191 &0.802 &2.868 &1.814 &0.931 & 7.011 &4.099 &0.464 \\
kNN & 11.071 & 6.927 & 0.741 & 4.192 & 2.850 &0.810 & 7.687 & 4.391 & 0.356 \\
GLTL & 9.668 & 6.559&0.803 & 4.840 & 3.372 &0.747 & 9.524 & 5.480 & 0.010 \\
OKriging & - & - & - & 3.470 & 2.381 &0.869 & - & - & - \\
\midrule
& \multicolumn{3}{c}{NOAA} & \multicolumn{3}{c}{MODIS} & \multicolumn{3}{c}{USHCN}\\
\midrule
SATCN &\textbf{0.374} &\textbf{0.214} &\textbf{0.875} &\textbf{1.441} &\textbf{0.992} &\textbf{0.977} &\textbf{3.715} &\textbf{2.306} &\textbf{0.688} \\
IGNNK & 0.430 & 0.249 & 0.835 &- &- &- & 4.270 & 2.616 & 0.605\\
KCN-SAGE &0.385 &0.219 &0.872 &1.525 &1.016 &0.974 &3.727 &2.372 &0.686 \\
kNN &0.427 &0.241 &0.838 & 1.700 & 1.135 & 0.918 & 3.936 & 2.519 & 0.641 \\
GLTL & 0.975 & 0.605 & 0.202 &- &- &- & 4.281 & 2.845 & 0.575\\
OKriging &0.412 &0.234 &0.848 & 1.602 & 1.090 & 0.972 &3.932 &2.475 &0.641 \\
\bottomrule
\end{tabular}
\end{table*}
\subsection{Overall Performance}
\subsubsection{Performance Comparison.} In Table~\ref{tab:comparison}, we present the results of SATCN and all baselines on six datasets. As the spatial relationships of METR-LA and PeMS-Bay are determined by road reachability, we cannot apply OKriging---which directly defines locations in a geospatial coordinate system---on these two datasets. As can be seen, the proposed SATCN consistently outperforms other baseline models, providing the lowest error and the highest $R^2$ for almost all datasets. SATCN generally outperforms its spectral-based GNN counterpart---IGNNK---on those datasets. We also observe that SATCN and KCN-Sage take fewer samples to converge compared with the spectral approach IGNNK. It indicates that the spatial GNNs are more suitable for inductive tasks compared with the spectral ones.
\begin{figure*}
\caption{Kriging results in Jun 8th, 2020 of the MODIS dataset. Land temperature ground truth and the absolute error of SATCN, KCN-Sage, and OKriging, respectively. Panel (a) shows absolute errors, and panel (b) shows the unknown/corrupted locations under the cloud. Note that for (b) we do not have access to ground truth.}
\label{fig:modis_result}
\label{fig:modis_cloud}
\end{figure*}
\begin{figure}
\caption{Kriging performance on an unknown node in METR-LA dataset on July 28th, 2012.}
\label{fig:satcn_metr}
\end{figure}
Another interesting finding from Table~\ref{tab:comparison} is that SATCN performs well on MODIS, which contains 39\% missing data. IGNNK and GLTL fail to work on this dataset because the data shows substantial spatially correlated corruptions (i.e., the cloud). SATCN is robust to missing data as we always make sure that every location has $\mathcal{K}$ observable neighbors using the adjacency matrix construction rule given in Eq.~\eqref{adj}. We visualize the kriging results of a typical MODIS example in Figure~\ref{fig:modis_result}. In this case, a large fraction of the observations are covered by cloud. We find that OKriging gives more interpolation results with large errors, as there are more red points concentrated in the bottom-left region. This suggests that OKriging may fail to fit a proper variogram under sparse observations. SATCN slightly outperforms KCN-SAGE in this case because it can better utilize temporal dependencies and spatial information. We also visualize the interpolation results on the areas covered by clouds in Figure~\ref{fig:modis_cloud}. From the results we can find that SATCN gives more physically consistent predictions for areas covered by clouds. Results of KCN-SAGE and KNN contain more small-scale anomalies because they do not take advantage of temporal and distance information. OKriging generates over-smoothed predictions, which are not consistent with the observations of known areas. In the most challenging task---USHCN---in which we only have 30\% of sensors over 50\% of the time period for training, the spatial approaches SATCN and KCN-Sage still give accurate interpolations. IGNNK and GLTL, which use a Gaussian kernel adjacency matrix, are even worse than OKriging and kNN. The reason is that there will exist a large number of isolated nodes with a Gaussian adjacency matrix under sparse observations. In contrast, our adjacency matrix definition given in Eq.~\eqref{adj} makes sure that each sensor has $\mathcal{K}$ neighbors.
Figure~\ref{fig:satcn_metr} gives the temporal visualization of kriging results for one sensor from METR-LA dataset. It is clear that SATCN model produces the closest estimation toward true values. With the learned temporal dependencies, SATCN can better approximate the sudden speed drop of morning peak during 6:00 AM-9:00 AM due to the benefits from the temporal dependencies.
\subsubsection{Parameter Sensitivity Analysis:} SATCN has many parameter settings; the key parameters include the number of neighbors $\mathcal{K}$, the TCN kernel length $w$, and the choice of aggregators and scalers.
\begin{figure}
\caption{SATCN performance under different settings.}
\label{fig4a}
\label{fig4b}
\label{fig4c}
\label{fig4d}
\end{figure}
To evaluate the impact of $\mathcal{K}$, we fix TCN kernel length to [2, 2] and [3, 3] for METR-LA and NOAA datasets, and vary $\mathcal{K}$ in $\{2, 3, 4, 6, 8\}$. The results are reported in Figure~\ref{fig4a}. For METR-LA and NOAA, the models with three neighbors achieve the lowest RMSE. We speculate that both traffic and temperature data can only propagate in a small spatial range. The correlation between close locations is strong but diminishes quickly when the distance increases. The key difference between NOAA and METR-LA is that METR-LA performs poorly when the number of neighbors is 2. Conversely, the model with two neighbors performs well on NOAA. It indicates that the NOAA data covering the global temperature is more locally correlated.
To evaluate the impact of $w$, we fix $\mathcal{K}$ to 3, and vary $w$ from [1, 1] to [5, 5]. The results are given in Figure~\ref{fig4b}. Compared with $\mathcal{K}$, $w$ only affects SATCN marginally. Varying the temporal receptive field only has a little impact on SATCN. Surprisingly, the models with $w = [1, 1]$ also perform well. A potential reason is that all datasets have a large size, and in this case the strong spatial consistency alone could be sufficient to support the spatiotemporal kriging task. In other words, we can achieve a reasonably good result by performing kriging on each time snapshot.
The spatial aggregation network contains numerous aggregators and scalers. To distinguish their contribution, we also study the effects of different aggregator-scaler combinations (see Figures \ref{fig4c} and \ref{fig4d}). We evaluate several models: \textbf{WSC} denotes the model without linear and inverse linear scalers; \textbf{WST} denotes the model without standard deviation aggregator $\sigma$; \textbf{WD} denotes the model without distance-related aggregators $\hat{\mu}$, $\mu^d$ and $\sigma^d$; \textbf{WSO} denotes the model without softmax and softmin aggregators; \textbf{WME} denotes the model without mean related aggregators $\hat{\mu}$ and $\mu$. For NOAA and METR-LA, \textbf{WME} gives the worst performance. It suggests that the mean and normalized mean values of the neighbors contain the most important information for kriging. For METR-LA, \textbf{WST}, \textbf{WSO} and \textbf{WD} all give worse performance than the model with all aggregators. It suggests that using multiple aggregators can improve the kriging performance for the traffic speed dataset. For NOAA, deleting aggregators other than mean values does not have a great impact on the kriging performance. Because NOAA records temperature anomalies, the remaining aggregators are not as meaningful as the mean values. The linear and inverse linear scalers do not have a great impact in those two datasets. However, we still use them in our model. The reason is that they contain some global information in the training graph, as they are based on the average weighted degree of the graph constructed by all training locations.
\section{Conclusion}
In this paper, we propose a novel spatiotemporal kriging framework named SATCN. We use spatial graph neural networks to capture spatial dependencies and temporal convolutional networks to capture temporal dependencies in this framework. Specifically, SATCN introduces a masking strategy to forbid message passing from unobserved locations and multiple aggregators to allow the model to better understand the spatial dependencies. We evaluate SATCN on six real-world datasets varying from traffic speed to global temperature, showing that SATCN outperforms other baseline models in most cases. SATCN is robust to missing data, and it can still work on datasets whose missing data ratio is up to nearly 40\%. SATCN is flexible in dealing with problems of diverse sizes in terms of the number of the nodes and the length of time window. This flexibility allows us to model time-varying systems, such as moving sensors or crowdsourcing systems. The masked spatial aggregation network proposed in this paper can also be viewed as a graph neural network for general missing data cases. This framework can be further integrated into time series forecasting tasks under missing data scenarios.
\appendix
\section{Appendix}
\subsection{Data Description}
We use six real-world spatiotemporal datasets to evaluate our model (see Table \ref{tab:data} for an overview). They are:
\textbf{METR-LA}\footnote{\url{https://github.com/liyaguang/DCRNN}} consists of traffic speed information collected by highway loop detectors in Los Angeles \citep{li2018diffusion,wu2020inductive}. We follow the experiment settings of \citet{li2018diffusion} to select 4 months data from Mar 1st 2012 to Aug 30th 2012 with 207 sensors.
\textbf{NREL}\footnote{\url{https://www.nrel.gov/grid/solar-power-data.html}} contains many energy datasets, and here we choose the Alabama Solar Power Data for Integration Studies \citep{sengupta2018national}. This dataset includes 5-minute solar power records of 137 photovoltaic power plants in 2006. We follow the work of \citet{wu2020inductive} by only keeping the data from 7 am-7 pm everyday in order to attenuate this effect.
\textbf{PeMS-Bay}\footnote{\url{https://github.com/liyaguang/DCRNN}} is also a traffic speed dataset that is collected in Bay Area by Performance Measurement System (PeMS). Same as the work of \citet{li2018diffusion}, we choose 325 sensors from Jan 1st 2017 to May 13th 2017.
\textbf{NOAA}\footnote{\url{https://github.com/MengyangGu/GPPCA}} records the global gridded air and marine temperature anomalies from U.S. National Oceanic and Atmospheric Administration (NOAA) \citep{gu2020generalized}. NOAA contains the monthly data from Jan 1999 to Dec 2018 with $5^{\circ}\times 5^{\circ}$ latitude-longitude resolution. Following the work of \citep{gu2020generalized}, we leave out polar circles and choose 1639 observed grids. Since the largest 20 singular values account for only 46.6\% of the total, the dataset can hardly be considered low-rank.
\textbf{MODIS}\footnote{\url{https://modis.gsfc.nasa.gov/data/}} consists of daytime land surface temperature measured by the Terra platform on the MODIS satellite with 3255 downsampled grids from Jan 1, 2019 to Jan 16, 2021. It is automatically collected by the \textit{MODIStsp} package in \textit{R}. We select a region with fixed longitude-latitude bounds as the target (latitude: 30.5$\sim$37.6; longitude: -96.8$\sim$-89.7). Because clouds sometimes block the satellite's view, more than 39.6\% of the entries are missing in the collected data. Moreover, the missing entries generally form contiguous spatial areas, which makes this dataset particularly challenging for model training.
\textbf{USHCN}\footnote{\url{https://www.ncdc.noaa.gov/ushcn/introduction}} contains monthly precipitation of 1218 locations from 1899 to 2019, which is collected by the U.S. Historical Climatology Network (USHCN). As \citet{wu2020inductive} mentioned, the USHCN dataset is pretty dispersed, with a variance-to-mean ratio exceeding 500. Thus it can help examine the model performance on time series with substantial oscillations. We summarize these datasets in Table \ref{tab:data}.
\begin{table*}[!ht]
\small
\centering
\caption{Real-world spatiotemporal datasets description}
\begin{tabular}{cccccc}
\toprule
& Sensors & Time length & Distance type& Normalization parameter $\sigma$ & Missing ratio \\
& & (frequency)& & of adjacency matrix (IGNNK and GLTL) & & \\
\midrule
METR-LA & 207 & 34272 (5-min)& Travelling distance & 557 & 8.11\%\\
NREL & 137 & 105120 (5-min) & Haversine distance &14,000 & - \\
PeMS-Bay & 325 & 52116 (5-min)& Travelling distance & 1.5 & 0.003\%\\
NOAA & 1639 & 240 (1-month)& Haversine distance& 6,000,000 & -\\
MODIS & 3225 & 747 (1-day)& Haversine distance & 112,000 & 39.62\%\\
USHCN & 1218 & 1440 (1-month)& Haversine distance & 100,000 & 3.07\%\\
\bottomrule
\end{tabular}
\label{tab:data}
\end{table*}
\begin{table*}[!ht]
\small
\centering
\caption{Training and test Parameters for each dataset}
\begin{tabular}{ccccccc}
\toprule
{Parameters} & METR-LA & NREL & USHCN & NOAA & PeMS-Bay & MODIS \\
\midrule
window length $h$ (IGNNK) & 24 & 16 & 6 & 1 & 24 & - \\
number of evaluation windows (test) & 428 & 1971 & 72 & 72 & 2171 & - \\
number of kriging nodes ($n_t^u$) & 50 & 30 & 800 & 500 & 80 & - \\
sampled observed nodes size $n_o$ (IGNNK) & 100 & 100 & 600 & 1100 & 240 & - \\
sampled masked nodes size $n_m$ (IGNNK) & 50 & 30 & 300 & 400 & 80 & - \\
masked nodes size $n_m$ (SATCN and KCN-Sage) & 40 & 12 & 10 & 42 & 40 & 0 \\
hidden feature dimension $z$ &100 & 100 & 100 & 100 & 100 & - \\
activation function $\sigma$ & \textit{relu} & \textit{relu} & \textit{relu} & \textit{relu} & \textit{relu} & - \\
batch size ($S$) & 4 & 8 & 8 & 2 & 4 & - \\
number of iterations ($I_{\max}$) & 186750 & 287250 & 30750 & 63000 & 285000 & - \\
order of diffusion convolution & 2 & 2 & 2 & 2 & 2 & - \\
\bottomrule
\end{tabular}
\label{tab:para}
\end{table*}
\subsection{Benchmark Settings}
\textbf{kNN} For the traffic datasets METR-LA/PeMS-Bay/PeMSD4, the distance is based on the road distance in the traffic network, while the haversine distance is used for the geospatial datasets including NREL, NOAA, MODIS, and USHCN. For each dataset, we tune the best $k$ according to the lowest kriging RMSE errors. The selection of $k$ for different datasets includes: \textit{METR-LA:} 7; \textit{NREL:} 10; \textit{PeMS-Bay:} 4; \textit{USHCN:} 7; \textit{NOAA:} 2; and \textit{MODIS:} 3.
\textbf{OKriging} Ordinary kriging is a commonly used geostatistical method for interpolating values by fitting a prior spatial covariance in a Gaussian process. We use the \textit{Automap} package in \textit{R} to implement the ordinary kriging. To keep consistent with the construction of the adjacency matrix, we fix the variogram kernels as "Gaussian" and automatically learn their parameters by the \textit{autoKrige} function. Note that we apply the method at each snapshot. In other words, at each time point, we provide the corresponding values of observed nodes to the algorithm to obtain the interpolated column vector as the kriging result. This task is embarrassingly parallel across all time steps.
{\color{red}\textbf{GLTL} Greedy Low-rank Tensor Learning \citep{bahadori2014fast} is designed for both co-kriging and forecasting tasks for multiple variables. There is only one variable in our datasets. We implement this with the \textit{MATLAB} source code\footnote{\url{http://roseyu.com/code.html}} of the authors. We choose the \textit{Orthogonal} algorithm according to its superior performance in \citep{bahadori2014fast}. We set the maximum number of iterations to 1000 and the convergence stopping criteria to $1\times 10^{-10}$. For all the datasets, we use the same Gaussian kernel based adjacency matrices. The Laplacian matrix is calculated by $L=D-W$ where $D$ is a diagonal matrix with $D_{ii} = \sum_{j}W_{ij}$. As in the implementation of \cite{bahadori2014fast}, we rescale the Laplacian matrix by ${L}=\frac{L}{\max_{ij} L_{ij}}$ as well. An essential parameter is $\mu$ for the weight of the Laplacian regularizer, which is tuned by performing grid search over $\{0.05,0.5,5,50,500\}$. We reach convergence before the maximum 1000 iterations in all datasets. The tuned $\mu$ for different datasets are: \textit{METR-LA:} 0.5; \textit{NREL:} 50; \textit{PeMS-Bay:} 5; \textit{USHCN:} 5; \textit{NOAA:} 5; and \textit{MODIS:} 0.5.}
\textbf{IGNNK} Inductive Graph Neural Network for Kriging \citep{wu2020inductive} is a novel GNN based model that combines dynamic subgraph sampling techniques and diffusion graph convolution structure for the kriging task. We implement this from the Github repository\footnote{\url{https://github.com/Kaimaoge/IGNNK}} of \citep{wu2020inductive}. For datasets with distance information, it uses a Gaussian kernel to construct the adjacency matrix for GNN:
\begin{equation}
W_{ij} = \exp\left(-\left(\frac{\text{dist}\left(v_i , v_j\right)}{\sigma}\right)^2\right),
\label{distance_rule}
\end{equation}
where $W_{ij}$ stands for adjacency or closeness between sensors/nodes $v_i$ and $v_j$, $\text{dist}\left(v_i , v_j\right)$ is the distance between $v_i$ and $v_j$, and $\sigma$ is the normalization parameter, which is illustrated in Table \ref{tab:data}. We use Adam optimizer with learning rate 0.0001 to optimize the GNN training and set the maximum number of training episodes as 750. Apart from them, there are many identical model parameters defined for each dataset. We follow similar settings in the work \citet{wu2020inductive}, which are listed in Table \ref{tab:para}:
\textbf{KCN-Sage} We implemented a Pytorch version of KCN following the code\footnote{\url{https://github.com/tufts-ml/KCN}} given by the original authors. The authors implemented several graph neural network based kriging models, including GCN, GAT, and GraphSage. According to the experimental results, the KCN-Sage model achieves the best performance, so we compare SATCN with KCN-Sage. KCNs are originally proposed for the kriging task under a fixed graph structure; we use Algorithm 1 to train them adaptively for our task. Same as in \citep{appleby2020kriging}, a 4-layer GraphSage with hidden size [20, 10, 5, 3] is used. As with SATCN, the number of nearest neighbors $\mathcal{K}$ for every dataset is fine-tuned.
\begin{figure}
\caption{Absolute kriging error on test locations in June, 1999 of NOAA dataset.}
\label{fig:noaa_result}
\end{figure}
\begin{figure}
\caption{Absolute kriging error on test locations in January, 1971 of USHCN dataset. Only a third of the sensors are observed.}
\label{fig:udata_result}
\end{figure}
\subsection{Visualization of Results}
We provide the spatiotemporal kriging visualization of NOAA and USHCN cases. We select the kriging absolute error of NOAA and USHCN to illustrate SATCN's superiority. Figure \ref{fig:noaa_result} shows that outputs of SATCN contain less large errors, i.e. red nodes. In contrast, other traditional models fail to give competitive results. We also visualize the results on USHCN dataset in Figure \ref{fig:udata_result}, it is obvious that SATCN and KCN-Sage outperform other methods in this sparsely observed case.
\end{document}
|
\begin{document}
\title{Green's functions for reflectionless potentials and addition of boundstates to powerlaw potentials to find Supersymmetric partners}
\begin{abstract}
Green's functions for reflectionless potentials are constructed and analyzed. Green's functions for power law potentials, their Super Symmetric partners and sum rules for eigenvalues are examined. The SUSY partner potentials to power law potentials which have an additional bound state at $E=0$ are constructed.
\end{abstract}
\section{Green's functions}
Reflectionless potentials can belong to one of two categories: either (1) potentials which support negative energy eigenstates for a Schrodinger differential operator and also have a continuous spectrum with vanishing reflection coefficients for positive energies, or (2) potentials which rise to infinite values and have only a discrete spectrum with positive energy eigenvalues.
\subsection{Confining potentials}
We first consider confining potentials which have only a bound-state spectrum with orthonormal eigenfunctions $\psi_j$ and positive eigenvalues $\gamma_j^2$ for the Schrodinger equation for a potential $U(x)$ in a domain $[a,b]$ using units in which $\hbar=1$ and mass $m=1/2$:
\begin{equation}
\frac{d^2}{dx^2}\psi_j(x) = \big(U-\gamma_j^2\big) \psi_j(x) \label{eq:d1}
\end{equation}
The orthonormal condition and the completeness relation are
\begin{align}
\int_{a}^{b} \psi_j(x)\ \psi_k(x)\ dx &= \delta_{jk} \label{}\\
g_0(x,x^{\prime}) &\equiv\sum_j \psi_j(x) \psi_j(x^{\prime}) = \delta(x-x^{\prime}) \label{eq:c0}
\end{align}
Using (\ref{eq:d1}) it can be established that
\begin{align}
g_k(x,x^{\prime}) &\equiv \sum_j (\gamma_j)^k \psi_j(x) \psi_j(x^{\prime}) ,\ k=0,\pm 1, \pm 2, \ \dots \label{eq:d0}\\
\frac{d^2}{dx^2} g_k(x,x^{\prime}) &=U g_k(x,x^{\prime}) - g_{k+2}(x,x^{\prime}) \label{}\\
g_{k+n}(x,x^{\prime}) &= \int_{a}^{b}g_k(x,z) g_n(z,x^{\prime})\ dz \label{eq:p1}
\end{align}
The Green's function $G(x,x^{\prime})$ for the Schrodinger equation may be identified as corresponding to $-g_{-2}(x,x^{\prime})$ and
\begin{equation}
G(x,x^{\prime}) \equiv - g_{-2}(x,x^{\prime})\quad \Rightarrow\ \Big[\frac{d^2}{dx^2} - U\Big]G(x,x^{\prime})=\delta(x-x^{\prime}) \label{eq:g1}
\end{equation}
Using (\ref{eq:d1}) it can also be established that the squares of the eigenfunctions satisfy
\begin{equation}
\frac{d^3}{dx^3}\psi_j^2(x) = 8[U-\gamma_j^2]\psi_j \frac{d}{dx}\psi_j +2\psi_j^2 \frac{dU}{dx}\label{}
\end{equation}
It follows that
\begin{equation}
\big[\frac{d^3}{dx^3} - 4U \frac{d}{dx}-2 \frac{dU}{dx}\big] g_k(x,x)= -4\frac{d}{dx} g_{k+2}(x,x) \label{eq:d2}
\end{equation}
which can also be written in the integral form
\begin{equation}
g_{k+2}(x,x)= \frac{1}{4} \Big(\Big[\big(-\frac{d^2}{dx^2} + 4U\big) g_k(x,x)\Big] \vert_{a}^x - 2\int_{a}^x \frac{dU}{dy} g_k(y,y) dy\Big)\label{eq:d3}
\end{equation}
Knowledge of $g_1(x,x)$ enables the determination of $g_{2k+1}$ through the repeated application of the integral form in (\ref{eq:d3}). Knowledge of $g_{-2}(x,x)$ enables the determination of $g_{-2k-2}(x,x)$ by solving the differential equation in (\ref{eq:d2}). Two boundary conditions on the solutions of the Schrodinger equation generate solutions for all $x$ and hence give conditions on all the derivatives of the eigenstates and consequently generate boundary conditions on all the derivatives of $g_k$ guaranteeing that the third order differential equation has adequate boundary conditions to generate unique solutions. It can be seen that
\begin{equation}
\int_{a}^{b} g_k(x,x) dx = \sum_j \big(\gamma_j\big)^k \label{eq:d4}
\end{equation}
which is a set of sum rules for the eigenvalues of the Schrodinger equation for a confining potential. The sums for negative values of $k$ may converge and the sums for positive values of $k$ will diverge.
\textbf{Theorem}: It can be seen that
\begin{align}
F(x,x^{\prime})&\equiv f(x_<)g(x_>)\ \Rightarrow \label{}\\
\int_a^b \int_a^b F(x,x^{\prime}) dx dx^{\prime}&= \int_a^b g(x)\Big[\int_a^x f(x^{\prime}) dx^{\prime}\Big] dx + \int_a^b f(x)\Big[\int_x^b g(x^{\prime}) dx^{\prime}\Big] dx \label{eq:Th1}
\end{align}
where $x_<$ is the lesser of $(x,x^{\prime})$ and $x_>$ is the greater of $(x,x^{\prime})$. By changing the order of the $x$ and $x^{\prime}$ integrations in the second integral we get
\begin{align}
\int_a^b \int_a^b F(x,x^{\prime}) dx dx^{\prime}&= \int_a^b g(x)\Big[\int_a^x f(x^{\prime}) dx^{\prime}\Big] dx + \int_a^b g(x^{\prime})\Big[\int_a^{x^{\prime}} f(x) dx\Big] dx^{\prime} \label{}\\
&= 2\int_a^b g(x)\Big[\int_a^x f(x^{\prime}) dx^{\prime}\Big] dx =2\int_a^b f(x)\Big[\int_x^b g(x^{\prime}) dx^{\prime}\Big] dx \label{eq:Th2}
\end{align}
The equivalence expressed in (\ref{eq:Th1}) and (\ref{eq:Th2}) may be used together with the associative property expressed in (\ref{eq:p1}) to evaluate the sum rules in (\ref{eq:d4})
\subsection{Reflectionless potentials related to KdV solitons}
The discussion for confining potentials can be adapted for reflectionless potentials with a discrete negative energy spectrum which may be related to multi-soliton solutions of the KdV equation (Scott {\it et al} 1973).
The mapping
\begin{equation}
\gamma_j\ \Rightarrow\ i \alpha_j ,\ L_k(x)\equiv -2(-2i)^{2k+1} g_{2k+1}(x,x)=-2\sum_j\big(2\alpha_j\big)^{2k+1}\psi^2_j(x) \label{eq:L1}
\end{equation}
may be used to establish that the KdV equation is the $k=0$ member of the Lax hierarchy (Lax 1968, Gardner {\it et al} 1967) $L_k$
\begin{equation}
L_0(x)= -4\sum_j \alpha_j \psi^2_j(x) = U(x) \quad \Rightarrow \ \int_{-\infty}^{\infty} U(x) dx = -4\sum_j \alpha_j \label{eq:L2}
\end{equation}
where $\psi_j$ are the bound-state solutions of the reflectionless potential $U$ at energies $-\alpha_j^2$. $U$ can be interpreted in terms of soliton solutions whose time evolution is governed by the KdV equation.
All the odd members of the $g_k(x,x)$ hierarchy can be related to members of the Lax hierarchy as defined in (\ref{eq:L1}) and the time evolution of the soliton solutions of the Lax hierarchy is governed by:
\begin{align}
\frac{\partial U}{\partial t_k} &= -\frac{\partial L_k}{\partial x} \label{eq:L3}\\
L_k&=-2 \sum_j \big[2\alpha_j\big]^{2k+1} \psi^2_j(x) \label{eq:L4}\\
\frac{d}{dx} L_{k+1} &=\Big[\frac{d^3}{dx^3} - 4U \frac{d}{dx}-2 \frac{dU}{dx}\Big] L_k \label{eq:L5}
\end{align}
leading to the sum rules
\begin{equation}
\int_{-\infty}^{\infty} L_k(x) dx = -2\sum_j \big[2\alpha_j\big]^{2k+1} \label{eq:L6}
\end{equation}
The sums may converge for positive values of $k$ and may diverge for negative values of $k$.
\section{Example - Particle in a box}
\subsection{Case 1}
Consider the solutions for a free particle confined within a box with infinite walls at $x=0$ and $x=1$. The eigenfunctions which satisfy $\psi_j(0)=0$ and $\psi_j(1)=0$ are
\begin{align}
\psi_j(x)&= \sqrt{2} \sin(\gamma_j x) ,\quad \gamma_j = j\pi \Rightarrow \label{eq:d7}\\
g_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{\sin(j\pi x) \sin(j\pi x^{\prime})}{j\pi}=\sum_{j=1}^{\infty} \frac{\big(\cos[j\pi (x-x^{\prime})]-\cos[j\pi(x+x^{\prime})]\big)}{j\pi} \notag \\
&= \frac{1}{\pi} \ln\Big(\frac{\sin[(x+x^{\prime})\pi/2]}{\sin[|(x-x^{\prime})|\pi/2]}\Big) \label{}\\
g_{-2}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{\sin(j\pi x) \sin(j\pi x^{\prime})}{j^2\pi^2} =\sum_{j=1}^{\infty} \frac{\big(\cos[j\pi (x-x^{\prime})]-\cos[j\pi(x+x^{\prime})]\big)}{j^2\pi^2} \notag\\
&= \Big[\frac{1}{6}-\frac{|(x-x^{\prime})|}{2}+\frac{\big(x-x^{\prime}\big)^2}{4}\Big]-\Big[\frac{1}{6}-\frac{(x+x^{\prime})}{2}+\frac{\big(x+x^{\prime}\big)^2}{4}\Big]\label{}\\
&=x_< (1-x_>)\label{eq:g2}
\end{align}
where $x_<$ is the lesser of $(x,x^{\prime})$ and $x_>$ is the greater of $(x,x^{\prime})$. The two results for the two series given above are well known (Gradshteyn and Ryzhik 1.441.2 p38, 1.443.3 p39). Application of the result in (\ref{eq:p1}) for $n=-1$ and $k=-1$ leads to the relation
\begin{equation}
\frac{1}{\pi^2} \int_0^1 \ln\Big(\frac{\sin[(x+z)\pi/2]}{\sin[|(x-z)|\pi/2]}\Big) \ln\Big(\frac{\sin[(z+x^{\prime})\pi/2]}{\sin[|(z-x^{\prime})|\pi/2]}\Big) dz=x_< (1-x_>)\label{eq:q1}
\end{equation}
It can also be verified that
\begin{equation}
\int_0^1 g_{-2}(x,x) dx = \int_0^1 x(1-x) dx = \frac{1}{6} =\sum_{j=1}^{\infty} \frac{1}{j^2\pi^2} \label{}
\end{equation}
It was noted earlier that $-g_{-2}(x,x^{\prime})$ can be identified with the Green's function $G(x,x^{\prime})$ which satisfies (\ref{eq:g1}). $G(x,x^{\prime})$ can also be constructed by an independent procedure by finding the solutions for eigenvalue $0$ which satisfy the appropriate boundary condition at $x=0$ and $x=1$. For the particle in a box the appropriate zero energy solutions are $\psi=x$ and $\psi=(1-x)$ and
\begin{equation}
G(x,x^{\prime})= -x_< \big(1-x_>\big)\ \Rightarrow\ G(x,x^{\prime})=x(x^{\prime}-1)\Theta(x^{\prime}-x) + x^{\prime}(x-1)\Theta(x-x^{\prime}) \label{eq:g3}
\end{equation}
where $\Theta(z)$ is the Heaviside step function which vanishes for negative values of $z$ and has value 1 for positive $z$. $G$ so defined satisfies (\ref{eq:g1}).
The knowledge of $G(x,x^{\prime})$ enables the identification of two possible ways of finding $g_{-4}(x,x)$ either by solving (\ref{eq:d2}) using $g_{-2}(x,x)$ as input or by using (\ref{eq:p1}) with $k=n=-2$. The second method yields
\begin{align}
g_{-4}(x,x)&=\int_0^1 G(x,x^{\prime}) G(x^{\prime},x) dx^{\prime} = \int_0^x (1-x)^2 x^{\prime2} dx^{\prime} +\int_x^1 x^2(1-x^{\prime})^2 dx^{\prime} \notag\\
&=\frac{x^2}{3}(1-x)^2\label{eq:g4}\\
\int_0^1g_{-4}(x,x)dx&= \frac{1}{90} =\sum_{j=1}^{\infty} \frac{1}{j^4\pi^4} \label{eq:g5}
\end{align}
The alternate procedure of solving using (\ref{eq:d2}) is to integrate both sides of the equation in the range $[0,x]$ three times to find:
\begin{align}
\Big[\frac{d^2}{dx^2} g_{-4}(x,x)\Big]\vert_0^x&= -4\Big[g_{-2}(x,x)\Big]\vert_0^x=-4x(1-x) \label{}\\
\Big[\frac{d}{dx} g_{-4}(x,x)\Big]\vert_0^x&=x\Big[\frac{d^2}{dx^2} g_{-4}(x,x)\Big]\vert_0 -2x^2+\frac{4x^3}{3}\label{}\\
g_{-4}(x,x)&=x\Big[\frac{d}{dx} g_{-4}(x,x)\Big]\vert_0+\frac{x^2}{2}\Big[\frac{d^2}{dx^2} g_{-4}(x,x)\Big]\vert_0 -\frac{2x^3}{3} + \frac{x^4}{3}\label{}
\end{align}
The boundary condition on $\psi_j$ shows that the derivative of $g_{-4}$ at $x=0$ is $0$. The second derivative of $g_{-4}$ at $x=0$ may be determined to have the value $2/3$ by imposing the condition that $g_{-4}$ must vanish at $x=1$. These manipulations lead to
\begin{equation}
g_{-4}(x,x)= \frac{1}{3} (x^4-2x^3+x^2) = \frac{1}{3} x^2 (1-x)^2 \label{}
\end{equation}
in agreement with (\ref{eq:g4}).
\subsection{Case 2}
If we consider the eigenfunctions for a free particle which satisfy $[d\psi_j(x)/dx]=0$ at $x=0$ and $\psi_j(1)=0$
\begin{align}
\psi_j(x)&= \sqrt{2} \cos(\gamma_j x) ,\quad \gamma_j = (j-1/2)\pi \Rightarrow \label{eq:d8}\\
g_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{1}{\gamma_j}\cos(\gamma_j x) \cos(\gamma_j x^{\prime})=\sum_{j=1}^{\infty} \frac{1}{\gamma_j}\big(\cos[\gamma_j (x+x^{\prime})] +\cos[\gamma_j (x-x^{\prime})]\big) \notag\\
&= \frac{1}{\pi} \ln\Big(\cot[(x+x^{\prime})\pi/4] \cot[|(x-x^{\prime})|\pi/4]\Big) \label{}\\
g_{-2}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{1}{\gamma_j^2}\cos(\gamma_j x)\cos(\gamma_j x^{\prime})= \sum_{j=1}^{\infty} \frac{1}{\gamma_j^2}\big(\cos[\gamma_j (x+x^{\prime})] +\cos[\gamma_j (x-x^{\prime})]\big)\notag\\
&=\frac{1}{2}\Big[1-(x+x^{\prime})\Big]+ \frac{1}{2}\Big[1-|(x-x^{\prime})|\Big] =(1-x_>)\label{eq:g6}
\end{align}
where $x_>$ is the greater of $(x,x^{\prime})$. The two results for the two series given above are well known (Gradshteyn and Ryzhik 1.442.2 p38, 1.444.6 p39). Application of the result in (\ref{eq:p1}) for $n=-1$ and $k=-1$ leads to the relation
\begin{align}
&\frac{1}{\pi^2} \int_0^1 \ln\Big(\cot[(x+z)\pi/4] \cot[|(x-z)|\pi/4]\Big)\ \ln\Big(\cot[(z+x^{\prime})\pi/4]\cot[|z-x^{\prime}|\pi/4]\Big) dz\notag\\
&= (1-x_>)\label{eq:q2}
\end{align}
It can also be verified that
\begin{equation}
\int_0^1 g_{-2}(x,x) dx = \int_0^1 (1-x) dx = \frac{1}{2} =\sum_{j=1}^{\infty} \frac{4}{(2j-1)^2\pi^2} \label{}
\end{equation}
As in the earlier case $-g_{-2}(x,x^{\prime})$ can be identified with the Green's function $G(x,x^{\prime})$ which satisfies (\ref{eq:g1}). For a free particle the zero energy solutions are $\psi=1$ which has vanishing derivative at $x=0$ and $\psi=(1-x)$ which vanishes at $x=1$ and these solutions may be used to construct $G(x,x^{\prime})$ :
\begin{equation}
G(x,x^{\prime})= -\big(1-x_>\big)\ \Rightarrow\ G(x,x^{\prime})=(x^{\prime}-1)\Theta(x^{\prime}-x) + (x-1)\Theta(x-x^{\prime}) \label{eq:g7}
\end{equation}
which satisfies (\ref{eq:g1}). The knowledge of $G(x,x^{\prime})$ enables the identification of two possible ways of finding $g_{-4}(x,x)$ either by solving (\ref{eq:d2}) using $g_{-2}(x,x)$ as input or by using (\ref{eq:p1}) with $k=n=-2$. The second method yields
\begin{align}
g_{-4}(x,x)&=\int_0^1 G(x,x^{\prime}) G(x^{\prime},x) dx^{\prime} = \int_0^x (1-x)^2 d x^{\prime}+\int_x^1 (1-x^{\prime})^2 dx^{\prime} \notag\\
&= x(1-x)^2 +\frac{(1-x)^3}{3} = \frac{1}{3}(1-x)^2 (2x+1)\label{eq:g8}\\
\int_0^1g_{-4}(x,x)dx&= \frac{1}{6} =\sum_{j=1}^{\infty} \frac{16}{(2j-1)^4\pi^4} \label{eq:g9}
\end{align}
The alternate procedure of solving using (\ref{eq:d2}) is to integrate both sides of the equation in the range $[x,1]$ three times to find:
\begin{align}
\Big[\frac{d^2}{dx^2} g_{-4}(x,x)\Big]\vert_1^x&= -4\Big[g_{-2}(x,x)\Big]\vert_1^x=-4(1-x) \label{}\\
\Big[\frac{d}{dx} g_{-4}(x,x)\Big]\vert_1^x&=(x-1)\Big[\frac{d^2}{dx^2} g_{-4}(x,x)\Big]\vert_1 -4x+2x^2+2\label{}\\
g_{-4}(x,x)&=(x-1)\big[\frac{d}{dx} g_{-4}(x,x)\big]\vert_1+\frac{(x-1)^2}{2}\Big[\frac{d^2}{dx^2} g_{-4}(x,x)\Big]\vert_1 - \notag\\
& -2x^2+\frac{2x^3}{3} +2x - \frac{2}{3}\label{}
\end{align}
The boundary condition on $\psi_j$ shows that the first derivative of $g_{-4}$ vanishes at $x=0$ and $x=1$. The second derivative of $g_{-4}$ at $x=1$ may be determined to have the value 2 by imposing the condition that the derivative of $g_{-4}$ must vanish at $x=0$. These manipulations lead to
\begin{equation}
g_{-4}(x,x)= \frac{1}{3} (2x^3-3x^2+1) = \frac{1}{3}(1-x)^2(2x+1)\label{}
\end{equation}
in agreement with (\ref{eq:g8}).
\subsection{Case 3}
If we consider the eigenfunctions for a free particle which satisfy $[d\psi_j(x)/dx]=0$ at $x=1$ and $\psi_j(0)=0$
\begin{align}
\psi_j(x)&= \sqrt{2} \sin(\gamma_j x) ,\quad \gamma_j = (j-1/2)\pi \Rightarrow \label{eq:d9}\\
g_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{1}{\gamma_j}\sin(\gamma_j x) \sin(\gamma_j x^{\prime})=\sum_{j=1}^{\infty} \frac{1}{\gamma_j}\big(\cos[\gamma_j (x-x^{\prime})] -\cos[\gamma_j (x+x^{\prime})]\big) \notag\\
&= \frac{1}{\pi} \Big[\ln\Big(\cot[|(x-x^{\prime})|\pi/4]\Big)-\ln\Big(\cot[(x+x^{\prime})\pi/4]\Big)\Big] \label{}\\
g_{-2}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{1}{\gamma_j^2}\sin(\gamma_j x)\sin(\gamma_j x^{\prime})= \sum_{j=1}^{\infty} \frac{1}{\gamma_j^2}\big(\cos[\gamma_j (x-x^{\prime})] -\cos[\gamma_j (x+x^{\prime})]\big)\notag\\
&= \frac{1}{2} \Big(\big[1-|(x-x^{\prime})|\big] - \big[1- (x+x^{\prime})\big]\Big)= x_< \label{eq:g11}\\
G(x,x^{\prime})&= -x_<=-x^{\prime} \Theta(x-x^{\prime}) -x \Theta(x^{\prime}-x)\label{}
\end{align}
which satisfies (\ref{eq:g1}). These identifications lead to the relation
\begin{equation}
\frac{1}{\pi^2} \int_0^1 \Big[\ln\frac{\Big(\cot(|[x-z]|\pi/4)\Big)}{\Big( \cot([x+z]\pi/4)\Big)}\Big]\Big[\ln\frac{\Big(\cot(|[z-x^{\prime}]|\pi/4)\Big)}{\Big( \cot([z+x^{\prime}]\pi/4)\Big)}\Big] dz = x_<\label{eq:q3}
\end{equation}
It can also be verified that
\begin{equation}
\int_0^1 g_{-2}(x,x) dx = \int_0^1 x dx = \frac{1}{2} =\sum_{j=1}^{\infty} \frac{4}{(2j-1)^2\pi^2} \label{}
\end{equation}
It can also be established that
\begin{align}
g_{-4}(x,x)&=\int_0^1 G(x,x^{\prime}) G(x^{\prime},x) dx^{\prime} = \int_0^x x^{\prime2} dx^{\prime} +\int_x^1 x^2\ dx^{\prime} = x^2-\frac{2x^3}{3} \label{eq:g12}\\
\int_0^1g_{-4}(x,x)dx&= \frac{1}{6} = \sum_{j=1}^{\infty} \frac{16}{(2j-1)^4\pi^4} \label{eq:g13}
\end{align}
\subsection{Case 4}
If we consider the eigenfunctions for a free particle which satisfy $[d\psi_j(x)/dx]=0$ at $x=0$ and $x=1$
\begin{align}
\psi_j(x)&= \sqrt{2} \cos(\gamma_j x) ,\quad \gamma_j = j\pi \Rightarrow \label{eq:d12}\\
g_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{\cos(j\pi x) \cos(j\pi x^{\prime})}{j\pi}=\sum_{j=1}^{\infty} \frac{\big(\cos[j\pi (x-x^{\prime})]+\cos[j\pi(x+x^{\prime})]\big)}{j\pi} \notag \\
&= \frac{-1}{\pi} \ln\Big(4\sin[|(x-x^{\prime})|\pi/2] \sin[(x+x^{\prime})\pi/2]\Big) \label{}\\
g_{-2}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{\cos(j\pi x) \cos(j\pi x^{\prime})}{j^2\pi^2} =\sum_{j=1}^{\infty} \frac{\big(\cos[j\pi (x-x^{\prime})]+\cos[j\pi(x+x^{\prime})]\big)}{j^2\pi^2} \notag\\
&=\Big(\Big[\frac{1}{6}-\frac{|(x-x^{\prime})|}{2} +\frac{\big(x-x^{\prime}\big)^2}{4}\Big] +\Big[\frac{1}{6}-\frac{(x+x^{\prime})}{2} +\frac{(x+x^{\prime})^2}{4}\Big]\Big)\notag\\
&=\Big(\frac{1}{3}-x_> +\frac{1}{2}(x^2+x^{\prime2})\Big)=-G(x,x^{\prime})\label{eq:g15}
\end{align}
These identifications lead to the relation
\begin{align}
&\frac{1}{\pi^2} \int_0^1 \ln\Big(4\sin(|[x-z]|\pi/2)\sin([x+z]\pi/2)\Big) \ln\Big(4\sin(|[z-x^{\prime}]|\pi/2)\sin([z+x^{\prime}]\pi/2)\Big) dz \notag\\
&=\Big(\frac{1}{3}-x_> +\frac{1}{2}\big(x^2+x^{\prime2}\big)\Big)\label{eq:q4}
\end{align}
It can also be verified that
\begin{equation}
\int_0^1 g_{-2}(x,x) dx = \int_0^1\Big(\frac{1}{3}-x+x^2\Big) dx = \frac{1}{6} =\sum_{j=1}^{\infty} \frac{1}{j^2\pi^2} \label{}
\end{equation}
The completeness relation defined in (\ref{eq:c0}) should also include the $j=0$ term in the sum for this case because the normalized $j=0$ function defined by (\ref{eq:d12}) is $\psi_0\rightarrow1$ and is nonvanishing. But the $g_k$ functions defined above for $k\ne0$ have the sums beginning with $j=1$. Hence it can be recognized that in this case
\begin{equation}
G(x,x^{\prime})=\Big(-\frac{1}{3}+x_>-\frac{1}{2}(x^2+x^{\prime2})\Big) =-\Big(\frac{1}{3} +\frac{1}{2}(x^2+x^{\prime2})\Big)+x\Theta(x-x^{\prime})+x^{\prime}\Theta(x^{\prime}-x)\label{}
\end{equation}
will satisfy
\begin{equation}
\Big[\frac{d^2}{dx^2} - U\Big]G(x,x^{\prime})=\delta(x-x^{\prime}) -1 \label{eq:ga1}
\end{equation}
instead of (\ref{eq:g1})
It can be established that
\begin{align}
g_{-4}(x,x)&=\int_0^1 G(x,y) G(y,x) dy \notag\\
&=\int_0^x \Big(\frac{1}{3}-x +\frac{1}{2}(x^2+y^2)\Big)^2 dy +\int_x^1\Big(\frac{1}{3}-y +\frac{1}{2}(x^2+y^2)\Big)^2 dy \label{}\\
&=\frac{(-21x^5+45x^4-35x^3+15x^2-5x+1)}{45} + \notag{}\\
&+\quad\quad \frac{(21x^5-60x^4+65x^3-30x^2+5x)}{45}\label{}\\
&=\frac{ (-15x^4+30x^3-15x^2+1)}{45} =\frac{1}{45} -\frac{1}{3}x^2(1-x)^2 \label{eq:g16}\\
\int_0^1g_{-4}(x,x)dx&= \frac{1}{90} = \sum_{j=1}^{\infty} \frac{1}{j^4\pi^4} \label{eq:g17}
\end{align}
\subsection{ Series with alternating signs}
We can also consider weighted eigenstate sums with alternating signs. If we define
\begin{equation}
f_k(x,x^{\prime}) \equiv \sum_{j=1}^{\infty} (-)^{j-1}(\gamma_j)^k \psi_j(x) \psi_j(x^{\prime}) ,\ k=0,\pm 1, \pm 2, \ \dots \label{}
\end{equation}
then the $f_k$ are related to the $g_k$ defined earlier by the relation
\begin{equation}
g_{k+n}(x,x^{\prime}) = \int_{a}^{b}f_k(x,z) f_n(z,x^{\prime})\ dz \label{eq:p2}
\end{equation}
We can consider each of the 4 cases considered in the previous section with alternating signs for the series.
$\bullet$
For the particle in a box eigenstates which satisfy $\psi_j(0)=0$ and $\psi_j(1)=0$ it can be shown using (\ref{eq:d7}) that (Gradshteyn and Ryzhik 1.441.4 p38)
\begin{align}
\psi_j(x)&= \sqrt{2} \sin(\gamma_j x) ,\quad \gamma_j = j\pi \Rightarrow \notag\\
f_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{(-)^{j-1}}{j\pi}\sin(j\pi x) \sin(j\pi x^{\prime})\notag\\
&=\sum_{j=1}^{\infty} \frac{(-)^{j-1}}{j\pi}\big(\cos[j\pi (x-x^{\prime})]-\cos[j\pi(x+x^{\prime})]\big) \notag \\
&= \frac{1}{\pi} \ln\Big(\frac{\cos[(x-x^{\prime})\pi/2]}{\cos[(x+x^{\prime})\pi/2]}\Big) \label{}
\end{align}
which leads to a relation similar to (\ref{eq:q1}) of the form
\begin{align}
g_{-2}(x,x^{\prime})&= \int_0^1 f_{-1}(x,z) f_{-1}(z,x^{\prime}) dz \ \Rightarrow \notag\\
x_< (1-x_>)&=\frac{1}{\pi^2} \int_0^1 \ln\Big(\frac{\cos([x-z]\pi/2)}{\cos([x+z]\pi/2)}\Big)\ \ln\Big(\frac{\cos([z-x^{\prime}]\pi/2)}{\cos([z+x^{\prime}]\pi/2)}\Big) dz\label{eq:q5}
\end{align}
$\bullet$
Similarly for eigenstates which satisfy $[d\psi_j(x)/dx]=0$ at $x=0$ and $\psi_j(x)=0$ at $x=1$ it can be shown using (\ref{eq:d8}) that (Gradshteyn and Ryzhik 1.442.4 p38)
\begin{align}
\psi_j(x)&= \sqrt{2} \cos(\gamma_j x) ,\quad \gamma_j = (j-1/2)\pi \Rightarrow \notag\\
f_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{(-)^{j-1}}{\gamma_j}\cos(\gamma_j x) \cos(\gamma_j x^{\prime})\notag\\
&= \sum_{j=1}^{\infty} \frac{(-)^{j-1}}{\gamma_j}\big(\cos[\gamma_j (x+x^{\prime})] +\cos[\gamma_j (x-x^{\prime})]\big) \label{}
\end{align}
The Fourier analysis of the function $[\Theta(1-x)\ - 1/2]$ in the range $[0\le x\le 2]$ in terms of even functions gives
\begin{align}
\sum_{j=1}^{\infty} (-)^{j-1} \frac{\cos[(j-1/2)\pi x]}{(j-1/2)}&= +\frac{1}{2}\ \hbox{if}\ 0\le x< 1 \label{eq:t1}\\
&= -\frac{1}{2}\ \hbox{if}\ 1 < x\le 2 \label{eq:t2}
\end{align}
Using this result it can be shown that
\begin{align}
f_{-1}(x,x^{\prime})&= 1\ \hbox{if}\ x+x^{\prime} \le 1\ ,\ =0\ \hbox{if}\ x+ x^{\prime}>1 \ \Rightarrow \label{}\\
f_{-1}(x,x^{\prime})&= \Theta(1-x-x^{\prime}) \label{}
\end{align}
which leads to a relation similar to (\ref{eq:q2}) of the form
\begin{equation}
g_{-2}(x,x^{\prime}) = \int_0^1 f_{-1}(x,z) f_{-1}(z,x^{\prime}) dz= \int_0^{1-x_>} dz = 1-x_> \label{eq:q6}
\end{equation}
where $x_>$ is the greater of $(x,x^{\prime})$.
$\bullet$
For eigenfunctions which satisfy $[d\psi_j(x)/dx]=0$ at $x=1$ and $\psi_j(0)=0$ it can be shown using (\ref{eq:d9}) that
\begin{align}
\psi_j(x)&= \sqrt{2} \sin(\gamma_j x) ,\quad \gamma_j = (j-1/2)\pi \Rightarrow \notag\\
f_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{(-)^{j-1}}{\gamma_j}\sin(\gamma_j x) \sin(\gamma_j x^{\prime})\notag\\
&= \sum_{j=1}^{\infty} \frac{(-)^{j-1}}{\gamma_j}\big(\cos[\gamma_j (x-x^{\prime})] -\cos[\gamma_j (x+x^{\prime})]\big) \label{}
\end{align}
Using the result given in (\ref{eq:t1}) and (\ref{eq:t2}) it can be shown that
\begin{align}
f_{-1}(x,x^{\prime})&= 1\ \hbox{if}\ x+x^{\prime} \ge 1\ ,\ =0\ \hbox{if}\ x+ x^{\prime}<1 \ \Rightarrow \label{}\\
f_{-1}(x,x^{\prime})&= \Theta(x+x^{\prime}-1) \label{}
\end{align}
which leads to a relation similar to (\ref{eq:q3}) of the form
\begin{equation}
g_{-2}(x,x^{\prime}) = \int_0^1 f_{-1}(x,z) f_{-1}(z,x^{\prime}) dz= \int_{1-x_<}^1 dz = x_< \label{eq:q7}
\end{equation}
where $x_<$ is the lesser of $(x,x^{\prime})$.
$\bullet$
For eigenfunctions which satisfy $[d\psi_j(x)/dx]=0$ at $x=0$ and $x=1$ it can be shown using (\ref{eq:d12}) that
\begin{align}
\psi_j(x)&= \sqrt{2} \cos(\gamma_j x) ,\quad \gamma_j = j\pi \Rightarrow \notag\\
f_{-1}(x,x^{\prime})&= 2 \sum_{j=1}^{\infty} \frac{(-)^{j-1}}{j\pi}\cos(j\pi x) \cos(j\pi x^{\prime})\notag\\
&=\sum_{j=1}^{\infty} \frac{(-)^{j-1}}{j\pi}\big(\cos[j\pi (x+x^{\prime})]-\cos[j\pi(x-x^{\prime})]\big) \notag \\
&= \frac{1}{\pi} \ln\Big(4\cos[(x-x^{\prime})\pi/2] \cos[(x+x^{\prime})\pi/2]\Big) \label{}
\end{align}
which leads to an identity similar to (\ref{eq:q4}) in the form
\begin{align}
&g_{-2}(x,x^{\prime})= \int_0^1 f_{-1}(x,z) f_{-1}(z,x^{\prime}) dz \ \Rightarrow \notag\\
&=\int_0^1 \frac{\ln\Big(4\cos[(x-z)\pi/2] \cos[(x+z)\pi/2]\Big)}{\pi} \frac{\ln\Big(4\cos[(z-x^{\prime})\pi/2] \cos[(z+x^{\prime})\pi/2]\Big)}{\pi} dz\notag\\
&=\Big(\frac{1}{3}-x_>+\frac{1}{2}(x^2+x^{\prime2}) \Big)\label{eq:q8}
\end{align}
\section{Green's function and SUSY partner potentials}
The Green's function defined in (\ref{eq:g1}) and the associated sum rule is a special case, with $\epsilon=0$, of
\begin{align}
G(x,x^{\prime};\epsilon) &= \sum_j \frac{\psi_j(x)\psi_j(x^{\prime})}{(\epsilon - \gamma_j^2)} \label{eq:g21}\\
\Big[\frac{d^2}{dx^2} - U + \epsilon \Big]G(x,x^{\prime};\epsilon) &=\delta(x-x^{\prime}) \label{eq:g22}\\
\int_a^b G(x,x;\epsilon) dx&= \sum_j \frac{1}{(\epsilon - \gamma_j^2)} \label{eq:g23}
\end{align}
The generalized Green's function may be constructed from two linearly independent solutions which satisfy boundary conditions at $x=a$ and $x=b$ by the following procedure:
\begin{align}
0&=\Big[\frac{d^2}{dx^2} - U + \epsilon \Big]\psi(x) \label{}\\
\psi^{\prime}(x)&=\psi(x)\int_b^x\frac{dy}{\psi^2(y)},\ \Rightarrow G(x,x^{\prime};\epsilon)= \psi(x) \psi^{\prime}(x^{\prime}) =\psi(x)\psi(x^{\prime})\int_b^{x_>}\frac{dz}{\psi^2(z)}\label{eq:g24}
\end{align}
in which $\psi(x)$ satisfies a boundary condition at $x=a$ and $\psi^{\prime}(b)=0$, which is appropriate for studying sum rules for eigenstates satisfying the same boundary conditions. Another equivalent representation is
\begin{equation}
\psi(x)=\psi^{\prime}(x)\int_a^x\frac{dy}{\psi^{\prime2}(y)}\ \Rightarrow G(x,x^{\prime};\epsilon)= -\psi^{\prime}(x) \psi(x^{\prime}) =-\psi^{\prime}(x)\psi^{\prime}(x^{\prime})\int_a^{x_<}\frac{dz}{\psi^{\prime2}(z)}\label{eq:g25}
\end{equation}
Using either of these representations of $G$ it is possible to examine a second order sum rule:
\begin{align}
\sum_j \frac{1}{(\epsilon - \gamma_j^2)^2} &= \int_a^b \int_a^b G(x,y;\epsilon) G(y,x;\epsilon) dx dy \label{}\\
&=\int_a^b dx \int_a^x dy\Big(\psi(x)\psi(y)\int_b^x\frac{dz}{\psi^2(z)}\Big)^2 \notag\\
&+\int_a^b dx \int_x^b dy\Big(\psi(x)\psi(y)\int_b^y\frac{dz}{\psi^2(z)}\Big)^2 \notag\\
&=2\int_a^b dx \Big(\psi(x)\int_b^x\frac{dz}{\psi^2(z)}\Big)^2 \Big(\int_a^x dy\ \psi^2(y)\Big)\notag\\
&=2\int_a^b dx \Big(\psi^2(x)\int_b^x\frac{dz}{\psi^2(z)}\Big)^2 \Big(\frac{1}{\psi^2(x)}\int_a^x dy \ \psi^2(y)\Big)\notag\\
&= -2\int_a^b dx G^2(x,x;\epsilon) G^{\prime}(x,x;\epsilon) \label{eq:g26}
\end{align}
in which
\begin{equation}
G^{\prime}(x,x;\epsilon)= -(\phi^{\prime})^2(x)\int_a^x \frac{dy}{(\phi^{\prime})^2(y)} \ \hbox{where}\ \phi^{\prime}(x)=\frac{1}{\psi(x)} \label{eq:g27}
\end{equation}
can be identified using (\ref{eq:g25}) and interpreted using Supersymmetric Quantum Mechanics (SUSYQM) (Witten 1981, Andrianov {\it et al} 1984, Sukumar 1985). Using SUSYQM it can be recognized that if $\psi(x)$ is a nodeless, but unnormalizable solution at an energy $\epsilon$ below the ground state energy of $U(x)$ then $\phi^{\prime}(x)$ is a solution at energy $\epsilon$ for the SUSY partner potential
\begin{equation}
U^{\prime}(x)= U(x) -2\frac{d^2}{dx^2}\ln[\psi(x)] \label{}
\end{equation}
If $\phi^{\prime}(x)$ is normalizable in $[a,b]$ then it can be the ground state eigenfunction for $U^{\prime}(x)$ and the eigenvalue spectrum for $U^{\prime}$ has $\epsilon$ as the lowest eigenvalue plus all the eigenvalues for $U$. If $\phi^{\prime}(x)$ is not normalizable then $U^{\prime}$ and $U$ have identical spectra and a Green's function for $U^{\prime}$ can be constructed in the form given in (\ref{eq:g21}). Thus we can conclude that the second order sum rule can be interpreted in terms of the equi-position Green's functions for $U$ and its SUSY partner $U^{\prime}$ evaluated for energy $\epsilon$ which is below the lowest eigenvalue for $U$.
\subsection{Example 1- Particle in a box}
We consider the sum rules for the eigenvalues for a particle in a box with eigenfunctions which go to zero at $x=0$ and $x=1$ and choose $\epsilon=0$. The solution $\psi(x)=x$
at zero energy gives $\phi^{\prime}(x)=1/x$. $\phi^{\prime}(x)$ is not normalizable in the domain $[0,1]$. Hence the SUSY partner and its spectrum, which is identical to that of $U$ are given by
\begin{equation}
U^{\prime}=U-2(d^2\ln(x)/dx^2)=U+ 2x^{-2},\quad \gamma_j^2=j^2\pi^2,\ j=1,2,\ \dots \label{}
\end{equation}
A result from SUSY Quantum Mechanics relates the eigenstates of SUSY partner potentials in the form
\begin{align}
\psi^{\prime}_j&= \frac{1}{\sqrt{\gamma_j^2-\epsilon}}\Big[\frac{d\psi_j}{dx} -\frac{d\ln[\psi(x)]}{dx} \psi_j\Big] \ \Rightarrow \label{}\\
\psi_j&=\sqrt{2} \sin(j\pi x),\ \psi(x)=x\ \Rightarrow \psi^{\prime}_j=\sqrt{2}\Big[\cos(j\pi x) - \frac{\sin(j\pi x)}{(j\pi x)}\Big] \label{eq:s1}
\end{align}
The eigenstates $\psi^{\prime}_j$ may be identified as $(j\pi x)$ times the spherical Bessel functions of order $1$ with argument $j\pi x$ (Abramowitz and Stegun 1965 p438). Using
\begin{align}
&\int_0^1 \Big(\frac{\sin(j\pi x) \sin(k\pi x)}{jk\pi^2 x^2} - \frac{j\pi\cos(j\pi x) \sin(k\pi x)+k\pi\sin(j\pi x) \cos(k\pi x)}{jk\pi^2 x}\Big)\ dx \label{}\\
&=-\int_0^1 \frac{d}{dx}\Big(\frac{\sin(j\pi x) \sin(k\pi x)}{jk\pi^2 x}\Big)\ dx=\frac{\sin(j\pi x) \sin(k\pi x)}{jk\pi^2 x} \Big\vert_1^{0} =0\label{}
\end{align}
it can be verified that
\begin{equation}
\int_0^1 \psi^{\prime}_j(x) \psi^{\prime}_k(x) \ dx = \delta_{jk} \label{}
\end{equation}
showing that the eigenfunctions $\psi^{\prime}_j(x)$ form an orthonormal set of functions and form a complete set for functions which satisfy $[\psi^{\prime}_j(0)]=0$ and $[\psi^{\prime}_j(1)]^2= 2$.
The Green's functions and the resulting first order sum rules for the eigenvalues are
\begin{align}
G(x,x^{\prime})&=x x^{\prime}\int_1^{x_>}\frac{dy}{y^2}=x_<(x_>-1)\ \Rightarrow \int_0^1 G(x,x)\ dx = \frac{-1}{6}= -\sum_j\frac{1}{j^2\pi^2}\label{}\\
G^{\prime}(x,x^{\prime})&=-\frac{1}{xx^{\prime}}\int_0^{x_<} z^2\ dz= -\frac{x_<^2}{3x_>}\ \Rightarrow \int_0^1 G^{\prime}(x,x)\ dx = \frac{-1}{6} =-\sum_j\frac{1}{j^2\pi^2}\label{}
\end{align}
The second order sum rules may be evaluated in 4 different ways : either by using $G(x,x^{\prime})$ in a two-dimensional integral or by using $G^{\prime}(x,x^{\prime})$ in a two-dimensional integral or using both $G(x,x)$ and $G^{\prime}(x,x)$ in two different one-dimensional integral as shown below:
\begin{align}
S_2&\equiv\sum_j \frac{1}{j^4\pi^4}\label{}\\
S_2&=\int_0^1 \int_0^1 G(x,y) G(y,x)\ dx\ dy= \int_0^1\Big[(1-x)^2\int_0^x y^2\ dy+x^2 \int_x^1 (1-y)^2\ dy\Big]\ dx \notag\\
&= \int_0^1\frac{x^2(1-x)^2}{3}\ dx=\frac{1}{90}\label{}\\
S_2&=\int_0^1 \int_0^1 G^{\prime}(x,y) G^{\prime}(y,x)\ dx\ dy= \frac{1}{9}\int_0^1\Big[x^{-2}\int_0^x y^4\ dy+x^4 \int_x^1 \frac{dy}{y^2}\Big]\ dx \notag\\
&= \frac{1}{9}\int_0^1\Big[\frac{x^3}{5} -x^4+x^3\Big]\ dx =\frac{1}{90}\label{}\\
S_2&=-2\int_0^1 G^2(x,x)\ G^{\prime}(x,x)\ dx= 2\int_0^1 x^2(1-x)^2 \frac{x}{3}\ dx = \frac{1}{90} \label{}\\
S_2&=-2\int_0^1 G^{\prime2}(x,x)\ G(x,x)\ dx= 2\int_0^1 \frac{x^2}{9} x(1-x)\ dx = \frac{1}{90} \label{}
\end{align}
in agreement with well known results.
\subsection{Example 2- Simple harmonic Oscillator}
Consider the Hamiltonian for a harmonic oscillator in units in which $\hbar=1$, $m=1/2$ and $\omega=2$ so that the Schrodinger equation is
\begin{equation}
\frac{d^2\psi}{dx^2}= (x^2-E)\psi \label{}
\end{equation}
The minimum value of the potential is $0$ at $x=0$, the energy spacing is $2$, $E_n=2n+1$ and the ground state is at $E=1$. If we consider the even states which are symmetric in $x$ and are orthogonal in the space $[0,\infty]$ and are spaced in units of $4$ they can be denoted by $\psi_{2n}$ with energy $E_{2n}=(4n+1)$. We can construct a Green's function for the sum rules for the even states by the following procedure. A solution at energy $E=-1$ which is below the minimum of the potential is $\psi=\exp(+0.5 x^2)$ which is not normalizable but $\phi=(\psi)^{-1}= \exp(-0.5 x^2)$ is a normalizable function and can be the ground state eigenfunction for a SUSY partner potential. Hence
\begin{equation}
U^{\prime}=U- 2\frac{d^2\ln(\psi)}{dx^2} =x^2-2 \label{}
\end{equation}
which is an oscillator potential shifted downwards by $2$ and has a spectrum which has all the eigenvalues of $U$ and in addition has a ground state at $E=-1$ and the spectrum can be identified as $E^{\prime}_n= 2n-1, n=0,1,2,\ \dots$. The first order sum rule for the eigenvalues of the oscillator does not converge but diverges logarithmically. For the second order sum rule the Green's functions for $U$ and $U^{\prime}$ may be constructed using the solutions for $E=-1$.
Since $\psi(x)$, the solution for $E=-1$, has vanishing first derivative at $x=0$, a sum rule for the symmetric eigenstates states which also have vanishing first derivatives at $x=0$ may be constructed
\begin{align}
\psi&=\exp\Big(\frac{x^2}{2}\Big),\ \psi^{\prime}=\exp\Big(\frac{x^2}{2}\Big) \int_{\infty}^x \exp(-z^2)\ dz,\ \Rightarrow \label{}\\
G(x,x)&=- \exp(x^2)\int_{\infty}^{x} \exp(-z^2)\ dz \label{}\\
\phi&=\exp\Big(-\frac{x^2}{2}\Big),\ \phi^{\prime}=\exp\Big(-\frac{x^2}{2}\Big) \int_{0}^x \exp(y^2)\ dy\ \Rightarrow\label{}\\
G^{\prime}(x,x)&=\exp(-x^2)\int_{0}^{x} \exp(y^2)\ dy \label{}\\
S_2&=2\int_0^{\infty} G^2(x,x) G^{\prime}(x,x)\ dx \notag\\
&=2\int_0^{\infty}\Big(\Big[\exp(x^2)\int_x^{\infty} \exp(-z^2)\ dz\Big]^2\ \exp(-x^2)\int_{0}^{x} \exp(y^2)\ dy \Big) \label{eq:SS1}\\
S_2&=2\int_0^{\infty} G^{\prime2}(x,x) G(x,x)\ dx \notag\\
&=2\int_0^{\infty}\Big(\Big[\exp(-x^2)\int_{0}^{x} \exp(y^2)\ dy\Big]^2\ \exp(x^2)\int_x^{\infty} \exp(-z^2)\ dz \Big) \label{eq:SS2}\\
S_2&=\sum_{j=0}^{\infty} \frac{1}{(E_{2j}-E^{\prime}_0)^2} = \sum_{j=0}^{\infty} \frac{1}{(4j+2)^2} = \frac{\pi^2}{32} \label{eq:SS3}
\end{align}
where the sum is over the eigenvalues of the symmetric states of $U$. The equality of the expressions in (\ref{eq:SS1}), (\ref{eq:SS2}) and (\ref{eq:SS3}) has been verified by performing the integrations involving the Dawson's integral and the Complementary Error function (Abramowitz and Stegun 1965 p297-8).
\subsection{Power law Confining Potentials}
The procedure discussed for the addition of a state and finding sum rules for eigenvalues may be extended for all confining potentials of the power law form. We consider potentials of the form $U=|x|^n$ which support an infinite number of bound states with positive definite energies and examine the Green's function for finding sum rules for the eigenstates of $U$ and also finding a SUSY partner which has an additional bound state at $E=0$:
\begin{align}
\frac{d^2\psi}{dx^2}&=\vert x\vert^{\alpha} \psi,\ \nu\equiv \frac{1}{\alpha+2},\ z\equiv 2\nu x^{\frac{1}{2\nu}} \Rightarrow \Big(\frac{d^2}{dz^2}+\frac{1}{z}\frac{d}{dz}- \Big[1+\frac{\nu^2}{z^2}\Big]\Big)\psi=0 \label{}\\
\psi^{(1)}&= \sqrt{x} K_{\nu}(2\nu x^{\frac{1}{2\nu}}) ,\ \psi_{\pm}^{(2)} =\sqrt{x} I_{\pm\nu}(2\nu x^{\frac{1}{2\nu}})\ \Rightarrow \psi_-^{(2)}=\psi_+^{(2)}+\frac{2}{\pi}\sin(\pi\nu) \psi^{(1)} \label{}
\end{align}
where $K_{\nu}$ and $I_{\pm\nu}$ are modified Bessel functions (Abramowitz and Stegun 1965 p374-380). The limiting values of the different solutions at $x=0$ and for large absolute values of $x$ are:
\begin{align}
\psi^{(1)}(x)\vert_0 &\sim x^0,\ \psi_{+}^{(2)}(x)\vert_0 \sim x,\ \psi_{-}^{(2)}\vert_0\sim x^0,
\ \frac{d\psi_{-}^{(2)}}{dx}\vert_0=0\ \label{}\\
\psi_{\pm}^{(2)}&\vert_{\infty}\sim\sqrt{\frac{x}{z}}\exp(z)=x^{\frac{\alpha}{4}}\exp(z),\ \psi^{(1)}(x)\vert_{\infty} \sim \sqrt{\frac{x}{z}}\exp(-z)=x^{\frac{\alpha}{4}}\exp(-z)\label{}
\end{align}
These limiting values suggest appropriate functions for the construction of Green's function for the symmetric states which have vanishing derivatives at $x=0$ and have vanishing values in the asymptotic region $x^2 \rightarrow \infty$ is
\begin{equation}
G_e(x,x^{\prime})= -2\nu\sqrt{xx^{\prime}} I_{-\nu}(2\nu x^{\frac{1}{2\nu}})\ K_{\nu}(2\nu x^{\prime\frac{1}{2\nu}}) \label{eq:pg1}
\end{equation}
The Green's function appropriate for the antisymmetric states which vanish at $x=0$ and have vanishing values in the asymptotic region is
\begin{equation}
G_o(x,x^{\prime})=- 2\nu\sqrt{xx^{\prime}} I_{+\nu}(2\nu x^{\frac{1}{2\nu}})\ K_{\nu}(2\nu x^{\prime\frac{1}{2\nu}}) \label{eq:pg2}
\end{equation}
The WKB result for the eigenvalues $E_n$ of $U$ in the limit of large quantum number $n$ can be derived from the Bohr--Sommerfeld formula and is of the form
\begin{equation}
E_n=\gamma_n^2\sim \Big(\Big[n+\frac{1}{2}\Big]\frac{\sqrt{\pi}(\alpha+2)\Gamma\big(\frac{\alpha+2}{\alpha}\big)}{2\Gamma\big(\frac{1}{\alpha}\big)}\Big)^{\frac{2\alpha}{\alpha+2}} \label{}
\end{equation}
which shows that the sums over the inverse of eigenvalues will not converge if $\alpha<2$. For $\alpha>2$, (i.e.) $\nu<1/4$, the Green's functions in (\ref{eq:pg1}) and (\ref{eq:pg2}) may be used in the integration range $[0,\infty]$ and the integrals may be performed (Gradshteyn and Ryzhik p 693-4) to find the sum rules
\begin{align}
S_{even}&=\sum_{n=0}^{\infty}\frac{1}{E_{2n}} = -\int_0^{\infty} G_e(x,x) dx=\nu^{2-4\nu}\frac{\Gamma(2\nu)\Gamma(\nu)\Gamma(1-4\nu)}{\Gamma(1-3\nu)\Gamma(1-2\nu)},\ 0<\nu<\frac{1}{4} \label{eq:pg3}\\
S_{odd}&=\sum_{n=0}^{\infty}\frac{1}{E_{2n+1}} = -\int_0^{\infty} G_o(x,x) dx=\nu^{2-4\nu}\frac{\Gamma(3\nu)\Gamma(2\nu)\Gamma(1-4\nu)}{\Gamma(1-2\nu)\Gamma(1-\nu)},\ 0<\nu<\frac{1}{4} \label{eq:pg4}
\end{align}
The WKB formula for the eigenvalues in the large $n$ limit also show that the difference between the sums over the inverses of the even and odd eigenvalues will converge for positive values of $\alpha$. It can be shown that
\begin{align}
&S=\sum_{n=0}^{\infty}\frac{(-)^n}{E_{n}} = -\int_0^{\infty}\big( G_e(x,x) - G_o(x,x)\big) dx =\nu^{2-4\nu}\frac{\Gamma(3\nu)\Gamma^2(2\nu)}{\Gamma(4\nu)\Gamma(1-\nu)},\ 0<\nu<\frac{1}{2} \label{eq:pg5}\\
&S_{even}= S\ \frac{\sin(3\pi\nu)}{\sin(\pi\nu)}\ \frac{1}{2\cos(2\pi\nu)},\ 0<\nu<\frac{1}{4}\label{}\\
&S_{odd}= S\ \frac{1}{2\cos(2\pi\nu)},\ 0<\nu<\frac{1}{4} \label{}
\end{align}
It is also possible to use the zero energy solutions in $U(x)$ to find a SUSY partner $U^{\prime}(x)$ which has a spectrum with all the eigenvalues of $U$ and in addition has an extra boundstate at $E=0$. It can be shown that $\Psi=\psi_{-}^{(2)}$ is a symmetric nodeless function which is not normalizable but $\Phi=1/\Psi$ is normalizable and can be the ground state eigenfunction for the SUSY partner potential
\begin{equation}
\Psi \equiv \sqrt{x} I_{-\nu}(2\nu x^{\frac{1}{2\nu}}) ,\quad U^{\prime}=\vert x\vert^n -2\frac{d^2\ln \Psi}{dx^2} \label{}
\end{equation}
Using the properties of the Modified Bessel functions it can be shown that
\begin{align}
U^{\prime}(x)&=\vert x\vert^n\ \Big(-1+2\Big[\frac{I_{1-\nu}(2\nu x^{\frac{1}{2\nu}})}{I_{-\nu}(2\nu x^{\frac{1}{2\nu}})}\Big]^2\Big) \label{}\\
x \rightarrow &0,\ U^{\prime}(x) \rightarrow -\vert x\vert^n\ +\frac{2\vert x\vert^{2n+2}}{(n+1)^2}\ ;\quad x \rightarrow \infty,\ U^{\prime}(x) \rightarrow
\vert x\vert^n -n \vert x\vert^{\frac{n-2}{2}}+O(x^{-2})\label{}
\end{align}
The SUSY partner potentials with an extra boundstate at $x=0$ for $n=2,\ 4,\ 6$ and $8$ are shown in figures $1$ and $2$.
\begin{figure}
\caption{{\small $U= x^n$ (yellow) and $U^{\prime}$}}
\end{figure}
\begin{figure}
\caption{{\small $U= x^n$ (yellow) and $U^{\prime}$}}
\end{figure}
The pattern of change of $U^{\prime}$ as $n$ increases is evident. $U^{\prime}$ is of the form of a double well with a flat hump and the groundstate energy level grazes the flat hump. The double wells are located close to $x^2=1$ and the flat region widens as n increases. In the limit of large $n$ both $U$ and $U^{\prime}$ become very large as $x^2$ exceeds $1$ and the pockets of the double wells located near $x^2=1$ become narrower and deeper as n increases. In the limit of $n\rightarrow \infty$ both $U$ and $U^{\prime}$ approach infinite walls as $x^2$ exceeds $1$ and $U^{\prime}$ has deep and narrow wells when $x^2$ is just below $1$. For large values of $n$ the groundstate wave function for $U^{\prime}$ will be almost a constant in the flat region $x^2\le 1-\epsilon$, $\epsilon<<1$, and dip smoothly to zero close to $x^2\sim 1$ and will look almost like a square barrier of height $\sim 1/\sqrt{2}$ and width $\sim 2$ confined to the region $x^2<1$.
A Green's function can be constructed in the form
\begin{equation}
G^{\prime}(x,x^{\prime}) = -\frac{1}{\Psi(x)\Psi(x^{\prime})}\int_0^{x_<}\Psi^2(y)\ dy \label{}
\end{equation}
leading to a second representation of the first order sum rule for the symmetric eigenstates of $U$ of the form
\begin{align}
\sum_{n=0}^{\infty} \frac{1}{E_{2n}}&=-\int_0^{\infty} G^{\prime}(x,x)\ dx=\int_0^{\infty}\frac{dx}{xI^2_{-\nu}(2\nu x^{\frac{1}{2\nu}})}\int_0^{x}yI^2_{-\nu}(2\nu y^{\frac{1}{2\nu}})\ dy \label{}\\
&=\big(2\nu\big)^{2-4\nu}\int_0^{\infty}\frac{dz}{z}I^{-2}_{-\nu}(z)\int_0^z y^{4\nu-1} I^{2}_{-\nu}(y) dy \label{}
\end{align}
which converges, for $n>2$, to the value given in (\ref{eq:pg3}). This identification leads to the relation
\begin{align}
\int_0^{\infty} z^{4\nu-1} I_{-\nu}(z)\ K_{\nu}(z) dz &=
\int_0^{\infty}\frac{dz}{z}I^{-2}_{-\nu}(z)\int_0^{z} y^{4\nu-1} I^2_{-\nu}(y)\ dy \label{}\\
&=2^{4\nu-2}\frac{\Gamma(2\nu)\Gamma(\nu)\Gamma(1-4\nu)}{\Gamma(1-3\nu)\Gamma(1-2\nu)},\ 0<\nu<\frac{1}{4} \label{}
\end{align}
A similar analysis of the sum rules for the odd eigenstates can be shown to lead to the relation
\begin{align}
\int_0^{\infty} z^{4\nu-1} I_{\nu}(z)\ K_{\nu}(z) dz &=
\int_0^{\infty}\frac{dz}{z}I^{-2}_{\nu}(z)\int_0^{z} y^{4\nu-1} I^2_{\nu}(y)\ dy \label{}\\
&=2^{4\nu-2}\frac{\Gamma(3\nu)\Gamma(2\nu)\Gamma(1-4\nu)}{\Gamma(1-2\nu)\Gamma(1-\nu)},\ 0<\nu<\frac{1}{4} \label{}
\end{align}
These relations arise because it can be shown using the differential equation satisfied by the Modified Bessel functions that
\begin{equation}
K_{\nu}(z)=I_{\pm \nu}(z) \int_z^{\infty} \frac{dy}{y} I^{-2}_{\pm \nu}(y) \label{}
\end{equation}
\section{Conclusion}
It has been shown that Green's functions for reflectionless potentials lead to a hierarchy of sum rules. The methods of Supersymmetric Quantum Mechanics have been used to study the relation between Green's functions for SUSY partners. Potentials with an extra zero energy bound state in addition to the energy spectrum of power-law potentials have been constructed and analyzed. For the case of a free particle confined to the space $[0,1]$, by considering the solutions at energy $E=0$, a SUSY partner potential can be identified for which the eigenfunctions are shown to be identical to the solutions of the radial Schr\"odinger equation for $l=1$ for a free particle which are of the form $R(kr)\sim kr\,j_1(kr)$ with $k=n\pi,\ n=1,2,\dots$\ . Hence the set of functions $R(n\pi r)$ form a set of orthonormal functions in the interval $[0,1]$. This identification provides an alternative basis for expanding functions in the interval $[0,1]$ instead of the usual Fourier series basis set.
\section*{References}
\noindent Abramowitz M. and Stegun I.A. 1965 {\it{Handbook of Mathematical Functions}} Dover Publications 297, 374, 438
\noindent A.C.Scott, F.Y.E.Chu and D.W.Mclaughlin, Proc. I.E.E.E. {\bf 61}, 1443 (1973)
\noindent Andrianov A A, Borisov N V and Ioffe M V 1984 {\it Phys. Lett.} {\bf 105A} 19
\noindent C.S.Gardner, J.M.Greene, M.D.Kruskal and R.M.Miura, Phys. Rev. Lett. {\bf 19}, 1095 (1967)
\noindent Gradshteyn I.S. and Ryzhik I.M. 1965 {\it{Table of Integrals, Series and Products}} Academic Press 38
\noindent P.D.Lax, Comm. Pure. Appl. Math. {\bf 21}, 467 (1968)
\noindent Sukumar C V 1985a {\it J. Phys. A: Math. Gen.} {\bf 18} 2917
\noindent Sukumar C V 1985b {\it J. Phys. A: Math. Gen.} {\bf 18} 2937
\noindent Witten E 1981 {\it Nucl. Phys. B} {\bf 188} 513
\end{document}
|
\begin{document}
\title{Groups of proper homotopy equivalences of graphs
and Nielsen Realization}
\begin{abstract}
For a locally finite connected graph $X$ we consider the
group $Maps(X)$ of proper homotopy equivalences of $X$. We show
that it has a natural Polish group topology, and we propose these groups as an
analog of big mapping class groups. We prove the Nielsen Realization
theorem: if $H$ is a compact subgroup of $Maps(X)$ then $X$ is
proper homotopy equivalent to a graph $Y$ so that $H$ is realized by
simplicial isomorphisms of $Y$.
\end{abstract}
\section{Introduction}
The group $Out(F_n)$ of outer automorphisms of the free group of rank
$n$ can be thought of as the group of homotopy
equivalences of a finite graph $X$ with $\pi_1(X)\cong F_n$, up to
homotopy.
In this paper we begin the study of the analogous group
associated with a {\it locally} finite graph $X$.
\begin{defn} Let $X$ be a locally finite connected graph.
The {\it mapping class
group} $Maps(X)$ of $X$ is the group of proper homotopy
equivalences of $X$, up to proper homotopy.
\end{defn}
Recall that $f:X\to Y$ is a {\it proper homotopy equivalence} if it is
proper and there is a proper map $g:Y\to X$ such that both $fg$ and
$gf$ are properly homotopic to the identity. For an example of a
proper map which is homotopy equivalence but not a proper homotopy
equivalence see Example \ref{noninvertible}.
We will equip $Maps(X)$ with a natural topology which will make it a
Polish group (recall that this means that the underlying topological
space is separable and admits a complete metric). See Section \ref{topology}.
We thus propose $Maps(X)$ as the ``big $Out(F_n)$'' equivalent of
mapping class groups of surfaces of infinite type (or ``big mapping
class groups''), for a survey of the subject see \cite{survey}.
Comparison with mapping class groups has shown to be
very useful in the study of $Out(F_n)$, and we expect that comparison
between $Maps(X)$ and big mapping class groups will likewise prove
fruitful. We remark that the group of all automorphisms
$Aut(F_\infty)$ of the free group of countable rank has a natural
structure of a Polish group (e.g. it is a closed subgroup of the
Polish group of all permutations of $F_\infty$), and so does
$Out(F_\infty)$ since the group of inner automorphisms is
discrete. However, the groups $Maps(X)$ seem more appealing as they
have a more topological flavor and come in great variety since they
depend on the graph $X$. Even when $X$ is a tree the group $Maps(X)$
is of interest, it coincides with the group $Homeo(\partial X)$ of
homeomorphisms of the space of ends $\partial X$ of $X$ (see Corollary \ref{sigma}).
Note that if $h:X\to Y$ is a proper homotopy equivalence with inverse
$h':Y\to X$, then $f\mapsto hfh'$ induces an isomorphism $Maps(X)\to
Maps(Y)$, which will turn out to be an isomorphism of topological
groups, see Corollary \ref{topiso}.
In this paper we focus on compact subgroups of $Maps(X)$ and prove
the following version of the Nielsen Realization theorem.
\begin{main}
Let $H$ be a compact subgroup of $Maps(X)$. Then there is a locally
finite graph $Y$ proper homotopy equivalent to $X$ so that under the
induced isomorphism $Maps(X)\cong Maps(Y)$ the group $H$ is
realized as a group of simplicial isomorphisms of $Y$.
\end{main}
Recall that the original Nielsen Realization theorem for finite type
surfaces with negative Euler characteristic was proved by Kerckhoff
\cite{kerckhoff}, stating that any finite subgroup of the mapping
class group of a surface of negative Euler characteristic can be
realized by isometries of a complete hyperbolic metric with finite
area. The version for $Out(F_n)$, proved in \cite{NR1,NR2,NR3,NR4},
states that a finite subgroup of $Out(F_n)$ can be realized as a group
of simplicial isomorphisms of a finite graph with fundamental group
$F_n$. For big mapping class groups Nielsen Realization was proved
recently by Afton-Calegari-Chen-Lyman \cite{rylee-etal}. Among the
consequences is that compact subgroups of big mapping class groups are
finite. This is not the case for $Maps(X)$. For example, let $X$ be
the graph obtained from $[0,\infty)$ by attaching two loops at every
integer point. The group of symmetries of this graph is the compact
group $H^{\infty}= \prod_{i=1}^\infty H$, where $H$ is the group of
symmetries of order 8 of the wedge of two circles. Note that $X$ is
proper homotopy equivalent to the graph $Y$ obtained from
$[0,\infty)$ by attaching three circles at every integer point, and
the group of symmetries of $Y$ is $G^\infty$, where $G$ is the
group of order 48 of symmetries of the wedge of three circles. The
groups $Maps(X)$ and $Maps(Y)$ are isomorphic as topological
groups, but the realization using different graphs displays
different compact subgroups.
\vskip 1cm
{\bf Plan of the paper.} We start by recalling the Classification
theorem for locally finite graphs in Section
\ref{s:classification}. We also introduce the notation and review the
homotopy extension theorem in our setting and some of its
consequences. In Section \ref{s:algebra} we explore the natural
homomorphism $Maps(X)\to Out(\pi_1(X))$ and in particular we look for
conditions that guarantee that a proper map $X\to X$ is properly
homotopic to the identity. In the case when $X$ is a core graph
(i.e. it is the union of immersed loops) the criterion is particularly
simple: if $f$ induces the identity in $Out(\pi_1(X))$, it is properly
homotopic to the identity. In the other extreme, when $X$ is a tree,
$f$ is properly homotopic to the identity whenever it fixes the end
space $\partial X$. The general case is more complicated since rays
attached to the core graph could wrap around the core, but we show
that if $f$ is the identity in $\pi_1$, fixes the ends, and preserves
proper lines, then $f$ is properly homotopic to the identity.
Section \ref{topology} is devoted to defining the topology on
$Maps(X)$ and establishing that it is a Polish group. As long as $X$
is of infinite type, we show that the underlying topological space of
$Maps(X)$ is homeomorphic to the set of irrationals, but of course
the group structure will depend on $X$.
The remainder of the paper is devoted to the proof of the Main
Theorem. This is also divided into cases, with the two extremes of $X$
being a core graph and being a tree discussed first. When $X$ is a
tree, by averaging we find an $H$-invariant metric on the space of
ends $\partial X$. From this we construct $H$-invariant finite covers by
disjoint clopen sets that refine each other and with mesh going to
0. The mapping telescope of this sequence is the desired tree $Y$.
The heart of the argument is the case when $X$ is a core graph. For
concreteness imagine that $X$ is the graph obtained from the ray
$[0,\infty)$ by attaching a circle at every integer point. We then
cover $[0,\infty)$ by large intervals $J_1,J_2,\cdots$ so that
$J_i\cap J_j=\emptyset$ if $|i-j|>1$ and so that $J_i\cap J_{i+1}$
are large as well, controlling the properness of elements of
$H$. Each $J_i$ and $J_{i}\cap J_{i+1}$ defines a subgraph of $X$
and a free factor of $\pi_1(X)$. By intersecting the
$H$-translates of these free factors we obtain $H$-invariant free
factors $F_i^*$ and $F_{i,i+1}^*$ respectively. Using Nielsen
Realization in finite rank we find finite graphs $\Gamma_{i,i+1}$
where $H$ acts by simplicial isomorphisms realizing
$F_{i,i+1}^*$. We then use the Relative Nielsen Realization, due
to Hensel-Kielak \cite{hensel-kielak}, to construct finite graphs
$\Gamma_i$ where $H$ acts by simplicial isomorphisms realizing
$F_i^*$, and that contain $\Gamma_{i-1,i}$ and $\Gamma_{i,i+1}$ as
disjoint invariant subgraphs. Finally, we glue the $\Gamma_i$'s
along these subgraphs to obtain $Y$. In general, when $X$ is a
tree with circles attached at vertices, the above outline still
works, but instead of free factors we have to consider free factor
systems which makes the notation a bit more complicated.
In the general case, we first use the case of core graphs to
reduce to the situation where $H$ is already acting on the core by
simplicial isomorphisms. The graph $X$ is obtained from the core
by attaching trees, and the central part of the proof in this case
is to see how to attach new trees in an equivariant fashion. The
new trees are going to be mapping telescopes made of partition
elements (as in the tree case) subordinate to suitable clopen sets
in $\partial X$. To
accomplish this we prove a fixed point theorem (see Lemma
\ref{fixed}) that provides a suitable point in the core where
these telescopes are attached.
In order to verify that the action of $H$ on the new graph $Y$ is
conjugate to the given action on $X$ we use the machinery
developed in Section \ref{s:algebra}, see Corollary
\ref{criterion}.
\section{The classification of locally finite connected
graphs}\label{s:classification}
Let $X$ be a locally finite infinite connected graph. The fundamental
group $\pi_1(X)$ is free and we denote its rank by $g(X)\in
\{0,1,2,\cdots,\infty\}$ and think of it as the ``genus'' of
$X$. Let
\[ \partial X=\varprojlim_{K\subset X}\pi_0(X\smallsetminus K) \]
be the space of ends of $X$ with its
usual inverse limit topology, where the limit runs over all compact
subsets $K\subset X$. Then $\partial X$ is a totally disconnected compact
metrizable space (recall that these are precisely the spaces
homeomorphic to a closed subset of the Cantor set). The union $\hat
X=X\sqcup\partial X$ has a natural topology that makes it compact; it is the
{\it Freudenthal} (or {\it end}) compactification of $X$. The basis of
open sets in $\hat X$ consists of open sets in $X$, and for every
compact $K\subset X$ and every
component $U$ of $X\smallsetminus K$ the set $\hat U$ which is the
union of $U$ and the set of ends that map to $U$. We will sometimes
abuse notation and talk about a neighborhood $U$ in $X$ of an end
$\beta\in\partial X$; what we mean is the intersection of such a
neighborhood $\hat U$ in $\hat X$ with $X$. The end compactification
can also be constructed in the same way for connected, locally finite
cell complexes. Every proper map $f:X\to Y$ between such complexes
extends continuously to a map $\hat X\to\hat Y$ between their end
compactifications. For simplicity we will usually denote this extension, as
well as its restriction $\partial X\to\partial Y$, by $f$ as well. Properly
homotopic maps induce the same map between the boundaries.
Denote by
$X_g\subset X$ the {\it core} of $X$, i.e. the smallest subgraph that
contains all immersed loops. Thus $X_g=\emptyset$ precisely when $X$
is a tree. Let $\partial X_g\subseteq \partial X$ be the space of ends of $X_g$;
it is a closed subspace of $\partial X$ (and consists of ends ``accumulated
by genus''). Thus $\partial X_g=\emptyset$ precisely when $g(X)<\infty$. In
general, $X$ is either a tree or is obtained from $X_g$ by attaching
trees.
\begin{defn}[Characteristic pairs]
If $g(X)<\infty$, its characteristic pair is $(\partial X,g(X))$, otherwise
its characteristic pair is $(\partial X, \partial X_g)$.
\end{defn}
The following is the analog of Ker\'ekj\'art\'o's classification
theorem for surfaces and was proved by
Ayala-Dominguez-Marquez-Quintero \cite{admq}.
\begin{thm}\label{graphClassification}
Let $X, Y$ be locally finite connected graphs. Then a homeomorphism of
characteristic pairs extends to a proper homotopy equivalence. If $X$
and $Y$ are trees the extension is unique up to proper homotopy.
\end{thm}
In the case when the genus is finite, a homeomorphism of
characteristic pairs means a homeomorphism between the spaces of ends
together with the information that the genera are equal.
If $f:X\to X$ is a proper homotopy equivalence, then the extension
$\partial X\to\partial X$ is a homeomorphism which preserves $\partial X_g$. Thus
we have a well defined homomorphism
\[ \sigma:Maps(X)\to Homeo(\partial X,\partial X_g) \]
to the group of homeomorphisms of the pair $(\partial X,\partial X_g)$.
The following is then an immediate corollary of the classification theorem.
\begin{cor}\label{sigma}
The homomorphism $\sigma$ is always surjective.
If $X$ is a tree the map $\sigma:Maps(X)\to Homeo(\partial X)$ is
an isomorphism.
\end{cor}
In light of this we will usually focus on the kernel of $\sigma$,
which is the {\it pure mapping class group} of $X$, and we denote it
by $PMaps(X)$.
\begin{defn}
The pure group $PMaps(X)$ is the subgroup of $Maps(X)$ consisting of
$f \in Maps(X)$ so that $f:\partial X\to\partial X$ is the identity.
\end{defn}
We shall make use of the following concepts and lemmas from \cite{admq}.
\begin{defn}[Standard Models]
The Cantor tree $T$ is the rooted binary tree embedded in the plane so
that its boundary is the standard ternary Cantor set in $[0,1] \times
\{0\}$. For each closed subset $B$ of the Cantor set, let $T_B$ be
the union of the set of rays in $T$ initiating at the root and terminating in $B$. For a characteristic pair $(B, g)$ where $B$ is a closed subset of the Cantor set and $g$ is a natural number let $X_{(B,g)}$ be the tree $T_B$ with $g$ loops attached at the root. The characteristic set of $X_{(B,g)}$ is $(B,g)$.
For two closed non-empty subsets $A \subseteq B$ of the Cantor set, let $X_{(B,A)}$ be the tree $T_B$ with a one edge loop attached at each vertex of the subtree $T_A$. Again, the characteristic set of $X_{(B,A)}$ is $(B,A)$. These trees are called the Standard Models.
\end{defn}
\begin{figure}[ht]
\centering
\begin{minipage}[t]{0.3\linewidth}
\includegraphics{finitefg}
\caption{A graph with a finitely generated fundamental group. In this case $\partial X_g = \emptyset$ and the space of ends can be any closed subset of the Cantor set.}
\label{fig:minipage1}
\end{minipage}
\quad
\begin{minipage}[t]{0.3\linewidth}
\includegraphics{coreG}
\caption{This is a core graph, $\partial X= \partial X_g$. By deleting a part of the Cantor tree we get a standard model for $(A,A)$ where $A$ is any closed subspace of the Cantor set.}
\label{fig:minipage2}
\end{minipage}
\quad
\begin{minipage}[t]{0.3\linewidth}
\includegraphics{genralgraph}
\caption{In this case both $\partial X_g$ and its complement $DX$ are non-empty. By deleting loops/subtrees from the space in Figure \ref{fig:minipage2} we get a model for any pair of closed subsets $\partial X = A \supset B = \partial X_g$ of the Cantor set.}
\label{fig:minipage3}
\end{minipage}
\end{figure}
\begin{cor}
Every locally finite connected infinite graph is proper homotopy equivalent to a Standard Model.
\end{cor}
In particular, we can assume that $X$ has no valence 1 vertices, and
that $X=X_g$ if $\partial X_g=\partial X$.
The following is the standard Homotopy Extension Theorem, see
\cite{hatcher}. We will use it often, usually without saying it. Given a subgraph $Y \subset X$, the frontier of $Y$ is the set of vertices in $Y - int(Y)$, denoted $Fr(Y)$.
\begin{prop}\label{pasting_homotopies}
Let $Y$ be a subgraph of $X$, let $H \colon Y \times I \to X$ be a
proper homotopy from
$h$ to $f$. Let $u \colon X \to X$ be a proper map so that $u|_Y =
h$. Then there is a proper homotopy $H' \colon X\times I \to X$
extending $H$.
Moreover, if $K \subset X$ is a subset so that $H(Fr(Y) \times I)
\cap K = \emptyset$ and $u(\overline{X\smallsetminus Y}) \cap K =
\emptyset$ then $H'((X\smallsetminus Y) \times I) \cap K = \emptyset$.
\end{prop}
\begin{proof}
First define the extension on the vertices $v$ of $X$ outside of $Y$
by $H'(v,t)=u(v)$. Now let $e$ be an edge with endpoints $a,b$. If
both $a,b$ are outside of $Y$ define $H'$ on $e\times I$ to be
stationary as well: $H'(x,t)=u(x)$ for $x\in e$. Otherwise let
$\alpha:[0,1]\to e$ be a parametrization of $e$. Let $P$ be a
retraction from $I \times I$ to $(\{0\} \times I) \cup (I \times
\{0\}) \cup (\{1\} \times I)$. Then $e\times I$ can be identified
with $I\times I$ (with $0\times I$ and $1\times I$ identified if $e$
is a loop). The homotopy is already defined on $(\{0\} \times I)
\cup (I \times \{0\}) \cup (\{1\} \times I)$ and we extend it to
$I\times I$ by composing with $P$.
We leave the verification that $H'$ is proper and the last sentence
to the reader.
\end{proof}
We will also sometimes have a need to restrict $f\in Maps(X)$ to an ``invariant''
subgraph $Y\subset X$, e.g. $Y=X_g$. Since $f$ is defined only up to proper
homotopy, $Y$ will usually not satisfy $f(Y)\subseteq Y$, but this
will be true after a proper homotopy $H$. We will also want to know that
the proper homotopy class of the restriction is independent of the
choice of $H$. A bad example to keep in mind is the projection
$\pi:{\mathbb R}^2\to{\mathbb R}$. There are proper maps $f:{\mathbb R}\to{\mathbb R}^2$ such that $\pi f$
is not proper, and there are pairs of properly homotopic maps
$f,g:{\mathbb R}\to{\mathbb R}^2$ such that $\pi f$ and $\pi g$ are proper, but not
properly homotopic.
\begin{lemma}\label{restriction}
Let $Y$ be a subgraph of $X$ such that inclusion $Y\hookrightarrow
X$ induces an injection $\partial Y\hookrightarrow\partial X$. Let
$\pi:X\to Y$ be a retraction with the following property: for every
$\beta\in\partial Y$ and every neighborhood $U$ of $\beta$ in $Y$
there is a neighborhood $V$ of $\beta$ in $X$ so that
$\pi(V)\subseteq U$. Let $Z$ be a locally compact metrizable
space.
\begin{enumerate}[(i)]
\item If $f:Z\to X$ is a proper map such that $f(\partial
Z)\subseteq \partial Y$ then $\pi f:Z\to Y$ is proper.
\item If $f,g:Z\to Y$ are proper maps such that they are
properly homotopic within $X$, then they are properly
homotopic within $Y$.
\end{enumerate}
\end{lemma}
\begin{proof}
To prove (i), note that if $\beta\in \partial Z$ then $f$ sends
points near $\beta$ to points near $f(\beta)$, so by our assumption
the same is true for the composition $\pi f$. For (ii), apply (i) to
a proper homotopy $H:Z\times I\to X$. The fact that $H(\partial
(Z\times I))\subseteq \partial Y$ follows from the assumption that
$\partial Y\subseteq \partial X$.
\end{proof}
\begin{prop}\label{extension}
Suppose $X,Y$ are two locally finite connected graphs, $f:X_g\to Y$
a proper map that induces $\overline f:\partial X_g\to\partial Y$ and let
$\overline F:\partial X\to\partial Y$ be an extension of $\overline f$. Then
there is a proper map $F:X\to Y$ that extends $f$ and induces
$\overline F$.
\end{prop}
\begin{proof}
First consider the case when $X$ and $Y$ are trees, and choose base
vertices $x_0\in X$, $y_0\in Y$. We are given $\overline F:\partial X\to\partial Y$
and we have to construct a proper map $F:X\to Y$. If $\overline F$ is a
homeomorphism the existence of $F$ follows from the Classification
theorem (and in fact it is unique up to proper homotopy). In general,
we can construct $F$ as follows. When $v$ is a vertex of $X$ let the
shadow $Sh_X(v)\subseteq \partial X$ be the set of endpoints of rays that
start at $x_0$ and pass through $v$. When $A\subseteq \partial X$ contains
at least two points, let $\sup A$ be the vertex $v\in X$ with the
largest distance $|v|$ from $x_0$ satisfying $Sh_X(v)\supseteq A$. If
$A=\{\beta\}\subseteq \partial X$ is a single point, define $\sup A$ to be
$\beta$ and let $|\beta|=\infty$. Make the similar definition for
subsets of $\partial Y$. For a vertex $v\in X$ consider $\sup
\overline F(Sh_X(v))$. If this is a vertex $w$ at distance $|w|\leq |v|$
from $y_0$ then define $F(v)=w$, and otherwise define $F(v)$ as the
vertex at distance $|v|$ from $y_0$ along the segment (or ray)
$[y_0,w]$. Extend $F$ linearly to the edges of $X$.
Now consider the general case. We may assume that $f$ sends vertices
to vertices. First suppose that there is a maximal tree $T\subseteq Y$
such that $\partial T=\partial Y$. Since $X$ is simplicial and locally finite,
$X\smallsetminus X_g$ is a countable (or finite) union of trees, and let
$T_i$ for $i \in {\mathbb N}$ be the closure of a component of $X\smallsetminus
X_g$. We
denote by $x_i$ the point of intersection of $T_i$ and $X_g$, so $x_i$ is
the root of $T_i$. For each $i \in {\mathbb N}$ let $F_i \colon T_i \to
T\subseteq Y$ be the map constructed in the first paragraph so that
$F_i(x_i) = f(x_i)$ and $\partial F_i = \overline F|_{\partial T_i}$. We
define $F$ by gluing the maps $f$ and $F_i$ for all $i \in {\mathbb N}$.
One way to avoid constructing a special maximal tree is as
follows. Let $Z$ be a Standard Model proper homotopy equivalent to
$Y$. Then the underlying tree in $Z$ has the same ends. So we may
apply the above paragraph to the composition $X_g\to Y\to Z$ and get
an extension $X\to Z$, which we then compose with the inverse proper homotopy
equivalence $Z\to Y$ to get $F:X\to Y$. The map $F$ may not agree with
$f$ on $X_g$ but it is properly homotopic to it, so we conclude by
applying the Homotopy Extension Theorem (Proposition
\ref{pasting_homotopies}).
\end{proof}
\section{Relationship with $Out(\pi_1(X))$}\label{s:algebra}
In this section we will investigate the relationship between $Maps(X)$
and $Out(\pi_1(X))$. First, there is a natural homomorphism
\[ \Psi:Maps(X)\to Out(\pi_1(X)) \]
that sends $h\in Maps(X)$ to
(the outer automorphism class of) $h_*:\pi_1(X)\to\pi_1(X)$.
If the
genus of $X$ is infinite, $\Psi$ is not onto since there are
automorphisms not realized by proper maps. On the other hand, $\Psi$
is onto when $g(X)<\infty$. We will show first that $\Psi$ is
injective if $X$ is a core graph (meaning $X=X_g$), and in general we
will describe the kernel of $\Psi$. The next theorem is analogous
to the fact that a homeomorphism of a surface with nonabelian
fundamental group that induces identity in $\pi_1$ is isotopic to the
identity, see \cite{epstein,Hernandez-Valdez-Morales}.
\begin{thm}\label{id}
Suppose $X$ is a core graph and let $f \colon X \to X$ be a proper map
so that $f_*=id\in Out(\pi_1(X))$.
Then $f$ is properly homotopic to the identity on $X$. In particular,
$\Psi$ is injective.
\end{thm}
\begin{proof}
In the proof we will use the fundamental property of graphs that
disjoint nontrivial loops are not homotopic and that nullhomotopic
loops can be nullhomotoped within their images. We may assume that
$X$ is a Standard Model. Note that $f$ necessarily induces the
identity on the space of ends. Indeed, if $f(\beta)\neq \beta$ for an
end $\beta$, there will be an immersed loop $\alpha$ in $X$ near $\beta$ such
that $f(\alpha)$ is disjoint from $\alpha$, and in particular $f$ does
not fix the conjugacy class of $\alpha$.
Next, we can assume, by applying a proper homotopy (using Proposition
\ref{pasting_homotopies}) that $f$ fixes all
vertices and moreover, by homotoping the root $v$ around a loop, that
$f_*:\pi_1(X,v)\to \pi_1(X,v)$ is the identity.
We will now construct a proper homotopy between the identity and $f$. If
$w$ is a vertex, let $e_1e_2\cdots e_k$ be the edge path in the
underlying tree $T$ from $v$ to $w$ and define $H:\{w\}\times I\to X$
to be the tightened path $\bar e_k \dots \bar e_1 f(e_1) \dots
f(e_k)$. Also define $H$ on $X\times \{0\}$ to be the identity and on
$X\times\{1\}$ to be $f$ (see Figure \ref{homotopyFig}). We will argue below that $H$ defined so far
is proper. If $e$ is an edge in $T$ then $H$ is defined on
$\partial(e\times I)$ and is nullhomotopic on this loop. Thus we may
extend $H$ to all such 2-cells keeping the image contained in the
image of $\partial (e\times I)$. Finally, $H$ extends to the cylinders
$x_w\times I$, where $x_w$ is the loop attached at $w$, using the fact
that $f(e_1\cdots e_k x_w \overline e_k\cdots \overline e_1)\simeq
e_1\cdots e_k x_w \overline e_k\cdots \overline e_1$. We again ensure
that the image of the extension is contained in the image of the
boundary of the 2-cell, so the extended $H$ will be proper.
\begin{figure}[ht]
\centering
\includegraphics{homotopyFig}
\caption{The homotopy $H$. The brackets signify that we take the immersed path homotopic to the given one rel endpoints.\label{homotopyFig}}
\end{figure}
It remains to show that $H$ defined on the 1-skeleton is proper. This
is where we will use the assumption that $X$ is a core graph. Let
$K\subset X$ be a finite subgraph. Since $f$ is proper, there is a
finite subgraph $L\subset X$ such that $f(X\smallsetminus L)\subseteq
X\smallsetminus K$. Let $w$ be a vertex outside of $L$, $e_1\cdots
e_k$ the edge path from $v$ to $w$ in $T$, and $x_w$ the loop attached
to $w$. Thus $f(x_w)\cap K=\emptyset$. From the fact that
\[ f(e_1 \dots e_k) f(x_w) f(\bar e_k \dots \bar e_1) \simeq e_1 \dots
e_k \cdot x_w \cdot \bar e_k \dots \bar e_1 \]
we see that after
tightening $f(e_1 \dots e_k)$ does not cross any loops attached to
vertices in $K$. For example, the fundamental group can be thought of
as the free group on the attached loops, and if $y$ is the last loop
in $K$ crossed by $[f(e_1\cdots e_k)]$ the word $[f(e_1\cdots e_k)]\cdot
[f(x_w)]\cdot [f(\bar e_k\cdots \bar e_1)]$ could be tightened by
tightening the portion between the corresponding $y$ and $\bar y$, and
would not yield the trivial word.
Therefore $H(\{w\}\times I)$ is
disjoint from $K$.
\end{proof}
The fundamental group of $X$ does not ``see'' the ends of $X$ not
accumulated by genus. For example, if $X$ is a tree the mapping class
group $Maps(X)$ is isomorphic to the homeomorphism group
$Homeo(\partial X)$ and may be quite nontrivial, while
$\pi_1(X)=1$. It is therefore natural, when studying the kernel of
$\Psi$, to restrict to the pure mapping class group $PMaps(X)$.
We will now describe the kernel of the restriction
\[ \Psi_P:PMaps(X)\to Out(\pi_1(X)) \]
of $\Psi$. This is well-known
when $X$ is obtained from a finite graph, say of rank $r$ so that
$\pi_1(X)=F_r$, by attaching a finite number, say $n$, of rays. These
rays can be thought of equivalently as distinguished points in the
finite graph. When $n=1$ we have $Maps(X)\cong Aut(F_r)$ and when
$n>1$ then $PMaps(X)\cong Aut(F_r)\ltimes F_r^{n-1}$ with the natural
diagonal action of $Aut(F_r)$ on $F_r^{n-1}$. The $F_r^{n-1}$ factor
can be thought of as measuring the marking of $(n-1)$
distinguished points with respect to the remaining distinguished
point, which is considered to be the basepoint. This will be
generalized in Corollary \ref{semidirect}. When $r\geq 2$ the kernel
of $\Psi_P$ is isomorphic to $F_r^{n-1}$,
represented by maps that are
the identity on the finite graph and send each ray $R$ to a ray of the
form $w_R R$ for some loop $w_R$ in the finite graph.
Let $X$ be a locally finite graph and we assume $DX := \partial X - \partial X_g
\neq \emptyset$ and choose $\alpha_0 \in DX$. This will be the basepoint
``at infinity''. Let $\pi_1(X,\alpha_0)$ be the set of proper homotopy
classes of lines $\sigma \colon {\mathbb R} \to X$ so that $\lim_{t \to -\infty}
\sigma(t) = \lim_{t \to \infty} \sigma(t) = \alpha_0$, with concatenation as
the group operation. Notice that
concatenation makes sense since $\alpha_0 \in DX$ and any two rays
limiting to $\alpha_0$ eventually coincide, up to a proper homotopy. Given
$x_0\in X$, there is an isomorphism
\begin{equation*}\label{pi1eq} \pi_1(X,x_0) \to
\pi_1(X,\alpha_0)\end{equation*} given by $\gamma
\mapsto \bar \rho_{x_0} \gamma \rho_{x_0}$ where $\rho_{x_0}$ is a fixed ray in $X$ from $x_0$ to $\alpha_0$.
Moreover, if $f \in Maps(X)$ fixes $\alpha_0$ then $f$ induces a map $f_0
\in Aut(\pi_1(X,\alpha_0))$.
We first consider the case of the graph $X=X_g^*$ obtained from a core
graph (or a point) $X_g$ by attaching a single ray.
\begin{lemma}\label{Ystar}
For $X=X_g^*$ the kernel of $\Psi:Maps(X)\to Out(\pi_1X)$ (or
$\Psi_P:PMaps(X)\to Out(\pi_1X)$) is isomorphic to
$\pi_1(X)=\pi_1(X_g)$ when this group is nonabelian, and otherwise
it is trivial.
\end{lemma}
When the genus $n=g(X)$ is finite, the lemma says that the kernel
of $Aut(F_n)\to Out(F_n)$ is $F_n$ for $n>1$ and otherwise it is
trivial.
\begin{proof}
Let $f\in Maps(X)$ induce the identity in $Out(\pi_1(X))$. Using Lemma
\ref{restriction} applied to the nearest point projection $\pi:X\to
X_g$ we see that after a
proper homotopy we may assume that $f$ preserves the core $X_g$, and
thus by Theorem \ref{id} we may assume $f$ is the identity on
$X_g$.
Let $\rho_0$ denote the geodesic ray in $X$ that intersects $X_g$ at one point and such that $X = X_g \cup \rho_0$. Let $c(f)$ be the homotopy class of $\bar \rho_0 f(\rho_0)$ in $\pi_1(X, \alpha_0)$.
Then $f_0 \in Aut(\pi_1(X, \alpha_0))$ is just conjugation by $c(f)$.
The map $f\mapsto c(f)$ is a homomorphism $Ker(\Psi)\to
\pi_1(X,\alpha_0)$. When $\pi_1(X)$ is non-abelian it is an isomorphism.
If $X_g$ is a circle, $f$ can be homotoped to the
identity by a homotopy that rotates the circle to unwind the
attached ray, so $Ker(\Psi)$ is trivial.
\end{proof}
We now consider the general case. Let $X$ be a Standard Model which is
not a tree, let $X_g$ be the core subgraph of $X$, and we assume $DX
\neq \emptyset$. Fix $\alpha_0 \in DX$ and let $\rho_0$ be the ray in
$X$ intersecting $X_g$ in a point and limiting to $\alpha_0$. Let
$T\subset X$ be the underlying tree, and let $T_g=X_g\cap T$ be the
underlying tree in the core, and likewise let $T_g^*=T_g\cup\rho_0$ be
the underlying tree in $X_g^*=X_g\cup\rho_0$.
Thus $X_g\subset X_g^*\subseteq X$ and both inclusions
are homotopy equivalences. We note that the restriction maps
\[ PMaps(X)\to PMaps(X_g) \]
and
\[ PMaps(X)\to PMaps(X_g^*) \]
are well-defined by Lemma \ref{restriction}, where for the retraction
$\pi$ we take the nearest point projection.
\begin{defn}
The group
$\mathcal{R}$ as a set is the collection of functions $h \colon DX \to \pi_1(X_g^*,
\alpha_0)$ satisfying
\begin{itemize}
\item[(R0)] $h(\alpha_0)=1$.
\item[(R1)] $h$ is locally constant.
\item[(R2)] For all $\beta \in \partial X_g$ and every neighborhood
$U$ of $\beta$ in $X$ there exists a ray $\rho_U$
from a point in $U$ to $\alpha_0$ which is the concatenation of a
segment in $T$ and $\rho_0$ so that if $(\beta_i)_{i=1}^\infty
\subset DX$ limits to $\beta$ then for large enough $i$,
$h(\beta_i)= \bar\rho_U*\gamma_i*\rho_U$ where $\gamma_i$ is a loop
contained in $U$.
\end{itemize}
The group operation in $\mathcal{R}$ is pointwise multiplication in
$\pi_1(X,\alpha_0)$.
\end{defn}
\begin{defn}\label{defOfPhi}
We start by assigning an element $\Phi_T(f)\in\mathcal{R}$ to certain proper
maps $f:X\to X$. More precisely assume:
\begin{enumerate}[(i)]
\item $f:\partial X\to\partial X$ is the identity, and
\item either $DX$ is compact or
$f_*:\pi_1(X,\alpha_0)\to\pi_1(X,\alpha_0)$ is the identity.
\end{enumerate}
Note that we do not assume that $f$ is a proper homotopy equivalence,
cf. Example \ref{noninvertible}.
If $\beta\in DX\smallsetminus\{\alpha_0\}$
let $\ell_\beta$ be the bi-infinite line in $T$ connecting $\alpha_0$ to
$\beta$.
The map
\[ \begin{array}{l}
\Phi_T(f)=h \colon DX \to \pi_1(X,\alpha_0) \\[0.2cm]
h(\beta) = \left\{
\begin{array}{ll}
\ell_\beta f(\bar\ell_\beta) & \beta \in DX-\{ \alpha_0 \}, \\
1 & \beta = \alpha_0
\end{array} \right.
\end{array} \]
\end{defn}
We claim that indeed $h \in \mathcal{R}$. That $h$ is locally constant follows from the
observation that if $\beta\in DX$ there is a neighborhood $U{\sigmama}ubset DX$ of
$\beta$ so that if ${\varepsilon}ll$ is a line joining two distinct points in $U$
then $f({\varepsilon}ll)$ is properly homotopic to ${\varepsilon}ll$.
Condition (R2) is vacuous if $DX$ is compact so suppose $f_*=id$. Thus
we may assume that $f$ is identity on $X_g^*$. Let ${\varepsilon}ll_\beta$ be the
line in $T$ from $\alpha_0$ to $\beta$, and similarly let
${\varepsilon}ll_{\beta_i}$ be the line in $T$ from $\alpha_0$ to $\beta_i$. Note
that the lines ${\varepsilon}ll_{\beta_i}$ converge ${\varepsilon}ll_\beta$. The map $f$
fixes ${\varepsilon}ll_\beta$ and thus for large $i$ takes ${\varepsilon}ll_{\beta_i}$ to a
line that agrees with ${\varepsilon}ll_{\beta_i}$ outside a given neighborhood
$U$ of $\beta$. This proves (R2).
\begin{thm}\label{kernel}
Assume $f \colon X\to X$ is proper, $\alpha_0\in DX$, and
$f_*=id \colon \pi_1(X,\alpha_0)\to\pi_1(X,\alpha_0)$. If $f \colon \partial X\to\partial X$ is
the identity and
$\Phi_T(f)=1$
then $f$ is properly homotopic to the identity. Moreover,
\[\Phi_T:Ker(PMaps(X)\to PMaps(X_g^*))\to\mathcal{R}\] is an isomorphism.
\end{thm}
\begin{proof}
Let ${\mathcal K}=Ker(PMaps(X)\to
PMaps(X_g^*))$.
We start by arguing that
$\Phi_T \colon {\mathcal K} \to \mathcal{R}$ is a homomorphism. Let $f,g\in{\mathcal K}$, and we
assume that $f,g$ are the identity on $X_g^*$. Let $\ell_\beta$ be the line
as in the definition of $\Phi_T$. We have
\[
\Phi_T(gf)(\beta)=\ell_\beta \cdot gf(\bar\ell_\beta)=\ell_\beta\cdot
g(\bar\ell_\beta) \cdot g(\ell_\beta \cdot
f(\bar\ell_\beta))=\Phi_T(g)(\beta)\cdot
g(\Phi_T(f)(\beta))
\]
which equals $\Phi_T(g)(\beta)\cdot \Phi_T(f)(\beta)$
since $g$ acts as the identity on $\pi_1(X,\alpha_0)$.
We next show that if $\Phi_T(f)=1$, then $f\simeq id$. We may assume
$f$ is the identity on $X_g^*$. Consider the universal cover $\tilde X$ of
$X$ and let $\tilde X_g$ be the lift of $X_g$ to $\tilde X$ (which is
connected). Let $\tilde f$ be the lift of $f$ that restricts to the
identity on $\tilde X_g$. The assumption that $\Phi_T(f)(\beta)=1$ for
every $\beta\in DX$ amounts to saying that $\tilde f$ fixes the ends
of $\tilde X$. The straight line homotopy $\tilde H$ from $\tilde f$
to $id$ is equivariant with respect to the deck group and descends to
the homotopy $H \colon X \times I \to X$ from $f$ to $id$. It remains
to show that $H$ is proper. It is useful to describe $H$ directly. If
$x\in X$ consider the ray $\rho_x$ from $x$ to $\alpha_0$ in $T$. The
path $H(\{x\}\times I)$ is the tightened path $f(\rho_x) \bar
\rho_x$.
Let $K$ be a compact set in $X$. As in the proof of Theorem \ref{id}
it is enough to show that there is a compact set $S$ so that for each
vertex $v$ outside $S$, $H(\{v\} \times I) \cap K = \emptyset$. We
show that for each $\beta \in \partial X$ there is a neighborhood $U
\subset X$ such that for all vertices $v \in U$, $H(\{v\} \times I)
\cap K = \emptyset$. We will be done by compactness of $\partial
X$. Let $L \supset K$ be compact and so that $f(X-L) \cap K =
\emptyset$. If $\beta \in \partial X_g$ then by property (R2) there
is a $U \subset X-L$ so that for all $\delta \in \partial U$,
$h(\delta) = \bar\rho_U * \gamma_\delta * \rho_U$ and $\gamma_\delta
\subset U$. Let $v$ be a vertex in $U$. Then $f(\rho_v), \rho_v$ agree
on their terminal segment which is $\rho_U$ so after tightening,
$f(\rho_v) \bar \rho_v$ is contained in $U$ and does not intersect
$K$. If $\beta \in DX$ let $U$ be the complementary component of $L$
containing $\beta$ and let $v$ be a vertex of $U$ and $\ell_v =\bar
\rho_v \gamma_v$ a bi-infinite geodesic from $\alpha_0$ to $\partial U -
\{\alpha_0\}$ (if $U = \{\alpha_0\}$ then since $f=id$ on $X_g^*$ there is
nothing to prove). The image of $\gamma_v$ is contained in $U$. By
assumption $f(\ell_v)$ is homotopic to $\ell_v$ hence $f(\bar \rho_v)
f(\gamma_v) \sim \bar \rho_v \gamma_v$ so $\rho_v f(\bar \rho_v)$ is
homotopic to $\gamma_v f(\bar \gamma_v)$ and the latter is completely
disjoint from $K$.
\begin{comment}
Assume $x\in X$ is in a small neighborhood of an end
$\beta\in \partial X$. If $\beta\in\partial X_g$ then $f(\rho_x)$ and
$\rho_x$ agree outside a bit larger neighborhood by the assumption
that $f$ fixes $\beta$ and $X_g^*$, so the path $H(\{x\}\times I)$ is
also close to $\beta$. If $\beta\in DX$ consider the concatenation
$\bar\rho_x \gamma$ where $\gamma$ is the ray from $x$ to $\beta$ in
$T$. This concatenation is properly homotopic to the line $\ell_\beta$
from the definition of $\Phi_T$, and so by our assumption
$f(\bar\rho_x \gamma)\simeq \bar\rho_x\gamma$. Since $f$ fixes
$\beta$, $f(\gamma)$ is in a bit larger neighborhood of $\gamma$, so
$f(\rho_x)\bar\rho_x\simeq f(\gamma)\bar\gamma$ is also close to
$\beta$.
\end{comment}
Finally, we argue that $\Phi_T:{\mathcal K}\to\mathcal{R}$ is onto. Let $h\in\mathcal{R}$. We
define a proper homotopy equivalence $f \colon X \to X$. Let
$f|_{X_g^*} = id$. Let $S_w$ be the tree attached to a vertex $w\in
X_g^*$, i.e. the closure of a component of $T\smallsetminus
T_g^*$. Then $\partial S_w$ is compact and from the fact that $h$ is
locally constant we see that there is some distance $C$ so that for
every edge $e\subset S_w$ at distance $C$ from $w$, $h$ is constant on
the ends of the unbounded component of $S\smallsetminus e$. Define
$f|S_w$ to be the identity on all edges of $S$ other than those at
distance $C$ from $w$. On such an edge $e$ define $f$ to be the
immersed path with the same endpoints as $e$ and so that if
$\ell_\beta$ is a line that crosses $e$ then $f(\ell_\beta)
\bar\ell_\beta\in\pi_1(X,\alpha_0)$ represents $h(\beta)$
(equivalently, $\bar\rho_e f(e)\bar e\rho_e$ represents $h(\beta)$ for
a suitable ray $\rho_e$ in $T$ with $e$ oriented towards
$\beta$). Since $h(\alpha_0)=1$ the map $f$ will be the identity on
$X_g^*$, as no edges $e$ where we modify the identity are along the
ray to $\alpha_0$. We must show that $f$ is a proper map. Let
$\beta\in \partial X$ and assume that the edge $e$ as above is close
to $\beta$. This means that $\beta\in \partial X_g$ (ends in $DX$ have
a neighborhood not containing any edges $e$ as above). It follows that
$f(e)$ is close to $\beta$ by (R2).
Thus $f$ is a proper map. Let $g$ be constructed
similarly for $h^{-1}\in \mathcal{R}$. Then $gf\in PMaps(X)$ has the property
that $\Phi_T(gf)=\Phi_T(g)\Phi_T(f)=h^{-1}h=id$, so $gf\simeq id$ by
the injectivity of $\Phi_T$, and similarly $fg\simeq id$.
\end{proof}
We now have a useful criterion for when a proper map $X\to X$ is properly
homotopic to the identity, without assuming it is a proper homotopy
equivalence.
\begin{cor}\label{criterion}
Let $f:X\to X$ be proper. Then $f$ is properly homotopic to the
identity if and only if it preserves the homotopy class of every
oriented closed curve and the proper homotopy class of every
oriented proper line in
$X$ that in each direction converges to an end in $DX=\partial
X\smallsetminus \partial X_g$.
\end{cor}
\begin{proof}
If $X$ is a core graph we are assuming that $f_*\in Out(\pi_1(X))$
preserves all conjugacy classes in $\pi_1(X)$. This implies that $f_*=id$
and the conclusion follows from Theorem \ref{id}.
If $DX\neq\emptyset$ choose some $\alpha_0\in DX$. We now see that
$f_*:\pi_1(X,\alpha_0)\to\pi_1(X,\alpha_0)$ is the identity, since $f$
preserves lines that start and end at $\alpha_0$, and similarly $f$
fixes all ends of $X$. Finally, we see that
$\Phi_T(f)=1$ since $f$ preserves all lines joining $\alpha_0$ with any
$\beta\in DX$, so the statement follows from Theorem \ref{kernel}.
\end{proof}
\begin{cor}\label{criterion2}
Suppose $f:X\to Y$ is proper, induces a homeomorphism $\partial X\to\partial Y$
and the restriction $X_g\to Y_g$ is a proper homotopy equivalence. Then $f$
is a proper homotopy equivalence.
\end{cor}
\begin{proof}
Using Proposition \ref{extension} we have a proper map $g:Y\to X$ so
that the restriction $Y_g\to X_g$ is the homotopy inverse to $f:X_g\to Y_g$
and so that $fg$ and $gf$ are the identity on the
boundaries. But then both are proper homotopy equivalences by
Theorem \ref{kernel}, and thus both $f$ and $g$ are as well.
\end{proof}
Recall that $\mathcal{R} \cong Ker(PMaps(X) \to PMaps(X_g^*))$.
\begin{cor}
If $\pi_1(X)$ is nonabelian then
$K=Ker(PMaps(X)\to PMaps(X_g))$
fits in an exact sequence
\[1\to \mathcal{R}\to K\to \pi_1(X)\to 1\]
If $X_g=S^1$ then $Ker(PMaps(X)\to PMaps(X_g)={\mathbb Z}/2{\mathbb Z})$ is isomorphic to
$\mathcal{R}$.
\end{cor}
\begin{proof}
We focus on the first statement. The horizontal and vertical
sequences in the commutative diagram below are exact by Theorem
\ref{kernel} and Lemma \ref{Ystar}. The diagonal sequence is exact
by the definition of $K$. The construction of the red arrows and the
exactness of the sequence are a diagram chase.
\vskip 0.5cm
\begin{center}
\begin{tikzcd}
1
\arrow{dr} & & & 1\arrow{d} & \\
& K \arrow{dr}
\arrow[red]{rr} & & \pi_1(X) \arrow{d}\arrow[red]{r} & 1\\
1\arrow{r} & \mathcal{R} \arrow{r}\arrow[red]{u} & PMaps(X) \arrow{r}\arrow{dr} &
PMaps(X_g^*) \arrow{r}\arrow{d} & 1\\
& 1\arrow[red]{u} & & PMaps(X_g) \arrow{d} \arrow{dr}\\
& & & 1 & 1
\end{tikzcd}
\end{center}
\end{proof}
When $DX$ is compact there is a more refined statement. Note that in
that case condition (R2) in the definition of $\mathcal{R}$ is vacuous.
\begin{cor}\label{semidirect}
If $DX$ is compact and nonempty then
\[PMaps(X)\cong \mathcal{R}\rtimes PMaps(X_g^*)\]
where $PMaps(X_g^*)$ acts on $\mathcal{R}$ by $g\cdot h(\beta)=g_*(h(\beta))$,
where $g_*:\pi_1(X_g^*,\alpha_0)\to \pi_1(X_g^*,\alpha_0)$ is the homomorphism
induced by $g$.
\end{cor}
\begin{proof}
By Definition \ref{defOfPhi}, we have $\Phi_T:PMaps(X)\to\mathcal{R}$, and let
$R:PMaps(X)\to PMaps(X_g^*)$ be the restriction. Thus we have a
function
\[(\Phi_T\times R):PMaps(X)\to \mathcal{R}\rtimes PMaps(X_g^*)\]
That this is a homomorphism follows from the displayed calculation
in the proof of Theorem \ref{kernel}. That the map is 1-1 and onto
follows from Theorem \ref{kernel} plus the observation that $R$ is
onto.
\end{proof}
When $DX$ is not compact, $\Phi_T$ may not be a well-defined function
to $\mathcal{R}$ since (R2) may fail.
\section{Topology}\label{topology}
It
takes a bit of care to define the topology on $Maps(X)$.
Let $\hat X$ denote the Freudenthal compactification $X\cup \partial
X$ by the ends of $X$. If a map $f:X\to X$ is a proper homotopy
equivalence then it induces an isomorphism of $\pi_1(X)$ and it
extends to a continuous map $\hat X\to\hat X$ that restricts to a
homeomorphism of $\partial X$. However, the converse of this statement
is false.
\begin{example}\label{noninvertible}
Let $X$ be the ray $[0,\infty)$ with the circle $x_n$ attached at
$n$. Then there is a proper map whose action on $\pi_1$ is
$x_0\mapsto x_0$ and $x_n\mapsto x_n x_{n-1}$ for $n>0$. The
inverse sends every $x_n$ to a word that involves $x_0$ so it
cannot be realized by a proper map.
\end{example}
To circumvent this pathology, we will consider the space of pairs of
maps which are proper homotopy inverses of each other. This way the
inverse is ``built in'', cf. the proof that inversion is continuous,
Proposition \ref{group}. More precisely, let $(\hat X\to\hat X)$ be
the space of all continuous maps $\hat X\to \hat X$ equipped with the
compact open topology. If we fix a metric $d$ on $\hat X$ then $(\hat
X\to\hat X)$ has the associated $\sup$ metric which we also denote by
$d$. This
metric is also complete, and composition is continuous, and $(\hat
X\to\hat X)$ is separable (see e.g. \cite[Theorem 4.19]{Kechris_sep}).
Next, we look at the space $PH(X)\subset (\hat X\to\hat X)^2$ consisting
of pairs $(\hat f,\hat g)$ such that:
\noindent
{\it $\hat f,\hat g$ are extensions to $\hat X$ of proper homotopy
equivalences $f,g:X\to X$ that are each other's inverses.}
In particular, $\hat f,\hat g$ are homeomorphisms when restricted to
$\partial X$ and they are each other's inverses. We put the product
topology on $(\hat X\to\hat X)^2$ and the subspace
topology on $PH(X)$.
Now define the function $\pi:PH(X)\to Maps(X)$ by
\[\pi(\hat f,\hat g)=[f]\] where $[f]$ is the proper homotopy class of
the restriction of $\hat f$ to $X$. This function is surjective and
we put the quotient topology on $Maps(X)$.
\begin{prop}
The quotient map $\pi$ is an open map.
\end{prop}
\begin{proof}
Let $U\subseteq PH(X)$ be open; we need to show that
$\pi^{-1}\pi(U)\subseteq PH(X)$ is open. Let $(\hat f,\hat g)\in
\pi^{-1}\pi(U)$. Thus we have proper homotopies $H,K$ from $f,g$ to
$h,k$ respectively, and $(\hat h,\hat k)\in U$. Therefore there is
$\epsilon>0$ such that if $(\hat h',\hat k')\in PH(X)$ and $d(\hat
h,\hat h')<\epsilon$, $d(\hat
k,\hat k')<\epsilon$ then $(\hat h',\hat k')\in U$. We now claim
that there is $\delta>0$ such that if $(\hat f',\hat g')\in PH(X)$,
$d(\hat f,\hat f')<\delta$, $d(\hat g,\hat g')<\delta$, then there
are homotopies of $f',g'$ to maps $h',k'$ as above, and this will
show that $(\hat f',\hat g')\in\pi^{-1}\pi(U)$, finishing the
proof.
We will prove the claim using Proposition \ref{pasting_homotopies}. Note
that all proper homotopies between maps on $X$ extend continuously
to $\partial X$ and are stationary on all points of $\partial
X$. It follows that in the complement of a sufficiently large
finite subgraph the tracks (i.e. paths traversed by points) of each
such
homotopy are as small as we
like. In addition, the edges outside a large finite subgraph are as
small as we like.
Choose a large finite subgraph $L\subset X$ and
choose $\delta_f>0$ so that if $d(\hat f,\hat f')<\delta_f$ then we
have a homotopy between $f'|L$ and $f|L$ whose tracks have small
size, and we also have a homotopy between $f$ and $h$ with small
tracks for points in $\overline{X\smallsetminus L}$. Applying Proposition
\ref{pasting_homotopies} we get a homotopy from $f'$ to some map
$h'$ that agrees with $h$ on $L$ and whose tracks of points outside
$L$ are small. Thus $h'$ is close to $f$ outside $L$, which in turn
is close to $h$ outside $L$. Thus $h'$ is close to $h$
everywhere. In a similar way we find $\delta_g>0$ and a homotopy
from $g'$ to $k'$. Then we set $\delta=\min\{\delta_f,\delta_g\}$.
\end{proof}
\begin{cor}\label{phe}
Let $PHE(X)\subset (\hat X\to\hat X)$ be the subspace of maps $\hat
f$ that are extensions to $\hat X$ of proper homotopy equivalences
$f \colon X\to X$. Then the map $q\colon PHE(X)\to Maps(X)$ that to $\hat f$ assigns
$[f]$ is open. Thus alternatively we could use this map to define
the quotient topology on $Maps(X)$.
\end{cor}
\begin{proof}
The projection $(\hat X\to\hat X)^2\to(\hat X\to\hat X)$ to the
first coordinate restricts
to a continuous map $PH(X)\to PHE(X)$. The composition with
$q\colon PHE(X)\to Maps(X)$ is open by the proposition, so $q$ is open.
\end{proof}
\begin{prop}\label{group}
With this topology $Maps(X)$ is $T_1$ and it is a topological group.
\end{prop}
\begin{proof}
We first show $Maps(X)$ is a topological group. That composition is
continuous follows from the fact that the map $PH(X)\times PH(X)\to
PH(X)$ defined by $((\hat f,\hat g),(\hat f',\hat g'))\mapsto (\hat
f\hat f',\hat g'\hat g)$ is continuous, being the restriction of the
analogous map $(\hat X\to\hat X)^4\to (\hat X\to\hat X)^2$.
\vskip 1cm
\begin{center}
\begin{tikzcd}
PH(X)\times PH(X) \arrow{d}{\pi\times\pi} \arrow[r] &
PH(X)\arrow{d}{\pi} \\
Maps(X)\times Maps(X)\arrow[r] & Maps(X)
\end{tikzcd}
\vskip 1cm
\end{center}
A product of open maps is open so both vertical arrows are quotient
maps. Continuity of the bottom horizontal map now follows.
For the inverse we consider the map $PH(X)\to PH(X)$, $(\hat f,\hat
g)\mapsto (\hat g,\hat f)$ and the argument is similar.
To prove that $Maps(X)$ is $T_1$ it suffices to show that the
identity point is closed, since $Maps(X)$ is a topological
group. This amounts to showing that the space of $\hat f:\hat
X\to\hat X$ so that $f$ is properly homotopic to the identity is a
closed set. This follows from Corollary \ref{criterion} since
preserving a loop or a line is a closed condition.
\end{proof}
\begin{cor}\label{topiso}
Let $X,Y$ be two connected locally finite graphs that are proper
homotopy equivalent. Then $Maps(X)$ and $Maps(Y)$ are isomorphic as
topological groups.
\end{cor}
\begin{proof}
Let $F:X\to Y$ and $G:Y\to X$ be proper homotopy equivalences with
$FG$ and $GF$ properly homotopic to the identity. Then $F,G$ extend
to maps $\hat F:\hat X\to \hat Y$ and $\hat G:\hat Y\to \hat X$ and
we have the induced maps $(\hat X\to\hat X)\to (\hat Y\to \hat Y)$,
$\hat f\mapsto \hat F\circ \hat f \circ \hat G$ and $(\hat Y\to \hat Y)\to (\hat
X\to\hat X)$, $\hat g\mapsto \hat G \circ \hat g \circ \hat F$. These maps
restrict to $PHE(X)\to PHE(Y)$ and $PHE(Y)\to PHE(X)$,
which induce homomorphisms $Maps(X)\to Maps(Y)$ and $Maps(Y)\to
Maps(X)$. These are each other's inverses and they are both
continuous by Corollary \ref{phe}.
\end{proof}
\subsection{Clopen subgroups}\label{clopen}
We next show that $Maps(X)$ has a countable basis consisting of clopen
sets, which are in fact cosets of subgroups. We will use this to show
that $Maps(X)$ is homeomorphic to ${\mathbb Z}^\infty$, i.e. the irrationals,
and is hence a totally disconnected Polish group.
We define clopen subgroups of $Maps(X)$, analogs of pointwise stabilizers of
compact subsurfaces in big mapping class groups. Let $K\subset X$ be a
finite subgraph and define
$U_K$ to be the set of equivalence classes $[f]\in Maps(X)$ with a representative such that:
\begin{enumerate}[(i)]
\item $f=id$ on $K$,
\item $f$ preserves each complementary component of $K$,
\item there is a representative $g$ of $[f]^{-1}$ that also
satisfies (i) and (ii),
\item there are proper homotopies $gf\simeq 1$ and $fg\simeq 1$
that are stationary on $K$ and preserve complementary components
of $K$.
\end{enumerate}
\begin{lemma}\label{UkOpen} $U_K$ is open.
\end{lemma}
\begin{proof}
We must show that $\pi^{-1}(U_K)$ is open. Let $\pi(h,k) \in U_K$.
Let $f,g$ be the representatives satisfying (i)-(iv). We then have
proper homotopies $H,K:X\times I\to X$ with $H(x,0)=h(x), H(x,1)=f(x),
K(x,0)=k(x), K(x,1)=g(x)$. Choose a compact subgraph $L\supset K$ such
that both $H$ and $K$ at all times map $X\smallsetminus L$ into $X
\smallsetminus K$. Finally, choose $\epsilon>0$ such that if
$d(h',h)<\epsilon$ and $d(k',k)<\epsilon$ then $h',k'$ send
$X\smallsetminus L$ to $X\smallsetminus K$ and $h,h'$ ($k,k'$)
restricted to $L$ are homotopic by a homotopy that doesn't move points
in $Fr(L)$ into $K$.
Now use Proposition \ref{pasting_homotopies} a total of 4 times to prove
that $\pi(h',k')\in U_K$. First, we have a homotopy from $h'$ to a map $h''$
extending $h|L$, and second, we have a homotopy from $h''$ to a
map $f'$ extending $f|L$. Thus $f'|K=1$ and the preparations above show
that $f'$ maps $X\smallsetminus L$ to $X\smallsetminus K$. Two more
homotopy extensions yield a similar homotopy from $k'$ to $g'$. Since
$(f',g')$ satisfies (i)-(iv) we conclude that $\pi(h',k')\in U_K$.
\end{proof}
\begin{prop}
$U_K$ is a clopen subgroup. For every neighborhood $U$ of $1\in
Maps(X)$ there is some $K$ so that $U_K\subseteq U$.
\end{prop}
\begin{proof}
To prove that $U_K$ is a subgroup, we must prove that if $[f_1],[f_2]
\in U_K$ with $f_1,f_2$ preferred representatives, then $[f_1][f_2]
\in U_K$. Indeed, $[f_1][f_2] = [f_1 \circ f_2]$ and $f_1 \circ f_2$ is
the identity on $K$ and preserves complementary components. The same
is true for its homotopy inverse $g_2 \circ g_1$ as well as for both
homotopies to $id_K$.
We proved in Lemma \ref{UkOpen} that $U_K$ is open. Thus $U_K$ is also
closed since its complement is a union of cosets, which are also open.
Finally, we must show that for every open $U \subset Maps(X)$ containing $1$, there is a compact $K$ such that $U_K \subset U$.
We have that $\pi^{-1}U$ is an open set in $PH(X)$ containing $(1,1)$.
Let $\epsilon>0$ be such that if
$(f, g)\in PH(X)$ and $d(1,\hat f)<\epsilon$, $d(1,\hat g)<\epsilon$
then $(\hat f,\hat g)\in \pi^{-1}(U)$.
Let $K\subset X$ be compact so
that all complementary components of $K$ have diameter
$<\epsilon$. Then for preferred representatives $(f,g)$ of $[f] \in U_K$ we have
$(f, g)\in\pi^{-1}(U)$, so $U_K\subseteq \pi\pi^{-1}(U)=U$.
\end{proof}
\begin{cor}
$Maps(X)$ has a countable basis of clopen sets, it is separable,
metrizable and totally disconnected.
\end{cor}
\begin{proof}
$Maps(X)$ is separable since it is the continuous image of $PH(X)$
which is separable, being a subspace of a separable metric space. So
in particular each open subgroup $U_K$ has at most countably many
cosets. Choose an exhaustion $K_i$ of $X$; then the $U_{K_i}$ and their
cosets form a countable basis of clopen sets. Next, $T_1$ plus a
basis of clopen sets implies regular (proof: let $x\notin A$ with
$A$ closed; then there is a basis element $V$ with $x\in V$ and
$V\cap A=\emptyset$ and so $V,V^c$ is the required separation). Then
countable basis plus regular implies normal
\cite[1.5.16]{engelking}, and finally countable basis plus normal
implies metrizable (Urysohn metrization theorem, \cite[Theorem
34.1]{munkres}).
\end{proof}
Recall that $X$ has \emph{finite type} if $\pi_1(X)$ is finitely
generated and $\partial X$ is finite; otherwise $X$ has \emph{infinite
type}.
\begin{lemma}\label{not compact}
Suppose that $X$ has infinite
type. Then for every finite subgraph $K\subset X$ there is a finite
subgraph $L\subset X$ such that $L\supset K$ and $U_L<U_K$ has
infinite (countable) index.
\end{lemma}
\begin{proof}
First recall that an infinite totally disconnected compact
metrizable space either has infinitely many isolated points or else
it is the disjoint union of a Cantor set and finitely many isolated
points. Consider the complementary components of $K$. If one of them
has genus $N>1$ then there is $L\supset K$ such that $U_K$ contains
a subgroup $H$ isomorphic to the infinite group $Aut(F_N)$, while
$H\cap U_L=1$. The elements of $H$ are realized by homotopy
equivalences supported on a finite subgraph of genus $N$.
Otherwise, after
perhaps enlarging $K$, we may assume that all complementary
components are trees, and in this case $\partial X$ is
infinite. According to the dichotomy above, we can find $L\supset K$
such that one of the following holds:
\begin{itemize}
\item There is a complementary component of $K$ whose boundary at infinity
contains an infinite set of isolated points, and this set can be
written nontrivially as $A\cup B$ where $A$ and $B$ are boundary points of
two distinct complementary components of $L$.
\item There is a complementary component of $K$ whose boundary
at infinity contains a Cantor set as a clopen subset, and this
Cantor set can be written nontrivially as $A\cup B$ with both
$A,B$ clopen and both boundary points of two distinct
complementary components of $L$.
\end{itemize}
In the first case, $U_K$ contains the entire group $Perm_0(A\cup B)$
of finitely supported permutations of $A\cup B$ (cf. the
classification theorem), while the intersection of this group with
$U_L$ contains only permutations that preserve $A$ and $B$. Since
either $A$ or $B$ is infinite, this subgroup has infinite index.
In the second case $U_K$ contains the entire group $H=Homeo(A\cup B)$,
while the intersection $H\cap U_L$ contains only homeomorphisms that
preserve $A$ and $B$, and this again has infinite index.
\end{proof}
\begin{lemma}
$PH(X)\subset (\hat X\to\hat X)^2$ is a $G_\delta$-subset.
\end{lemma}
\begin{proof}
A pair $(\hat f,\hat g)\in (\hat X\to\hat X)^2$ is in $PH(X)$ iff
(see Corollary \ref{criterion}):
\begin{enumerate}[(1)]
\item $\hat f(\partial X)\subseteq \partial X$, $\hat g(\partial
X)\subseteq \partial X$,
\item $\hat f(X)\subseteq X$, $\hat g(X)\subseteq X$,
\item $\hat f\hat g$ and $\hat g\hat f$ are the identity on $\partial X$,
\item $\hat f\hat g$ and $\hat g\hat f$ restricted to $X$
preserve the homotopy classes of oriented loops and proper
homotopy classes of oriented proper lines joining points of $DX$.
\end{enumerate}
Conditions (1),(3) and (4) are closed conditions and (2) is
$G_\delta$: (2) can be written as countably many conditions $\hat
f(K_n)\subset X$, $\hat g(K_n)\subset X$ for an exhaustion $\{K_n\}$,
$n=1,2,\dots$ and these are all open.
\end{proof}
When $X$ has finite type, $Maps(X)$ is countable and
discrete. Otherwise we have:
\begin{prop}
Suppose $X$ has infinite type. Then $Maps(X)$ is a Polish group
with the underlying space
homeomorphic to ${\mathbb Z}^\infty$, i.e. to the set of irrationals.
\end{prop}
\begin{proof}
There is a theorem of Sierpinski that if $f\colon X\to Y$ is an open
surjective map between separable metric spaces and $X$ is complete,
then $Y$ is completely metrizable (see \cite[Exercise
5.5.8.(d)]{engelking}). Since $PH(X)$ is a $G_\delta$ subset of
$(\hat X\to\hat X)^2$, it is completely metrizable (see
\cite[4.3.23]{engelking}) and therefore $Maps(X)$ is completely
metrizable. Then the fact that $Maps(X)$ is homeomorphic to the
irrationals follows from a theorem of Hausdorff (see
e.g. \cite{eberhart}): If $Z$ is separable, completely metrizable,
zero dimensional (i.e. has a basis of clopen sets), and every compact
subset has empty interior, then $Z$ is homeomorphic to the
irrationals. To finish the proof, if $Maps(X)$ had a compact subset
with nonempty interior, then some $U_{K}$ would be compact. But this
contradicts Lemma \ref{not compact}, since $U_{K}$ is covered by the
pairwise disjoint cosets of $U_{L}$ and this cover doesn't have a
finite subcover.
\end{proof}
We finish this section by considering continuity properties of
homomorphisms studied in Section \ref{s:algebra}.
Recall the surjective homomorphism $\sigma \colon Maps(X)\to Homeo(\partial X,\partial
X_g)$ from Corollary \ref{sigma}. The group $Homeo(\partial X,\partial X_g)$ is
equipped with the compact-open topology. This means that a basis of
neighborhoods of the identity is defined by clopen subgroups
$V_{\mathcal P}$ where $\mathcal P$ is a finite partition of $\partial X$
into clopen subsets and $V_{\mathcal P}$ consists of the elements of
$Homeo(\partial X,\partial X_g)$ that leave the partition elements
invariant. Refining the partition yields a smaller clopen subgroup.
\begin{cor}\label{4.12}
The homomorphism $\sigma$ is continuous and open. In particular, when $X$
is a tree, $\sigma:Maps(X)\to Homeo(\partial X)$ is an isomorphism of
topological groups.
\end{cor}
\begin{proof}
We may assume that $X$ is a Standard Model. We will consider finite
subgraphs $K\subset X$ consisting of a subtree in the
underlying tree together with all circles attached to it. The
complementary components of $K$ determine a partition $\mathcal P_K$
of $\partial X$. Since every partition $\mathcal P$ is refined by some
$\mathcal P_K$ and $\sigma(U_K)\subseteq V_{\mathcal P_K}$ it
follows that $\sigma$ is continuous. To prove that $\sigma$ is open
it suffices to argue that $\sigma(U_K)=V_{\mathcal P_K}$. Let $W$ be
a complementary component of $K$. Thus $\partial W$ is one of the
partition elements $A_W$ of $\mathcal P_K$ together with one point $v$
corresponding to the vertex of intersection
$\overline W \cap K$. Given a homeomorphism $h$ of $(A_W,A_W\cap\partial
X_g)$, extend it by $v\mapsto v$ and view it as a homeomorphism of
$(\partial W,\partial W_g)$. By the Classification Theorem there is
$f_h\in Maps(W)$ that induces $h:\partial W\to\partial W$. Now define
$f\in Maps(X)$ as the identity on $K$ and as $f_h$ on $W$, for each
complementary component $W$, and observe that $\sigma(f)$ is the
given homeomorphism in $V_{\mathcal P_K}$.
\end{proof}
Next, recall the homomorphism $\Psi:Maps(X)\to Out(\pi_1(X))$ to the
Polish group $Out(\pi_1(X))$. It is injective when $X$ is a core graph
(Theorem \ref{id}).
\begin{prop}
The homomorphism $\Psi$ is continuous. If
the genus of $X$ is infinite, the image is not a closed subgroup. If
in addition $X$ is a core graph then $\Psi$ is injective but it is
not a homeomorphism onto its image.
\end{prop}
\begin{proof}
Consider $f \colon X\to X$ from Example \ref{noninvertible}. Let $f_n \colon X\to
X$ be defined by $f_n(x_0)=x_0$, $f_n(x_k)=x_kx_{k-1}$ for $k\leq n$
and $f_n(x_k)=x_k$ for $k>n$. Then $\Psi(f_n)\to f_*\in
Out(\pi_1(X))$, but $f_*$ is not in the image of $\Psi$.
Similarly, consider $g_n \colon X\to X$ defined by $g_n(x_k)=x_k$
when $k\leq n$ or $k\geq 2n$, $g_n(x_k)=x_kx_{1}$ when $n<k<2n$. Then
$\Psi(g_n)\to id$ but the sequence $g_n$ does not converge to $id$
(or anywhere). So $\Psi$ is not a homeomorphism onto its image.
Generalizing these examples to other graphs is left to the reader.
\end{proof}
Finally, we have the following statement, whose proof is left to the
reader.
\begin{prop}
The restriction epimorphisms $PMaps(X)\to PMaps(X_g)$ and $PMaps(X)\to
PMaps(X_g^*)$ are continuous and open.
\end{prop>
\section{Proof of Main Theorem for core graphs}
\subsection{Free factor systems}
\def\F{{\mathbb F}} Let $\F$ be a free group, possibly of infinite
rank. Recall that a nontrivial subgroup $A<\F$ is a \emph{free factor}
of $\F$ if there is a subgroup $B<\F$ such that $A*B=\F$. We will only
consider free factors of finite rank, and only conjugacy classes $[A]$
of such free factors. To simplify notation we will usually omit the
brackets. Topologically, a (conjugacy class of a) nontrivial subgroup
is a free factor if there is a graph $\Gamma$ with $\pi_1(\Gamma)=\F$
and with $A$ represented by a subgraph. Similarly, a finite collection
$\mathcal F$ of (conjugacy classes of) finitely generated free factors
is a \emph{free factor system} if there are representatives
$A_1,A_2,\dots,A_n$ and a subgroup $B<\F$ such that $A_1*A_2*\cdots
*A_n*B=\F$. Topologically, there is a graph $\Gamma$ with
$\pi_1(\Gamma)=\F$ and with the $A_i$s represented by pairwise
disjoint subgraphs.
If $\mathcal F$ and $\mathcal F'$ are two free factor systems, the
intersection $\mathcal F\cap\mathcal F'$ is naturally a free factor
system. It consists of conjugacy classes of nontrivial subgroups
obtained by intersecting a representative of a conjugacy class in
$\mathcal F$ with a representative of a conjugacy class in $\mathcal
F'$. Topologically, one can represent $\mathcal F$ and $\mathcal F'$
by immersions of finite graphs $\Gamma_F\to\Gamma$ and
$\Gamma_{F'}\to\Gamma$, form the pull-back (see \cite{stallings}) and
discard the contractible components to get an immersion representing
the intersection.
\begin{example}
Let $\F=\langle a,b,c\rangle$, $A=\langle a,b\rangle$, $B=\langle a,cbc^{-1}\rangle$. Then $A$ and
$B$ are free factors of $\F$, while their intersection is the free
factor system consisting of two rank 1 free factors $\langle a\rangle$ and
$\langle b\rangle$. The intersection of $A$ and $\langle c\rangle$ is the empty free
factor system.
\end{example}
To see that the intersection $\mathcal F\cap\mathcal F'$ is a free
factor system, one can arrange that one of them is represented by
subgraphs of $\Gamma$ and then the pullback will be represented by
subgraphs of the other one. It is also possible to compute finite
intersections of free factor systems by a pull-back of several
immersions.
Finally, we write $\mathcal F<\mathcal F'$ if every group (representing
a conjugacy class) in $\mathcal F$ is contained in a group in $\mathcal
F'$. For example, $\mathcal F\cap\mathcal F'<\mathcal F$.
\subsection{Tree of groups}
\def\F{{\mathcal F}} We now assume that $X$ is a core graph and is a
Standard Model. Thus $X$ is a tree $T$ with a root vertex $v$ and with
a loop attached at every vertex. We assign length 1 to each edge and
let $D_0\colon T\to [0,\infty)$ be the distance function from $v$. We extend
$D_0$ to all of $X$ so that it is constant on each attached
loop. Our first task is to control the sizes of maps representing
elements of $H$, as well as homotopies, measured by $D_0$. So in
effect we replace properness by metric control. Recall that for a
finite subgraph $K\subset X$ we have a clopen subgroup $U_K<Mod(X)$,
so $H\cap U_K$ is compact and has finite index in $H$.
\begin{prop}\label{rs}
There is a sequence of integers $0=r_0<r_1<r_2<\cdots$ and for
every $n>0$ and every $[h]\in H$ there is a representative $h$ satisfying
\begin{enumerate}[(*)]
\item $h$ maps
every element of the closed cover $\mathcal
C(r_1,r_2,\cdots,r_n)$ of $X$ consisting of the sets
$$D_0^{-1}[r_0,r_1],D_0^{-1}[r_1,r_2],\cdots,D_0^{-1}[r_{n-1},r_n],D_0^{-1}[r_n,\infty)$$
to the union of the same element with the one or two
adjacent elements.
\end{enumerate}
\end{prop}
\begin{proof}
We construct the numbers inductively, starting
with $r_1=1$. Then (*) is
vacuous.
Suppose that $r_n$ has been constructed satisfying (*). Note that
by properness for every $[h]\in H$ (and every representative $h$
that exists by induction) there is some $r_{n+1}>r_n$ so that
$(*)$ holds for the cover $C(r_1,r_2,\cdots,r_n,r_{n+1})$ and this
$h$. Moreover, the same $r_{n+1}$ will also work in a neighborhood
of $[h]$ by choosing representatives of the form $hu$ where $[u]\in
U_{D_0^{-1}[0,r_{n+1}]}$, i.e.\ $u$ fixes $D_0^{-1}[0,r_{n+1}]$ and
leaves the complementary components invariant. Now by compactness
of $H$, there is a finite cover of $H$ by such open sets and the
maximal $r_{n+1}$ will then satisfy the requirements.
\end{proof}
We also need a statement similar to (*), except for homotopies.
\begin{prop}\label{homotopies}
Let $0<r_1<r_2<\cdots$ be as in Proposition
\ref{rs}. Fix $n$ and let $h,h'$ be the representatives of two
elements of $H$ that are inverses of each other as in Proposition
\ref{rs}. Thus $h'h$ is properly homotopic to the identity and it
maps each element of $\mathcal C(r_1,\cdots,r_n)$ to the union of
at most 5 elements (the 2-neighborhood of the given element). Then
there is a proper homotopy between the identity and $h'h$ that
moves each point within the 2-neighborhood of an element of the
cover that contains it.
\end{prop}
\begin{proof}
First note that there is a canonical proper homotopy between the
identity and $h'h$: lift the given homotopy to the universal cover
extending the identity map, and then replace it by the straight
line homotopy. We now argue that this homotopy moves within
2-neighborhoods. Fix a component $P$ of an element of the cover
and let $\tilde P$ be the component of the 2-neighborhood that
contains it. Since a loop in $P$ cannot be mapped by $h'h$ disjointly
(since otherwise $h'h$ would not be homotopic to the identity) we
see that $h'h(P)\subseteq \tilde P$. By lifting to the covering
space of $X$ corresponding to $\pi_1(\tilde P)$ and then
retracting to the core $\tilde P$, we see that
$h'h|P\colon P\to \tilde P$ is homotopic to inclusion
$i\colon P\hookrightarrow\tilde P$ within $\tilde P$. Now note that any
homotopy from $i$ to $h'h|P$ has tracks that are nullhomotopic loops
(they have to represent $\pi_1$-elements that commute with
$\pi_1(P)$, but since $\pi_1(P)$ and $\pi_1(\tilde P)$ are
nonabelian free groups this forces these loops to be trivial). It
follows that the tracks described by the straight line homotopy
are homotopic to paths in $\tilde P$, but since they are immersed,
they must be contained in $\tilde P$.
\end{proof}
It will be convenient to introduce the following notation. First,
let $\rho\colon[0,\infty)\to [0,\infty)$ be a homeomorphism such that
$\rho(r_n)=n$ for $n=0,1,\cdots$ and let $D=\rho D_0\colon X\to [0,\infty)$. Thus
$D^{-1}([m,n])=D_0^{-1}([r_m,r_n])$. We think of $D$ as a
``control function''. For example, (*) says that for every $n$
every element
of $H$ has a representative $h$ that ``moves points $<2$'',
i.e.\ $|D(x)-D(h(x))|<2$ for every $x\in D^{-1}[0,n]$, and similarly
Proposition \ref{homotopies} says that homotopies ``move
points $<3$''.
If $J\subset [0,\infty)$ is a closed interval with integer
endpoints, write $\mathcal F(J)$ for the free factor system
represented by $D^{-1}(J)$. Thus the number of free factors in
$\mathcal F(J)$ is equal to the number of components of
$D^{-1}(J)$. When $J$ is a degenerate interval (a single integer
point) then each factor in ${\mathcal F}(J)$ has rank 1. We denote by $|J|$
the length of the interval.
We also set $${\mathcal F}'(J)=\bigcap_{h\in H}h_*({\mathcal F}(J))$$
where $h_*\colon\pi_1(X)\to\pi_1(X)$ is the automorphism induced by $h$
(defined up to conjugation). This is really only a finite
intersection since when $h$ is close to the identity we will have
$h_*({\mathcal F}(J))={\mathcal F}(J)$, so it suffices to intersect over finitely many
coset representatives. Thus ${\mathcal F}'(J)$ is an $H$-invariant free factor
system.
When $J=[a,b]\subset [0,\infty)$ with integer endpoints and with
$b-a\geq 4$ we set $J^+=[a-2,b+2]\cap [0,\infty)$ to be the
2-neighborhood of $J$, and likewise $J^-=[a',b-2]$ where $a'=0$ if
$a=0$ and otherwise $a'=a+2$ (so $J^-$ is obtained from $J$ by
subtracting the 2-neighborhood of the complement).
Note
that by our assumptions on the sequence $r_n$ we have that
$${\mathcal F}(J^-)<{\mathcal F}'(J)<{\mathcal F}(J^+)$$
We now show that each
group in ${\mathcal F}'(J)$ either contains a group in ${\mathcal F}(J^-)$ or it has
trivial intersection with all of them.
\begin{lemma}\label{5.2}
Let $A$ be a free factor in ${\mathcal F}'(J)$. If $A$ contains a nontrivial
element $\alpha$ that also belongs to a free factor $B$ in ${\mathcal F}(J^-)$ then
$B<A$ (up to conjugacy).
\end{lemma}
\begin{proof}
Represent different $h_*({\mathcal F}(J))$, $h\in H$, by immersions of
finite (possibly disconnected) graphs into $X$. The non-tree
components of the pull-back then represent the free factors in
${\mathcal F}'(J)$. Since free factors (and free factor systems) are
malnormal, if an immersion to $X$ lifts to the pull-back, it does
so uniquely. Since an immersion representing $B$ lifts, it must
lift to the component representing $A$, since this is where
$\alpha$ lifts.
\end{proof}
We now set ${\mathcal F}^*(J)$ to be the free factor system consisting of the
free factors in ${\mathcal F}'(J)$ that contain a free factor in
${\mathcal F}(J^-)$. Thus we still have
$${\mathcal F}(J^-)<{\mathcal F}^*(J)<{\mathcal F}(J^+)$$
and also $J\subset J'$ implies ${\mathcal F}^*(J)<{\mathcal F}^*(J')$.
\begin{lemma}
If $|J|\geq 8$ then ${\mathcal F}^*(J)$ is $H$-invariant.
\end{lemma}
\begin{proof}
Take a free factor $A$ in ${\mathcal F}^*(J)$. It will
contain an element $\alpha$ corresponding to a loop in $D^{-1}(t)$
for any $t\in J$ whose distance to each endpoint is $\geq 4$. For
any $h\in H$ we have $h_*(\alpha)$ is an element in a free factor
of ${\mathcal F}([t-2,t+2])<{\mathcal F}(J^-)$, and the free factor of the latter that
contains it is contained in a free factor $B$ of ${\mathcal F}^*(J)$ by
Lemma \ref{5.2}, and
$h_*(A)=B$.
\end{proof}
Now fix a sequence of intervals $J_1,J_2,\cdots$ that cover
$[0,\infty)$ and so that $J_n\cap J_m=\emptyset$ when $|n-m|>1$ and
$J_{n,n+1}:=J_n\cap J_{n+1}$ is an interval of length $\geq 22$
for
$n=1,2,\cdots$. Now construct the following tree of groups $\mathcal
T$. The vertices of the tree are the free factors in ${\mathcal F}(J_n)$ (or
equivalently the components of $D^{-1}(J_n)$), $n=1,2,\cdots$. The
group associated to a vertex is the underlying free factor. The
edges are the free factors in ${\mathcal F}(J_{n,n+1})$ (components of
$D^{-1}(J_{n,n+1})$), again with the associated group the
underlying free factor. Incidence relation is inclusion. The
underlying graph is a tree, the nerve of the cover of $X$ by the
components of $D^{-1}(J_n)$, $n\geq 1$.
\def\T{{\mathcal T}}
\begin{lemma}
$\pi_1(\mathcal T)\cong \pi_1(X)$.
\end{lemma}
\begin{proof}
By induction, the subtree of groups corresponding to the
first $n$ intervals has the fundamental group of the corresponding
subgraph of $X$.
\end{proof}
In a similar way we construct a tree of groups ${\mathcal T}^*$, which will be
$H$-invariant. A vertex of \emph{height $n$} is a free factor
in ${\mathcal F}^*(J_n)$, with this factor as the vertex group. An edge of \emph{height
$[n,n+1]$} is a free
factor in ${\mathcal F}^*(J_{n,n+1})$, with this factor as the edge
group. Such a factor is contained in a unique vertex group at height
$n$ and a unique vertex group at height $n+1$ by Lemma \ref{5.2} and
this gives incidence and edge-to-vertex inclusions. Thus ${\mathcal T}^*$ is a
graph of groups and it is $H$-invariant by construction. Below we
will show that ${\mathcal T}^*$ is a tree and $\pi_1({\mathcal T}^*)\cong \pi_1({\mathcal T})$.
\begin{lemma}
If $C$ is an edge group in ${\mathcal T}^*$ with $A,B$ the incident vertex
groups of heights $n,n+1$ resp., then $C$ is one of the free
factors in $A\cap B$.
\end{lemma}
\begin{proof}
We have that $C$ is contained in some group in $A\cap B$ by
construction. The free factor $A$ is a free factor in the free
factor system ${\mathcal F}'(J_n)$ that contains a factor in ${\mathcal F}(J_n^-)$ and
similarly for $B$. The intersection $A\cap B$ consists of free factors in
${\mathcal F}'(J_{n,n+1})$ and one of them contains $C$, which is also a free factor
in ${\mathcal F}'(J_{n,n+1})$, so equality holds.
\end{proof}
Note here that in principle the intersection of $A$ and $B$ can
consist of several free factors, i.e. the vertices might be
joined by several edges. We will rule this out in Lemma \ref{5.8}.
There is a natural morphism
(vertices to vertices and edges to edges) $\pi\colon{\mathcal T}^*\to{\mathcal T}$ that sends a
factor in ${\mathcal F}^*(J_i)$ to the factor in ${\mathcal F}(J_i)$ that contains it,
and similarly for the edges. Note that we have a height function on
both trees (sending factors in ${\mathcal F}^*(J_i)$, respectively in
${\mathcal F}(J_i)$, to $i$) that commutes with this map.
In the sequel it will be convenient to abuse the terminology and
conflate a subcomplex of $X$ and its fundamental group, and likewise a
``component'' and a ``free factor'' in a free factor system.
\begin{lemma}
Every vertex of ${\mathcal T}^*$ at height $n+1>0$ is connected by an edge to a vertex
at height $n$. There is a unique vertex of ${\mathcal T}^*$ at height
0. In particular, ${\mathcal T}^*$ is connected.
\end{lemma}
\begin{proof}
Suppose the vertex is $A$, so it contains (possibly more than one)
free factor $B$ in ${\mathcal F}(J^{-}_{n+1})$. Every component of
$D^{-1}(J^-_{n+1})$ contains a (unique) component of
$D^{-1}(J_{n,n+1}^-)$ and this component is contained in a unique
free factor of ${\mathcal F}^*(J_{n,n+1})$, which represents an edge at
height $[n,n+1]$ attached to $A$.
Since $D^{-1}(J_1^{-})$ is connected (recall that $J_1^-$ contains
$\{0\}$) and every vertex at height 0
must contain a component of it, it follows that there is only one
height 0 vertex in ${\mathcal T}^*$.
\end{proof}
Note that a vertex at height $n$ may not be connected to any
vertices at height $n+1$ since a component of $D^{-1}(J^-_{n})$ may
not contain any components of
$D^{-1}(J_{n,n+1}^-)$.
\begin{lemma}\label{5.8}
Let $e$ be an edge in ${\mathcal T}$ with height in $[n,n+1]$ and consider
its preimage $\pi^{-1}(e)$ in ${\mathcal T}^*$. After removing isolated
vertices from $\pi^{-1}(e)$, it is a tree with one vertex $w$ at
height $n$ and all other vertices at height $n+1$, and these are
all connected to $w$ by a unique edge. In particular, ${\mathcal T}^*$ is a
tree.
\end{lemma}
\begin{proof}
Let $J=J_{n,n+1}$. The statement that all edges in the preimage of
$e$ have the same vertex at height $n$ follows from the following
fact. If two components of $D^{-1}(J^-)$ are contained in the same
component of $D^{-1}(J)$ then they are contained in the same
component of $D^{-1}(J_n^{-})$ (and this is not true if $J_n^-$ is
replaced by $J_{n+1}^-$ and there may be several vertices at
height $n+1$).
We now argue that the height $n+1$ vertices of all these edges in
the preimage of $e$ are
distinct. Fix some integer $k\in J$ at
distance $\geq 9$ from the endpoints and let $x,x'$ be two loops
in $D^{-1}(J)$ that map to $k$. They will lift to unique components of
${\mathcal F}^*(J)$ and any two components are determined in this
way. If they lift to the same component of ${\mathcal F}^*(J_{n+1})$ then
there is an immersion $q\colon\Gamma\to D^{-1}(J_{n+1})$ of a barbell (two
disjoint loops connected by an edge) sending one loop to $x$ and
the other to $x'$ and so that $hq$ can be homotoped into
$D^{-1}(J_{n+1})$ for every $h\in H$. Thus $q$ is kind of a
``witness'' that $x,x'$ lift to the same component of
${\mathcal F}^*(J_{n+1})$. We need a similar witness that they lift to the
same component of ${\mathcal F}^*(J)$. The map $q$ may not work, since its
image may contain points of $D^{-1}(J_{n+1}\smallsetminus J)$, and we will
perform a kind of surgery on $q$ to get a better map.
Fix $h\in H$. By perturbing if necessary we may assume that $hq$
doesn't collapse any edges and is simplicial with respect to
suitable subdivisions. Then the statement that $hq$ can be
homotoped into ${\mathcal F}(J_{n+1})$ is equivalent to saying that after
folding and replacing $hq$ by an immersion, the image of the core
subgraph is contained in $D^{-1}(J_{n+1})$. This same $q$ may not
map into $D^{-1}(J)$ since it may map around loops in
$D^{-1}(J_{n+1})\smallsetminus D^{-1}(J)$, so we will modify it to
$q'\colon\Gamma'\to D^{-1}(J)$. First
we analyze $q$.
Recall that a \emph{vanishing
path} for $hq$ is an immersion $\nu\colon I\to\Gamma$ such
that $hq\nu\colon I\to X$ is a nullhomotopic closed path. There are only
finitely many maximal vanishing paths and the folding process can
be thought of as folding maximal vanishing paths one at a time. We
now claim that $hq\nu$ has $D$-size $<10$ (i.e.\ $\operatorname{diam}
Im(Dhq\nu)<10$) when $h$ is as in Proposition \ref{rs} (see Figure \ref{Vanishing_paths_figure} for an illustration).
Indeed,
$h'hq\nu$ is also a closed nullhomotopic path (where $h'$ is as in
Proposition \ref{homotopies})
and there is a
homotopy of $h'hq\nu$ to $q\nu$ that moves the endpoints by $<3$
measured by $D$. Thus the immersed path $q\nu$
gets closed up to a nullhomotopic loop by a path of $D$-size $<6$,
so it must itself have $D$-size $<6$, and so $hq\nu$ has $D$-size
$<10$.
\begin{figure}[ht]
\includegraphics{smallVanishing}
\caption{$H\colon I\times I\to X$ denotes the homotopy from $q\nu$ to
$h'hq\nu$. The path denoted by $\beta$ is mapped by $H$ to a path
homotopic to $q\nu$. Since $H$ maps vertical segments $\{t\}\times
I$ to paths whose images have $D$-length less than $3$, the dotted
subpaths are mapped by $H$ to paths with $D$-length smaller than
$3$. The dashed part of $\beta$ is null homotopic. Since $q\nu$ is
immersed, its $D$-length is
$<6$.\label{Vanishing_paths_figure}}
\end{figure}
We now observe that after folding $hq$ the tree
components of the complement of the core have $D$-size
$<10$. Indeed, choose any point $p\in\Gamma$. First fold all
vanishing paths that do not contain $p$. After this, $p$ is still
in the core. Finally, fold the remaining vanishing paths -- this
operation changes only the neighborhood of $p$ of $D$-size $<10$.
Now consider $(Dq)^{-1}[k+1,\infty)\subset\Gamma$. It is a
disjoint union of (possibly degenerate) closed intervals in the
interior of the separating arc of $\Gamma$. Form a new graph
$\tilde\Gamma$ by attaching an edge $E_a$ to $\Gamma$ for every
nondegenerate arc $a$ in this disjoint union, with $\partial
E_a=\partial a$. Note that $q$ sends both endpoints of $a$ to
the same vertex (at $D$-height $k+1$, i.e.\ distance $r_{k+1}$
from the root vertex). Extend $q$ to an immersion
$\tilde q\colon\tilde\Gamma\to X$ by sending each $E_a$ to a loop based at this
vertex of combinatorial length $\leq 3$ (for example, one can
send it either to the attached loop based at that vertex or to the loop
of the form $dcd^{-1}$ where $d$ is an edge that increases the
distance from the root and $c$ is the loop attached at the
terminal vertex of $d$, see Figure \ref{GammaTildeFigure}).
Let $\Gamma'\subset\tilde\Gamma$ be the
barbell obtained by deleting the interiors of the arcs $a$ as
above, and let $q'\colon\Gamma'\to X$ be the restriction of $\tilde q$.
\begin{figure}[ht]
\includegraphics{GammaTilde}
\caption{The barbell graph on the left is the graph $\Gamma$. It is
mapped via $h q$ to $X$. The graph $\tilde\Gamma$ is $\Gamma$ union
the edge $E_a$. The arc labeled $a$ is mapped into
$D^{-1}[k+1,\infty)$. Suppose the initial edge of $a$ is mapped
to the loop $b$ and the terminal edge is mapped to $\bar b$. Then
in order to define $\tilde q$ on $\tilde\Gamma$ so that it will be
an immersion we will let $E_a$ map to $dc\bar{d}$ where $d$ is the
edge to the right of $b$, and $c$ is the one edge loop based at
the endpoint of $d$. \label{GammaTildeFigure}}
\end{figure}
We now claim that $h\tilde q$ is homotopic into
$D^{-1}(J_{n+1})$ for every $h\in H$.
We can fold $h\tilde q$ by first folding $hq$, which produces a
core graph with trees attached, and then adding the edges
$E_a$. They could be attached to points in the attached trees,
but all such attached trees have $D$-size $<10$ and map to
$(k+1-10,k+1+10)\subset J_{n+1}$ by $D$. So after removing the
attached trees to which no $E_a$'s are attached, the image is
entirely contained in $D^{-1}(J_{n+1})$, which proves the
claim.
In particular, $hq'\colon\Gamma'\to X$ is homotopic into
$D^{-1}(J_{n+1})$, so $q'$ is also a witness to the fact that
$x,x'$ lift to the same component of ${\mathcal F}^*(J_{n+1})$. By
construction, the image of $q'$ does not exceed the $D$-height
$k+2$, so $hq'$ does not exceed the $D$-height $k+4$, and we see
that $hq'$ is contained (even without homotopies) in
$D^{-1}(J_n)$. It follows that $hq'$ is homotopic into
$D^{-1}(J_{n,n+1})$, and so the lifts of $x,x'$ in ${\mathcal F}^*(J)$ are
in the same component.
To see that ${\mathcal T}^*$ has no loops, we note that if there were an
embedded loop then let $u \in V({\mathcal T}^*)$ be the vertex of the loop of
maximal height. Since no two vertices of the same height are connected,
the loop gives us two vertices of the same height attached to $u$,
which is a contradiction.
Our next goal is to
verify that ${\mathcal T}^*\to{\mathcal T}$ induces an isomorphism between the
fundamental groups of these graphs of groups. Our method is to find a sequence
of folds that converts ${\mathcal T}^*$ to ${\mathcal T}$. We will do this through an
intermediate tree of groups ${\mathcal T}^*\to{\mathcal T}^{**}\to{\mathcal T}$. Only ${\mathcal T}^*$ will
be $H$-invariant.
Recall the following folding moves on simplicial $G$-trees $T$
\cite{bf:bounding}. If $e_1$, $e_2$ are two oriented edges with the
common initial vertex $v$ such that $e_1\cup e_2$ embeds in the
quotient $T/G$, then we may construct a new $G$-tree $T'$ by
identifying $e_1$ and $e_2$ in an equivariant fashion, i.e. we
identify $g(e_1)$ and $g(e_2)$ for every $g\in G$. The stabilizer of
the new edge $e_1=e_2$ is the group generated by $Stab(e_1)$ and
$Stab(e_2)$, and similarly for the terminal vertices of $e_1$ and
$e_2$. The effect in the quotient graph is to fold the images of $e_1$
and $e_2$. This is called Move IA in \cite{bf:bounding}.
Similarly, suppose $e_1$, $e_2$ are two oriented edges with the common
initial vertex $v$, each edge embeds in the quotient $T/G$, but they
have the same images in $T/G$. This means that $g(e_1)=e_2$ for some
$g\in Stab(v)$, so $Stab(e_2)=g Stab(e_1) g^{-1}$. The equivariant
folding operation has the effect that the underlying quotient graph is
unchanged, but the stabilizer of $e_1=e_2$ is now the group generated
by $Stab(e_1)$ and $g$, and similarly for the terminal vertex. This is
called Move IIA, and we think of it as pulling the element $g\in
Stab(v)$ across the image edge to the terminal vertex and enlarging
the stabilizers by this $g$. In a similar way we can pull finitely
generated subgroups (or think of it as several Moves IIA performed in
sequence).
Let ${\mathcal T}^{**}$ be the tree of groups obtained from ${\mathcal T}^*$ by
folding each preimage of an edge to an edge, so that there is a
morphism ${\mathcal T}^{**}\to{\mathcal T}$. This amounts to performing infinitely
many Moves IA, but they are all independent and can be performed
simultaneously. The resulting morphism ${\mathcal T}^{**}\to{\mathcal T}$ is an isomorphism of
underlying trees.
It will be convenient to denote by $\mathcal T(e)$ the group
associated to an edge $e$ of $\mathcal T$, and similarly for the
vertices, and for the trees $\mathcal T^*$ and $\mathcal T^{**}$.
\begin{lemma}\label{5.9}
After independent Moves IIA, the morphism ${\mathcal T}^{**}\to{\mathcal T}$ becomes
an isomorphism of graphs of groups.
\end{lemma}
\begin{proof}
The moves consist of pulling across an edge $e$ from an endpoint
$w$ the subgroup ${\mathcal T}^{**}(w)\cap {\mathcal T}(e)$, simultaneously for all
$(w,e)$. Since $J_{n,n+1} \smallsetminus J_n^- \subset J_{n+1}^-$
then ${\mathcal F}(J_{n,n+1})$ is generated by elements of ${\mathcal F}(J_n^{-})$ and ${\mathcal F}(J_{n+1}^-)$ which are contained in ${\mathcal F}^*(J_n)$ and ${\mathcal F}^*(J_{n+1})$ respectively.
Therefore the group ${\mathcal T}(e)$ is generated by elements in ${\mathcal T}^*(w), {\mathcal T}^*(v)$ for the endpoints $w,v$ of $e$. Thus by applying IIA moves we can promote ${\mathcal T}^{**}(e)$ to ${\mathcal T}(e)$. Similarly, $J_n \subset J_n^{-} \cup J_{n-1,n}^- \cup J_{n,n+1}^-$ hence ${\mathcal T}(w)$ is generated by elements in ${\mathcal T}^{**}(w)$ and $\{{\mathcal T}^{**}(e) \mid w \text{ is an endpoint of } e\}$. Therefore we can promote ${\mathcal T}^{**}(w)$ to ${\mathcal T}(w)$ using IIA moves.
\end{proof}
When $\mathcal Y$ is a locally finite graph of groups with all
vertex and edge stabilizers finite rank free groups we define the
\emph{geometric realization} $GR(\mathcal Y)$. This is the
2-complex constructed by taking a finite graph $\Gamma_w$ for
every vertex $w$ so that $\pi_1(\Gamma_w)=\mathcal Y(w)$, and
similarly taking a finite graph $\Gamma_e$ for every edge $e$ so
that $\pi_1(\Gamma_e)=\mathcal Y(e)$, and gluing $\Gamma_e\times
[0,1]$ according to
inclusion homomorphisms. Up to a proper homotopy equivalence,
$GR(\mathcal Y)$ is independent of the choices.
From the lemmas above we see that the
fundamental groups of graphs of groups ${\mathcal T}$, ${\mathcal T}^*$, ${\mathcal T}^{**}$ are
all isomorphic to $\pi_1(X)$. We now upgrade this to proper
homotopy equivalences of geometric realizations.
\begin{lemma}\label{allsame}
$X, GR({\mathcal T}), GR({\mathcal T}^*), GR({\mathcal T}^{**})$ are all proper homotopy
equivalent.
\end{lemma}
\begin{proof}
$GR({\mathcal T})$ can be built as a subspace of $X\times [0,\infty)$:
$$GR({\mathcal T})=\bigcup_{n=0}^\infty \big(D^{-1}(J_n)\times\{n\}\cup
D^{-1}(J_n\cap J_{n+1})\times [n,n+1]\big)$$
The map $GR({\mathcal T})\to X$ is the projection, and $X\to GR({\mathcal T})$ is
the map $x\mapsto (x,\phi(x))$, where $\phi$ equals $n$ on
$J_n\smallsetminus (J_{n-1}\cup J_{n+1})$ and is in $[n,n+1]$
on $J_n\cap J_{n+1}$. These are each other's proper homotopy
inverses by homotoping along the second coordinate.
That $GR({\mathcal T}^*)\to GR({\mathcal T}^{**})\to GR({\mathcal T})$ are proper homotopy
equivalences follows from the fact that Moves IA as well
as IIA consisting of pulling finitely generated subgroups
are proper homotopy equivalences on geometric realizations.
\end{proof}
To finish, we need the relative version of Nielsen Realization for
graphs, proved by Hensel-Kielak.
\begin{thm}[\cite{hensel-kielak}]
Let $H<Out(F_n)$ be a finite subgroup and ${\mathcal F}$ an $H$-invariant
free factor system. Suppose the action of $H$ on ${\mathcal F}$ is
realized as a simplicial action of $H$ on a finite graph $\Gamma_0$ whose
fundamental group is identified with ${\mathcal F}$ (so the components of
$\Gamma_0$ correspond to the free factors in ${\mathcal F}$). Then there
is a finite graph $\Gamma$, a simplicial action of $H$ on
$\Gamma$, an $H$-equivariant embedding
$\Gamma_0\hookrightarrow \Gamma$, and an identification
$\pi_1(\Gamma)\cong F_n$ so that the induced $H\to Out(F_n)$ is
the given embedding $H<Out(F_n)$.
\end{thm}
When ${\mathcal F}$ is empty, we have the (absolute) Nielsen Realization
\cite{NR1,NR2,NR3,NR4}.
To apply this, we note:
\begin{lemma}
For every vertex $w$ in ${\mathcal T}^*$ the incident edge groups form a free
factor system in ${\mathcal T}^*(w)$.
\end{lemma}
\begin{proof}
This is true for the tree ${\mathcal T}$ by construction. The statement
then follows from the fact that intersections of free factor
systems are free factor systems.
\end{proof}
Now we build a graph $Y$. We first construct graphs associated to
the edges. Note that all orbits of edges are finite. For an
edge $e$ of ${\mathcal T}^*$ choose a graph $\Gamma_e$ with
$\pi_1(\Gamma_e)={\mathcal T}^*(e)$ where $Stab_H(e)$ acts inducing the
given action on ${\mathcal T}^*(e)$. Of course, $Stab_H(e)$ is a compact
group, but the action on ${\mathcal T}^*(e)$ factors through a finite group,
so we can apply the Nielsen Realization theorem.
We associate the same graph to all edges in the orbit of $e$, with
suitable identifications on $\pi_1$, so that $H$ now acts on the
disjoint union of these graphs with the given action on $\pi_1$.
Now consider a vertex $w$. We have that $Stab_H(w)$ acts on
${\mathcal T}^*(w)$ and this action factors through a finite group, which
also acts on the free factor system defined by the incident
edges. This action is realized by the action of $Stab_H(w)$ on the
disjoint union of the graphs representing the edge spaces, so the
Relative Nielsen Realization provides a finite graph $\Gamma_w$
that contains this disjoint union and an extension of this
action. Associate such graphs to the vertices equivariantly. The
union along the subgraphs associated to the edges is the desired
graph $Y$. Thus $H$ acts on $Y$ simplicially. The following lemma
finishes the proof of the Main Theorem in the core graph case.
\begin{lemma}
There is a proper homotopy equivalence $Y\to X$ that commutes
with the action of $H$.
\end{lemma}
\begin{proof}
Using the same graphs to represent vertex and edge groups, the
geometric realization $GR({\mathcal T}^*)$, after collapsing the
$I$-factors, becomes $Y$, and this is a proper homotopy
equivalence. By composing with proper homotopy equivalences from
Lemma \ref{allsame} we have $f\colon X\to Y$ and $g\colon Y\to X$, which are
each other's inverses. If $h\in H$ then by construction $h\colon X\to
X$ and $ghf\colon X\to X$ induce the same element of
$Out(\pi_1(X))$. It then follows from Theorem \ref{id} applied
to $ghf\cdot h^{-1}$ that they are properly homotopic.
\end{proof}
\section{Proof for trees}\label{5}
We next prove Nielsen realization for trees.
\begin{thm}
Suppose the graph $X$ is a tree and let $H<Maps(X)$ be a compact
subgroup. Then there is a tree $Y\simeq X$ where $H$ acts by
simplicial isomorphisms.
\end{thm}
Note that by Corollary \ref{4.12} $Maps(X)=Homeo(\partial
X)$. Fix a metric $d$ on $\partial X$.
\textbf{Step 1.} We replace $d$ by an $H$-invariant metric $d'$. Let $\nu$
be a Haar measure on $H$ and define
$$d'(p,q)=\int_H d(h(p),h(q))\, d\nu$$
This is an $H$-invariant metric.
We drop the prime and assume $d$ is $H$-invariant.
\textbf{Step 2.} We now build equivariant finite partitions of $\partial X$
into clopen sets. Let $\epsilon>0$. Say $p,q\in \partial X$ are
$\epsilon$-path connected if there is a sequence
$p=z_0,z_1,\cdots,z_n=q$ so that $d(z_i,z_{i+1})<\epsilon$ for all
$i=0,\cdots,n-1$. The equivalence classes form the desired
partition $\mathcal P_{\epsilon}$. Note that if $\epsilon<\epsilon'$
then $\mathcal P_\epsilon$ refines $\mathcal P_{\epsilon'}$ and if
$\mathcal P$ is an arbitrary finite partition into clopen sets,
there is $\epsilon>0$ so that $\mathcal P_{\epsilon}$ refines
$\mathcal P$.
\textbf{Step 3.} Finally we build $Y$ as the \emph{mapping telescope} of
a sequence of partitions from Step 2. Fix a decreasing sequence
$\epsilon_n\to 0$ with $n=1,2,\cdots$ and let $\mathcal
P_n:=\mathcal P_{\epsilon_n}$. We also set $\mathcal P_0$ to be the
trivial partition $\{\partial X\}$. Since $\mathcal P_{n+1}$
refines $\mathcal P_n$ we have a natural surjection $\mathcal
P_{n+1}\to \mathcal P_{n}$ induced by inclusion of sets. Now let $Y$
be the mapping telescope of this sequence. More concretely, the set
of vertices is the disjoint union $\sqcup_{n=0}^\infty \mathcal
P_n\times\{n\}$, and there is an edge from $P\times\{n+1\}$ to
$Q\times\{n\}$ whenever $P\subseteq Q$ (here $P\in\mathcal
P_{n+1}$ and $Q\in \mathcal P_n$). Then $Y$ is a tree and
$\partial Y$ is naturally (and $H$-equivariantly) homeomorphic
to $\partial X$ by the homeomorphism that sends a branch
$(P_n)_n$ of $Y$ to the point $\cap_n P_n$ in $\partial X$. The
theorem is now proved since we have natural
identifications
$$Maps(X)=Homeo(\partial X)=Homeo(\partial Y)=Maps(Y)$$
and $H$ acts simplicially on $Y$.
\section{Proof in general}
Let $X$ be a locally finite graph which is not a tree and assume that
a compact group $H$ is acting on $X$ by proper homotopy
equivalences. The action then restricts to the core $X_g$ (see Lemma
\ref{restriction}) and by the special case of core graphs there is a
core graph $Y_g$, an action of $H$ by simplicial isomorphisms on
$Y_g$, and an $H$-equivariant proper homotopy equivalence $f\colon X_g\to
Y_g$.
\begin{lemma}
There is a locally finite graph $Y\supseteq Y_g$ and a proper
homotopy equivalence $X\to Y$ that extends $f$.
\end{lemma}
\begin{proof}
Form the mapping cylinder $M=X_g\times I\sqcup Y_g/x\sim f(x)$ of
$f$. Since $f$ is a proper homotopy equivalence, both 0 and 1-levels
of $M$ (which can be identified with $X_g$ and $Y_g$) are proper strong
deformation retracts of $M$. For $Y_g$ this can be seen by deforming
along the mapping cylinder lines. For $X_g$, without the word
``proper'', this is a theorem of Ralph Fox \cite{fox}, see also
\cite{fuchs}, but their proofs work just as well in the proper
category. The statement can also be deduced from the
Whitehead theorem, see \cite{hatcher}, and
\cite{farrell-taylor-wagoner} for the proper version. Now $X$ is
obtained from $X_g$ by attaching trees $T_v$ along vertices $v\in
X_g$. Attach products $T_v\times I$ to $M$ along the natural copies
of $\{v\}\times I$ to obtain a space $Z$ and note that both $X$ and
the space $Y$ (obtained from $Y_g$ by attaching trees $T_v$ along
$f(v)$) are proper strong deformation retracts of $Z$ and this gives
the desired proper homotopy equivalence $X\to Y$.
\end{proof}
We will now revert to the original notation and simply assume that $H$
is acting by simplicial isomorphisms on $X_g$.
By the {\it convex hull} of a nonempty subset of a simplicial tree we mean
the smallest simplicial subtree that contains the set. The following
fixed point fact is well known.
\begin{lemma}\label{fixed0}
Suppose a compact group $H$ acts continuously on a simplicial
tree. Then $H$ fixes a point in the convex hull of any orbit.
\end{lemma}
\begin{proof}
The convex hull is $H$-invariant and it is a tree of finite
diameter. Iteratively remove all edges that contain a valence 1
vertex until the tree that's left is either a single vertex or a
single edge. This vertex or the midpoint of the edge is then fixed
by $H$.
\end{proof}
We will now use this fact to prove the following fixed point
theorem, which is really the heart of the argument in this case.
\begin{lemma}\label{fixed}
Suppose $H$ fixes a point $\beta$ in $DX=\partial X\smallsetminus
\partial X_g$. Then $H$ fixes a point $\rho(\beta)$
in $X_g$ and there is a ray (called the {\it Nielsen ray}) $r$ from
$\rho(\beta)$ to $\beta$ such that $h(r)$ and $r$ are properly
homotopic rel $\rho(\beta)$ for every $h\in H$.
\end{lemma}
\begin{proof}
Let $\tilde X$ be the universal cover of $X$. Let $r$ be a ray in
$X$ converging to $\beta$. The deck group acts simply transitively on
the set of lifts of $r$ and distinct lifts are not asymptotic and
hence not properly homotopic. Choose one such lift $\tilde r$.
Every $h\in H$ has a unique lift to $\tilde X$ that fixes the
asymptotic class of rays $[\tilde r]$
and the set of these lifts defines an action of $H$ on
$\tilde X$ by proper homotopy equivalences. We will prove that the action is continuous in the next paragraph. The lifted group $H$ preserves the preimage $\tilde X_g$ of
$X_g$, which is a tree, and this defines an action of $H$ on
$\tilde X_g$. By Lemma \ref{fixed0} it fixes
a point $z$. The image of $z$ in $X_g$ is the desired fixed
point and the image of the ray that starts at $z$ and is
asymptotic to $\tilde r$ is the Nielsen ray.
The action
is continuous: if $h\in H$ is close to the identity, we can choose
a representative in its proper homotopy class that fixes a large
compact set $K\subset X$ as well as the ray $r$, and preserves the
complementary components of $K$. We can also arrange that $K\cup
r$ is connected. Then the lift of $h$ to $\tilde X$ will fix the
preimage $\tilde K$ and will preserve its complementary
components. Since $K$ can be chosen so that $\tilde K$ contains
any given compact set, the lift of $h$ will be close to the
identity.
\end{proof}
Let $d$ be an $H$-invariant metric on $\partial X$ (see Step 1 in
Section \ref{5}) and let $\mathcal P_\epsilon$ be the partition of
$\partial X$ as in Step 2 in Section \ref{5}. Again fix a decreasing
sequence $\epsilon_n\to 0$ and set $\mathcal P_n:=\mathcal
P_{\epsilon_n}$. Let $\pi':X\cup DX\to X_g$ denote the nearest point
projection (this is not equivariant).
Fix an $H$-equivariant exhaustion $\emptyset=K_0\subset K_1\subset
K_2\subset\cdots$ of $X_g$ by finite connected subgraphs so that
if $\beta\in X\cup DX$ and $\pi'(\beta)\not\in K_{i+1}$ then
$\pi'(h(\beta))\not\in K_i$ for every $h\in H$.
Call an element $P\in\mathcal P_n$ {\it good} if the following
holds:
\begin{itemize}
\item $P\subset DX$,
\item $\pi'(P)$ is a point,
\item $Stab_H(P)$ fixes a point $\rho(P)$ in $X_g$; moreover,
if $\pi'(P)$ is disjoint from $K_{i+1}$ then
$\rho(P)$ and $\pi'(P)$ are in the same
component of $X_g\smallsetminus K_i$,
\item for every $x\in P$ there is a ray $r_x$ from $\rho(P)$ to $x$ so
that all these rays (for all $x\in P$) agree along $X_g$ and
further they are permuted up to proper homotopy by $Stab_H(P)$.
\end{itemize}
So in particular $r_x$ is a Nielsen ray with respect to
$Stab_H(x)<Stab_H(P)$. We will also call
the rays $r_x$ {\it Nielsen rays}.
\begin{lemma}
For every $\beta\in DX$ there is $n_0$ so that for every $n\geq n_0$ the
element $P\in \mathcal P_n$ containing $\beta$ is good.
\end{lemma}
\begin{proof}
We first observe that $Stab_H(\beta)$ fixes a point in $X_g$ by
applying Lemma \ref{fixed} to the induced action of $Stab_H(\beta)$
on the graph $X_g^*=X_g\cup \rho_\beta$ (see Lemma
\ref{restriction}). By our assumption on the exhaustion, if
$\pi'(\beta)$ misses $K_{i+1}$ then the action restricts to the
complementary component of $K_i$ that contains $\pi'(\beta)$, so in
this case the fixed point $\rho(\beta)$ can be found there. Now notice that the
stabilizer of a point in $X_g$ is a clopen subgroup of $H$, so when
$n$ is large the stabilizer of $P_n\in \mathcal P_n$ that contains
$\beta$ will also fix the same point. (Since $H$ permutes the
partition elements in $\mathcal P_n$, $Stab_H(\beta)$ will leave $P_n$
invariant and we see that $Stab_H(\beta)=\cap_n Stab_H(P_n)$ is the
intersection of clopen subgroups. By compactness we have
$Stab_H(P_n)\subseteq Stab_H(\rho(\beta))$ for large $n$.)
We will of course also have
$P\subset DX$, $\pi'(P)$ is a point, and $h(\ell)\cap X_g=\emptyset$
for every line $\ell$ joining two points of $P$ and every $h\in H$.
\end{proof}
Now we construct an $H$-equivariant cover $\mathcal N$ by pairwise
disjoint good partition elements. Say an $H$-orbit in $\mathcal P_n$
(which is finite) is {\it good} if every (any) element in it is good. Then
let $\mathcal N$ consist of good orbits in $\mathcal P_1$ as well as
those good orbits in $\mathcal P_n$, $n=2,3,\cdots$ whose union is not
contained in the union of any good orbit in $\mathcal P_{n-1}$.
Define an equivariant map $\rho:\mathcal N\to X_g$ by letting $\rho$
be as in the definition of a good partition element on a
representative of the orbit, and then extend it equivariantly. Thus we
still have the Nielsen rays for all elements of $\mathcal N$.
We now construct a graph $Y$ by attaching trees to $Y_g=X_g$. For
every $N\in \mathcal N$ we build a tree $T_N$ as in Step 3 of Section
\ref{5} for $Stab_H(N)$, namely the mapping telescope with base vertex $N$ and the
other vertices all the partition elements contained in $N$. We
identify $\partial T_N$ with $N$. We then attach $T_N$ to $X_g$ by
identifying the base vertex $N$ with the point $\rho(N)\in X_g$. Doing
this for all $N\in \mathcal N$ produces the desired graph $Y$. By
construction $H$ acts on $Y$ by simplicial isomorphisms.
\begin{lemma}
There is a proper homotopy equivalence $F:Y\to X$ such that
\begin{enumerate}[(a)]
\item $F$ is identity on
$X_g$ and on $DX=DY$,
\item $F$ sends the rays in $T_N$ based at $N$ to the Nielsen rays $r_x$
from $\rho(N)$ to
$\partial N$ preserving the endpoints,
\item
$F$ is $H$-equivariant.
\end{enumerate}
\end{lemma}
\begin{proof}
The map $F$ is uniquely defined on each $T_N$ by (a)-(c). That this
map is proper as a map $Y\to X$ follows from the fact that if
$N_i\in \mathcal N$ converge to $\beta\in\partial X_g$, then
$\rho(N_i)\to\beta$. Thus $F$ is a proper homotopy equivalence by
Corollary \ref{criterion2}.
Finally we argue $H$-equivariance. Denote by $F'$ the proper
homotopy inverse of $F$ which is identity on $X_g$. If $h\in H$
consider $F'hF\cdot h^{-1}:Y\to Y$. This is identity on $X_g$ and on
$\partial X$. By Corollary \ref{criterion} it suffices to argue that this
map preserves oriented loops and lines connecting points of
$DX$. For loops this is clear since the map is identity on $X_g$. It
also preserves lines joining points of $DX$ since such lines can be written as a concatenation
$r^{-1}sr'$ where $r,r'$ are Nielsen rays and $s$ is a segment in
$X_g$. Finally, it preserves lines that connect distinct points of
some $N\in \mathcal N$.
\end{proof}
This finishes the proof of the Main Theorem.
\end{document}
|
\begin{document}
\title{3-color Bounded Patterned Self-assembly
\texorpdfstring{
\thanks{
The research of L.~K. and S.~K. was supported
by the NSERC Discovery Grant R2824A01 and
UWO Faculty of Science grant to L.~K.
The research of S.~S. was supported
by the HIIT Pump Priming Project Grant 902184/T30606.}}{}}
\author{Lila Kari$^1$ \and Steffen Kopecki$^1$ \and
Shinnosuke Seki$^2$}
\date{}
\maketitle
{\small \centering
$^1$ Department of Computer Science, Middlesex College, \\
The University of Western Ontario \\
London Ontario N6A 5B7, Canada \\
{\tt [email protected], [email protected]} \\
$^2$ Helsinki Institute of Information Technology (HIIT), \\
Department of Computer Science, Aalto University \\
P.\,O.~Box 15400, FI-00076, Aalto, Finland \\
{\tt [email protected]} \\
}
\begin{abstract}
Patterned self-assembly tile set synthesis (\pats) is the problem of finding a minimal tile set which uniquely self-assembles into a given pattern.
Czeizler and Popa proved the \NP-completeness of \pats and Seki showed that the \pats problem is already \NP-complete for patterns with 60 colors.
In search for the minimal number of colors such that \pats remains \NP-complete, we introduce multiple bound \pats (\mbpats) where we allow bounds for the numbers of tile types of each color.
We show that \mbpats is \NP-complete for patterns with just three colors and, as a byproduct of this result, we also obtain a novel proof for the \NP-completeness of \pats which is more concise than the previous proofs.
\end{abstract}
\section{Introduction}\label{sec:intro}
Tile self-assembly is the autonomous formation of a structure from individual {\em tiles} controlled by local attachment rules.
One application of self-assembly is the implementation of nanoscopic tiles by DNA strands forming double crossover tiles with four unbounded single strands \cite{WinfreeLWS1998}.
The unbounded single strands control the assembly of the structure as two, or more, tiles can attach to each other only if the bonding strength between these single strands is big enough.
The general concept is to have many copies of the same tile types in a solution which then form a large crystal-like structure over time; often an initial structure, the {\em seed}, is present in the solution from which the assembly process starts.
A mathematical model describing self-assembly systems is the {\em abstract tile self-assembly model} (aTAM), introduced by Winfree \cite{WinfreePhD}.
Many variants of aTAMs have been studied: a main distinction between the variants is whether the {\em shape} or the {\em pattern} of a self-assembled structure is studied.
In this paper we focus on the self-assembly of patterns, where a property, modeled as color, is assigned to each tile; see for example \cite{RothemundPW2004} where fluorescently labeled DNA tiles self-assemble into Sierpinski triangles.
Formally, a pattern is a rectilinear grid where each vertex has a color: a $k$-colored $m\times n$-pattern $P$ can be seen as a function $P\colon [m]\times [n] \to [k]$, where $[i] = \set{1,2,\ldots,i}$.
The optimization problem of {\em patterned self-assembly tile set synthesis} (\pats), introduced by Ma and Lombardi \cite{MaL2008}, is to determine the minimal number of tile types needed to uniquely self-assemble a given pattern starting from an $L$-shaped seed.
In this paper, we consider the decision variant of \pats, defined as follows:
\myproblem{A $k$-colored pattern $P$ and an integer $m$}
{``Yes'' if $P$ can uniquely be self-assembled by using $m$ tile types}
{\kpats}
Czeizler and Popa proved that \pats, where the number of colors on an input pattern is not bounded, is \NP-hard \cite{CzeizlerP2012}, but the practical interest lies in \kpats. Seki proved $60$-\pats is \NP-hard \cite{shin}.
By the nature of the biological implementations, the number of distinct colors in a pattern can be considered small.
In search for the minimal number $k$ for which \kpats remains \NP-hard, we investigate a modification of \pats: {\em multiple bound \pats} (\mbpats) uses individual bounds for the number of tile types of each color.
\myproblem{A pattern $P$ with colors from $[k]$ and $m_1,\ldots,m_k\in\mathbb{N}$}
{``Yes'' if $P$ can uniquely be self-assembled by using $m_i$ tile types of color $i$, for $i\in[k]$}
{\kmbpats}
The main contribution of this paper is a polynomial-time reduction from \pats to \tmbpats which proves the \NP-hardness of \tmbpats.
However, our reduction does not take every pattern as input; we only consider a restricted subset of patterns for which \pats is known to remain \NP-hard.
The patterns we use as input are exactly those patterns that are generated by a polynomial-time reduction from \sat to \pats.
Using one of the reductions which were presented in \cite{CzeizlerP2012,shin} as a foundation for our main result turned out to be unfeasible.
Therefore, we present a novel proof for the \NP-hardness of \pats which serves well as foundation for our main result.
Furthermore, our reduction from \sat to \pats is more concise compared to previous reductions in the sense that in order to self-assemble a pattern $P$ we only allow three more tile types than colors in $P$.
In Czeizler and Popa's approach the number of additional tile types is linear in the size of the input formula and Seki uses 84 tile types with 60 colors.
Let us note first that the decision variants of \pats and \mbpats can be solved in \NP by simple ``guess and check'' algorithms.
Before we prove \NP-hardness of \pats, Corollary~\ref{cor:pats} in Sect.~\ref{sec:pats}, and \tmbpats, Corollary~\ref{cor:mbpats} in Sect.~\ref{sec:bwg}, we introduce the formal concepts of patterned tile assembly systems, in Sect.~\ref{sec:RTAS}.
\section{Rectilinear Tile Assembly Systems}
\label{sec:RTAS}
In this section we formally introduce patterns and rectilinear tile assembly systems.
An excellent introduction to the fundamental model aTAM is given in \cite{RothemundW2000}.
Let $C$ be a finite {\em alphabet of colors}.
An $m\times n$-{\em pattern} $P$, for $m,n\in\mathbb{N}$, with colors from $C$ is a mapping $P\colon [m]\times[n] \to C$.
By $C(P) \subseteq C$ we denote the colors in the pattern $P$, i.\,e.,\xspace the codomain or range of the function $P$.
The pattern $P$ is called {\em $k$-colored} if $\abs{C(P)} \le k$.
The width and height of $P$ are denoted by $w(P) = m$ and $h(P) = n$, respectively.
We call $(x,y)\in[m]\times [n]$ a {\em position} in $P$.
The pattern is arranged such that position $(1,1)$ is on the bottom left, $(m,1)$ is on the bottom right, $(1,n)$ is on the top left, and $(m,n)$ is on the top right of the pattern $P$.
Fig.~\ref{fig:ex:pattern} (left side) shows an example pattern.
\begin{figure}
\caption{Pattern $P$ and how it can be self-assembled by three tile types.}
\label{fig:ex:pattern}
\end{figure}
Let $\Sigma$ be a finite {\em alphabet of glues}.
A {\em colored Wang tile}, or simply {\em tile}, $t\in C\times \Sigma^4$ is a unit square with a color from $C$ and four glues from $\Sigma$, one on each of its edges.
The color of $t$ is denoted by $\chi(t)\in C$ and we denote the glues on the north, east, west, and south edges of $t$ by $t(N)$, $t(E)$, $t(W)$, and $t(S)$, respectively.
We also call the south and west glues the {\em inputs} of $t$ while the north and east glues are called {\em outputs} of $t$; this notation will become clear in the next paragraph.
Wang tiles are not allowed to rotate.
A {\em rectilinear tile assembly system} (RTAS) $(T,\sigma)$ over $C$ and $\Sigma$ consists of a set of colored Wang tiles $T\subseteq C\times \Sigma^4$ and an $L$-shaped seed $\sigma$.
The seed $\sigma$ covers positions $(0,0)$ to $(m,0)$ and $(0,1)$ to $(0,n)$ of a two-dimensional Cartesian grid and it has north glues from $\Sigma$ on the positions $(1,0)$ to $(m,0)$ and east glues from $\Sigma$ on positions $(0,1)$ to $(0,n)$.
We will frequently call $T$ an RTAS without explicitly mentioning the seed, but we keep in mind that a unique seed is assigned to each RTAS.
The RTAS $T$ describes the self-assembly of a structure: starting with the seed, a tile $t$ from $T$ can attach to the structure at position $(x,y)\in[m]\times[n]$, if its west neighbor at position $(x-1,y)$ and south neighbor at position $(x,y-1)$ are present and the inputs of $t$ match the adjacent outputs of its south and west neighbors; the self-assembly stops when no more tiles in $T$ can be attached by this rule.
Arbitrarily many copies of each tile type in $T$ are considered to be present while the structure is self-assembled, thus, one tile type can appear in multiple positions.
Fig.~\ref{fig:ex:pattern} shows the process of self-assembling a pattern by an RTAS with three tiles.
We are only interested in structures that fully tile the rectangle that is spanned by the seed.
A {\em tile assignment} in $T$ is a function $f\colon [m]\times[n] \to T$ such that $f(x,y)(W) = f(x-1,y)(E)$ and $f(x,y)(S) = f(x,y-1)(N)$ for $(x,y)\in[m]\times[n]$.
The RTAS self-assembles a pattern $P$ if there is a tile assignment $f$ in $T$ such that the color of each tile in the assignment $f$ is the color of the corresponding position in $P$, i.\,e.,\xspace $\chi \circ f = P$.
A terminological convention is to call the elements in $T$ {\em tile types} while the elements in a tile assignment are called {\em tiles}; each tile in a tile assignment is the copy of a tile type from the corresponding RTAS $T$.
A {\em directed RTAS} (\tas) $T$ is an RTAS where any two distinct tile types $t_1,t_2\in T$ have different inputs, i.\,e.,\xspace $t_1(S) \neq t_2(S)$ or $t_1(W) \neq t_2(W)$.
A \tas has at most one tile assignment and can self-assemble at most one pattern.
If $T$ self-assembles an $m\times n$-pattern $P$, it defines the function $P_T\colon [m]\times[n] \to T$ such that $P_T(x,y)$ is the tile in position $(x,y)$ of the tile assignment given by $T$.
In this paper, we investigate minimal RTASs which uniquely self-assemble one given pattern $P$.
An observation from \cite{GoosO2011} allows us to focus on \tas only when searching for minimal RTAS that uniquely self-assemble a given pattern:
\begin{proposition}
If a pattern $P$ can uniquely be self-assembled by an RTAS $T$ with $m$ tile types, then there is also a \tas $T'$ with $m$ tile types which (uniquely) self-assembles $P$.
\end{proposition}
\begin{remark}\label{rem:glues}
In the following proofs, a central concept is to show that the design of a pattern $P$ enforces that any \tas $T$ which self-assembles $P$ (and maybe respects some tile type bounds) contains {\em tile types of a certain form}.
As we are flexible with choosing the set of glues, we can always obtain a \tas with different tile types by applying some bijection on the set of glues.
A subtler point is that glues used on horizontal edges and glues used on vertical edges can be seen as separate sets of glues as these edges can never glue to each other.
For the ease of notation, we will use the same glue labels for horizontal and vertical glues, but keep in mind that we may apply one bijection to all horizontal glues and another bijection to all vertical glues in order to obtain an {\em isomorphic} \tas.
\end{remark}
\section{\texorpdfstring{\NP-hardness of \pats}{NP-hardness of PATS}}
\label{sec:pats}
In this section, we prove the \NP-hardness of \pats.
The proof we present uses many techniques that have already been employed in \cite{CzeizlerP2012,shin}.
Let us also point out that we do not intend to minimize the number of colors used in our patterns or the size of our patterns.
Our motivation is to give a proof that is easy to understand and serves well as a foundation for the results in Sect.~\ref{sec:bwg}.
A boolean formula $F$ over variables $V$ in {\em conjunctive normal form with three literals per clause}, 3-CNF for short, is a boolean formula such that
\[
F = ( c_{1,1} \lor c_{1,2} \lor c_{1,3}) \land
( c_{2,1} \lor c_{2,2} \lor c_{2,3}) \land \cdots \land
( c_{\ell,1} \lor c_{\ell,2} \lor c_{\ell,3})
\]
where $c_{i,j} \in \sett{v,\neg v}{v\in V}$ for $i\in[\ell]$ and $j=1,2,3$.
It is well known that the problem \sat, to decide whether or not a given formula $F$ in 3-CNF is satisfiable, is \NP-complete; see e.\,g.,\xspace \cite{Papadimitriou2003}.
The \NP-hardness of \pats follows by the polynomial-time reduction from \sat to \pats, stated in Theorem~\ref{thm:reduction:pats}.
\begin{theorem}\label{thm:reduction:pats}
For every formula $F$ in 3-CNF there exists a pattern $P_F$ such that $F$ is satisfiable if and only if $P_F$ can be self-assembled by a \tas with at most $\abs{C(P_F)} +3$ tile types.
Moreover, $P_F$ can be computed from $F$ in polynomial time.
\end{theorem}
Theorem~\ref{thm:reduction:pats} follows by Lemmas~\ref{lem:3sat:poly} and~\ref{lem:pats}, which are presented in the following.
\begin{corollary}\label{cor:pats}
\pats is \NP-hard.
\end{corollary}
The pattern $P_F$ consists of several rectangular {\em subpatterns} which we will describe in the following.
None of the subpatterns will be adjacent to another subpattern.
The remainder of the pattern $P_F$ is filled with {\em unique colors}; a color $c$ is unique in a pattern $P$ if it appears only in one position in $P$, i.\,e.,\xspace $\abs{P^{-1}(c)} = 1$.
As a technicality that will become useful only in the proof of Theorem~\ref{thm:reduction}, we require that each position adjacent to the $L$-shaped seed or to the north or east border of pattern $P_F$ has a unique color.
Clearly, for each unique color in $P_F$ we require exactly one tile in any \tas which self-assembles $P_F$.
Since each subpattern is surrounded by a frame of unique colors, the subpatterns can be treated as if each of them would be adjacent to an $L$-shaped seed and we do not have to care about the glues on the north border or east border of a subpattern.
The placement of the tiles with unique colors is simple, as for each unique color we find a path of unique colors to the seed, using west and south steps, and we may assume that this path uses unique glues (glues which are not used anywhere else in the tile assignment).
\begin{figure}
\caption{The four tile types used to implement the \tl{or}-gate.}
\label{fig:or-tiles}
\end{figure}
As stated earlier, the number of tile types $m$ that is required to self-assemble $P_F$, if $F$ is satisfiable, is $m = \abs{C(P_F)} +3$.
Actually, every color in $C(P_F)$ will require one tile type only except for one color which is meant to implement an \tl{or}-gate; see Fig.~\ref{fig:or-tiles}.
Each of the tile types with color \Cor is supposed to have west input $w\in\set{0,1}$, south input $s\in \set{0,1}$, east output $w \lor s$, and an independent north output.
Our first subpattern $p$, shown in Fig.~\ref{fig:sub:p}, ensures that every \tas which self-assembles the subpattern $p$ contains at least three tile types with color \Cor.
For the upcoming proof of Theorem~\ref{thm:reduction} we need a more precise observation which draws a connection between the number of distinct output glues and the number of distinct tile types with color \Cor.
\begin{figure}
\caption{The subpattern $p$.}
\label{fig:sub:p}
\end{figure}
\begin{lemma}\label{lem:p_or}
A \tas $T$ which self-assembles a pattern including the subpattern $p$ contains either
\begin{compactenum}[i.)]
\item three distinct tile types $o_1,o_2,o_3\in T$ with color \Cor all having distinct north and east glues,
\item four distinct tile types $o_1,o_2,o_3,o_4\in T$ with color \Cor all having distinct north glues and together having at least two distinct east glues,
\item four distinct tile types $o_1,o_2,o_3,o_4\in T$ with color \Cor all having distinct east glues and together having at least two north glues, or
\item eight distinct tile types $o_1,\ldots,o_8\in T$ with color \Cor all having distinct east or north glues.
\end{compactenum}
\end{lemma}
\begin{proof}
In the subpattern $p$, each of the eight tiles $t_1,\ldots,t_8$ with colors \colT{colMod}{\color{white}\tl{Y_1}} to \colT{colMod}{\color{white}\tl{Y_8}} has south and west neighbors colored by \Cor.
Since these tiles have mutually distinct colors they are all of different types as their inputs (south and west edges) have to differ.
Therefore, the combination of outputs (north and east edges) of the tile types with color \Cor cannot be less than eight.
More formally, let $O\subseteq T$ be the set of tiles with color \Cor.
Let $i = \abs{\sett{t(N)}{t\in O}}$ be the number of distinct north glues on tiles from $O$ and let $j = \abs{\sett{t(E)}{t\in O}}$ be the number of distinct east glues.
If $i\cdot j$ were less than $8$, at least the inputs of two of the eight tiles $t_1,\ldots,t_8$ would coincide as their placement solely depends on the outputs of tiles from $O$.
There are four possibilities:
\begin{compactenum}[\it i.)]
\item $i\ge 3$ and $j\ge 3$, therefore, $\abs O \ge 3$,
\item $i\ge 4$ and $j = 2$, therefore, $\abs O \ge 4$,
\item $i = 2$ and $j\ge 4$, therefore, $\abs O \ge 4$, or
\item $i\ge 8$ or $j\ge 8$, therefore, $\abs O \ge 8$.
\end{compactenum}
\end{proof}
We aim to have statement {\it ii.)}\ of Lemma~\ref{lem:p_or} satisfied, but so far all four statements are possible.
Note that this lemma is independent of the number of tile types in the \tas $T$, which is a crucial difference to the observations that will follow.
The subpatterns $q_1$ to $q_5$ in Fig.~\ref{fig:sub:q} will enforce the functionality of the \tl{or}-gate tile types.
\begin{figure}
\caption{The subpatterns $q_1$ to $q_5$.}
\label{fig:sub:q}
\end{figure}
\begin{lemma}\label{lem:q_or}
Let $P$ be a pattern that contains the subpatterns $p$ and $q_1$ to $q_5$, and let $m = \abs{C(P)}+3$.
A \tas $T$ with at most $m$ tile types which self-assembles pattern $P$ contains four tile types with color \Cor of the forms shown in Fig.~\ref{fig:or-tiles}.
For every other color in $C(P)$ there exists exactly one tile type in $T$.
Moreover, the tile type with color \Cra has east output $0$
and the tile type with color \Cpos has west input $1$.
\end{lemma}
\begin{proof}
By Lemma~\ref{lem:p_or}, the \tas $T$ contains at least three tile types with color \Cor.
Since we need at least one tile type for each color in $C(P)$, there is one tile type left in $T$ whose color is not determined yet.
By $o_i\in T$ we denote the tile of color \Cor in subpattern $q_i$ for $i=1,2,3,4$.
We will show that all four tiles $o_1,o_2,o_3,o_4$ are of different types and are the tile types shown in Fig.~\ref{fig:or-tiles}, from left to right.
Suppose by contradiction, there were only three tile types with color \Cor and only one tile type with color \colT{colAux}{\tl c}.
Two of the tiles with colors \colT{colMod}{\color{white}\tl A}, \colT{colMod}{\color{white}\tl B}, \colT{colMod}{\color{white}\tl C}, and \colT{colMod}{\color{white}\tl D} in patterns $q_1$ to $q_4$ would have the same south and west neighbors and, hence, the inputs of the two corresponding tile types would coincide --- a property which is forbidden for \tas{}s.
Thus, either there are four tile types with color \Cor or two tile types with color \colT{colAux}{\tl c}.
For every other color there is exactly one tile type.
As there is only one tile type with color \colT{colAux}{\tl b} the tiles $o_1$ and $o_2$ have to be of different types; otherwise, the tiles with colors \Cneg and \Cpos would have the same inputs in subpatterns $q_1$ and $q_2$.
Because tiles $o_1$ and $o_2$ have the same west neighbor, the tiles with colors \colT{white}{$\uparrow\!_0$} and \colT{white}{$\uparrow\!_1$} are responsible for the placement of $o_1$ and $o_2$, respectively;
this means the north glues of the tile types with colors \colT{white}{$\uparrow\!_0$} and \colT{white}{$\uparrow\!_1$} differ.
Symmetrically, $o_1$ and $o_3$ are of different types and the east glues of the tile types with colors \Cra and \colT{white}{$\overset1\rightarrow$} differ.
Next, we see that the four tiles $o_1$ to $o_4$ all have different inputs, thus, the types of $o_1$ to $o_4$ are mutually distinct and there is only one tile type with color \colT{colAux}{\tl c}.
By the freedom of naming the used glues, see Remark~\ref{rem:glues}, we assume that the tile type with color \Cra has east output $0$, and the tile type with \colT{white}{$\overset1\rightarrow$} has east output $1$, the tile type with color \colT{white}{$\uparrow\!_0$} has north output $0$, and the tile type with color \colT{white}{$\uparrow\!_1$} has north output $1$.
The two tiles with colors \Cpos and \Cneg have the same south input whence their west input differs and depends on the neighboring \tl{or}-gate tile.
We assume that the tile with color \Cneg has west input $0'$ and the tile with color \Cpos has west input $1'$.
We have $o_1(E) = 0'$ and $o_2(E) = o_3(E) = o_4(E) = 1'$.
By Lemma~\ref{lem:p_or}, the north outputs of the four tiles $o_1$ to $o_4$ have to be distinct.
Next, we take a look at subpattern $q_5$.
The tiles with colors \colT{colMod}{\color{white}\tl A}, \colT{colMod}{\color{white}\tl B}, \colT{colMod}{\color{white}\tl C}, and \colT{colMod}{\color{white}\tl D}, in the top row of subpattern $q_5$ enforce that the four \tl{or}-gates below are of the same types as $o_1$, $o_2$, $o_3$, and $o_4$, from left to right; otherwise the south inputs of the top row cannot match the north outputs of the middle row.
By the placement of the four \tl{or}-gate tiles it is clear that $0 = 0'$ and $1 = 1'$ as desired.
\end{proof}
\begin{figure}
\caption{The subpatterns $r_1(v)$ to $r_3(v)$ for a variable $v\in V$ and
the subpattern $s(C)$ for a clause
$C = (c_1\lor c_2 \lor c_3)$ in $F$ where $c_i = v$ or $c_i = \neg v$
for some variable $v\in V$ and $i=1,2,3$.}
\label{fig:sub:F}
\end{figure}
The subpatterns that we defined so far did not depend on the formula $F$.
Now, for each variable $v\in V$ we define three subpatterns $r_1(v)$, $r_2(v)$, $r_3(v)$ and for a clause $C$ from $F$ we define one more subpattern $s(C)$; these patterns are given by Fig.~\ref{fig:sub:F}.
For a formula $F$ in 3-CNF we let $P_F$ be the pattern that contains all the subpatterns $p$, $q_1$ to $q_5$, $r_1(v)$ to $r_3(v)$ for each variable $v\in V$, and $s(C)$ for each clause $C$ from $F$, where each subpattern is placed to the right of the previous subpattern with one column of unique colors in between.
Then, $P_F$ has height $6$, because the top and bottom rows contain unique colors only, and $P_F$ has width $45+11\cdot \abs V + 6\cdot \ell$.
The next lemma follows from this observation.
\begin{lemma}\label{lem:3sat:poly}
Given a formula $F$ in 3-CNF, the pattern $P_F$ can be computed from $F$ in polynomial time.
\end{lemma}
\begin{proof}
This is obvious by the design of the pattern.
\end{proof}
The subpatterns $r_1(v)$ and $r_2(v)$ ensure that the two tile types with colors \colT{white}{$v$} and \colT{white}{$\neg v$} have distinct north outputs.
The subpattern $r_3(v)$ then implies that one of the north glues is $0$ and the other one is $1$.
\begin{lemma}\label{lem:sub:r}
Let $P_F$ be the pattern for a formula $F$ over variables $V$ in 3-CNF and let $T$ be a \tas with at most $m = \abs{C(P_F)}+3$ tile types which self-assembles pattern $P_F$.
For all variables $v\in V$, there is a unique tile type $t_v^\oplus\in T$ with color \colT{white}{$v$} and a unique tile type $t_v^\ominus\in T$ with color \colT{white}{$\neg v$} such that either $t_v^\oplus(N) = 1$ and $t_v^\ominus(N) = 0$ or $t_v^\oplus(N) = 0$ and $t_v^\ominus(N) = 1$.
Here, $0$ and $1$ are the south inputs of the \tl{or}-gate tile types as shown in Fig.~\ref{fig:or-tiles}.
\end{lemma}
\begin{proof}
Let $v\in V$, $t_v^\oplus\in T$ be the tile type with color \colT{white}{$v$}, and $t_v^\ominus\in T$ be the tile type with color \colT{white}{$\neg v$}.
The fact that $t_v^\oplus$ and $t_v^\ominus$ are unique is stated in Lemma~\ref{lem:q_or}; furthermore, for every color in $C(P_F)$ except for \Cor there exists just one tile type in $T$ with that color.
In particular, there is only one tile type with color \colT{colAux}{$v$}.
Since the north neighbor of the tile of type $t_v^\oplus$ in subpattern $r_1(v)$ and the north neighbor of the tile of type $t_v^\ominus$ in subpattern $r_2(v)$ differ while both of these neighbors have the same west input, we conclude that $t_v^\oplus(N) \neq t_v^\ominus(N)$.
Moreover, both of the tile types are a south neighbor of an \tl{or}-gate tile in subpattern $r_3(v)$, hence, $t_v^\oplus(N), t_v^\ominus(N)\in\set{0,1}$.
\end{proof}
Now, these glues serve as input for the \tl{or}-gates in the subpatterns $s(C)$.
The following lemma concludes the proof of Theorem~\ref{thm:reduction:pats}.
\begin{lemma}\label{lem:pats}
Let $P_F$ be the pattern for a formula $F$ over variables $V$ in 3-CNF and let $m = \abs{C(P_F)}+3$.
The formula $F$ is satisfiable if and only if $P_F$ can be self-assembled by a \tas $T$ with at most $m$ tile types.
\end{lemma}
\begin{figure}
\caption{All tile types, except for the \tl{or}-gate tile types, which are used to self-assemble the pattern $P_F$.}
\label{fig:all:3sat:tiles}
\end{figure}
\begin{proof}
The formula $F$ is satisfiable if and only if there is a variable assignment $f\colon V\to \set{0,1}$ which satisfies every clause in $F$.
Suppose such a variable assignment $f$ exists.
For every variable $v\in V$ we let $t_v^\oplus\in T$ be the tile type with color \colT{white}{$v$} and $t_v^\ominus\in T$ be the tile type with color \colT{white}{$\neg v$}.
In accordance with Lemma~\ref{lem:sub:r}, we let $t_v^\oplus(N) = f(v)$ and $t_v^\ominus(N) = 1-f(v)$.
The remaining glues on $t_v^\oplus$ and $t_v^\ominus$ and the other tile types are given in Fig.~\ref{fig:all:3sat:tiles} plus the four \tl{or}-gate tile types in Fig.~\ref{fig:or-tiles}.
By design, it is clear that these tile types can self-assemble all the subpatterns $p$, $q_1$ to $q_5$, and $r_1(v)$ to $r_3(v)$ for $v\in V$; see also Sect.~\ref{app:subpatterns} where the subpatterns with their tile assignments are presented.
Now, consider the subpattern $s(C)$ for a clause $C=(c_1\lor c_2 \lor c_3)$ in $F$.
It is not difficult to observe that, since the clause $C$ is satisfied by the assignment $f$, at least one of the north glues of the tiles in $s(C)$ with colors \colT{white}{$c_1$}, \colT{white}{$c_2$}, and \colT{white}{$c_3$} is $1$.
The design of the \tl{or}-gate tile types ensures that the right \tl{or}-gate tile in $s(C)$ has east output $1$ and the tile with color \Cpos can be placed --- concluding that $P_F$ can be self-assembled by the given \tas having $m$ tile types.
Conversely, suppose $P_F$ can be self-assembled by a \tas with $m$ tile types.
By Lemma~\ref{lem:sub:r}, for each variable $v\in V$ the tile type $t_v^\oplus$ with color \colT{white}{$v$} has either north output $0$ or $1$.
We define a variable assignment $f\colon V \to \set{0,1}$ by $v\mapsto t_v^\oplus(N)$.
Recall from Lemma~\ref{lem:q_or} that the four \tl{or}-gate tile types in $T$ actually implement an \tl{or}-gate.
Furthermore, in a subpattern $s(C)$ where $C = (c_1\lor c_2\lor c_3)$ is a clause from $F$, the west input of the left \tl{or}-gate tile is $0$, and if the east output of the right \tl{or}-gate tile were $0$, then the tile type with color \Cpos could not be placed in $s(C)$ (instead the tile type with color \Cneg would be placed).
Hence, at least one of the north glues of the tiles with colors \colT{white}{$c_1$}, \colT{white}{$c_2$}, and \colT{white}{$c_3$} is $1$.
Using the fact that for $v\in V$ the north output of the tile type with color \colT{white}{$\neg v$} is $1$ if and only if the north output of the tile type with color \colT{white}{$v$} is $0$, stated in Lemma~\ref{lem:sub:r}, we infer that $f$ is a variable assignment that satisfies every clause in $F$.
Therefore, $F$ is satisfiable.
\end{proof}
\subsection{Subpatterns in \texorpdfstring{$P_F$}{P(F)} with Tile Assignment}
\label{app:subpatterns}
In this section we present the subpatterns $p$, $q_1$ to $q_5$, $r_1(v)$ to $r_3(v)$, $r_1(u)$ to $r_3(u)$, and $s(C)$ in $P_F$ with tile assignments, where $C = ( u \lor v \lor \neg w)$ and $u,v,w\in V$ with the variable assignment $u\mapsto 0$, $v\mapsto 1$, and $w\mapsto 1$.
The used tiles are described in Fig.~\ref{fig:or-tiles} and~\ref{fig:all:3sat:tiles}.
The other subpatterns in $P_F$ can be tiled analogously.
\begin{center}
\hspace*{-2cm}
\begin{tikzpicture}
[scale=1,every node/.style={scale=1}]
\begin{scope}[yshift=1.5cm]
\fulltile{0}{0}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{1.5}{0}{colMod}{white}{\tl{Y_1}}{}{0}{\tl A}{0}
\fulltile{3}{0}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{4.5}{0}{colMod}{white}{\tl{Y_2}}{}{0}{\tl B}{0}
\fulltile{6}{0}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{7.5}{0}{colMod}{white}{\tl{Y_3}}{}{0}{\tl C}{0}
\fulltile{9}{0}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{10.5}{0}{colMod}{white}{\tl{Y_4}}{}{0}{\tl D}{0}
\end{scope}
\fulltile{0}{0}{colAux}{black}{\tl{X_1}}{0}{0}{}{}
\fulltile{1.5}{0}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{3}{0}{colAux}{black}{\tl{X_2}}{0}{0}{}{0}
\fulltile{4.5}{0}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{6}{0}{colAux}{black}{\tl{X_3}}{0}{1}{}{1}
\fulltile{7.5}{0}{white}{black}{\tl{or}}{\tl C}{1}{0}{1}
\fulltile{9}{0}{colAux}{black}{\tl{X_4}}{0}{1}{}{1}
\fulltile{10.5}{0}{white}{black}{\tl{or}}{\tl D}{1}{1}{1}
\begin{scope}[yshift=-3.5cm, xshift=-10.5cm]
\begin{scope}[yshift=1.5cm]
\fulltile{12}{0}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{13.5}{0}{colMod}{white}{\tl{Y_5}}{}{0}{\tl A}{1}
\fulltile{15}{0}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{16.5}{0}{colMod}{white}{\tl{Y_6}}{}{0}{\tl B}{1}
\fulltile{18}{0}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{19.5}{0}{colMod}{white}{\tl{Y_7}}{}{0}{\tl C}{1}
\fulltile{21}{0}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{22.5}{0}{colMod}{white}{\tl{Y_8}}{}{}{\tl D}{1}
\end{scope}
\fulltile{12}{0}{colAux}{black}{\tl{X_5}}{1}{0}{}{1}
\fulltile{13.5}{0}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{15}{0}{colAux}{black}{\tl{X_6}}{1}{0}{}{0}
\fulltile{16.5}{0}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{18}{0}{colAux}{black}{\tl{X_7}}{1}{1}{}{1}
\fulltile{19.5}{0}{white}{black}{\tl{or}}{\tl C}{1}{0}{1}
\fulltile{21}{0}{colAux}{black}{\tl{X_8}}{1}{1}{}{1}
\fulltile{22.5}{0}{white}{black}{\tl{or}}{\tl D}{1}{1}{1}
\end{scope}
\node at (12,.75) {$\ldots$};
\node at (0,-2.75) {$\ldots$};
\node at (6,-4.5) {$p$};
\end{tikzpicture}
\hspace*{-2cm}
\hspace*{-2cm}
\begin{tikzpicture}
[scale=1,every node/.style={scale=1}]
\fulltile{0}{0}{colAux}{black}{\tl{Z_1}}{\bul}{\bul}{}{}
\fulltile{1.5}{0}{colAux}{black}{\tl{Z_2}}{\dia}{\bul}{}{\bul}
\fulltile{3}{0}{colModd}{white}{\uparrow\!_0}{0}{\bul}{0}{\bul}
\fulltile{4.5}{0}{colModd}{white}{\tl b}{\str}{}{}{\bul}
\fulltile{0}{1.5}{colAux}{black}{\tl{Z_3}}{\bul}{\dia}{\bul}{}
\fulltile{1.5}{1.5}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{3}{1.5}{white}{black}{\uparrow\!_0}{0}{\dia}{0}{\dia}
\fulltile{4.5}{1.5}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{3}{colModd}{white}{\overset0\rightarrow}{\bul}{0}{\bul}{0}
\fulltile{1.5}{3}{white}{black}{\overset0\rightarrow}{\dia}{0}{\dia}{0}
\fulltile{3}{3}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{4.5}{3}{colNeg}{white}{\pmb-}{\str}{}{\str}{0}
\fulltile{0}{4.5}{colModd}{white}{\tl c}{}{\str}{\bul}{}
\fulltile{1.5}{4.5}{colAux}{black}{\tl c}{}{\str}{\dia}{\str}
\fulltile{3}{4.5}{colMod}{white}{\tl A}{}{\str}{\tl A}{\str}
\fulltile{4.5}{4.5}{colAux}{black}{\tl{d}}{}{}{\str}{\str}
\node at (2.25,-1) {$q_1$};
\begin{scope}[xshift=7cm]
[scale=1,every node/.style={scale=1}]
\fulltile{0}{0}{colAux}{black}{\tl{Z_1}}{\bul}{\bul}{}{}
\fulltile{1.5}{0}{colAux}{black}{\tl{Z_2}}{\dia}{\bul}{}{\bul}
\fulltile{3}{0}{colModd}{white}{\uparrow\!_1}{1}{\bul}{1}{\bul}
\fulltile{4.5}{0}{colModd}{white}{\tl b}{\str}{}{}{\bul}
\fulltile{0}{1.5}{colAux}{black}{\tl{Z_3}}{\bul}{\dia}{\bul}{}
\fulltile{1.5}{1.5}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{3}{1.5}{white}{black}{\uparrow\!_1}{1}{\dia}{1}{\dia}
\fulltile{4.5}{1.5}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{3}{colModd}{white}{\overset0\rightarrow}{\bul}{0}{\bul}{0}
\fulltile{1.5}{3}{white}{black}{\overset0\rightarrow}{\dia}{0}{\dia}{0}
\fulltile{3}{3}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{4.5}{3}{colPos}{white}{\pmb+}{\str}{}{\str}{1}
\fulltile{0}{4.5}{colModd}{white}{\tl c}{}{\str}{\bul}{}
\fulltile{1.5}{4.5}{colAux}{black}{\tl c}{}{\str}{\dia}{\str}
\fulltile{3}{4.5}{colMod}{white}{\tl B}{}{\str}{\tl B}{\str}
\fulltile{4.5}{4.5}{colAux}{black}{\tl{d}}{}{}{\str}{\str}
\node at (2.25,-1) {$q_2$};
\end{scope}
\end{tikzpicture}
\hspace*{-2cm}
\hspace*{-2cm}
\begin{tikzpicture}
[scale=1,every node/.style={scale=1}]
\fulltile{0}{0}{colAux}{black}{\tl{Z_1}}{\bul}{\bul}{}{}
\fulltile{1.5}{0}{colAux}{black}{\tl{Z_2}}{\dia}{\bul}{}{\bul}
\fulltile{3}{0}{colModd}{white}{\uparrow\!_0}{0}{\bul}{0}{\bul}
\fulltile{4.5}{0}{colModd}{white}{\tl b}{\str}{}{}{\bul}
\fulltile{0}{1.5}{colAux}{black}{\tl{Z_3}}{\bul}{\dia}{\bul}{}
\fulltile{1.5}{1.5}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{3}{1.5}{white}{black}{\uparrow\!_0}{0}{\dia}{0}{\dia}
\fulltile{4.5}{1.5}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{3}{colModd}{white}{\overset1\rightarrow}{\bul}{1}{\bul}{1}
\fulltile{1.5}{3}{white}{black}{\overset1\rightarrow}{\dia}{1}{\dia}{1}
\fulltile{3}{3}{white}{black}{\tl{or}}{\tl C}{1}{0}{1}
\fulltile{4.5}{3}{colPos}{white}{\pmb+}{\str}{}{\str}{1}
\fulltile{0}{4.5}{colModd}{white}{\tl c}{}{\str}{\bul}{}
\fulltile{1.5}{4.5}{colAux}{black}{\tl c}{}{\str}{\dia}{\str}
\fulltile{3}{4.5}{colMod}{white}{\tl C}{}{\str}{\tl C}{\str}
\fulltile{4.5}{4.5}{colAux}{black}{\tl{d}}{}{}{\str}{\str}
\node at (2.25,-1) {$q_3$};
\begin{scope}[xshift=7cm]
[scale=1,every node/.style={scale=1}]
\fulltile{0}{0}{colAux}{black}{\tl{Z_1}}{\bul}{\bul}{}{}
\fulltile{1.5}{0}{colAux}{black}{\tl{Z_2}}{\dia}{\bul}{}{\bul}
\fulltile{3}{0}{colModd}{white}{\uparrow\!_1}{1}{\bul}{1}{\bul}
\fulltile{4.5}{0}{colModd}{white}{\tl b}{\str}{}{}{\bul}
\fulltile{0}{1.5}{colAux}{black}{\tl{Z_3}}{\bul}{\dia}{\bul}{}
\fulltile{1.5}{1.5}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{3}{1.5}{white}{black}{\uparrow\!_1}{1}{\dia}{1}{\dia}
\fulltile{4.5}{1.5}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{3}{colModd}{white}{\overset1\rightarrow}{\bul}{1}{\bul}{1}
\fulltile{1.5}{3}{white}{black}{\overset1\rightarrow}{\dia}{1}{\dia}{1}
\fulltile{3}{3}{white}{black}{\tl{or}}{\tl D}{1}{1}{1}
\fulltile{4.5}{3}{colPos}{white}{\pmb+}{\str}{}{\str}{1}
\fulltile{0}{4.5}{colModd}{white}{\tl c}{}{\str}{\bul}{}
\fulltile{1.5}{4.5}{colAux}{black}{\tl c}{}{\str}{\dia}{\str}
\fulltile{3}{4.5}{colMod}{white}{\tl D}{}{\str}{\tl D}{\str}
\fulltile{4.5}{4.5}{colAux}{black}{\tl{d}}{}{}{\str}{\str}
\node at (2.25,-1) {$q_4$};
\end{scope}
\end{tikzpicture}
\hspace*{-2cm}
\begin{tikzpicture}
[scale=1,every node/.style={scale=1}]
\fulltile{0}{0}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{1.5}{0}{white}{black}{\uparrow\!_0}{0}{\dia}{0}{\dia}
\fulltile{3}{0}{white}{black}{\uparrow\!_1}{1}{\dia}{1}{\dia}
\fulltile{4.5}{0}{white}{black}{\uparrow\!_0}{0}{\dia}{0}{\dia}
\fulltile{6}{0}{white}{black}{\uparrow\!_1}{1}{\dia}{1}{\dia}
\fulltile{7.5}{0}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{1.5}{white}{black}{\overset0\rightarrow}{\dia}{0}{\dia}{0}
\fulltile{1.5}{1.5}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{3}{1.5}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{4.5}{1.5}{white}{black}{\tl{or}}{\tl C}{1}{0}{1}
\fulltile{6}{1.5}{white}{black}{\tl{or}}{\tl D}{1}{1}{1}
\fulltile{7.5}{1.5}{colPos}{white}{\pmb+}{\str}{}{\str}{1}
\fulltile{0}{3}{colAux}{black}{\tl c}{}{\str}{\dia}{\str}
\fulltile{1.5}{3}{colMod}{white}{\tl A}{}{\str}{\tl A}{\str}
\fulltile{3}{3}{colMod}{white}{\tl B}{}{\str}{\tl B}{\str}
\fulltile{4.5}{3}{colMod}{white}{\tl C}{}{\str}{\tl C}{\str}
\fulltile{6}{3}{colMod}{white}{\tl D}{}{\str}{\tl D}{\str}
\fulltile{7.5}{3}{colAux}{black}{\tl{d}}{}{}{\str}{\str}
\node at (3.75,-1) {$q_5$};
\end{tikzpicture}
\hspace*{-2cm}
\begin{tikzpicture}
[scale=1,every node/.style={scale=1}]
\begin{scope}
\fulltile{0}{0}{colAux}{black}{\tl{Z_4}}{\tri}{\dia}{}{}
\fulltile{1.5}{0}{white}{black}{v}{1}{\dia}{v}{\dia}
\fulltile{0}{1.5}{colAux}{black}{v}{}{v}{\tri}{}
\fulltile{1.5}{1.5}{colMod}{white}{v}{}{}{1}{v}
\node at (.75,-1) {$r_1(v)$};
\end{scope}
\begin{scope}[xshift=4cm]
\fulltile{0}{0}{colAux}{black}{\tl{Z_4}}{\tri}{\dia}{}{}
\fulltile{1.5}{0}{white}{black}{\neg v}{0}{\dia}{\neg v}{\dia}
\fulltile{0}{1.5}{colAux}{black}{v}{}{v}{\tri}{}
\fulltile{1.5}{1.5}{colModd}{white}{\tilde v}{}{}{0}{v}
\node at (.75,-1) {$r_2(v)$};
\end{scope}
\begin{scope}[xshift=8cm]
\fulltile{0}{0}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{1.5}{0}{white}{black}{v}{1}{\dia}{v}{\dia}
\fulltile{3}{0}{white}{black}{\neg v}{0}{\dia}{\neg v}{\dia}
\fulltile{4.5}{0}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{1.5}{white}{black}{\overset0\rightarrow}{\dia}{0}{\dia}{0}
\fulltile{1.5}{1.5}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{3}{1.5}{white}{black}{\tl{or}}{\tl C}{1}{0}{1}
\fulltile{4.5}{1.5}{colPos}{white}{\pmb+}{\str}{}{\str}{1}
\node at (2.25,-1) {$r_3(v)$};
\end{scope}
\end{tikzpicture}
\hspace*{-2cm}
\hspace*{-2cm}
\begin{tikzpicture}
[scale=1,every node/.style={scale=1}]
\begin{scope}
\fulltile{0}{0}{colAux}{black}{\tl{Z_4}}{\tri}{\dia}{}{}
\fulltile{1.5}{0}{white}{black}{u}{0}{\dia}{u}{\dia}
\fulltile{0}{1.5}{colAux}{black}{u}{}{u}{\tri}{}
\fulltile{1.5}{1.5}{colMod}{white}{u}{}{}{0}{u}
\node at (.75,-1) {$r_1(u)$};
\end{scope}
\begin{scope}[xshift=4cm]
\fulltile{0}{0}{colAux}{black}{\tl{Z_4}}{\tri}{\dia}{}{}
\fulltile{1.5}{0}{white}{black}{\neg u}{1}{\dia}{\neg u}{\dia}
\fulltile{0}{1.5}{colAux}{black}{u}{}{u}{\tri}{}
\fulltile{1.5}{1.5}{colModd}{white}{\tilde u}{}{}{1}{u}
\node at (.75,-1) {$r_2(u)$};
\end{scope}
\begin{scope}[xshift=8cm]
\fulltile{0}{0}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{1.5}{0}{white}{black}{u}{0}{\dia}{u}{\dia}
\fulltile{3}{0}{white}{black}{\neg u}{1}{\dia}{\neg u}{\dia}
\fulltile{4.5}{0}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{1.5}{white}{black}{\overset0\rightarrow}{\dia}{0}{\dia}{0}
\fulltile{1.5}{1.5}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{3}{1.5}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{4.5}{1.5}{colPos}{white}{\pmb+}{\str}{}{\str}{1}
\node at (2.25,-1) {$r_3(u)$};
\end{scope}
\end{tikzpicture}
\hspace*{-2cm}
\begin{tikzpicture}
[scale=1,every node/.style={scale=1}]
\fulltile{0}{0}{colAux}{black}{\tl a}{\dia}{\dia}{\dia}{\dia}
\fulltile{1.5}{0}{white}{black}{u}{0}{\dia}{u}{\dia}
\fulltile{3}{0}{white}{black}{v}{1}{\dia}{v}{\dia}
\fulltile{4.5}{0}{white}{black}{\neg w}{0}{\dia}{\neg w}{\dia}
\fulltile{6}{0}{colAux}{black}{\tl b}{\str}{}{\str}{\dia}
\fulltile{0}{1.5}{white}{black}{\overset0\rightarrow}{\dia}{0}{\dia}{0}
\fulltile{1.5}{1.5}{white}{black}{\tl{or}}{\tl A}{0}{0}{0}
\fulltile{3}{1.5}{white}{black}{\tl{or}}{\tl B}{1}{1}{0}
\fulltile{4.5}{1.5}{white}{black}{\tl{or}}{\tl C}{1}{0}{1}
\fulltile{6}{1.5}{colPos}{white}{\pmb+}{\str}{}{\str}{1}
\node at (3,-1) {$s(C)$};
\end{tikzpicture}
\end{center}
\section{\texorpdfstring{\NP-hardness of \tmbpats}{NP-hardness of 3-MBPATS}}
\label{sec:bwg}
The purpose of this section is to prove the \NP-hardness of \tmbpats.
Let us define a set of restricted input pairs $\ensuremath{\mathcal{I}}\xspace$ for \pats.
The set \ensuremath{\mathcal{I}}\xspace contains all pairs $(P,m)$ where $P = P_F$ is the pattern for a formula $F$ in 3-CNF as defined in Sect.~\ref{sec:pats} and $m = \abs{C(P)} + 3$.
Consider the following restriction of \pats.
\myproblem{A pair $(P,m)$ from $\ensuremath{\mathcal{I}}\xspace$}
{``Yes'' if $P$ can uniquely be self-assembled by using $m$ tile types}
{\modpats}
As we choose exactly those pairs $(P,m)$ as input for the problem that are generated by the reduction, stated in Theorem~\ref{thm:reduction:pats}, we obtain the following corollary which forms the foundation for the result in this section.
\begin{corollary}
\modpats is \NP-hard.
\end{corollary}
The \NP-hardness of \tmbpats follows by the polynomial-time reduction from \modpats to \tmbpats, stated in Theorem~\ref{thm:reduction}.
\begin{theorem}\label{thm:reduction}
For every input pair $(P,m)\in \ensuremath{\mathcal{I}}\xspace$ there exist a black/white/gray-colored pattern $Q$ and integers $m_b, m_w, m_g$ such that:
$P$ can be self-assembled by a \tas with at most $m$ tile types if and only if $Q$ can be self-assembled by a \tas with at most $m_b$ black tile types, $m_w$ white tile types, and $m_g$ gray tile types.
Moreover, the tuple $(Q,m_b,m_w,m_g)$ can be computed from $P$ in polynomial time.
\end{theorem}
Lemma~\ref{lem:if} states the ``if part'' and Lemma~\ref{lem:only-if} states the ``only if part'' of Theorem~\ref{thm:reduction}.
Lemma~\ref{lem:poly:time} states that $(Q,m_b,m_w,m_g)$ can be computed from $P$ in polynomial time.
\begin{corollary}\label{cor:mbpats}
\tmbpats is \NP-hard.
\end{corollary}
For the remainder of this section, let $(P,m)\in\ensuremath{\mathcal{I}}\xspace$ be one fixed pair, let $C=C(P)$ and $k = \abs C$.
We may assume that $C=[k]$ is a subset of the positive integers.
The tile bounds are
\begin{inparaitem}[]
\item $m_b = 1$ for black tile types,
\item $m_w = 5 k - 3(w(P)+h(P)) + 14$ for white tile types, and
\item $m_g = 2 k + 3$ for gray tile types.
\end{inparaitem}
Note that, due to the pattern design in Sect.~\ref{sec:pats}, $h(P) = 6$ is constant.
\begin{figure}
\caption{Black/white/gray supertile which portrays a color $c\in C$.}
\label{fig:supertile}
\end{figure}
Let $\ell = 5k+8$.
For a color $c\in C$, we define an $\ell\times \ell$ square pattern as shown in Fig.~\ref{fig:supertile}.
We refer to this pattern as well as to its underlying tile assignment as {\em supertile}.
The blowup of such a supertile with a possible tile assignment is shown in Figure~\ref{fig:blowup}.
In contrast to the previous section, the positions in the supertile are labeled; this does not mean that the colors or the tiles used to self-assemble the pattern are labeled: the colors are black, white, or gray.
The horizontal and vertical {\em color counters} are the $c$ gray positions in the top row, respectively right column, which are succeeded by a white tile in position \tl{D_2}, respectively \tl{D_1}.
The color counters illustrate the color $c$ that is {\em portrayed} by the supertile.
The patterns of two supertiles which portray two distinct colors differ only in the place the white tile is positioned in its top row and right column.
\begin{figure}
\caption{
Blowup of one supertile with possible tile assignment, representing a tile with color $c$ and glues $\tl{n}$, $\tl{e}$, $\tl{s}$, and $\tl{w}$.}
\label{fig:blowup}
\end{figure}
For colors in the bottom row and left column of the pattern $P$ we use {\em incomplete supertiles}:
a supertile portraying a color $c$ in the bottom row of pattern $P$ lacks the white row with positions \tl A, \tl{B_1}, and \tl{C_1};
a supertile representing a color $c$ in the left column of pattern $P$ lacks the white column with positions \tl A, \tl{B_2}, and \tl{C_2}.
In particular, the supertile portraying color $P(1,1)$ does not contain any of the positions \tl A, \tl{B_1}, \tl{B_2}, \tl{C_1}, and \tl{C_2}.
Recall that all incomplete supertiles portray a color $c$ that is unique in $P$.
\begin{figure}
\caption{Black/white/gray pattern $Q$ defined by the $k$-color pattern $P$ with $\tl w = w(P)$ and $\tl h=h(P)$.}
\label{fig:patternQ}
\end{figure}
The pattern $Q$ is shown in Fig.~\ref{fig:patternQ}.
By $Q\gen{x,y}$ we denote the pattern of the supertile covering the square area spanned by positions $((x-1)\cdot \ell,(y-1)\cdot \ell)$ and $(x\cdot \ell-1,y\cdot \ell-1)$ in $Q$;
the incomplete supertiles cover one row and/or column less.
The pattern is designed such that supertile $Q\gen{x,y}$ portrays the color $P(x,y)$ for all $x\in[w(P)]$ and $y\in[h(P)]$.
Additionally, $Q$ contains three {\em gadget rows} and three {\em gadget columns} which are explained in Fig.~\ref{fig:gadget}.
The purpose of these gadget rows and columns is to ensure that the color counters can only be implemented in one way when using no more than $m_g$ gray tile types.
All together $Q$ is of dimensions $w(Q) = \ell\cdot w(P) +2$ times $h(Q) = \ell\cdot h(P) +2$.
Obviously, the pattern $Q$ can be computed from $P$ in polynomial time.
\begin{lemma}\label{lem:poly:time}
$(Q,m_b,m_w,m_g)$ can be computed from $P$ in polynomial time.
\end{lemma}
\begin{proof}
This is obvious by the design of the pattern.
\end{proof}
\begin{figure}
\caption{The gadget rows on the north border of the pattern $Q$, the gadget columns are symmetrical:
the middle row (resp.,\ column) contains gray tiles except for one white tile in position $k+1$;
the upper and lower rows (resp.,\ left and right columns) contain gray tiles in positions above the gray column (resp.,\ right of the gray row) of a supertile, the other tiles are black.}
\label{fig:gadget}
\end{figure}
For a \tas $\Theta$ which self-assembles $Q$, we extend our previous notion such that $Q_\Theta\gen{x,y}$ denotes the tile assignment of supertile $Q\gen{x,y}$ given by $\Theta$.
In the following, we will prove properties of such a \tas $\Theta$.
Our first observation is about the black and gray tile types plus two of the white tile types.
\begin{figure}
\caption{The black tile type, two of the white tile types, and all gray tile types: the labeled tile types are used in the corresponding positions of each supertile and the gadget pattern; the unlabeled tile types, called {\em counter tiles}, implement the horizontal and vertical color counters.}
\label{fig:black:gray}
\end{figure}
\begin{lemma}\label{lem:black:gray}
Let $\Theta$ be a \tas which self-assembles the pattern $Q$ using at most $m_b=1$ black tile types and $m_g=2k+3$ gray tile types.
The black and gray tile types in $\Theta$ are of the form shown in Fig.~\ref{fig:black:gray} and $\Theta$ contains two white tiles of the form shown in the figure.
In every supertile, the horizontal and vertical color counters are implemented by a subset of the counter tile types and for a position \tl{E}, \tl{D_1}, \tl{D_2}, \tl{F_1}, \tl{F_2}, or \tl G the correspondingly labeled tile type is used.
Furthermore, the glues $\bul,\dia,0,1,\ldots, k$ are all distinct.
\end{lemma}
\begin{proof}
In every supertile we find a black square that consists of more than just one tile, therefore, the sole black tile type must have the same north and south glues, respectively east and west glues.
We may assume that \bul is the glue on all four edges of the black tile type.
Now, we have a look at the tile assignment of the three gadget rows; see Fig.~\ref{fig:gadget}.
In the middle row, the first $k$ gray tiles are succeeded by a white tile.
As the south input of all $k$ tiles is the north glue \bul of the black tiles, if the same tile type were used in two of these $k$ positions, there would be a cycle in the gray tile types used, which would be repeated over and over, and no white tile could occur after $k$ gray tiles; furthermore, all the east outputs (resp.,\ west inputs) of these tiles are distinct.
Thus, these tiles implement a horizontal counter which is capable of counting to $k$, or more intuitively, counting from $k$ down to $1$.
Next, a white tile is used whose west input is distinct from its east output; otherwise, the same white tile type would be used again.
As the white tile is succeeded by $4k+6$ gray tiles with south input \bul, all gray tiles used in this row have to occur at least twice; therefore, all tiles used in these positions have to be distinct from the tiles implementing the horizontal counter; we use the label \dia for the east output of the white tile in position \tl{D_2} and the west input of the gray tile in left-most position \tl{F_2}.
Finally, observe that all tiles described so far have south and north glue \bul and that their east and west glues are distinct from \bul.
From the three gadget columns we obtain analogous results for the vertical counter, the white tile in position \tl{D_1}, and the gray tiles in the first $4k+6$ positions \tl{F_1}.
None of the tiles in the middle column can coincide with a tile in the middle row since they have the glue \bul on different inputs.
Since we need at least one tile for each of the positions \tl{F_1} and \tl{F_2}, there is only one gray tile type left in $\Theta$.
Any tile at position \tl G has north and east outputs from $\dia, 0,1,\ldots, k$ as these edges are adjacent to gray tiles.
We cannot use a tile which we have described so far for any of the positions \tl G.
Therefore, all tiles in positions \tl{F_1} (resp.,\ \tl{F_2}) in the gadget are of the same type whose north and south (resp.,\ east and west) glues are equal.
All positions \tl G share the same tile type $\tau$ whose south and west inputs are \dia.
If the east output of $\tau$ were one of $0,1,\ldots, k$, then a white tile would be among the $k+1$ tiles succeeding the left-most position \tl G in the middle gadget row; hence, $\tau(E) = \dia$ and, symmetrically, $\tau(N) = \dia$.
All supertiles have to share the same black and gray tile types as there are no other black and gray tile types in $\Theta$.
The color counters have to be implemented by the counter tile types.
As the south and west inputs of the tiles in positions \tl{D_1} and \tl{D_2} are determined by its gray and black neighbors, it is also clear that only the described white tiles can be used in these positions.
Now, the types of the tiles in positions \tl{F_1}, \tl{F_2}, and \tl G are also determined by their inputs.
\end{proof}
\begin{remark}\label{rem:controltile}
Consider a \tas $\Theta$ that self-assembles the pattern $Q$ using at most $m_b$ black tile types and $m_g$ gray tile types.
If we have a look at the tile assignment of the black square plus the gray column and row in a supertile, we see that this block has inputs \bul on all edges except for edges where the color counters are initialized and it has outputs \bul on all edges, except for its right-most and top-most output edges which are \dia.
This means that all information on how to initialize the color counters has to be carried through the white lines and rows, that are, the tiles in positions \tl A, \tl{B_1}, \tl{B_2}, \tl{C_1}, \tl{C_2}.
Moreover, the tile in position \tl A is the only one with non-generic input from other supertiles.
This tile fully determines the tile assignment of the supertile and can be seen as the {\em control tile} or {\em seed} of the supertile.
Henceforth, for a supertile $s=Q_\Theta\gen{x,y}$ we extend our notion of glues such that $s(S)$ and $s(W)$ denote the south and west input of the tile in position \tl A, respectively, $s(N)$ and $s(E)$ denote the north and east output of the tiles in positions \tl{C_2} and \tl{C_1}, respectively.
For incomplete supertiles only one of $s(N)$ or $s(E)$ is defined.
Two supertiles in $Q_\Theta$ are considered distinct if their tile assignment differs in at least one position.
By the observations above, two complete supertiles are distinct if and only if their control tiles are of distinct types; this is equivalent to requiring that the inputs of the two supertiles differ.
Since incomplete supertiles portray unique colors in $P$, each of them is distinct from every other supertile in $Q_\Theta$.
\end{remark}
There is some flexibility in how the white tile types are implemented in a \tas $\Theta$ which self-assembles $Q$.
Let us present one possibility which proves the ``only if part'' of Theorem~\ref{thm:reduction}.
\begin{lemma}\label{lem:only-if}
If $P$ can be self-assembled by a \tas $T$ with $m$ tile types, then $Q$ can be self-assembled by a \tas $\Theta$ using $m_b$ black tile types, $m_w$ white tile types, and $m_g$ gray tile types.
\end{lemma}
\begin{proof}
Let the \tas $\Theta$ contain the tile types given in Fig.~\ref{fig:black:gray}.
By Lemma~\ref{lem:black:gray}, the tiles of these types can self-assemble the gadget pattern and the black square plus color counters in every supertile.
Recall from Lemma~\ref{lem:q_or} that $T$ contains four tile types for the \tl{or}-gate and only one tile type for every other color in $C$.
\begin{figure}
\caption{White tile types for the supertile portraying a color $c\in C$, except for the \tl{or}-gate color \Cor.}
\label{fig:white1}
\end{figure}
First, consider $c\in C\setminus\set{\Cor}$ and let $t\in T$ be the tile type with color $c$.
We use the tile types in Fig.~\ref{fig:white1} for the supertile portraying $c$.
Note that none of the five tile types share the same inputs.
The labels on them depict the positions in the supertile where each tile is used.
We do not need any of these white tile types for the incomplete supertile representing the color $P(1,1)$.
For the other incomplete supertiles we only need two of these tile types.
Recall that, by design, a color which is portrayed by an incomplete supertile in $Q$ is unique in $P$.
This amounts to $5 \cdot (\abs{C}-1) - 3 \cdot ( h(P)+ w(P)) + 1$ white tile types in $\Theta$ for all the supertiles portraying colors in $C\setminus\set{\Cor}$.
We have one remark for the colors in the top row and right column of $P$:
since these colors do not occur in any other position of the pattern and the north and/or east glues of the respective tiles are not used in $P$, we may assume that all these glues are \bul; this allows for the proper attachment of the gadget pattern.
\begin{figure}
\caption{White tile types for supertiles portraying the \tl{or}-gate color \Cor.}
\label{fig:whiteor}
\end{figure}
Now, consider $c = \Cor$.
Recall from Lemma~\ref{lem:q_or} that there are $o_1,o_2,o_3,o_4\in T$ with color $c$ as depicted in Fig.~\ref{fig:or-tiles}.
We use the 16 white tile types in Fig.~\ref{fig:whiteor} for the four supertiles portraying $c$.
The supertiles for $o_2$, $o_3$, and $o_4$ share tile types in positions \tl{B_1} and \tl{C_1}, as labelled.
Note that the inputs of the 16 tile types are mutually distinct.
Let $\Theta$ contain all the white tile types we have defined plus the two white tile types from Fig.~\ref{fig:black:gray} and note that the white tile types add up to
\[
m_w = 5 \cdot \abs{C} - 3 \cdot ( h(P)+ w(P)) + 14
\]
as desired.
If two distinct tile types in $\Theta$ had the same inputs, they would have to be two tile types for position \tl A, which implies that two distinct tile types in $T$ would have the same inputs as well; thus, $\Theta$ is a properly defined \tas.
The $L$-shaped seed of $\Theta$ is defined such that the incomplete supertiles for the bottom row and left column, as well as the gadgets, can properly attach.
By Remark~\ref{rem:controltile}, it is clear that, starting from its control tile (or the $L$-shaped seed), every supertile properly self-assembles as long as its west and south neighboring supertiles are present.
Let $Q'$ be the pattern that is self-assembled by $\Theta$.
Using induction over $x$ and $y$, the supertile $s=Q'_\Theta\gen{x,y}$ represents the tile $t=P_T(x,y)$ because the respective glues of $s$ and $t$ coincide, for all $x\in[w(P)]$ and $y\in[h(P)]$.
Furthermore, by design of the supertiles in the top row and right column of $Q'$, the gadget rows and columns can self-assemble.
We conclude that $\Theta$ self-assembles $Q = Q'$.
\end{proof}
For the converse implication of Theorem~\ref{thm:reduction}, let us show how to obtain a \tas that self-assembles $P$ from the supertiles in $Q_\Theta$.
The following result follows from the bijection between supertiles in $Q_\Theta$ and tiles in $P_T$.
\begin{lemma}\label{lem:st:to:t}
Let $\Theta$ be a \tas which self-assembles $Q$ using at most $m_b$ black tile types and $m_g$ gray tile types, and let
\[
S=\sett{Q_\Theta\gen{x,y}}{x\in[w(P)],y\in[h(P)]}
\]
be the set of all distinct supertiles in $Q_\Theta$.
There exists a \tas $T$ with $\abs S$ tile types which self-assembles $P$ such that for each supertile $s\in S$ there exists a tile type $t_s\in T$ with the same glues on the respective edges and $s$ portrays the color of $t_s$.
For an incomplete supertile the statement holds for the defined glue.
\end{lemma}
\begin{proof}
Note that, except for the tiles with colors in the first row and column of $P$, the tiles in $T$ are fully defined.
For the undefined glues on tile types, representing incomplete supertiles, we introduce unique matching glues; the $L$-shaped seed of $T$ is defined to match these glues.
Clearly, the first column and row of $P$ can be self-assembled by $T$.
Recall from Remark~\ref{rem:controltile} that the control tile in position \tl A of a complete supertile $s$ fully determines the supertile and its outputs.
The placement of the control tile is determined by the east output of the west neighbor and the north output of the south neighbor of $s$.
Let $P'$ be the pattern which is self-assembled by $T$.
Using induction over $x$ and $y$, we see that if $s = Q_\Theta\gen{x,y}$, then $t_s = P'_T(x,y)$ for all $x\in[w(P)]$ and $y\in[h(P)]$;
thus, $T$ self-assembles $P=P'$.
\end{proof}
We continue with the investigation of the white tile types that are used to self-assemble the pattern $Q$.
The next lemma follows by a case study of what would go wrong if one tile type were used in two of the positions.
\begin{lemma}\label{lem:white:positions}
Let $\Theta$ be a \tas which self-assembles the pattern $Q$ using at most $m_b$ black tile types and $m_g$ gray tile types.
A white tile type from $\Theta$ which is used in one of the positions \tl A, \tl{B_1}, \tl{B_2}, \tl{C_1}, \tl{C_2}, \tl{D_1}, or \tl{D_2} cannot be used in another position in any supertile.
\end{lemma}
\begin{proof}
Clearly, we do not have to argue about positions which are gray or black.
By Lemma~\ref{lem:black:gray} and the design of supertiles, a tile type used in a position
\begin{compactenum}[1.)]
\item \tl{B_1} has south and north glue \bul;
\item \tl{B_2} has west and east glue \bul;
\item \tl{C_1} has south glue \dia and north glue in $[k]$;
\item \tl{C_2} has west glue \dia and east glue in $[k]$;
\item \tl{D_1} has east and west glue \bul, south glue $0$, and north glue \dia;
\item \tl{D_2} has south and north glue \bul, west glue $0$, and east glue \dia.
\end{compactenum}
First, suppose the same tile type would be used in position $\tl A$ and one of the positions \tl{B_1}, \tl{B_2}, \tl{C_1}, \tl{C_2}, \tl{D_1}, or \tl{D_2}.
The tile type would have north or east output from $\bul, \dia, 1,\ldots,k$; therefore, the north or east neighbor of the tile of this type in position \tl A would be black or gray --- a contradiction.
Due to the distinction in their inputs, the same tile type cannot be used in the following pairs of positions:
$(\tl{B_1},\tl{C_1})$, $(\tl{B_1},\tl{D_1})$,
$(\tl{B_2},\tl{C_2})$, $(\tl{B_2},\tl{D_2})$,
$(\tl{C_1},\tl{D_1})$, $(\tl{C_1},\tl{D_2})$,
$(\tl{C_2},\tl{D_1})$, $(\tl{C_2},\tl{D_2})$,
$(\tl{D_1},\tl{D_2})$.
If a tile type were used in any pair of positions
$(\tl{B_1},\tl{B_2})$, $(\tl{B_1},\tl{C_2})$,
$(\tl{B_2},\tl{C_1})$, or
$(\tl{C_1},\tl{C_2})$
it would have the same inputs as one of the gray or black tiles.
If a tile type were used in position $\tl{B_1}$ and $\tl{D_2}$, then the east neighbor of the tile of this type in position \tl{B_1} would be the gray tile labelled $F_2$ or $G$ from Fig.~\ref{fig:black:gray}.
Symmetrically, no tile type can be used in both positions \tl{B_2} and \tl{D_1}.
\end{proof}
Let \tl{B_1^*} be the right-most position \tl{B_1} in a supertile, adjacent to position \tl{C_1}, and let \tl{B_2^*} be the top-most position \tl{B_2} in a supertile, adjacent to position \tl{C_2}.
The following argument is about tiles in the five positions $K = \set{\tl A, \tl{B_1^*}, \tl{B_2^*}, \tl{C_1}, \tl{C_2}}$ of each supertile.
Following Remark~\ref{rem:controltile} it is clear that a tile in position~\tl{A} fully determines the supertile, tiles in positions \tl{B_1^*} and \tl{C_1} carry the color and the east glue of a supertile, whereas tiles in positions \tl{B_2^*} and \tl{C_2} carry the color and the north glue.
\begin{lemma}\label{lem:white}
Let $\Theta$ be a \tas which self-assembles $Q$ using at most $m_b$ black tile types and $m_g$ gray tile types.
Let $s_1$ and $s_2$ be supertiles in $Q_\Theta$.
\begin{compactenum}[\ i.)]
\item If $s_1$ and $s_2$ portray different colors, they cannot share any tile types in positions from $K$.
\item If $s_1(E) \neq s_2(E)$, they cannot share any tile types in \tl A, \tl{B_1^*}, or \tl{C_1}.
\item If $s_1(N) \neq s_2(N)$, they cannot share any tile types in \tl A, \tl{B_2^*}, or \tl{C_2}.
\end{compactenum}
The three statements hold for all available positions in incomplete supertiles.
\end{lemma}
\begin{proof}
By Lemma~\ref{lem:white:positions}, we do not have to consider mixups of positions.
Firstly, recall from Remark~\ref{rem:controltile} that the tile in position \tl A determines the supertile and its outputs.
Two supertiles portraying different colors or having different outputs cannot share the same tile type in positions \tl A.
Now, consider a supertile $s_1$ representing the color $c$.
The type $\gamma$ of the tile in position \tl{C_1} defines the east output $s_1(E)=\gamma(E)$ and initializes the vertical color counter.
If $\gamma$ is used in another supertile $s_2$ in position \tl{C_1}, then $s_2$ portrays the same color $c$ and $s_1(E) = s_2(E)$.
As the south input of every tile in a position \tl{C_1} is \dia, the type $\beta$ of the tile in position \tl{B_1^*} of $s_1$ determines the placement of $\gamma$ in position \tl{C_1}.
Thus, if $\beta$ is used in another supertile $s_2$ in position \tl{B_1^*}, then $s_2$ portrays the same color $c$ and $s_1(E) = s_2(E)$.
This concludes the proof of statement {\it ii.)}.
Statements~{\it i.)}\ and~{\it iii.)}\ follow by symmetric arguments on the tile types in positions \tl{C_2} and \tl{B_2^*}.
\end{proof}
\begin{remark}
We do not claim that two supertiles could not share any tile types in positions \tl{B_1} or \tl{B_2} while portraying different colors or having different output.
Indeed, consider two supertiles $s_1$ and $s_2$ portraying different colors and let $\beta$ and $\beta'$ be tile types for positions \tl{B_1} with $\beta(E) = \beta'(W) = \tl x$ and $\beta(W) = \beta'(E) = \tl y$.
If the control tile of $s_1$ has east glue \tl x while the control tile of $s_2$ has east glue \tl y, these supertiles have different tile types in positions \tl{C_1} but share the tile types $\beta$ and $\beta'$ in their positions \tl{B_1}.
\end{remark}
Let us conclude the proof of Theorem~\ref{thm:reduction}.
\begin{lemma}\label{lem:if}
The pattern $P$ can be self-assembled by a \tas $T$ with $m$ tile types if $Q$ can be self-assembled by a \tas $\Theta$ with $m_b$ black tile types, $m_w$ white tile types, and $m_g$ gray tile types.
\end{lemma}
\begin{proof}
We will prove that $Q_\Theta$ cannot contain more than $m$ distinct supertiles while $\Theta$ respects the given tile bounds.
Then, the claim follows from Lemma~\ref{lem:st:to:t}.
The black, gray, and two white tile types in $\Theta$ are defined by Lemma~\ref{lem:black:gray}.
We are now counting the minimal number of white tile types that we need to self-assemble the pattern $Q$.
The number of distinct tile types used as control tiles, in positions \tl A, equals to the number of distinct complete supertiles of $Q_\Theta$.
Consider a color $c\in C$.
There is at least one supertile in $Q_\Theta$ which portrays color $c$ and, assuming the supertile is complete, we need five white tile types in positions from $K$ of the supertile, by Lemma~\ref{lem:white:positions}, and these five tile types cannot be used for any of positions from $K$ of a supertile portraying another color, by Lemma~\ref{lem:white}.
If the supertile is incomplete but $c\neq P(1,1)$, hence $c$ is unique in $P$, we only need two white tile types which cannot be used in any of the positions from $K$ of another supertile.
We do not need any additional white tile types for the supertile portraying $P(1,1)$.
For the supertiles portraying colors in $C\setminus\set{\mathbb{C}or}$ we need
$5 \cdot (\abs{C} -1) - 3 \cdot (w(P) + h(P)) + 1$
white tiles which cannot be used in a position from $K$ in a supertile representing the \tl{or}-gate;
furthermore, the two white tile types for positions \tl{D_1} and \tl{D_2} also cannot be used in a position from $K$.
Among these white tile types we find only $\abs{C} - w(P) - h(P)$ types used as control tiles.
We only have 16 white tile types left for the \tl{or}-gate supertiles.
From Lemma~\ref{lem:p_or} and Lemma~\ref{lem:st:to:t} we infer that $Q_\Theta$ contains either
\begin{compactenum}[\it i.)]
\item three distinct supertiles $s_1,s_2,s_3$ in $Q_\Theta$ portraying $\mathbb{C}or$, all having distinct north and east glues,
\item four distinct supertiles $s_1,s_2,s_3,s_4$ in $Q_\Theta$ portraying $\mathbb{C}or$, all having distinct north glues and together having at least two distinct east glues,
\item four distinct supertiles $s_1,s_2,s_3,s_4$ in $Q_\Theta$ portraying $\mathbb{C}or$, all having distinct east glues and together having at least two distinct north glues, or
\item eight distinct supertiles $s_1,\ldots,s_8$ in $Q_\Theta$ portraying $\mathbb{C}or$, all having distinct east or north glues.
\end{compactenum}
Indeed, if none of these conditions were true for $Q_\Theta$, then, using the construction given in Lemma~\ref{lem:st:to:t}, we could generate a \tas $T$ which self-assembled $P$ and invalidated Lemma~\ref{lem:p_or}.
By Lemma~\ref{lem:white}, two distinct supertiles portraying $\mathbb{C}or$ cannot share tile types in positions \tl A, they can only share tile types in positions \tl{B_1^*} and \tl{C_1} if their east outputs equal, and they can only share tile types in positions \tl{B_2^*} and \tl{C_2} if their north outputs equal.
For case~{\it i.)}\ we need at least 15 white tile types of which three can be used as control tiles; the left over tile type might be used as another control tile.
For cases~{\it ii.)}\ and~{\it iii.)}\ we need at least 16 white tile types of which four can be used as control tiles.
Case~{\it iv.)} is not possible because we would need at least 26 white tile types.
A more involved analysis reveals that only case~{\it ii.)}\ is possible, but for our purpose it is enough that the remaining 16 white tile types contain at most four types that can be used as control tiles.
The number of distinct supertiles in $Q_\Theta$ is limited by the number of tile types that can be used as control tiles plus the number of incomplete supertiles.
We obtain that $Q_\Theta$ contains $k + 3 = m$ distinct supertiles as desired.
Using Lemma~\ref{lem:st:to:t}, the pattern $P$ can be self-assembled by a \tas $T$ with $m$ tile types.
\end{proof}
\section*{Conclusions}
We prove that \kmbpats, a natural variant of \kpats, is $\mathbb{NP}$-complete for $k = 3$.
Furthermore, we present a novel proof for the $\mathbb{NP}$-completeness of \pats and our proof is more concise than previous proofs.
We introduce several new techniques for pattern design in our proofs, in particular in Sect.~\ref{sec:bwg}, and we anticipate that these techniques can ultimately be used to prove that $2$-\mbpats and also $2$-\pats are $\mathbb{NP}$-hard.
\end{document}
|
\begin{document}
\title{Grey-Box Learning of Register Automata}
\begin{abstract}
\iflong
Model learning (a.k.a.\ active automata learning) is a highly effective technique for obtaining black-box finite state models of software components. Thus far, generalization to infinite state systems with inputs and outputs that carry data parameters has been challenging. Existing model learning tools for infinite state systems face scalability problems and can only be applied to restricted classes of systems (register automata with equality/inequality).
In this article, we
\else
We
\fi
show how one can boost the performance of model learning techniques by extracting the constraints on input and output parameters from a run, and making this grey-box information available to the learner.
More specifically, we provide new implementations of the tree oracle and equivalence oracle from the RALib tool, which
use the derived constraints. We extract the constraints from runs of Python programs using an existing tainting library for Python, and compare our grey-box version of RALib with the existing black-box version on several benchmarks, including some data structures from Python's standard library.
Our proof-of-principle implementation results in almost two orders of
magnitude improvement in terms of numbers of inputs sent to the software
system. Our approach, which can be generalized to richer model classes, also enables RALib to learn models that are out of reach of black-box techniques, such as combination locks.
\keywords{Model learning \and Active Automata Learning \and Register Automata \and RALib \and Grey-box \and Tainting}
\end{abstract}
\section{Introduction}\label{sec:introduction}
Model learning, also known as active automata learning, is a black-box
technique for constructing state machine models of software and hardware
components from information obtained through testing (i.e.,
providing inputs and observing the resulting outputs).
Model learning has been successfully used in
numerous applications, for instance for generating conformance test
suites of software components \cite{HMNSBI2001}, finding mistakes in
implementations of security-critical protocols
\cite{FJV16,FiterauEtAl17,FH17}, learning interfaces of classes in
software libraries \cite{HowarISBJ12}, and checking that a legacy
component and a refactored implementation have the same behaviour
\cite{SHV16}. We refer to \cite{Vaa17,HowarS2018} for surveys and
further references.
In many applications it is crucial for models to
describe \emph{control flow}, i.e., states of a component,
\emph{data flow}, i.e., constraints on data parameters that
are passed when the component interacts with its environment, as well
as the mutual influence between control flow and data flow.
Such
models often take the form of \emph{extended finite state machines}
(EFSMs). Recently, various techniques have been employed to extend
automata learning to a specific class of EFSMs called \emph{register
automata}, which combine control flow with guards and assignments to
data variables~\cite{Cassel2016,AJUV15,CEGAR12}.
While these works demonstrate that it is theoretically
possible to infer such richer models, the presented approaches
do not scale well and are not
yet satisfactorily developed for richer classes of models
(c.f. \cite{HowarJV19}):
Existing techniques
either rely on manually constructed mappers that abstract the data
aspects of input and output symbols into a finite alphabet, or
otherwise infer guards and assignments from black-box observations of
test outputs. The latter can be costly, especially for models where
control flow depends on tests on data parameters in the input: in this
case, learning an exact guard that separates two control flow branches
may require a large number of queries.
One promising strategy for addressing the challenge of identifying
data-flow constraints is to augment learning algorithms with
white-box information extraction methods, which are able to
obtain information about the System Under Test (SUT) at lower cost than black-box techniques.
Several researchers have explored this idea.
Giannakopoulou et al.~\cite{acm2414956} develop an active learning algorithm
that infers safe interfaces of software components with
guarded actions. In their model, the teacher is implemented
using concolic execution for the identification of guards.
Cho et al.~\cite{acm2028077} present MACE, an approach for concolic exploration
of protocol behaviour. The approach uses active automata learning
for discovering so-called deep states in the protocol behaviour.
From these states, concolic execution is employed in order to
discover vulnerabilities.
Similarly, \citet{BotincanB13} present a learning algorithm for
inferring models of stream transducers that integrates active
automata learning with symbolic execution and counterexample-guided
abstraction refinement. They show how the models
can be used to verify properties of input sanitizers in
Web applications.
Finally,~\citet{acm2483783} extend the work of~\cite{acm2414956}
and integrate knowledge obtained through static code analysis
about the potential effects of component
method invocations on a component's state
to improve the performance during symbolic
queries.
So far, however, white-box techniques have never been integrated
with learning algorithms for register automata.
In this article, we present the first active learning algorithm for a general class of register automata that uses white-box techniques.
More specifically, we show how dynamic taint analysis can be used to
efficiently extract constraints on input and output parameters
from a test, and how these constraints can be used to improve the performance of the $\mathit{SL}^{\ast}$ algorithm of~\citet{Cassel2016}.
The $\mathit{SL}^{\ast}$ algorithm generalizes the classical
\(\mathit{L}^*\) algorithm of \citet{DanaAngluinBasePaper} and has
been used successfully to learn register automaton models,
for instance of Linux and Windows implementations of TCP~\cite{FH17}.
We have implemented the presented method on top of
RALib~\cite{raLibFirstPaper}, a library that provides an
implementation of the $\mathit{SL}^{\ast}$ algorithm.
\begin{figure}
\caption{MAT Framework (our addition --- tainting --- in red).
Double arrows indicate possible multiple instances of a query made by an
oracle for a single query by the learner.}
\label{fig:matFramework}
\end{figure}
The integration of the two techniques (dynamic taint analysis
and learning of register automata models) can be explained
most easily with reference to the architecture of RALib,
shown in \Cref{fig:matFramework}, which is a variation of the
\emph{Minimally Adequate Teacher} (MAT) framework of~\cite{DanaAngluinBasePaper}:
In the MAT framework, learning is viewed as a game in which a \emph{learner} has to infer the behaviour of an unknown register automaton $\mathcal{M}$ by asking queries to a \emph{teacher}.
We postulate $\mathcal{M}$ models the behaviour of a \emph{System Under Test (SUT)}.
In the learning phase, the learner (that is, $\mathit{SL}^{\ast}$) is allowed to ask questions to the
teacher in the form of \emph{tree queries} (TQs) and the teacher responds with
\emph{symbolic decision trees} (SDTs).
In order to construct these SDTs, the teacher uses a \emph{tree oracle}, which queries
the SUT with \emph{membership queries} (MQs) and receives a yes/no reply to each.
Typically, the tree oracle asks multiple MQs to answer a single tree query
in order to infer causal impact and flow of data values.
Based on the answers on a number of tree queries, the learner constructs a \emph{hypothesis} in the form of a register
automaton \(\mathcal{H}\). The learner submits
\(\mathcal{H}\) as an \emph{equivalence query (EQ)} to the teacher, asking whether \(\mathcal{H}\) is equivalent to the SUT model $\mathcal{M}$.
The teacher uses an \emph{equivalence oracle} to answer equivalence queries.
Typically, the equivalence oracle asks multiple MQs to answer a single equivalence query.
If, for all membership queries, the output produced by the SUT is consistent with hypothesis $\mathcal{H}$,
the answer to the equivalence query is `Yes' (indicating learning is complete).
Otherwise, the answer `No' is provided, together with a \emph{counterexample} (CE) that indicates
a difference between \(\mathcal{H}\) and $\mathcal{M}$. Based on this CE, learning continues.
In this extended MAT framework, we have constructed new implementations of
the tree oracle and equivalence oracle that leverage the constraints on
input and output parameters that are imposed by a program run:
dynamic tainting is used to extract the constraints on parameters
that are encountered during a run of a program.
Our implementation learns models of Python programs,
using an existing tainting library for Python~\cite{abs-1810-08289}.
Effectively, the combination of the $\mathit{SL}^{\ast}$ with our new tree and equivalence oracles constitutes a \emph{grey-box} learning algorithm, since we only give the learner partial information about the internal structure of the SUT\@.
We compare our grey-box tree and equivalence oracles with the existing
black-box versions of these oracles on several benchmarks, including Python's
{\tt queue} and {\tt set} modules.
Our proof-of-concept implementation\footnote{Available at \url{https://bitbucket.org/toonlenaerts/taintralib/src/basic}.} results in almost two orders of magnitude
improvement in terms of numbers of inputs sent to the software system.
Our approach, which generalises to richer model classes,
also enables RALib to learn models that are completely out of reach
for black-box techniques, such as combination locks.
\noindent
\textbf{Outline:}~\Cref{sec:preliminaries} contains preliminaries;~\Cref{sec:tainting}
discusses tainting in our Python SUTs;~\Cref{sec:learningWithTainting} contains
the algorithms we use to answer TQs using tainting and the definition for the
tainted equivalence oracle needed to learn combination lock
automata;~\Cref{sec:experimentalEval} contains the experimental evaluation of
our technique; and~\Cref{sec:conclusion} concludes.
\iflong
\else
\fi
This section is divided into two portions: the first introduces the early work
of automata learning and efforts towards learning more expressive models, and
the second discusses integration of white-box techniques in automata learning.
Beginning with Angluin's seminal paper~\cite{DanaAngluinBasePaper}, which
introduced the MAT framework and \(L^*\), the first active learning algorithm for deterministic finite
automata, there has been steady progress towards \((a)\) improving the
scalability of active learning and \((b)\) learning richer (i.e., more
expressive) models.
\citet{mealyLearning1, mealyLearning2} extended active learning to inferring
Mealy machines, allowing learning of explicit input/output behaviour.
Additionally, the TTT algorithm~\cite{tttAlgorithm} improved scalability of
counterexample analysis as compared to the \(L^*\) algorithm.
Improvements in equivalence testing for Mealy machines have also played a role
in improving scalability of the same~\cite{wMethod, wpMethod,
improveTestingMethod}.
Learning of models with \emph{infinite} state spaces, such as register automata, was
proposed by~\cite{10.1007/978-3-642-27940-9_17}.
Data parameters could be stored in registers with equality guards being defined
over those parameters.
Register automata were then extended to support richer operations as
well and could be used to learn models of data structures, amongst
others~\cite{raLibFirstPaper,cassel2015ralib,Cassel2016}.
However, these black-box approaches lead to scalability issues.
In order to improve scalability, there has been some work done in integrating
white-box techniques in automata learning: the authors
of~\cite{symbolicComponentInterfaces, sigmaStar, mace}
all use some form of symbolic or `concolic' (concrete + symbolic) execution in
order to improve scalability of active automata learning.
In this work, we use tainting to reduce the number of inputs needed to learn a
model with guards based on equality operations between data parameters.
We use `tainting' in a distinct sense from the commonly-used technique for
security analysis~\cite{Schwartz2010a}, which is --- at risk of
over-simplification --- an information flow technique.
In the context of this work, `tainting' indicates knowledge of what is
`happening' to a data variable, and not the flow of the variable through a
program.
\section{Preliminary definitions and constructions}\label{sec:preliminaries}
This section contains the definitions and constructions necessary to understand active automata learning for models with dataflow.
We first define the concept of a \emph{structure}, followed by \emph{guards}, \emph{data languages}, \emph{register automata}, and finally \emph{symbolic decision trees}.
\begin{definition}[Structure]\label{def:theory}
A structure \( \mathcal{S} = \tuple{R, \mathcal{D},\mathcal{R}}\) is a triple where $R$ is a set of relation symbols, each equipped with an arity, \(\mathcal{D} \) is an infinite domain of data values, and \(\mathcal{R} \) contains a distinguished $n$-ary relation $r^{\mathcal{R}} \subseteq \mathcal{D}^n$ for each $n$-ary relation symbol $r \in R$.
\end{definition}
In the remainder of this article, we fix a structure \( \mathcal{S} = \tuple{R, \mathcal{D},\mathcal{R}}\), where $R$ contains a binary relation symbol $=$ and unary relation symbols $=c$, for each $c$ contained in a finite set $C$ of constant symbols, $\mathcal{D}$ equals the set $\mathbb{N}$ of natural numbers, $=^{\mathcal{R}}$ is interpreted as the equality predicate on $\mathbb{N}$, and to each symbol $c \in C$ a natural number $n_c$ is associated such that $(=c)^{\mathcal{R}} = \{ n_c \}$.
Guards are a restricted type of Boolean formulas that may contain relation symbols from $R$.
\begin{definition}[Guards]
We postulate a countably infinite set $\mathcal{V} = \{ v_1, v_2,\ldots \}$ of \emph{variables}.
In addition, there is a variable $p \not\in\mathcal{V}$ that will play a special role as formal parameter of input symbols; we write $\mathcal{V}^+ = \mathcal{V} \cup \{ p \}$.
A {\em guard} is a conjunction of relation symbols and negated relation symbols over variables.
Formally, the set of \emph{guards} is inductively defined as follows:
\begin{itemize}
\item
If $r\in R$ is an $n$-ary relation symbol and $x_1 ,\ldots, x_n$ are variables from $\mathcal{V}^+$, then $r(x_1,\ldots,x_n)$ and $\neg r(x_1,\ldots,x_n)$ are guards.
\item
If $g_1$ and $g_2$ are guards then $g_1 \wedge g_2$ is a guard.
\end{itemize}
Let $X \subset \mathcal{V}^+$.
We say that $g$ is a guard \emph{over} $X$ if all variables that occur in $g$ are contained in $X$.
A \emph{variable renaming} is a function $\sigma:X \rightarrow \mathcal{V}^+$.
If $g$ is a guard over $X$ then $g[\sigma]$ is the guard obtained by replacing each variable $x$ in $g$ by $\sigma(x)$.
\end{definition}
Next, we define the notion of a \emph{data language}. For this, we fix a finite set of \emph{actions} \(\Sigma \).
A \emph{data symbol} \(\alpha(d)\) is a pair consisting of an action \(\alpha \in \Sigma \) and a data value \(d \in \mathcal{D} \).
While relations may have arbitrary arity, we will assume that all actions have an arity of one to ease notation and simplify the text.
A \emph{data word} is a finite sequence of data symbols, and a \emph{data language} is a set of data words.
We denote concatenation of data words $w$ and $w'$ by \(w \cdot w'\), where \(w\) is the \emph{prefix} and \(w'\) is the \emph{suffix}.
\(\mathit{Acts}(w)\) denotes the sequence of actions \(\alpha_1 \alpha_2 \ldots \alpha_n\) in \(w\), and \(\mathit{Vals}(w)\) denotes the sequence of data values \(d_1 d_2 \ldots d_n\) in \(w\).
We refer to a sequence of actions in $\Sigma^{\ast}$ as a \emph{symbolic suffix}. If $w$ is a symbolic suffix then we write $\dbracket{w}$ for the set of data words $u$ with $\mathit{Acts}(u) = w$.
Data languages may be represented by \emph{register automaton}, defined below.
\begin{definition}[Register Automaton]\label{def:registerAutomaton}
A Register Automaton (RA) is a tuple \(\mathscr{M} = (L, l_0, \mathcal{X}, \Gamma, \lambda)\) where
\begin{itemize}
\item \(L\) is a finite set of locations, with \(l_0\) as the initial location;
\item \(\mathcal{X}\) maps each location \(l \in L\) to a finite set of registers \(\mathcal{X}(l)\);
\item \(\Gamma \) is a finite set of transitions, each of the form \(\tuple{l, \alpha(p), g, \pi, l'}\), where
\begin{itemize}
\item \(l, l'\) are source and target locations respectively,
\item \(\alpha(p)\) is a parametrised action,
\item \(g\) is a \emph{guard} over \(\mathcal{X}(l) \cup \set{p} \), and
\item \(\pi \) is an assignment mapping from \(\mathcal{X}(l')\) to \(\mathcal{X}(l) \cup \set{p}\); and
\end{itemize}
\item \(\lambda \) maps each location in \(L\) to either accepting $(+)$ or rejecting $(-)$.
\end{itemize}
We require that $\mathscr{M}$ is {\em deterministic} in the sense
that for each location $l \in L$ and input symbol $\alpha \in \Sigma$,
the conjunction of the guards of any pair of distinct $\alpha$-transitions with source $l$ is not satisfiable.
$\mathscr{M}$ is \emph{completely specified} if for all \(\alpha \)-transitions out of a location, the disjunction of the guards of the \(\alpha \)-transitions is a tautology.
$\mathscr{M}$ is said to be \emph{simple} if there are no registers in the initial location, i.e., \(\mathcal{X}(l_{0}) = \varnothing \).
In this text, all RAs are assumed to be completely specified and simple, unless explicitly stated otherwise.
Locations $l \in L$ with $\lambda(l) = +$ are called \emph{accepting}, and locations with $\lambda(l) = -$ \emph{rejecting}.
\end{definition}
\begin{example}[FIFO-buffer]
The register automaton displayed in~\Cref{fig:fifo} models a FIFO-buffer with capacity 2. It has three accepting locations $l_0$, $l_1$ and $l_2$ (denoted by a double circle), and one rejecting ``sink'' location $l_3$ (denoted by a single circle). Function $\mathcal{X}$ assigns the empty set of registers to locations $l_0$ and $l_3$, singleton set $\set{x}$ to location $l_1$, and set $\set{x, y}$ to $l_2$.
\begin{figure}
\caption{FIFO-buffer with a capacity of 2 modeled as a register automaton.}
\label{fig:fifo}
\end{figure}
\end{example}
\subsection{Semantics of a RA}\label{sec:semanticsRA}
We now formalise the semantics of an RA\@.
A \emph{valuation} of a set of variables $X$ is a function $\nu : X \rightarrow \mathcal{D}$ that assigns data values to variables in $X$.
If $\nu$ is a valuation of $X$ and $g$ is a guard over $X$ then $\nu \models g$ is defined inductively by:
\begin{itemize}
\item
$\nu \models r(x_1,\ldots,x_n)$ iff $(\nu(x_1),\ldots,\nu(x_n)) \in r^{\mathcal{R}}$
\item
$\nu \models \neg r(x_1,\ldots,x_n)$ iff $(\nu(x_1),\ldots,\nu(x_n)) \not\in r^{\mathcal{R}}$
\item
$\nu \models g_1 \wedge g_2$ iff $\nu \models g_1$ and $\nu \models g_2$
\end{itemize}
A \emph{state} of a RA \( \mathscr{M} = (L, l_0, \mathcal{X}, \Gamma,
\lambda)\) is a pair \(\tuple{l,\nu}\), where \(l \in L\) is a location and \(\nu : \mathcal{X}(l) \rightarrow \mathcal{D} \) is a valuation of the set of registers at location \(l\).
A \emph{run} of \(\mathscr{M}\) over data word \(w = \alpha_1(d_1) \ldots \alpha_n(d_n)\) is a sequence
\[
\tuple{l_0, \nu_0} \xrightarrow{\alpha_1(d_1), g_1, \pi_1} \tuple{l_1, \nu_1} \ldots
\tuple{l_{n-1}, \nu_{n-1}} \xrightarrow{\alpha_n(d_n), g_n, \pi_n} \tuple{l_n, \nu_n},
\]
where
\begin{itemize}
\item
for each $0 \leq i \leq n$, $\tuple{l_i, \nu_i}$ is a state (with $l_0$ the initial location),
\item
for each $0 < i \leq n$, \(\tuple{l_{i-1}, \alpha_i(p), g_i, \pi_i, l_i} \in \Gamma \) such that
\( \iota_i \vDash g_i \) and \(\nu_i = \iota_i \circ \pi_i \), where $\iota_i = \nu_{i-1} \cup \{ p \mapsto d_{i} \}$ extends \(\nu_{i-1}\) by mapping $p$ to $d_i$.
\end{itemize}
A run is \emph{accepting} if \(\lambda(l_n) = + \), else \emph{rejecting}.
The language of $\mathscr{M}$, notation $L(\mathscr{M})$, is the set of words $w$ such that $\mathscr{M}$ has an accepting run over $w$.
Word $w$ is \emph{accepted (rejected) under} valuation $\nu_0$ if $\mathscr{M}$ has an accepting (rejecting) run that starts in state $\tuple{l_0, \nu_0}$.
\begin{example}
Consider the FIFO-buffer example from~\Cref{fig:fifo}. This RA has a run
\begin{eqnarray*}
\tuple{l_0, \nu_0 = []} & \xrightarrow{{\sf Push}(7), g_1 \equiv \top, \pi_1 = [x \mapsto p]} & \tuple{l_1, \nu_1 = [x \mapsto 7]} \\
& \xrightarrow{{\sf Push}(7), g_2 \equiv \top, \pi_2 = [x \mapsto x, y \mapsto p]} & \tuple{l_2, \nu_2 = [x\mapsto 7, y \mapsto 7]} \\
& \xrightarrow{{\sf Pop}(7), g_3 \equiv p=x, \pi_3 = [x \mapsto y]}& \tuple{l_1, \nu_3 = [x \mapsto 7]}\\
& \xrightarrow{{\sf Push}(5), g_4 \equiv \top, \pi_4 = [x \mapsto x, y \mapsto p]} & \tuple{l_2, \nu_4 = [x \mapsto 7, y\mapsto 5]} \\
& \xrightarrow{{\sf Pop}(7), g_5 \equiv p=x, \pi_5 = [x \mapsto y]} & \tuple{l_1, \nu_5 = [x \mapsto 5]}\\
& \xrightarrow{{\sf Pop}(5), g_6 \equiv p=x, \pi_6 = []} & \tuple{l_0, \nu_6 = []}
\end{eqnarray*}
and thus the trace is ${\sf Push}(7) ~ {\sf Push}(7)~ {\sf Pop}(7) ~ {\sf Push}(5) ~ {\sf Pop}(7) ~ {\sf Pop}(5)$.
$\lrcorner${}
\end{example}
\subsection{Symbolic Decision Tree\label{sec:SymbolicDecisionTree}}
The $\mathit{SL}^*$ algorithm uses \emph{tree queries} in place of membership queries.
The arguments of a tree query are a prefix data word $u$ and a symbolic suffix $w$, i.e., a data word with uninstantiated data parameters.
The response to a tree query is a so-called \emph{symbolic decision tree} (SDT), which
has the form of a tree-shaped register automaton that accepts/rejects suffixes
obtained by instantiating data parameters in one of the symbolic suffixes.
\begin{figure}
\caption{SDT for prefix ${\sf Push}(5)~ {\sf Push}(7)$ and symbolic suffix ${\sf Pop} ~ {\sf Pop}$ in the FIFO-buffer example.}
\label{fig:SDT}
\end{figure}
Let us illustrate this on the FIFO-buffer example from~\Cref{fig:fifo}
for the prefix ${\sf Push}(5)~ {\sf Push}(7)$ and the symbolic suffix ${\sf Pop} ~ {\sf Pop}$.
The acceptance/rejection of suffixes obtained by instantiating data parameters after ${\sf Push}(5) ~ {\sf Push}(7)$ can be represented by the SDT in~\Cref{fig:SDT}.
In the initial location, values $5$ and $7$ from the prefix are stored in registers $x_1$ and $x_2$, respectively.
Thus, SDTs will generally not be simple RAs.
Moreover, since the leaves of an SDT have no outgoing transitions, they are also not completely specified.
We use the convention that register $x_i$ stores the $i^{\mathit{th}}$ data value.
Thus, initially, register $x_1$ contains value $5$ and register $x_2$ contains value $7$.
The initial transitions in the SDT contain an update $x_3 := p$, and the final transitions an update $x_4 :=p$.
For readability, these updates are not displayed in the diagram.
The SDT accepts suffixes of form ${\sf Pop}(d_1) ~ {\sf Pop}(d_2)$ iff $d_1$ equals the value stored in register $x_1$, and $d_2$ equals the data value stored in register $x_2$.
\iflong
The formal definitions of an SDT and the notion of a tree oracle are presented in Appendix~\ref{appendix:treeOracleEquality}.
\fi
For a more detailed discussion of SDTs we refer to~\cite{Cassel2016}.
\section{Tainting}\label{sec:tainting}
We postulate that the behaviour of the SUT (in our case: a Python program) can be modeled by a register automaton $\mathscr{M}$.
In a black-box setting, observations on the SUT will then correspond to words from the data language of $\mathscr{M}$.
In this section, we will describe the additional observations that a learner can make in a grey-box setting, where the constraints on the data parameters that are imposed within a run become visible.
In this setting, observations of the learner will correspond to what we call tainted words of $\mathscr{M}$.
Tainting semantics is an extension of the standard semantics in which each input value is ``tainted'' with a unique marker from \(\mathcal{V} \).
In a data word \(w = \alpha_1(d_1) \alpha_2(d_2) \ldots \alpha_n(d_n)\), the first data value $d_1$ is tainted with marker $v_1$, the second data value $d_2$ with $v_2$, etc.
While the same data value may occur repeatedly in a data word, all the markers are different.
\subsection{Semantics of Tainting}\label{sec:taintingSemantics}
A \emph{tainted state} of an RA \( \mathscr{M} = (L, l_0, \mathcal{X}, \Gamma,
\lambda)\) is a triple \(\tuple{l,\nu, \zeta}\), where \(l \in L\) is a location, \(\nu : \mathcal{X}(l) \rightarrow \mathcal{D} \) is a valuation, and $\zeta : \mathcal{X}(l) \rightarrow \mathcal{V}$ is a function that assigns a marker to each register of $l$.
A \emph{tainted run} of \(\mathscr{M}\) over data word \(w = \alpha_1(d_1) \ldots \alpha_n(d_n)\) is a sequence
\[
\tau = \tuple{l_0, \nu_0, \zeta_0} \xrightarrow{\alpha_1(d_1), g_1, \pi_1} \tuple{l_1, \nu_1, \zeta_1} \ldots
\tuple{l_{n-1}, \nu_{n-1}, \zeta_{n-1}} \xrightarrow{\alpha_n(d_n), g_n, \pi_n} \tuple{l_n, \nu_n, \zeta_n},
\]
where
\begin{itemize}
\item
$\tuple{l_0, \nu_0} \xrightarrow{\alpha_1(d_1), g_1, \pi_1} \!\tuple{l_1, \nu_1} \ldots
\!\tuple{l_{n-1}, \nu_{n-1}} \xrightarrow{\alpha_n(d_n), g_n, \pi_n} \!\tuple{l_n, \nu_n}$ is a run of \(\mathscr{M}\),
\item
for each $0 \leq i \leq n$, $\tuple{l_i, \nu_i, \zeta_i}$ is a tainted state,
\item
for each $0 < i \leq n$, $\zeta_i = \kappa_i \circ \pi_i$, where $\kappa_i = \zeta_{i-1} \cup \{ (p,v_i) \}$.
\end{itemize}
The tainted word of $\tau$ is the sequence $w = \alpha_1 (d_1) G_1 \alpha_2 (d_2) G_2 \cdots \alpha_n (d_n) G_n$,
where $G_i = g_i[\kappa_i]$, for $0 < i \leq n$.
We define $\mathit{constraints}_{\mathscr{M}}(\tau) = [G_1,\ldots, G_n]$.
Let $w = \alpha_1(d_1) \ldots \alpha_n(d_n)$ be a data word. Since register automata are deterministic, there is a unique tainted run $\tau$ over $w$. We define $\mathit{constraints}_{\mathscr{M}}(w) = \mathit{constraints}_{\mathscr{M}}(\tau)$, that is, the constraints associated to a data word are the constraints of the unique tainted run that corresponds to it.
In the untainted setting a membership query for data word $w$ leads to a response ``yes'' if $w \in L(\mathscr{M})$, and a response ``no'' otherwise, but in a tainted setting the predicates $\mathit{constraints}_{\mathscr{M}}(w)$ are also included in the response, and provide additional information that the learner may use.
\begin{example}
Consider the FIFO-buffer example from~\Cref{fig:fifo}. This RA has a tainted run
\begin{alignat*}{2}
\tuple{l_0, [],[]} & \xrightarrow{{\sf Push}(7)} && \tuple{l_1, [x \mapsto 7], [x\mapsto v_1]}
\xrightarrow{{\sf Push}(7)} \tuple{l_2, [x\mapsto 7,y\mapsto 7], [x\mapsto v_1, y \mapsto v_2]} \\
& \xrightarrow{{\sf Pop}(7)}&& \tuple{l_1, [x\mapsto 7], [x\mapsto v_2]}
\xrightarrow{{\sf Push}(5)} \tuple{l_2, [x\mapsto 7,y\mapsto 5], [x\mapsto v_2, y \mapsto v_4]} \\
& \xrightarrow{{\sf Pop}(7)} && \tuple{l_1, [x\mapsto 5], [x\mapsto v_4]}
\xrightarrow{{\sf Pop}(5)} \tuple{l_0, [], []}
\end{alignat*}
(For readability, guards $g_i$ and assignments $\pi_i$ have been left out.)
The constraints in the corresponding tainted trace can be computed as follows:
\begin{align*}
&\kappa_1 = [p \mapsto v_1] && G_1 \equiv \top[\kappa_1] \equiv \top\\
&\kappa_2 = [x \mapsto v_1, p \mapsto v_2] && G_2 \equiv \top[\kappa_2] \equiv \top\\
&\kappa_3 = [x \mapsto v_1, y \mapsto v_2, p \mapsto v_3] && G_3 \equiv (p=x)[\kappa_3] \equiv v_3 = v_1\\
&\kappa_4 = [x \mapsto v_2, p \mapsto v_4] && G_4 \equiv \top[\kappa_4] \equiv \top\\
&\kappa_5 = [x \mapsto v_2, y \mapsto v_4, p \mapsto v_5] && G_5 \equiv (p=x)[\kappa_5] \equiv v_5 = v_2\\
&\kappa_6 = [x \mapsto v_4, p \mapsto v_6] && G_6 \equiv (p=x)[\kappa_6] \equiv v_6 = v_4
\end{align*}
and thus the tainted word is:
\[
{\sf Push}(7) ~ \top~ {\sf Push}(7) ~ \top ~ {\sf Pop}(7) ~ v_3 = v_1 ~ {\sf Push}(5) ~ \top~ {\sf Pop}(7) ~ v_5=v_2 ~ {\sf Pop}(5) ~ v_6 = v_4,
\]
and the corresponding list of constraints is $ [ \top, \top, v_3 = v_1, \top, v_5 = v_2, v_6 = v_4]$.
$\lrcorner${}
\end{example}
Various techniques can be used to observe tainted traces, for instance symbolic and concolic execution.
In this work, we have used a library called ``\texttt{taintedstr}'' to achieve tainting in Python and make tainted traces available to the learner.
\subsection{Tainting in Python}\label{sec:PythonTainting}
Tainting in Python is achieved by using a library called
``\texttt{taintedstr}''\footnote{See \cite{abs-1810-08289} and
\url{https://github.com/vrthra/taintedstr}.}, which implements a
``\texttt{tstr}'' (\emph{tainted string}) class.
We do not discuss the entire implementation in detail, but only introduce the
portions relevant to our work.
The ``\texttt{tstr}'' class works by \emph{operator overloading}:
each operator is overloaded to record its own invocation.
The \texttt{tstr} class overloads the implementation of the ``\texttt{\_\_eq\_\_}''
(equality) method in Python's \texttt{str} class, amongst others.
In this text, we only consider the equality method.
A \texttt{tstr} object \(x\) can be considered as a triple \(\tuple{o, t, \mathit{cs}}\),
where \(o\) is the (base) string object, \(t\) is the taint value associated
with string \(o\), and \(\mathit{cs}\) is a set of comparisons made by \(x\) with other
objects, where each comparison \(c \in \mathit{cs}\) is a triple \(\tuple{f, a, b}\) with
\(f\) the name of the binary method invoked on \(x\), \(a\) a copy of \(x\), and
\(b\) the argument supplied to \(f\).
Each method \(f\) in the \texttt{tstr} class is an overloaded implementation
of the relevant (base) method \(f\) as follows:
\begin{lstlisting}[style=py, language=Python]
def f(self, other):
self.cs.add((m._name_, self, other))
return self.o.f(other) # `o' is the base string
\end{lstlisting}
We present a short example of how such an overloaded method would work below:
\begin{example}[\texttt{tstr} tainting]\label{example:tstrTainting}
Consider two \texttt{tstr} objects: \(x_1 = \tuple{ \textnormal{``1''} , 1, \emptyset}\)
and \(x_2 = \tuple{ \textnormal{``1''}, 2, \emptyset}\).
Calling \(x_1 == x_2 \) returns \textbf{True} as $x_{1}.o = x_{2}.o$.
As a side-effect of $f$, the set of comparisons $x_{1}.cs$ is updated with the
triple \(c = \tuple{ \textnormal{``\_\_eq\_\_''}, x_1, x_2}\).
We may then confirm that \(x_1\) is compared to \(x_2\) by checking the taint
values of the variables in comparison \(c\): \(x_1.t = 1\) and \(x_2.t
= 2\).
Note, our approach to tainting limits the recorded information to operations performed
on a \texttt{tstr} object.
\iflong
$\lrcorner${}
\end{example}
\begin{example}[Complicated Comparison]
\fi
Consider the following snippet, where \(x_1, x_2, x_3\) are \texttt{tstr} objects
with \(1,2,3\) as taint values respectively:
\begin{lstlisting}[style=py, language=Python, gobble=2]
if not (x_1 == x_2 or (x_2 != x_3)):
# do something
\end{lstlisting}
If the base values of \(x_1\) and \(x_2\) are equal, the
Python interpreter will ``short-circuit'' the if-statement and the second
condition, \(x_2 \neq x_3\), will not be evaluated.
Thus, we only obtain one comparison: $x_{1} = x_{2}$.
On the other hand, if the base values of \(x_1\) and \(x_2\) are not equal,
the interpreter will not short-circuit, and both comparisons will be recorded
as \( \set{x_2 = x_3, x_1 \neq x_2}\).
\iflong
While the comparisons are stored as a set, from the perspective of the tainted
trace, the guard(s) is a single conjunction: \( x_2 = x_3 \wedge x_1 \neq x_2
\).
\fi
However, the external negation operation
will not be recorded by any of the \texttt{tstr} objects: the
negation was not performed on the \texttt{tstr} objects.
$\lrcorner${}
\end{example}
\section{Learning Register Automata using Tainting}\label{sec:learningWithTainting}
Given an SUT and a tree query, we generate an SDT in the following steps:
\emph{(i)} construct a \emph{characteristic predicate} of the tree query (\Cref{alg:decisionQuery}) using membership and guard queries,
\emph{(ii)} transform the characteristic predicate into an SDT (\Cref{alg:fullSDT}), and
\emph{(iii)} minimise the obtained SDT (\Cref{alg:minimalSDT}).
\subsection{Tainted Tree Oracle \label{sec:taintedTreeOracle}}
\subsubsection{Construction of Characteristic Predicate}\label{sec:decisionRelation}
For $u = \alpha_1(d_1) \cdots \alpha_k(d_k)$ a data word, $\nu_u$ denotes the valuation of $\set{x_1,\ldots,x_k}$ with $\nu_u(x_i) = d_i$, for $1 \leq i \leq k$.
Suppose $u$ is a prefix and $w= \alpha_{k+1} \cdots \alpha_{k+n}$ is a symbolic suffix. Then $H$ is a \emph{characteristic predicate} for $u$ and $w$ in $\mathscr{M}$ if, for each valuation $\nu$ of $\set{x_1,\ldots, x_{k+n}}$ that extends $\nu_u$,
\[
\nu \models H \iff \alpha_1(\nu(x_1)) \cdots \alpha_{k+n}(\nu(x_{k+n})) \in L(\mathscr{M}),
\]
that is, $H$ characterizes the data words $u'$ with $\mathit{Acts}(u') = w$ such that $u \cdot u'$ is accepted by $\mathscr{M}$.
In the case of the FIFO-buffer example from~\Cref{fig:fifo}, a characteristic predicate
for prefix ${\sf Push}(5)~ {\sf Push}(7)$ and symbolic suffix ${\sf Pop} ~ {\sf Pop}$ is $x_3 = x_1 \wedge x_4 = x_2$.
A characteristic predicate for the empty prefix and symbolic suffix ${\sf Pop}$ is $\perp$, since this trace will inevitably lead to the sink location $l_3$ and there are no accepting words.
\Cref{alg:decisionQuery} shows how a characteristic predicate may be computed by systematically exploring all the (finitely many) paths of $\mathscr{M}$ with prefix $u$ and suffix $w$ using tainted membership queries.
During the execution of~\Cref{alg:decisionQuery}, predicate $G$ describes the part of the parameter space that still needs to be explored, whereas $H$ is the characteristic predicate for the part of the parameter space that has been covered.
We use the notation $H \equiv T$ to indicate syntactic equivalence, and $H = T$ to indicate logical equivalence.
Note, if there exists no parameter space to be explored (i.e., $w$ is empty)
and $u \in L(\mathscr{M})$, the algorithm returns $H \equiv \perp \vee \top$ (as the empty conjunction equals $\top$).
\begin{algorithm}[t]
\KwData{A tree query consisting of prefix \(u = \alpha_1(d_1) \cdots \alpha_k(d_k)\) and symbolic suffix \(w = \alpha_{k+1} \cdots \alpha_{k+n} \)}
\KwResult{A characteristic predicate for $u$ and $w$ in $\mathscr{M}$}
\(G := \top \), \(H := \bot \),
$V := \set{x_1,\ldots, x_{k+n}}$ \;
\While{$\exists \mbox{ valuation } \nu \mbox{ for } V \mbox{ that extends } \nu_u \mbox{ such that } \nu \models G$}{
\(\nu := \) valuation for $V$ that extends \(\nu_u \) such that \(\nu \models G \) \;
\(z := \alpha_1(\nu(x_1)) \cdots \alpha_{k+n}(\nu(x_{k+n})) \)
\tcp*{Construct membership query}
\(I := \bigwedge_{i=k+1}^{k+n} \mathit{constraints}_{\mathscr{M}}(z)[i]\)
\tcp*{Constraints resulting from query}
\uIf(\tcp*[f]{Result query ``yes'' or ``no''}){$z \in L(\mathscr{M})$}{
\(H :=H \vee I\)
}
\(G := G \wedge \neg I \) \;
}
\textbf{return} \(H \)
\caption{\texttt{ComputeCharacteristicPredicate}}\label{alg:decisionQuery}
\end{algorithm}
\begin{example}[\Cref{alg:decisionQuery}]
Consider the FIFO-buffer example and the tree query with prefix ${\sf Push}(5)~ {\sf Push}(7)$ and symbolic suffix ${\sf Pop} ~ {\sf Pop}$.
After the prefix, location $l_2$ is reached. From there, three paths are possible with actions ${\sf Pop} ~ {\sf Pop}$:
$l_2 l_3 l_3$, $l_2 l_1 l_3$ and $l_2 l_1 l_0$. We consider an example run of \Cref{alg:decisionQuery}.
Initially, $G_0 \equiv \top$ and $H_0 \equiv \perp$. Let
$\nu_1 = [ x_1 \mapsto 5, x_2 \mapsto 7, x_3 \mapsto 1, x_4 \mapsto 1]$.
Then $\nu_1$ extends $\nu_u$ and $\nu_1 \models G_0$. The resulting tainted run corresponds to path $l_2 l_3 l_3$ and so the tainted query gives path constraint $I_1 \equiv x_3 \neq x_1 \wedge \top$. Since the tainted run is rejecting, $H_1 \equiv \perp$ and $G_1 \equiv \top \wedge \neg I_1$.
In the next iteration, we set $\nu_2 = [ x_1 \mapsto 5, x_2 \mapsto 7, x_3 \mapsto 5, x_4 \mapsto 1]$.
Then $\nu_2$ extends $\nu_u$ and $\nu_2 \models G_1$. The resulting tainted run corresponds to path $l_2 l_1 l_3$ and so the tainted query gives path constraint $I_2 \equiv x_3 = x_1 \wedge x_4 \neq x_2$. Since the tainted run is rejecting, $H_2 \equiv \perp$ and $G_2 \equiv \top \wedge \neg I_1 \wedge \neg I_2$.
In the final iteration, we set $\nu_3 = [ x_1 \mapsto 5, x_2 \mapsto 7, x_3 \mapsto 5, x_4 \mapsto 7]$.
Then $\nu_3$ extends $\nu_u$ and $\nu_3 \models G_2$. The resulting tainted run corresponds to path $l_2 l_1 l_0$ and the tainted query gives path constraint $I_3 \equiv x_3 = x_1 \wedge x_4 = x_2$. Now the tainted run is accepting, so $H_3 \equiv \perp \vee I_3$ and $G_3 = \top \wedge \neg I_1 \wedge \neg I_2 \wedge \neg I_3$. As $G_3$ is unsatisfiable, the algorithm terminates and returns characteristic predicate $H_3$.
\end{example}
\subsubsection{Construction of a non-minimal SDT\label{sec:nonMinimalSDT}}
For each tree query with prefix $u$ and symbolic suffix $w$, the corresponding characteristic predicate $H$ is sufficient to construct an SDT using~\Cref{alg:fullSDT}.
\begin{algorithm}[h]
\KwData{Characteristic predicate $H$, index $n = k+1$,
\\Number of suffix parameters $N$}
\KwResult{Non-minimal SDT \(\mathcal{T} \)}
\uIf{\( n = k+N +1\)}{
\(l_0 := \) SDT node\;
\(z :=\) if $H \iff \bot $ then $-$ else $+$
\tcp*{Value $\lambda$ for leaf node of the SDT}
\textbf{return} \(\tuple{\set{l_0}, l_0, [l_0 \mapsto \emptyset], \varnothing, [l_0 \mapsto z]} \)
\tcp*{RA with single location}
}
\uElse{
\(\mathcal{T} := \) SDT node\;
\(I_t := \set{i \mid x_{n} \odot x_{i} \in H, \, n > i} \)
\tcp*{$x_{i}$ may be a parameter or a constant}
\uIf{\(I_t \) is \(\varnothing \)}{
\(t := \) \texttt{SDTConstructor}\(\parens{H, n+1, N} \)
\tcp*{No guards present}
Add \(t \) with guard $\top$ to \(\mathcal{T} \) \;
}
\uElse{
\(g := \bigwedge_{i \in I_t} x_{n} \neq x_{i} \) \tcp*{Disequality guard case}
\(H' := \bigvee_{f \in H} f \wedge g \) if $f \wedge g$ is
satisfiable else $\bot$ \tcp*{$f$ is a disjunct}
\(t' := \) \texttt{SDTConstructor}\(\parens{H', n+1, N} \) \;
Add \(t' \) with guard $g$ to \(\mathcal{T} \) \;
\For{\(i \in I_t \)}{
\(g := x_{n} = x_{i} \) \tcp*{Equality guard case}
\(H' := \bigvee_{f \in H} f \wedge g \) if $f \wedge g$ is
satisfiable else $\bot$ \;
\(t':=\) \texttt{SDTConstructor}\(\parens{H', n+1, N} \) \;
Add \(t' \) with guard $g$ to \(\mathcal{T} \)
}
}
\textbf{return} \(\mathcal{T} \)
}
\caption{\texttt{SDTConstructor}}\label{alg:fullSDT}
\end{algorithm}
\iflong
\Cref{alg:fullSDT} proceeds in the following manner: for a symbolic action
\(\alpha\parens{x_{n}} \) with parameter \(x_{n} \), construct the \emph{potential set} \(I_t \) (lines 6 \& 7), that is,
the set of parameters to which $x_{n}$ is compared to in $H$.
For line $7$, recall that $H$ is a DNF formula, hence each literal $x_{j} \odot x_{k}$ is considered in the set comprehension, rather than the conjunctions making up the predicate $H$.
Each element $x_{i} \in I_{t}$ can be either a formal parameter in the tree query or a constant $c_{i} \in C$ from our chosen structure.
Using \(I_t \), we can construct the guards as follows:
\begin{itemize}
\item \textbf{Disequality guard}: The disequality guard will be \(g :=
\bigwedge_{i \in I_t} x_{n} \neq x_{i} \).
We can then check which guards in $H$ are still satisfiable with the
addition of $g$ and construct the predicate $H'$ for the next call of
\Cref{alg:fullSDT} (lines 13--16).
\item \textbf{Equality guard (s)}: For each parameter $x_{i}$ for
\(i \in I_t \), the equality guard will be \(g := x_{n} = x_{i} \).
We can then check which guards in $H$ are still satisfiable with the
addition of $g$ and this becomes the predicate $H'$ for the next call of
\Cref{alg:fullSDT} (lines 18--21).
\end{itemize}
At the base case (lines $1-4$), there are no more parameters remaining
and we return a non-accepting leaf if $H = \bot$, otherwise accepting.
As mentioned, at each non-leaf location $l$ of the SDT $\mathcal{T}$ returned
by~\Cref{alg:fullSDT}, there exists a potential set \(I_t\).
For each parameter $x_{i}$, we know that there is a comparison between
$x_i$ and $x_{n}$ in the SUT.
\else
We construct the SDT recursively while processing each action in the symbolic suffix $w= \alpha_{k+1} \cdots \alpha_{k+m}$ in order.
The valuation $\nu$ is unnecessary, as there are no guards defined over the prefix parameters.
During the execution of~\Cref{alg:fullSDT}, for a suffix action
\(\alpha(x_{n}) \), the \emph{potential set} \(I_t \) contains the set of parameters to which $x_{n}$ is compared to in $H$.
Each element in $I_{t}$ can be either a formal parameter in the tree query or a constant.
For each parameter $x_{i} \in I_{t}$ we construct an \emph{equality} sub-tree where $x_{n} = x_{i}$.
We also construct a \emph{disequality} sub-tree where $x_{n}$ is not equal to any of the parameters in $I_{t}$.
The base case (i.e., $w = \epsilon$) returns an accepting or rejecting leaf node according to the characteristic predicate: if $H \iff \bot$ then rejecting, else accepting.
\Cref{example:fullSDT} provides a short explanation of~\Cref{alg:fullSDT}.
\fi
\begin{example}[\Cref{alg:fullSDT}\label{example:fullSDT}]
Consider a characteristic predicate $H \equiv I_{1} \vee I_{2} \vee I_{3} \vee I_{4}$, where
$I_{1} \equiv x_{2} \neq x_{1} \wedge x_{3} \neq x_{1}$,
$I_{2} \equiv x_{2} = x_{1} \wedge x_{3} \neq x_{1}$,
$I_{3} \equiv x_2 \neq x_{1} \wedge x_3 = x_{1}$,
$I_{4} \equiv x_2 = x_{1} \wedge x_3 = x_{1}$.
We discuss only the construction of the sub-tree rooted at node $s_{21}$
for the SDT visualised in~\Cref{fig:nonMinimalSDT}; the construction of the
remainder is similar.
Initially, $x_{n} = x_{k+1} = x_{2}$.
Potential set $I_{t}$ for $x_{2}$ is $\set{x_{1}}$ as $H$ contains the literals
$x_{2} = x_{1}$ and $x_{2} \neq x_{1}$.
Consider the construction of the equality guard $g := x_{2} = x_{1}$.
The new characteristic predicate is
$H' \equiv (I_{2} \wedge g) \vee (I_{4} \wedge g)$, as
$I_{1}$ and $I_{3}$ are unsatisfiable when conjoined with $g$.
For the next call, with $n=3$, the current variable is $x_{3}$,
with predicate $H = H'$ (from the parent instance).
We obtain the potential set for $x_{3}$ as $\set{x_{1}}$.
The equality guard is $g' := x_{3} = x_{1}$ with the new characteristic predicate
$H'' \equiv I_{4} \wedge g \wedge g'$, i.e., $H'' \iff x_{2} = x_{1} \wedge x_{3} = x_{1}$
(note, $I_{2} \wedge g \wedge g'$ is unsatisfiable).
In the next call, we have $n = 4$, thus we compute a leaf.
As $H''$ is not $\bot$, we return an accepting leaf $t$.
The disequality guard is $g'' := x_{3} \neq x_{1}$ with characteristic predicate
$H''' \iff x_{2} = x_{1} \wedge x_{3} = x_{1} \wedge x_{3} \neq x_{1} \iff \bot$.
In the next call, we have $n=4$, and we return a non-accepting leaf $t'$.
The two trees $t$ and $t'$ are added as sub-trees with their respective guards $g'$ and $g''$
to a new tree rooted at node $s_{21}$ (see~\Cref{fig:nonMinimalSDT}).
$\lrcorner${}
\end{example}
\subsubsection{SDT Minimisation}\label{sec:SDTminimisation}
\Cref{example:fullSDT} showed a characteristic predicate $H$ containing redundant comparisons, resulting in the non-minimal SDT in~\Cref{fig:nonMinimalSDT}.
We use~\Cref{alg:minimalSDT} to minimise the SDT in~\Cref{fig:nonMinimalSDT} to the SDT in~\Cref{fig:minimalSDT}.
\begin{algorithm}[h]
\KwData{Non-minimal SDT \(\mathcal{T} \), current index $n$}
\KwResult{Minimal SDT \(\mathcal{T}' \)}
\uIf(\tcp*[h]{Base case}){\(\mathcal{T} \) is a leaf}{
\textbf{return} \(\mathcal{T} \)
}
\uElse{
\(\mathcal{T}' := \) SDT node\\
\tcp{Minimise the lower levels}
\For{guard \(g \) with associated sub-tree \(t \) in \(\mathcal{T} \)}{
Add guard \(g \) with associated sub-tree \(\texttt{MinimiseSDT}(t, n+1) \) to \(\mathcal{T}' \)
}
\tcp{Minimise the current level}
\(I := \) Potential set of root node of \(\mathcal{T} \)\\
\(t' := \) disequality sub-tree of \(\mathcal{T} \) with
guard $\bigwedge_{i \in I} x_{n} \neq x_{i}$\\
\(I' := \varnothing \)\\
\For{\(i \in I \)}{
\(t := \) sub-tree of \(\mathcal{T} \) with guard \(x_{n} = x_{i} \)\\
\uIf{\(t' \tuple{x_{i},x_{n}} \not\simeq t \) or
\(t' \tuple{x_{i}, x_{n}} \) is undefined}{
\(I' := I' \cup \set{x_i} \)\\
Add guard \(x_{n} = x_{i} \) with corresponding sub-tree
\(t \) to \(\mathcal{T}' \)
}
}
Add guard \(\bigwedge_{i \in I'} x_{n} \neq x_{i} \) with
corresponding sub-tree \(t' \) to \(\mathcal{T}' \)\\
\textbf{return} \(\mathcal{T}' \)
}
\caption{\texttt{MinimiseSDT}}\label{alg:minimalSDT}
\end{algorithm}
\begin{figure}
\caption{SDT Minimisation:
Redundant nodes (in red, left SDT) are merged together (in green, right SDT).}
\label{fig:minimisationExample}
\label{fig:nonMinimalSDT}
\label{fig:minimalSDT}
\end{figure}
We present an example of the application of~\Cref{alg:minimalSDT}, shown for the
SDT of~\Cref{fig:nonMinimalSDT}.
\Cref{fig:nonMinimalSDT} visualises a non-minimal SDT $\mathcal{T}$, where \(s_{20}\) and
\(s_{21}\) (in red) are essentially ``duplicates'' of each other: the sub-tree for node
\(s_{20}\) is isomorphic to the sub-tree for node \(s_{21}\) under the relabelling
``\(x_2 = x_1\)''.
We indicate this relabelling using the notation $\mathcal{T}[s_{20}]\tuple{x_1, x_2}$ and the
isomorphism relation under the relabelling as $\mathcal{T}[s_{20}]\tuple{x_1, x_2} \simeq \mathcal{T}[s_{21}]$.
\Cref{alg:minimalSDT} accepts the non-minimal SDT of~\Cref{fig:nonMinimalSDT}
and produces the equivalent minimal SDT in~\Cref{fig:minimalSDT}.
Nodes \(s_{20}\) and \(s_{21}\) are merged into one node, \(s_2\), marked in
green.
We can observe that both SDTs still encode the same decision tree.
With~\Cref{alg:minimalSDT}, we have completed our tainted tree oracle, and can
now proceed to the tainted equivalence oracle.
\subsection{Tainted Equivalence Oracle\label{sec:taintedEquivalenceOracle}}
The \emph{tainted equivalence oracle} (TEO), similar to its non-tainted counterpart, accepts
a hypothesis $\mathcal{H}$ and verifies whether $\mathcal{H}$ is equivalent to
register automaton $\mathcal{M}$ that models the SUT\@.
If $\mathcal{H}$ and $\mathcal{M}$ are equivalent, the oracle replies ``yes'',
otherwise it returns ``no'' together with a CE.
The RandomWalk Equivalence Oracle in RALib constructs random traces in order to find a CE\@.
\begin{definition}[Tainted Equivalence Oracle]\label{def:taintedEquivalenceOracle}
For a given hypothesis $\mathcal{H}$, maximum word length $n$, and an SUT $\mathcal{S}$, a tainted
equivalence oracle is a function $\mathcal{O}_{\mathcal{E}}(\mathcal{H}, n,
\mathcal{S})$ that, for all tainted traces $w$ of $\mathcal{S}$ with $| w | \leq n$,
returns $w$ if \( w
\in \mathcal{L}(\mathcal{H}) \iff w \in \mathcal{L}(\mathcal{S}) \) is false, and `Yes' otherwise.
\end{definition}
The TEO is similar to the construction of the characteristic predicate to find a CE: we randomly generate a symbolic suffix of specified length $n$ (with an empty prefix), and construct a predicate $H$ for the query.
For each trace $w$ satisfying a guard in $H$, we confirm whether $w \in \mathcal{L}(\mathcal{H}) \iff w \in \mathcal{L}(\mathcal{M})$.
If false, $w$ is a CE\@.
If no $w$ is false, then we randomly generate another symbolic suffix.
In practice, we bound the number of symbolic suffixes to generate.
\Cref{example:combinationLock} presents a scenario of a combination lock
automaton that can be learned (relatively easily) using a TEO but cannot be handled by normal oracles.
\begin{example}[Combination Lock RA]\label{example:combinationLock}
A combination lock is a type of RA which requires a \emph{sequence} of
specific inputs to `unlock'.
\begin{figure}
\caption{Combination lock automaton \(\mathcal{C}\).}
\label{fig:combinationLock}
\end{figure}
\Cref{fig:combinationLock} presents an RA \(\mathcal{C}\) with a `4-digit'
combination lock that can be unlocked by the
sequence \( w = \alpha(c_0)\alpha(c_1)\alpha(c_2)\alpha(c_3)\), where \(\set{c_0, c_1, c_2, c_3}\) are constants.
Consider a case where a hypothesis \(\mathcal{H}\) is being checked for
equivalence against the RA \(\mathcal{C}\) with \(w \not\in
\mathcal{L}(\mathcal{H})\).
While it would be difficult for a normal equivalence oracle to generate the word
\(w\) randomly; the tainted equivalence oracle will record at every location
the comparison of input data value \(p\) with some constant \(c_i\) and explore
all corresponding guards at the location, eventually constructing the word
\(w\).
For the combination lock automaton, we may note that as the `depth' of the lock increases, the possibility of randomly finding a CE decreases.
$\lrcorner${}
\end{example}
\section{Experimental Evaluation}\label{sec:experimentalEval}
We have used stubbed versions of the Python FIFO-Queue and Set modules\footnote{From Python's
\texttt{queue} module and standard library, respectively.} for learning the FIFO and Set models, while the Combination Lock automata were constructed manually.
Source code for all other models was obtained by translating existing benchmarks from~\cite{Neider2019} (see also \url{automata.cs.ru.nl}) to Python code.
We also utilise a `reset' operation:
A `reset' operation brings an SUT back to its initial state, and is counted as an `input' for our purposes.
Furthermore, each experiment was repeated 30 times with different random seeds.
Each experiment was bounded according to the following constraints:
learning phase: \(10^9\) inputs and \(5 \times 10^7\) resets;
testing phase: \(10^9\) inputs and \(5 \times 10^4\) resets;
length of the longest word during testing: 50; and
a ten-minute timeout for the learner to respond.
\Cref{fig:benchmarkPlots} gives an overview of our experimental results.
We use the notation `TTO' to represent `Tainted Tree Oracle' (with similar labels for the other
oracles).
In the figure, we can see that as the size of the container increases, the
difference between the fully tainted version (TTO+TEO, in
blue) and the completely untainted version (NTO+NEO, in red) increases.
In the case where only a tainted tree oracle is used (TTO+NEO, in green), we see
that it is following the fully tainted version closely (for the FIFO models) and
is slightly better in the case of the SET models.
\begin{figure}
\caption{Benchmark plots: the number of symbols used with tainted
oracles (blue and green) is generally \emph{lower} than with the untainted oracles (red).}
\label{fig:benchmarkPlots}
\end{figure}
The addition of the TEO gives
a conclusive advantage for the Combination Lock and Repetition benchmarks.
The addition of the TTO by itself results in
significantly fewer number of symbols, even without the tainted equivalence
oracle (TTO v/s NTO, compare the green and red lines).
With the exception of the Combination Lock and Repetition benchmarks,
the TTO+TEO combination does not provide vastly better results in comparison to
the TTO+NEO results, however, it is still (slightly) better.
We note that --- as expected --- the NEO does not manage to provide CEs for the
Repetition and Combination Lock automata.
The TEO is therefore much more useful for finding CEs in SUTs which utilise
constants.
\iflong
For complete details of the data used to produce the plots, please refer to
Appendix~\ref{appendix:benchmarkTable}.
\fi
\section{Conclusions and Future Work}\label{sec:conclusion}
In this article, we have presented an integration of
dynamic taint analysis, a white-box technique for tracing data flow,
and register automata learning, a black-box technique for
inferring behavioral models of components. The combination
of the two methods improves upon the state-of-the-art in
terms of class of systems for which models can be generated
and in terms of performance:
Tainting makes it possible to infer data-flow constraints even
in instances with a high essential complexity (e.g., in the
case of so-called combination locks).
Our implementation outperforms pure black-box learning by
two orders of magnitude with a growing impact in the
presence of multiple data parameters and registers.
Both improvements are important steps towards the
applicability of model learning in practice
as they will help scaling to industrial use cases.
At the same time our evaluation shows the need for further
improvements:
Currently, the $\mathit{SL}^*$ algorithm uses symbolic decision trees and tree
queries globally, a well-understood weakness of learning algorithms
that are based on observation tables. It also uses individual
tree oracles for each type of operation and relies on
syntactic equivalence of decision trees. A more advanced learning
algorithm for extended finite state machines will
be able to consume fewer tree queries and leverage semantic
equivalence of decision trees. Deeper integration with
white-box techniques could enable the analysis of many
(and more involved) operations on data values.
\paragraph{Acknowledgement}
We are grateful to Andreas Zeller for explaining the use of tainting for dynamic tracking of constraints, and to Rahul Gopinath for helping us with his library for tainting Python programs.
We also thank the anonymous reviewers for their suggestions.
\iflong
\else
\newcommand{\SortNoop}[1]{}
\fi
\iflong
\begin{subappendices}
\renewcommand{\thesection}{\Alph{section}}
\section{Tree Oracle for Equalities}\label{appendix:treeOracleEquality}
In this appendix, we prove that the tainted tree oracle generates
SDTs which are isomorphic to the SDTs generated by the normal tree oracle as defined
in~\cite{Cassel2016}.
In order to do so, we first introduce the constructs used by~\citet{Cassel2016}
for generating SDTs.
We begin with some preliminaries:
For a word \(u\) with \(\mathit{Vals}(u) = d_1 \ldots d_k\), we define a \emph{potential} of \(u\).
The potential of \(u\), written as \(\mathit{pot}(u)\),
is the set of indices \(i \in \set{1, \ldots, k}\) for which
there exists no \(j \in \set{1, \ldots, k}\) such that \(j > i\) and \(d_i = d_j\).
The concept of potential essentially allows unique access to a data value,
abstracting away from the concrete position of a data value
in a word.
For a guard $g$ defined over $\mathcal{V}^{+}$ for a word $u$ with $\mathit{Vals}(u) = d_{1}, \ldots d_{k}$,
a representative data value $d^{g}_{u}$
is a data value s.t. $\nu(u) \cup \set{[p \mapsto d_{u}^{g}]} \vDash g$.
Furthermore, for a word $w = \alpha \cdot w'$ (where $w'$ may be $\epsilon$)
, $w'$ can be represented as $\alpha^{\textnormal{-}1} w$.
The same notation is also extended to sets of words: $\alpha^{\textnormal{-}1} V = \set{\alpha^{\textnormal{-}1} w \mid w \in V}$.
We may now define an SDT\@:
\begin{definition}[Symbolic Decision Tree]\label{def:SDT}
A Symbolic Decision Tree (SDT) is a register automaton \(\mathcal{T} = (L,l_0, \mathcal{X}, \Gamma, \lambda)\) where \(L\) and \(\Gamma \) form a tree rooted at \(l_0\).
\end{definition}
For location \(l\) of SDT \(\mathcal{T} \), we write \(\mathcal{T}[l]\) to denote the subtree of \(\mathcal{T} \) rooted at \(l\).
An SDT that results from a tree query \( (u, w) \) (of a prefix word $u$ and a symbolic suffix $w$),
is required to satisfy some canonical form,
captured by the following definition.
\begin{definition}[\((u,w)\)-tree]\label{def:uVTree}
For any data word $u$ with $k$ actions and any symbolic suffix $w$, a \((u,w)\)-tree is an SDT $\mathcal{T}$ which has runs
over all data words in $\dbracket{w}$, and which satisfies the following
restriction: whenever \( \tuple{l, \alpha(p), g, \pi, l' } \) is the
\(j^{\mathit{th}}\) transition on some path from $l_0$, then for each
\( x_i \in \mathcal{X}(l') \) we have either \emph{(i)} \( i < k + j
\) and \( \pi (x_i) = x_i\), or \emph{(ii)} \( i = k + j \) and \( \pi (x_i) =
p \).
\end{definition}
If $u = \alpha(d_1) \cdots \alpha_k(d_k)$ is a data word then $\nu_u$ is the valuation of $\set{x_1,\ldots,x_k}$ satisfying $\nu_u(x_i) = d_i$, for $1 \leq i \leq k$. Using this definition, the notion of a \emph{tree oracle},
which accepts tree queries and returns SDTs, can be described as follows.
\begin{definition}[Tree Oracle]\label{def:treeOracle}
A tree oracle for a structure $\mathcal{S} $ is a function $\mathcal{O}$ which,
for a data language $ \mathcal{L} $, prefix word $u$ and symbolic
suffix $w$ returns a \( ( u, w ) \)-tree \( \mathcal{O} (\mathcal{L}, u, w)\) s.t. for any
word \( v \in \dbracket{w}\), the following holds: $v$ is accepted by \(
\mathcal{O}(\mathcal{L}, u, w)\) under $\nu_u$ iff \( u \cdot v \in \mathcal{L} \).
\end{definition}
A tree oracle returns \emph{equality trees}, defined below:
\begin{definition}[Equality Tree]\label{def:equalityTree}
An \emph{equality tree} for a tree query $(u, V)$ is a $(u,V)$-tree $\mathcal{T}$ such that:
\begin{itemize}
\item for each action $\alpha$, there is a potential set $I \subseteq
\mathit{pot}(u)$ of indices such that the initial $\alpha$-guards consist
of the equalities of form $p = x_i$ for $i \in I$ and one disequality of
form $\wedge_{i \in I } p \neq x_i$, and
\item for each initial transition $\tuple{l_0, \alpha(p), g, l}$ of
$\mathcal{T}$, the tree $\mathcal{T}[l]$ is an equality tree for $(u \alpha(d^g_u),
\alpha^{\textnormal{-}1} V)$.
\end{itemize}
\end{definition}
\citet{Cassel2016} require their (equality trees) SDTs to be \emph{minimal} (called \emph{maximally abstract} in~\cite{Cassel2016}), i.e., the
SDTs must not contain any redundancies (such as~\Cref{fig:nonMinimalSDT}).
This can be achieved by checking if two sub-trees are equal under some relabelling, and the process of constructing a tree by relabelling an equality sub-tree is called
\emph{specialisation of equality tree}:
\begin{definition}[Specialisation of equality tree]\label{def:treeOracleSpecialisation}
Let \( \mathcal{T} \) be an equality tree for prefix \(u\) and set of symbolic suffixes \(V\), and let \(J \subseteq pot(u)\) be a set of indices.
Then \(\mathcal{T} \tuple{J}\) denotes the equality tree for \((u,V)\) obtained from \(\mathcal{T} \) by performing the following transformations for each \(\alpha \):
\begin{itemize}
\item Whenever \(\mathcal{T} \) has several initial \(\alpha \)-transitions of
form \(\tuple{ l_0, \alpha(p), (p=x_j), l_j }\) with \(j \in J\), then
all subtrees of form \((\mathcal{T} [l_j]) \tuple{J [(k+1) \mapsto j]}\) for \(j \in J\) must be defined and isomorphic, otherwise \(\mathcal{T} \tuple{J}\) is undefined.
If all such subtrees are defined and isomorphic, then \(\mathcal{T} \tuple{J}\) is obtained from \(\mathcal{T} \) by
\begin{enumerate}
\item replacing all initial \(\alpha \)-transitions of form \(\tuple{l_0, \alpha(p), (p=x_j), l_j}\) for \(j \in J\) by the single transition \(\tuple{l_0, \alpha(p), (p=x_m),l_m}\) where \(m = \max(J)\),
\item replacing \(\mathcal{T} [l_m]\) by \((\mathcal{T} [l_m])
\tuple{J[(k+1) \mapsto m]}\), and
\item replacing all other subtrees \(\mathcal{T} [l']\) reached by initial \(\alpha \)-transitions (which have not been replaced in Step \(1\)) by \((\mathcal{T} [l']) \tuple{J}\).
\end{enumerate}
\end{itemize}
\end{definition}
If, for some \(\alpha \), any of the subtrees generated in Step \(2\) or \(3\) are undefined, then \(\mathcal{T} \tuple{J}\) is also undefined, otherwise \(\mathcal{T} \tuple{J}\) is obtained after performing Steps \(1-3\) for each \(\alpha \).
\begin{definition}[Necessary Potential set for Tree Oracle]\label{def:necessaryPotSet}
A necessary potential set \(I\) for the root location \(l_0\) of an equality
tree \(\mathcal{O}(\mathcal{L}, u,V)\) is a subset of \(\mathit{pot}(u)\) such that for each index \(i \in I\) the following holds:
\begin{enumerate}
\item \(\mathcal{O}(\mathcal{L}, u\alpha(d^0_u), V_\alpha) \tuple{\set{i, k+1}}\) is undefined, or
\item \(\mathcal{O}(\mathcal{L}, u\alpha(d_u^0), V_\alpha) \tuple{\set{i, k+1}}
\not\simeq\mathcal{O}(\mathcal{L}, u\alpha(d_i), V_\alpha)\).
\end{enumerate}
\end{definition}
Intuitively, a necessary potential set contains indices of data values which influence future behaviour of the SUT\@.
Consequently, indices of data values which do not influence the behaviour of the SUT are excluded from the necessary potential set.
We are now ready to define the tree oracle for equality:
\begin{definition}[Tree oracle for equality]\label{def:treeOracleEquality}
For a language \(\mathcal{L}\), a prefix \(u\), and the set of symbolic suffixes
\(V\), the equality tree \(\mathcal{O}(\mathcal{L}, u,V)\) is constructed as follows:
\begin{itemize}
\item If \(V = \set{\epsilon}\), then \(\mathcal{O}(\mathcal{L}, u,\set{\epsilon})\) is the trivial tree with one location \(l_0\) and no registers.
It is accepting if the word is accepted, i.e., \(\lambda(l_0) = +\) if \(u \in \mathcal{L} \), else \(\lambda(l_0) = -\).
To determine \(u \in \mathcal{L} \), the tree oracle performs a membership query on \(u\).
\item If \( V \neq \set{\epsilon}\), then for each \( \alpha \) such that \(V_\alpha = \alpha^{\textnormal{-}1} V\) is non-empty,
\begin{itemize}
\item let \(I\) be the necessary potential set (\Cref{def:necessaryPotSet}),
\item \( \mathcal{O}(\mathcal{L}, u,V)\) is constructed as
\(\mathcal{O}(\mathcal{L}, u,V) = (L, l_0, \Gamma, \lambda) \), where,
letting\\ \(\mathcal{O}(\mathcal{L}, u\alpha(d_i), V_\alpha) \) be the tuple \( (L_i^\alpha, l_{0i}^\alpha, \Gamma_{i}^\alpha, \lambda_i^\alpha) \) for \( i \in (I \cup \set{0}) \),
\begin{itemize}
\item \(L\) is the disjoint union of all \(L_i^\alpha \) plus an additional initial location \( l_0\),
\item \( \Gamma \) is the union of all \( \Gamma_i^\alpha \) for \( i \in (I \cup \set{0})\), and in addition the transitions of form \(\tuple{l_0, \alpha(p), g_i, l_{0i}^\alpha}\) with \(i \in (I \cup \set{0})\), where \(g_i\) is \( \bigwedge_{j \in I} p \neq x_j \) for \( i = 0 \), and \(g_i\) is \(p = x_i\) for \(i \neq 0\), and
\item \(\lambda \) agrees with each \(\lambda_i^\alpha \) on \(L_i^\alpha \).
Moreover, if \(\epsilon \in V\), then \(\lambda(l_0) = + \) if \(u \in \mathcal{L} \), otherwise \(\lambda(l_0)=-\).
Again, to determine whether \(u \in \mathcal{L} \), the tree oracle performs a membership query for \(u\).
\end{itemize}
\end{itemize}
\end{itemize}
\end{definition}
\noindent
Intuitively, \(\mathcal{O}(\mathcal{L}, u,V)\) is constructed by joining the trees
\(\mathcal{O}(\mathcal{L}, u\alpha(d_i), V_\alpha)\) with guard \(p = x_i\) for \(i \in
I\), and the tree \(\mathcal{O}(\mathcal{L}, u\alpha(d_u^0), V_\alpha)\) with guard \(\bigwedge_{i \in I} p \neq x_i\), as children of a new root.
Note, while $V$ is a set of symbolic suffixes, RALib technically
handles tree queries sequentially, i.e., as sequential tree queries of prefix $u$ and symbolic suffix $w$.
Consequently, we treat the set of symbolic suffixes $V$ as a singleton,
referred to as `$w$'.
\(\mathcal{O}(\mathcal{L}, u,w)\) is constructed bottom-up, recursively building new `roots' at the top with larger and larger symbolic suffixes (and consequently, shorter and shorter prefixes).
The choice of the necessary potential set \(I\) plays a crucial role: if \(I\)
is larger than necessary, \(\mathcal{O}(\mathcal{L}, u,w)\) contains redundant guards (and is
hence a `non-minimal' SDT).
We now have a clear goal for our proof: we must show that the SDT returned
by~\Cref{alg:minimalSDT} is isomorphic to the SDT returned by the tree oracle for equality as defined
in~\Cref{def:treeOracleEquality} (under the assumption that the `set' of symbolic
suffixes $V$ is a singleton).
We can divide our proof into the following steps:
\begin{enumerate}
\item We show that~\Cref{alg:decisionQuery} produces a characteristic predicate
for tree query $(u,w)$, and contains all the information for constructing an
equality tree,
\item Next, we show that~\Cref{alg:fullSDT} guarantees that for potential
set $I_t$ of a location $l_t$ of the tainted equality tree $\mathcal{T}_t$, the
potential set $I$ of equivalent location $l$ of the normal equality tree
$\mathcal{T}$ is a subset of $I_{t}$: $I \subseteq I_t$, and finally,
\item We can then make the tainted potential set equal to the
normal potential set (using~\Cref{alg:minimalSDT}), and the resulting tainted equality tree
will be isomorphic to the normal equality tree.
\end{enumerate}
Each of the above steps corresponds to one of our algorithms.
We now begin with step 1: from~\Cref{alg:decisionQuery}, we can state the
following lemmas:
\begin{lemma}[Characteristic Predicate]\label{lemma:characteristicPredicate}
For a tree query $(u,w)$, \Cref{alg:decisionQuery} always produces a
characteristic predicate $H$.
\end{lemma}
\begin{proof}
We recall that, under the test hypothesis, an SUT $\mathscr{M}$ is deterministic and
has a finite number of logically disjoint branches to be followed from
each state.
\Cref{alg:decisionQuery} initialises two variables $G := \top$ and $H := \bot$.
For each word $z = u \cdot w$ under a valuation $\nu \vDash G$, we may
perform a membership query on $\mathscr{M}$.
Each query returns the guard $I = \wedge_{i=k+1}^{k+n} \mathit{constraints}_{\mathscr{M}}(z)[i]$
such that $\nu \vDash I$ and the acceptance of the word $z$ in the language
of $\mathscr{M}$, i.e., $z \in \mathscr{M}$.
For each iteration of the do-while loop, the variable $G$ is updated with the
negation of the previously-satisfied guard $I$, i.e., $G := G \wedge \neg I$.
This guarantees that any new valuation $\nu'$ will not satisfy $I$, and hence,
the next iteration of the do-while loop shall induce a different run of $\mathscr{M}$.
Given that $\mathscr{M}$ only has a finite number of logical branches,
\Cref{alg:decisionQuery} terminates.
We also know that for each tainted word $z$, we obtain the acceptance of $z \in L(\mathscr{M})$.
If $z \in L(\mathscr{M})$, the variable $H$ is updated to $H \vee I$.
Therefore, the predicate $H$ returned by \Cref{alg:decisionQuery} is the
characteristic predicate for the tree query $(u,w)$.
\qed{}
\end{proof}
After constructing the characteristic predicate, we convert it to a non-minimal SDT
using \Cref{alg:fullSDT}, providing us with the following lemma:
\begin{lemma}[Non-minimal SDT]\label{lemma:nonMinimalSDT}
For any location \(l_t \) of a non-minimal SDT with an equivalent location
\(l \) of a minimal SDT, the necessary potential set \(I_t \) of the
non-minimal SDT is a superset of the necessary potential set \(I \) of the
minimal SDT\@: \(I \subseteq I_t \subseteq \mathit{pot}(u) \) where
\(\mathit{pot}(u) \) is the potential of the prefix \(u\) of locations $l_t$
and $l$.
\end{lemma}
\begin{proof}
We know that \(I \subseteq \mathit{pot}(u) \) by definition of the necessary potential set.
For any word $w = u \cdot v$ where the prefix $u$ leads to location $l_t$ of
the tainted non-minimal SDT,
\Cref{alg:fullSDT} guarantees that the
suffixes of $u$ will be classified correctly.
If the suffixes are classified correctly, we derive that $I_t \supseteq I$
(otherwise the suffixes will not be classified correctly).
Since \(I_t \supseteq I\) and \(I, I_t \subseteq \mathit{pot} (u)\), we
conclude \(I \subseteq I_t \subseteq \mathit{pot} (u)\).
\qed{}
\end{proof}
Following~\Cref{lemma:nonMinimalSDT}, if we wish to make \(I = I_t \), we can
simply remove all elements from \(I_t \) which do not satisfy the conditions
outlined in~\Cref{def:necessaryPotSet}.
Since we already know that \(I \subseteq I_t \), we can confirm that after
removal of all irrelevant parameters, \(I = I_t \).
\Cref{alg:minimalSDT} accomplishes the same.
\citet{Cassel2016} use the concept of representative data values for
constructing the SDT, while we treat the values symbolically: a representative
data value `represents' the set of data values that satisfy a guard during
construction of the SDT; in our case, we simply let \texttt{Z3} decide on all
the values to use for our membership queries and obtain the guards about them
using their taint markers as identifiers.
\begin{theorem}[Isomorphism of tree oracles]\label{theorem:isoTreeOracles}
The SDTs generated by the tainted tree oracle and the untainted tree oracle
for a tree query $(u,w)$ are isomorphic.
\end{theorem}
\begin{proof}
\Cref{lemma:characteristicPredicate} guarantees that \Cref{alg:decisionQuery}
returns a characteristic predicate $H$ for the tree query $(u,w)$.
Application of \Cref{alg:fullSDT} on $H$ constructs a non-minimal SDT\@.
Using~\Cref{lemma:nonMinimalSDT} and~\Cref{alg:minimalSDT} on the
non-minimal SDT, we can conclude that the root locations of the tainted tree
oracle and normal tree oracle have the same necessary potential set.
By inductive reasoning on the depth of the trees, the same holds for all
sub-trees of both oracles, eventually reducing to the leaves,
showing that the tainted tree oracle is isomorphic to the normal tree oracle.
\qed{}
\end{proof}
\section{Detailed Benchmark results}\label{appendix:benchmarkTable}
\Cref{tbl:benchmarks} contains the full results of the values used to create the
plots from~\Cref{fig:benchmarkPlots}.
\begin{small}
\input{figures/combinedTexPython.tex}
\end{small}
\end{subappendices}
\fi
\end{document}
|
\begin{document}
\title{Unified Gas-kinetic Wave-Particle methods I: Continuum and Rarefied Gas Flow }
\author[ad1]{Chang Liu}
\ead{[email protected]}
\author[ad3]{Yajun Zhu}
\ead{[email protected]}
\author[ad1,ad2]{Kun Xu\corref{cor1}}
\ead{[email protected]}
\address[ad1]{Department of Mathematics, Hong Kong University of Science and Technology, Hong Kong}
\address[ad2]{Department of Mechanical and Aerospace Engineering, Hong Kong University of Science and Technology, Hong Kong, China}
\address[ad3]{National Key Laboratory of Science and Technology on Aerodynamic Design and Research, Northwestern Polytechnical University, Xi'an, Shaanxi 710072, China}
\cortext[cor1]{Corresponding author}
\begin{abstract}
The unified gas-kinetic scheme (UGKS) provides a framework for simulating multiscale transport with
the updates of both gas distribution function and macroscopic flow variables on the
cell size and time step scales.
The multiscale dynamics in UGKS is achieved through the coupled particle transport and collision in the particle
evolution process within a time step.
In this paper, under the UGKS framework, we propose an efficient multiscale unified gas-kinetic wave-particle (UGKWP) method.
The gas dynamics in UGKWP method is described by the individual particle movement coupled with the evolution of the probability density function (PDF).
During a time step, the trajectories of simulation particles are tracked until collision happens,
and the post-collision particles are evolved collectively through the evolution of the corresponding distribution function.
The evolution of simulation particles and distribution function is guided by evolution of macroscopic variables.
The two descriptions on a gas particle, i.e. wave and particle, switch dynamically with time.
A new concept of multiscale multi-efficiency preserving (MMP) method is introduced, and the UGKWP method is shown to be an MMP scheme.
Multiscale preserving means UGKWP method preserves the flow regime from collisionless regime to hydrodynamic regime without requiring the cell size and time step to be less than the mean free path and collision time.
Multi-efficiency preserving means the computational cost of UGKWP method including the computational time and memory cost is on the same level as the particle methods in the rarefied regime, and becomes comparable to the hydrodynamic solvers in continuum regime.
The UGKWP method is specially efficient for hypersonic flow simulation in all regimes in comparison with the wave-type discrete ordinate methods,
and presents a much lower stochastic noise in the continuum flow regime in comparison with the particle-based Monte Carlo methods.
Numerical tests for flows over a wide range of Mach and Knudsen numbers are presented. The examples include mainly the hypersonic flow passing a circular cylinder at Mach numbers $20$ and $30$ and Knudsen numbers $1$ and $10^{-4}$, low speed lid-driven cavity flow, and laminar boundary layer.
These results validate the accuracy, efficiency, and multiscale property of UGKWP method.
\end{abstract}
\begin{keyword}
Unified gas-kinetic scheme, Multiscale transport, Wave-Particle formulation, Non-equilibrium flow.
\end{keyword}
\maketitle
\section{Introduction}
The Boltzmann equation is a fundamental equation for gas dynamics, which resolves the particle mean free path and collision time scale gas flow physics.
Theoretically, from the Boltzmann equation the flow physics in all Knudsen regimes can be captured through the accumulation of
molecular dynamic evolution on the kinetic scale.
The numerical methods for solving kinetic Boltzmann equations can be categorized into two groups: the stochastic methods and the deterministic methods.
For stochastic methods, the evolution of velocity distribution function is represented by the motion of simulation particles.
Such kind of Lagrangian-type schemes achieve high computational efficiency in rarefied and hypersonic flow simulation.
The real gas effects, such as quantum effect, ionization, and chemical reaction, can be properly handled on the particle level.
Moreover, the particle methods are robust and not sensitive to mesh quality.
However, the particle methods suffer from statistical noise that greatly reduces the computational efficiency for low speed flow simulation.
At the same time, the cell size and time step of conventional particle methods are usually restricted to be less than the mean free path and collision time
due to the splitting treatment of particle transport and collision.
Therefore, in the near continuum regime, with the increase of collision rate, the computational cost will become very high.
The deterministic methods are constructed on a discretized space and time.
Compared to the stochastic method, the deterministic method usually achieves high accuracy.
However, it suffers the ray effect for a relative long time simulation of rarefied flow,
and the computational cost is high for the calculation of hypersonic and multidimensional flow due to the direct discretization of particle velocity space.
Similar to the stochastic methods, many deterministic methods also have the constraints on the cell size and time step for an accurate description of flow physics.
The direct simulation Monte Carlo (DSMC) method is one of the most popular methods for the simulation of rarefied hypersonic flow \cite{bird1963approach},
which effectively captures the real molecular physics through the separate modeling of particle transport and collision \cite{bird1994molecular}.
Suffering from stochastic noise, DSMC has low efficiency in low speed flow simulation like all particle methods.
Progress has been made to reduce the noise, improve the efficiency, and extend its applicable regimes.
To deal with the low-signal flow, the information preserving (IP) method \cite{shen2001information,sun2002direct},
as well as an efficient low variance DSMC method \cite{baker2005variance,homolle2007low} has also been developed.
Since the cell size and time step are restricted to be less than the mean free path and collision time, DSMC is highly expensive in the simulation of continuum flow.
In order to extend DSMC to the continuum flow simulation, the asymptotic preserving Monte Carlo methods \cite{pareschi2000asymptotic,ren2014asymptotic},
moment-guided Monte Carlo method \cite{degond2011moment}, low diffusion particle method \cite{burt2008low}, as well as hybrid methods between DSMC and CFD methods \cite{schwartzentruber2006hybrid} have been constructed.
The stochastic methods based on the kinetic model equations, for example the BGK/ES-BGK equation \cite{macrossan2001particle,fei2018particle} and the kinetic Fokker-Planck equation \cite{jenny2010solution} have been proposed to reduce the computational cost of DSMC.
Among those stochastic kinetic methods, the particle Fokker-Planck method \cite{jenny2010solution} and the stochastic BGK method \cite{fei2018particle} are applicable over different flow regimes.
The deterministic discrete ordinate methods (DOM) for Boltzmann and kinetic equations have been extensively studied in the last several decades \cite{chu1965,JCHuang1995,Mieussens2000,tcheremissine2005direct,Kolobov2007,LiZhiHui2009,ugks2010,wu2015fast,aristov2012direct},
which have great advantages for the simulation of low speed microflow \cite{huang2013,wu2014solving}.
In order to improve the efficiency and remove time step limitations of kinetic methods, asymptotic preserving schemes \cite{jin1999efficient}, implicit schemes \cite{chen2017unified}, and kinetic-fluid hybrid methods \cite{degond2010multiscale} have been proposed and developed.
Following the direct modeling methodology \cite{xu-book}, an effective multiscale unified gas-kinetic scheme (UGKS) has been
proposed and developed \cite{ugks2010,huang2012,liu2016}.
In the construction of UGKS, the particle transport and collision are closely coupled for both flux transport and inner cell evolution,
and the scheme is effectively applicable from rarefied to continuum flows.
In the continuum flow regime, the UGKS is able to accurately capture the Navier-Stokes (NS) solutions, such as capturing the laminar boundary layer,
even with the cell size being much larger than the local particle mean free path and the time step being larger than the local collision time.
In order to improve the computational efficiency, the implicit and multigrid techniques have been incorporated into the UGKS \cite{zhu2016implicit,zhu2017implicit,zhu2018implicit}, and the scheme becomes highly efficient and accurate for flow simulations over a wide range of Knudsen and Mach numbers.
For example, for a space vehicle computation at hypersonic speed in the near space flight, the local Knudsen number around the
flying vehicle can be changed greatly over a fifth-order magnitude and the UGKS can present accurate solution with a large variation of the
ratio between the local cell size and local particle mean free path \cite{jiang}.
The UGKS has also been successfully extended to radiative transfer \cite{sun2015asymptotic,sun2017,sun2018,li2018ugkp}, plasma transport \cite{liu2017}, and multiphase flow \cite{liu2018unified}.
The UGKS framework plays an important role in the construction of unified gas-kinetic wave-particle (UGKWP) method.
Therefore a brief review of UGKS is given here, and readers can refer to \cite{wang2015unified} for detailed formulae.
The UGKS is a finite volume scheme for the update of both gas distribution function $f(\vec{x},t,\vec{v})$ and macroscopic flow variables $\vec{W}$ in physical and velocity space.
For UGKS, the time step is only limited by the CFL condition,
and the cell size and time step are not restricted to be less than the mean free path and collision time to accurately capture the flow physics.
The modeled dynamics depends on the ratios of the time step over the particle collision time and the cell size over the particle mean free path.
For a physical control volume $\Omega_i$ and velocity control volume $\Omega_j$,
the evolution equation of cell averaged gas distribution function $f_{ij}$ in the control volume $\Omega_{ij}=\Omega_i\bigcap\Omega_j$
from time step $t^n$ to $t^{n+1}$ is modeled as
\begin{equation} \label{kinetic-f} \footnotesize
f_{ij}^{n+1}=
f_{ij}^n-\frac{1}{|\Omega_{ij}|}\int_{t^n}^{t^{n+1}}\oint_{\partial \Omega_i}
\vec{v_j}\cdot \vec{n} f_{\partial \Omega_{ij}}(t,\vec{v}_j) ds dt
+\frac{\Delta t}{2} (Q^{n}_{ij} + Q^{n+1}_{ij}),
\end{equation}
where $\vec{v}_j$ is the particle velocity,
$f_{\partial \Omega_{ij}}(t,\vec{v}_j)$ is time-dependent solution at a cell interface $\partial\Omega_i$ for the flux evaluation,
and $Q_{ij}$ is cell averaged collision term.
The above evolution equation for the velocity distribution function is coupled with the evolution of cell averaged macroscopic flow variables $\vec{W}_{i}$,
\begin{equation}\label{kinetic-m}
\vec{W}_i^{n+1}=\vec{W}_i^{n}-\frac{1}{|\Omega_i|}\int^{t^{n+1}}_{t^n}\int\oint_{\partial \Omega_i}
\psi \vec{v}\cdot \vec{n} f_{\partial \Omega_i}(t,\vec{v}) ds d\Xi dt,
\end{equation}
where $d\Xi = dudvdwd\vec{\xi}$, with $\vec{v}=(u,v,w)$ the particle velocity and $\vec{\xi}$ the internal variable.
In order to close the above two discretized governing equations and capture multiscale gas evolution,
the time-dependent interface gas distribution function $f_{\partial \Omega_i}$
and the cell averaged collision term $Q_{ij}$ need to be modeled.
Since the time step $\Delta t=t^{n+1} - t^n$ can be large in comparison with the particle collision time $\tau$, the particle transport and collision effect have
to be included in the modeling of the interface evolution of the distribution function.
One of the important ingredients in UGKS is to use a time-dependent interface distribution function,
i.e. the integral equation of the kinetic Shakhov model \cite{shakhov1968generalization}, which reads
\begin{equation}\label{integral-solution-ugks}
f_{\partial \Omega_i}(t,\vec{v})=\frac{1}{\tau}\int_{t^n}^{t} f^+(\vec{x}^\prime,t^\prime,\vec{v})\mathrm{e}^{-(t-t^\prime)/\tau}dt^\prime+
\mathrm{e}^{-t/\tau}f_0(\vec{x}_{\partial \Omega_i}-\vec{v}t,\vec{v}),
\end{equation}
where $\tau$ is the local relaxation parameter, $f^+$ is post collision distribution function, $\vec{x}'=\vec{x}_{\partial \Omega_i}-\vec{v}(t-t')$ is the particle trajectory,
and $f_0$ is the distribution function at time $t^n$.
The above integral equation explicitly provides the solution of the velocity distribution at a cell interface
once the initial distribution is given and the post-collision distribution is constructed.
And this time evolution solution couples the particle free transport and collision.
When the time step $\Delta t$ is used in the time evolution solution,
it determines the flow dynamics from the initial distribution to equilibrium evolution, and provides a multiscale flux construction for UGKS.
Based on above UGKS formulation, in the continuum flow regime with $\Delta t \gg \tau$, the integral solution converges to the
Chapman-Enskog type Navier-Stokes distribution function, and the UGKS will automatically give the Navier-Stokes solution, which is the same
solution obtained from the gas-kinetic scheme (GKS) \cite{xu2001gas}.
The GKS is designed for solving NS equations where the Chapman-Enskog expansion is directly used to reconstruct the initial cell interface distribution function $f_0$
from macroscopic flow variables.
One important concept in theoretical and computational fluid dynamics is the multiscale modeling.
In the field of theoretical fluid dynamics, multiscale equation refers to the equation which can recover multiple physical scale flow phenomena.
The concept is trivial because the equations constructed on a specific physical scale always hold for the scales above,
and the multiscale research in theoretical fluid dynamics focuses on deriving concise equations on large scale.
In the field of computational fluid dynamics,
a multiscale scheme, or multiple physical-numerical scale scheme, means that the numerical scheme can capture multiple physical flow phenomena on variable numerical scales.
Different from the theoretical fluid dynamics, the numerical scheme constructed on a small scale may not be valid on large scale.
For example, the DOM-based direct Boltzmann solver is constructed on the scale of mean free path, and its numerical valid scale is fixed to be the mean free path scale.
Even if the direct Boltzmann solver is used to simulate the continuum flow, the cell size is required to be smaller than the local mean free path \cite{chen2015comparative}.
Due to the coupled treatment of particle transport and collision,
the UGKS is a multiscale scheme that captures the flow physics from the rarefied regime to the continuum regime without the small cell size limitation.
Analogous to quantum mechanics, the gas particle can be described in terms of not only particles, but also waves or its probability density function.
At current stage, most numerical schemes use either particle description or wave description, for example the particle description based Monte Carlo methods and the wave description based discrete ordinate methods.
The particle methods suffer from statistical noise and the discrete ordinate methods suffer from a large computational cost, especially for the simulation of hypersonic flow.
A wave-particle formulation is proposed in this paper, and the framework can be applied to simulate many other transport processes.
The purpose of this paper is to apply the wave-particle formulation to solve the equations of UGKS.
In unified gas-kinetic wave-particle method, the gas particles are divided into the hydro-particles, collisional particles, and collisionless particles,
and the definition of three particles will be given in Section \ref{method}.
The hydro-particles are described by the probability density function, while the collisional and collisionless particles are described by the simulation particles.
The integral equation Eq.\eqref{integral-solution-ugks} is solved through the simulation particles, which is coupled with the evolution of macroscopic variables.
The update of macroscopic flow variables is the same as Eq.\eqref{kinetic-m}, but partially uses the simulation particles to evaluate the interface flux.
Due to the Lagrangian formulation through particle transport to get the solution of gas distribution function in Eq.\eqref{integral-solution-ugks} directly inside each cell, the Eulerian formulation for the separate update of gas distribution function in Eq.\eqref{kinetic-f} within each cell is not necessary anymore.
One of the distinguishable points in UGKWP method is that the particles are divided into hydro-particles, collisional particles, and collisionless particles.
The dynamics of hydro-particles can be described analytically and its computation is very cost-effective.
The proportion of three kinds of particles varies dynamically in different flow regimes.
Physically, the collisionless particles are mainly used for the description of non-equilibrium transport dynamics and the hydro-particles for the equilibrium one.
The three kinds of particles have a circular relation from collisionless to collisional to hydro-particles and then go back to collisionless particles.
In the continuum flow regime, the number of collisional and collisionless particles will be greatly reduced and
the UGKWP method will automatically converge to the GKS with the same amount of computational cost.
In other words, the numerical flux in Eq.\eqref{kinetic-m} converges to GKS flux in the hydrodynamic regime.
For the simulation of hypersonic flow, the UGKWP method will be much more efficient than the original UGKS due to the use of simulation particles,
which has a natural adaptation in particle velocity space.
The computational cost of UGKWP method is similar to the particle methods in rarefied regime and reduces to the hydrodynamic solver in continuum regime,
and such property is summarized as the multi-efficiency preserving scheme in Section \ref{discussion}.
The rest of the paper is organized as follows.
In Section \ref{method}, in order to understand the UGKWP method we will first introduce a fully particle description based unified gas-kinetic particle (UGKP) method,
and then the wave-particle description based unified gas-kinetic wave-particle method will be proposed.
Both UGKP and UGKWP methods are multiscale methods for all flow regimes, but UGKWP method is more cost-efficient than UGKP method due to the
analytical formulation for the hydro-particles.
The definition of multiscale multi-efficiency method will be introduced in Section \ref{discussion},
as well as the analysis of the asymptotic properties of UGKWP method.
Numerical tests for flows over a wide range of Mach and Knudsen numbers are shown in Section \ref{numericaltest}.
Section \ref{conclusion} is the conclusion.
\section{Unified Gas-kinetic Wave-Particle method}\label{method}
In this section, two novel multiscale numerical schemes will be introduced under UGKS framework,
i.e. the unified gas-kinetic particle method and the unified gas-kinetic wave-particle method.
The UGKP method is a particle based method and its computational cost and statistical noise keep the same level in different flow regimes.
The UGKWP method improves the UGKP method by decomposing the simulation particles into the particle-described collisional
and collisionless particles, and the wave-described hydro-particles which can be described by an analytical distribution function.
Both methods are built on a discretized physical space $\sum_i \Omega_i\subset\mathbb{R}^3$ and discretized time $t^n\in\mathbb{R}^+$.
\subsection{Unified gas-kinetic particle method}\label{method1}
The particle dynamics in UGKP method is constructed based on the kinetic BGK equation \cite{BGK1954},
\begin{equation}\label{BGK}
\frac{\partial f}{\partial t}+ \vec{v}\cdot\nabla_{\vec{x}} f = \frac{g-f}{\tau},
\end{equation}
where $f(\vec{x},t,\vec{v})$ is the velocity distribution function of gas particle, $\tau$ is the local relaxation parameter which is determined by $\tau=\mu/p$
with the gas pressure $p$ and dynamic viscosity $\mu$.
The local equilibrium Maxwellian distribution $g(\vec{x},t,\vec{v})$ has the form
\begin{equation}\label{maxwellian}
g(\vec{x},t,\vec{v})=\rho\left(\frac{\lambda}{\pi}\right)^\frac{K+3}{2} \exp\left(-\lambda\left[(\vec{v}-\vec{U})^2+\vec{\xi}^2\right]\right),
\end{equation}
with density $\rho$, velocity $\vec{U}$, internal degree of freedom $K$, and the internal variable $\vec{\xi}$.
The main idea of UGKP method is to track the particle trajectory until the collision happens.
Once the particle collides with other particles, it will be numerically merged into the macroscopic flow quantities,
and get re-sampled from the updated macroscopic flow variables at the beginning of next time step.
The evolution of particles will be given in Section \ref{section211},
which is coupled with the evolution of macroscopic quantities presented in Section \ref{section212}.
\subsubsection{Evolution of particles}\label{section211}
The simulation particle $P_k(m_k,\vec{x}_k,\vec{v}_k,e_k)$ is represented by its weight $m_k$,
position coordinate $\vec{x}_k$, velocity coordinate $\vec{v}_k$, and internal energy $e_k$,
whose evolution follows the integral form of the kinetic BGK equation,
\begin{equation}\label{integral-solution}
f(\vec{x},t,\vec{v})=\frac{1}{\tau}\int_{t^n}^t e^{-(t-t')/\tau} g(\vec{x}',t',\vec{v}) dt'+e^{-t/\tau}f_0(\vec{x}_0,\vec{v}),
\end{equation}
where $f_0$ is the initial distribution function at $t=t^n$, and $g$ is the local equilibrium distribution function.
The equilibrium distribution is integrated along the characteristics $\vec{x}'=\vec{x}+\vec{v}(t'-t)$.
Numerically, the equilibrium distribution function can be expanded as
\begin{equation}
g(\vec{x}',t',\vec{v})=g(\vec{x},t,\vec{v})+\nabla_{\vec{x}} g(\vec{x},t,\vec{v})\cdot(\vec{x}'-\vec{x})+\partial_tg(\vec{x},t,\vec{v})t'+O((\vec{x}'-\vec{x})^2,t'^2),
\end{equation}
following which the integral solution can be expressed as
\begin{equation}\label{particle}
f(\vec{x},t,\vec{v})=(1-e^{-t/\tau}) g^+(\vec{x},t,\vec{v})+e^{-t/\tau}f_0(\vec{x}_0,\vec{v}).
\end{equation}
The first order expansion of $g$ implies
\begin{equation}\label{1st-particle}
g^+(\vec{x},t,\vec{v})=g(\vec{x},t,\vec{v}),
\end{equation}
and the second order expansion gives
\begin{equation}\label{2nd-particle}
g^+(\vec{x},t,\vec{v})=g(\vec{x},t,\vec{v})
+\frac{e^{-t/\tau}(t+\tau)-\tau}{1-e^{-t/\tau}}(\partial_tg(\vec{x},t,\vec{v})+\vec{v}\cdot\nabla_{\vec{x}}g(\vec{x},t,\vec{v})).
\end{equation}
The above $g^+$ is named the hydrodynamic distribution function with analytical formulation.
For UGKP method, the first order expansion of $g$ is used for a simple particle-sampling algorithm \cite{bird1994molecular}.
The particle evolution equation Eq.\eqref{particle} means that the simulation particle has a probability of $e^{-t/\tau}$ to free stream, and has a probability of $(1-e^{-t/\tau})$ to collide with other particles and follow the velocity distribution $g^+(\vec{x},t,\vec{v})$.
The time, when one simulation particle stops free streaming and follows the distribution $g^+$, is defined as its `first collision time' $t_c$.
The cumulative distribution function of the first collision time is
\begin{equation}\label{tc-distribution}
F(t_c<t)=1-\exp(-t/\tau),
\end{equation}
from which $t_c$ can be sampled as $t_c=-\tau\ln(\eta)$ with $\eta$ uniformly distributed on $(0,1)$, i.e. $\eta\sim U(0,1)$.
From a simulation time step $t^{n}$ to $t^{n+1}$, all simulation particles in UGKP method can be categorized into two groups: the `collisionless particle' \index{collisionless particle} $P^f$ and the `collisional particle' \index{collisional particle} $P^c$.
The categorization is based on the relation between the first collision time $t_c$ and the time step $\Delta t$.
More specifically, a collisionless particle is defined as a particle whose first collision time $t_c$ is greater than or equal to the time step $\Delta t$,
and a collisional particle is defined as a particle whose first collision time $t_c$ is smaller than $\Delta t$.
For the collisionless particle, its trajectory is fully tracked during the whole time step.
For collisional particle, the particle trajectory is tracked till $t_c$.
Then the particle's mass, momentum, and energy are merged into the macroscopic quantities in that cell and the simulation particle gets eliminated.
Those eliminated particles will get re-sampled once the updated macroscopic quantities $\vec{W}^{n+1}$ are obtained.
As shown in Eq.\eqref{particle}, the re-sampled particles follow the hydrodynamic distribution $g^+$ and therefore they are defined as `hydro-particle' \index{hydro-particle} $P^h$. The macroscopic quantities corresponding to the hydro-particles are defined as `hydro-quantities' \index{hydro-quantities} $\vec{W}^h$.
The hydro-particles will be sampled at the beginning of each time step and become the candidates for collisionless/collisional particles
again in the next time step evolution according to their newly-sampled $t_c$.
The dynamical circulation of particle-described collisionless particle $P^f$, collisional particle $P^c$, wave-described hydro-particle $P^h$, and macroscopic variables is shown in Fig. \ref{dynamical1}, and the algorithm for the evolution of particles is presented as following.
\begin{figure}
\caption{Dynamical circulation of particle-described collisionless particle $P^f$, collisional particle $P^c$, wave-described hydro-particle $P^h$, and macroscopic variables for UGKP method.}
\label{dynamical1}
\end{figure}
\begin{description}
\item[Step 1] Sample the first collision time $t_c$ for all particles $P_k$. For example, if $P_k\in\Omega_i$, then $t_{c,k}=-\tau_i\ln(\eta)$, where $t_{c,k}$ is the first collision time for $P_k$ and $\tau_i$ is calculated from the cell averaged macroscopic quantities $\vec{W}_i$ in cell $i$,
\begin{equation}
\vec{W}_i\defeq\frac{1}{|\Omega_i|}\int_{\Omega_i} \vec{W} dx;
\end{equation}
\item[Step 2] Stream collisionless particles $P^{f,n}$ to $P^{f,n+1}$ by $\vec{x}_k^{n+1}=\vec{x}_k^n+\vec{v}_k (t^{n+1}-t^n)$;
\item[Step 3] Update the cell averaged variables $\vec{W}^{n+1}_i$ by Eq.\eqref{kinetic-m}, and calculate the hydro-quantities
$\vec{W}_{i}^h=(\rho_i^h,\rho_i^hU_i^h,\rho_i^hV_i^h,\rho_i^hW_i^h,\rho_i^hE_i^h)$ by
\begin{equation}
\vec{W}_i^h=\vec{W}_i^{n+1}-\left(\vec{W}_i^{P^f}\right)^{n+1},
\end{equation}
Here $\left(\vec{W}_i^{P^f}\right)^{n+1}$ is the cell averaged macroscopic quantities from all remaining collisionless particles in cell $i$,
\begin{equation}\label{wf}
\left(\vec{W}_i^{P^f}\right)^{n+1}=\frac{1}{|\Omega_i|}\sum_k \left( m^f_k, m^f_k u^f_k, m^f_k v^f_k,
m^f_k w^f_k,\frac12 m^f_k \left(|\vec{v}^f_k|^2+e^f_k\right)\right)^T,
\end{equation}
where the index $k$ covers all collisionless particles $P_k^f$ in cell $i$.
The detailed formulation of the evolution of macroscopic quantities will be given in next subsection. Note that the calculation of hydro-quantities will also be used in UGKWP method.
\item[Step 4] Sample hydro-particles from a $\vec{W}^{n+1}_i$-based Maxwellian distribution with a total mass of $\Omega_i \rho_i^h$, and the physical coordinates of these hydro-particles are uniformly distributed in cell $i$.
\end{description}
\subsubsection{Evolution of macroscopic flow variables}\label{section212}
The evolution of simulation particles is coupled with the evolution of macroscopic quantities
$\vec{W}=\left(\rho,\rho U,\rho V,\rho W,\rho E\right)^T$, where $\rho$ is density, $\vec{U}=(U,V,W)^T$ is macroscopic velocity, and $E$ is energy per unit mass.
The cell averaged macroscopic variables $\vec{W}_i$ are evolved by the macroscopic governing equation of UGKS
\begin{equation}\label{update-w}
\vec{W}_i^{n+1}=\vec{W}_i^{n}-\frac{1}{|\Omega_i|} \sum_{l_s\in\partial\Omega} |l_s|\vec{F}_{s},
\end{equation}
where the UGKS flux for the macroscopic variables are
\begin{equation}\label{flux-w-ugks}
\vec{F}_{s}=\int_{t^n}^{t^{n+1}}\int\bigg[\frac1\tau\int_0^{t} e^{(t'-t)/\tau}g(\vec{x}'_s,t',\vec{v})dt'+
e^{-t/\tau}f_0(\vec{x}_s-\vec{v}t,\vec{v})\bigg] \vec{v}\cdot \vec{n}_s \vec{\psi} d\Xi dt,
\end{equation}
with the outward normal $\vec{n}_s$ and the characteristics $\vec{x}'_s=\vec{x}_s+\vec{v}(t'-t)$.
The flux terms related to the Maxwellian distribution are denoted as $F_{g,s}$
\begin{equation}\label{Fg}
\vec{F}_{g,s}\defeq\int_{t^n}^{t^{n+1}}\int\frac1\tau\int_0^{t} e^{(t'-t)/\tau}g(\vec{x}'_s,t',\vec{v})dt' \vec{v}\cdot\vec{n}_s\vec{\psi} d\Xi dt,
\end{equation}
and the flux terms related to the initial distribution are $\vec{F}_{f,s}$
\begin{equation}\label{Ff}
\vec{F}_{f,s}\defeq\int_{t^n}^{t^{n+1}}\int e^{-t/\tau} f_0(\vec{x}_s-\vec{v}t,\vec{v}) \vec{v}\cdot \vec{n}_s\vec{\psi} d\Xi dt,
\end{equation}
where $\vec{\psi}$ is the vector of conservative moments
\begin{align*}
\vec{\psi}=\left(1,u,v,w,\frac12(\vec{v}^2+\vec{\xi}^2)\right)^T.
\end{align*}
The calculation of the Maxwellian-related terms are the same as UGKS \cite{ugks2010}.
Assume the interface is located at $\vec{x}_0$ with a local coordinate $(\vec{e}_1,\vec{e}_2,\vec{e}_3)$ and $\vec{e}_1$ is the outward unit normal $\vec{n}_0$.
The Maxwellian distribution is expanded around $\vec{x}_0$ as
\begin{equation}\label{g-expansion1}
\begin{aligned}
g(\vec{x},t,\vec{v})=&g_0(\vec{x}_0,\vec{v})
+(1-H[\bar{x}])\frac{\partial^l}{\partial\vec{e}_1} g_0(\vec{x}_0,\vec{v})\bar{x}
+H[\bar{x}]\frac{\partial^r}{\partial\vec{e}_1} g_0(\vec{x}_0,\vec{v})\bar{x}\\
&+\frac{\partial}{\partial\vec{e}_2}g_0(\vec{x}_0,\vec{v})\bar{y}
+\frac{\partial}{\partial\vec{e}_3}g_0(\vec{x}_0,\vec{v})\bar{z}
+\frac{\partial}{\partial t}g_0(\vec{x}_0,\vec{v})(t-t^n) \\
=&g_0(\vec{x}_0,\vec{v})\left[1+(1-H[\bar{x}])a^l\bar{x}+H[\bar{x}]a^r\bar{x}+b\bar{y}+c\bar{z}+A(t-t^n)\right],
\end{aligned}
\end{equation}
where $g_0(\vec{x}_0,\vec{v})=g(\vec{x}_0,t^n,\vec{v})$, and $\bar{x}=\Delta x\cdot \vec{e}_1$, $\bar{y}=\Delta y\cdot \vec{e}_2$, and $\bar{z}=\Delta z\cdot \vec{e}_3$.
The derivative functions of Maxwellian distribution $a^l$, $a^r$, $b$, $c$, and $A$ have the following form
\begin{align*}
a^l&=a^l_1+a^l_2u+a^l_3v+a^l_4w+a^l_5\frac12(\vec{v}^2+\vec{\xi}^2)=a_\alpha^l\psi_\alpha,\\
a^r&=a^r_1+a^r_2u+a^r_3v+a^r_4w+a^r_5\frac12(\vec{v}^2+\vec{\xi}^2)=a_\alpha^r\psi_\alpha,\\
&\qquad\qquad\qquad...\\
A&=A_1+A_2u+A_3v+A_4w+A_5\frac12(\vec{v}^2+\vec{\xi}^2)=A_\alpha\psi_\alpha.
\end{align*}
The Heaviside function $H[x]$ is
\begin{equation}\nonumber
H[x]=\left\{
\begin{aligned}
1 \quad x>0,\\
0 \quad x\le0.
\end{aligned}\right.
\end{equation}
The Maxwellian at $\vec{x}_0$ and its derivative functions can be obtained from the reconstructed macroscopic variables.
In this paper, the van Leer limiter is used for reconstruction,
\begin{equation}
s=(\text{sign}(s_l)+\text{sign}(s_r))\frac{|s_l||s_r|}{|s_l|+|s_r|},
\end{equation}
where $s$, $s_l$, and $s_r$ are the slopes of macroscopic variables.
The Maxwellian distribution at cell interface can be obtained from
\begin{equation}
\vec{W}_{0}=\int \vec{\psi} \left(g^l_{0}H[\bar{u}]+g^r_{0}(1-H[\bar{u}])\right)d\Xi,
\end{equation}
where $\vec{W}_{0}$ is the macroscopic variables at $x_0$ corresponding to $g_0$, and $\bar{u}=\vec{u}\cdot\vec{e}_1$.
The derivative functions $a^l,a^r,b,c,A$ are calculated from the spatial and time derivatives of $g_0$. Taking $A$ as an example,
\begin{align}
A&=\frac{1}{g_0}\bigg(\frac{\partial g_0}{\partial \vec{W}_0}\bigg)\bigg(\frac{\partial \vec{W}_0}{\partial t}\bigg)_{t=t^n},
\end{align}
and
\begin{align}
&A_5=\frac{\rho}{3p^2}\left(2\frac{\partial \rho E}{\partial t}+\left(U_iU_i-\frac{3p}{\rho}\right)\frac{\partial \rho}{\partial t}-2U_i\frac{\partial \rho U_i}{\partial t}\right),\\
&A_{i+1}=\frac{1}{p}\left(\frac{\partial \rho U_i}{\partial t}-U_i\frac{\partial \rho}{\partial t}\right)-U_iA_5 \quad (i=1,2,3),\\
&A_1=\frac{1}{\rho}\frac{\partial \rho}{\partial t}-U_iA_{i+1}-\frac12\left(U_iU_i+\frac{3p}{\rho}\right)A_5,
\end{align}
where the macroscopic quantities are those at $(\vec{x}_0,t^n)$. The time derivatives of macroscopic variables are determined by the conservative moments requirements on the first order Chapman-Enskog expansion \cite{chapman1990mathematical}, which gives
\begin{equation}
\left(\frac{\partial\vec{W}_0}{\partial t}\right)=-\int \left(a^l\bar{u}H[\bar{u}]+a^r\bar{u}(1-H[\bar{u}])+b\bar{v}+c\bar{w}\right)g_0 \vec{\psi} d\Xi.
\end{equation}
Readers can refer to \cite{wang2015unified} for derivation and detailed formulae.
Once the Maxwellian distribution at cell interface and its derivative functions are determined, the partial flux function Eq.\eqref{Fg} can be obtained using the expansion Eq.\eqref{g-expansion1} for the interface distribution function, which gives
\begin{equation}\label{Fg1}
\begin{aligned}
\vec{F}_{g,s}=\int \vec{v}\cdot \vec{n}_s \vec{\psi}\bigg\{&\left(\tau e^{-\Delta t/\tau}+\Delta t-\tau\right)g_0(\vec{x}_0,\vec{v})\\
&+\tau\left(-e^{-\Delta t/\tau}(\Delta t+2\tau)-\Delta t+2\tau\right)
\left[a^lH[\bar{u}]+a^r(1-H[\bar{u}])\right]\bar{u} g_0(\vec{x}_0,\vec{v})\\
&+\tau\left(e^{-\Delta t/\tau}(\Delta t+2\tau)-\Delta t+2\tau\right)(b\bar{v}+c\bar{w})g_0(\vec{x}_0,\vec{v})\\
&+\left(-\tau^2e^{-\Delta t/\tau}+\Delta t^2/2-\tau\Delta t+\tau^2\right)Ag_0(\vec{x}_0,\vec{v})\bigg\} d\Xi.
\end{aligned}
\end{equation}
The free transport terms in UGKS flux are calculated from the simulation particles.
The net flux of cell $i$ contributed by the free transport terms are
\begin{equation}
\vec{F}_{f,i}=\left(\vec{W}_i^{P}\right)^{n+1}-\left(\vec{W}_i^{P}\right)^{n},
\end{equation}
where $\vec{W}_i^{P}$ is the vector of the macroscopic quantities of all particles in cell $i$.
The updated $\left(\vec{W}_i^{P}\right)^{n+1}$,
\begin{equation}
\left(\vec{W}_i^{P}\right)^{n+1}=\sum_k\left( m_k,m_k u_k, m_k v_k,
m_k w_k, \frac12 m_k \left(|\vec{v}_k|^2+e_k\right)\right)^T,
\end{equation}
only counts the collisionless particles at the end of this time step.
Note that those particles differ from the total particles at the beginning of next time step which will also include the newly sampled hydro-particles.
For UGKP method, we have $\vec{W}_i^n=\left(\vec{W}_i^{P}\right)^{n}$, and the evolution equation of the macroscopic variables follows
\begin{equation}
\begin{aligned}
\vec{W}^{n+1}=&\vec{W}^n+\frac{1}{|\Omega_i|} \left(\sum_{l_s\in\partial \Omega}|l_s| \vec{F}_{g,s}+\vec{F}_{f,i}\right)\\
=&\left(\vec{W}_i^{P}\right)^{n+1}+\frac{1}{|\Omega_i|} \sum_{l_s\in\partial \Omega}|l_s| \vec{F}_{g,s}.
\end{aligned}
\end{equation}
\subsection{Unified gas-kinetic wave-particle method}\label{method2}
The UGKWP method improves UGKP method mainly in the following two aspects:
\begin{itemize}
\item The free transport terms in numerical flux contributed by the hydro-particles are evaluated analytically;
\item Only collisionless hydro-particles are sampled.
\end{itemize}
Firstly, since the distribution of the hydro-quantities is known as $g^+$, we can analytically evaluate the flux contributed by the free transport of hydro-particles, which gives
\begin{equation}
\sum_s |l_s| \vec{F}_{f,s}=\sum_s |l_s| \vec{F}_{f,s}^h+\vec{F}_{f,i}^p,
\end{equation}
where $\vec{F}_{f,s}^h$ is the free transport flux contributed by the hydro-quantities,
\begin{equation}\label{Ff1}
\vec{F}_{f,s}^h=\int_{t^n}^{t^{n+1}}\int\bigg\{e^{-t'/\tau}\left[g_0^{+,l}(\vec{x}-\vec{v}t',\vec{v})H[\bar{u}]
+g_0^{+,r}(\vec{x}-\vec{v}t',\vec{v})(1-H[\bar{u}])\right]\bigg\} \vec{v}\cdot\vec{n}_s \vec{\psi} d\Xi dt.
\end{equation}
Here the second order expansion of $g^+$ Eq.\eqref{2nd-particle} is used.
The numerical flux contributed by the streaming of collisionless and collisional particle, i.e. $\vec{F}_{f,i}^p$, will be given later.
Secondly, since $\vec{F}_{f,s}^h$ is analytically evaluated, there is no need to sample all hydro-particles.
Only the collisionless hydro-particles \index{collisionless hydro-particles} will be sampled.
Based on the cumulative distribution function of the first collision time Eq.\eqref{tc-distribution},
the collisionless hydro-particles are sampled from a $\vec{W}^{n+1}_i$-based Maxwellian distribution with the total mass of $e^{-\Delta t/\tau_i}\Omega_i\rho^h_i$.
Different from UGKP method, for the UGKWP method, simulation particles are categorized into three groups: the collisionless particle $P^f$, the collisional particle $P^c$, and the collisionless hydro-particle $P^{hf}$.
The flux contribution from collisionless hydro-particles can be evaluated analytically. The net flux $\vec{F}_{f,i}^p$
contributed by the collision and collisionless particles is
\begin{equation}\label{Fp}
\vec{F}_{f,i}^P=\left(\vec{W}_i^{fc}\right)^{n+1}-\left(\vec{W}_i^{fc}\right)^{n},
\end{equation}
where $\vec{W}_i^{fc}$ is the macroscopic quantities of collisionless particle $P^f$ and collisional particle $P^c$ in cell $i$.
The evolution of the macroscopic variables for UGKWP method is
\begin{equation} \label{WF}
\vec{W}^{n+1}=\vec{W}^n+\frac{1}{|\Omega_i|} \left[\sum_{l_s\in\partial \Omega}|l_s| \left(\vec{F}_{g,s}+\vec{F}_{f,s}^h\right)+\vec{F}_{f,i}^P\right].
\end{equation}
For the evolution of simulation particles, the physical coordinates of the collisionless particles $P^f$ and the collisionless hydro-particles $P^{hf}$ are updated by
\begin{equation}
\left(\vec{x}^{f,h}\right)^{n+1}=\left(\vec{x}^{f,h}\right)^n+\vec{v}^{f,h}\Delta t,
\end{equation}
and the collisional particles are eliminated after flux calculation.
The updated collisionless particles and collisionless hydro-particles gather to be the candidates of collisionless and collisional particles in the next time step calculation according to their newly-sampled $t_c$.
In summary, the dynamical circulation of particle-described collisionless particle $P^f$, collisional particle $P^c$, wave-described free-transport hydro-particle $P^{hf}$, and macroscopic variables is shown in Fig.~\ref{dynamical2}, and the flow chart of the UGKWP method is given in Fig.~\ref{flowchart}.
\begin{figure}
\caption{Dynamical circulation of particle-described collisionless particle $P^f$, collisional particle $P^c$, wave-described free-transport hydro-particle $P^{hf}$, and macroscopic variables for UGKWP method.}
\label{dynamical2}
\end{figure}
\begin{figure}
\caption{Flow chart of the UGKWP method. In the flux calculation, the Maxwellian related flux $\vec{F}_{g,s}$ is evaluated analytically.}
\label{flowchart}
\end{figure}
\section{Analysis and discussion}\label{discussion}
\subsection{Asymptotic behavior in continuum regime}
In this section, the asymptotic behavior and computational cost of the UGKWP method
will be analyzed in continuum regime when the time step $\Delta t$ is much larger than the local relaxation parameter $\tau$.
When $\Delta t \gg \tau$, for cell $i$, the total mass of the sampled collisionless hydro-particles is
$M^h=e^{-\Delta t/\tau_i}\Omega_i \rho_i^h$, and the total mass of the collisionless and collisional particles $M^p$ is proportional to $M^h$,
i.e. $M^p\sim O(e^{-\Delta t/\tau_i})$.
Therefore, the numerical flux contribution by collisionless and collisional particle streaming is $\vec{F}_{f,i}^p\sim O(e^{-\Delta t/\tau_i})$.
In the free transport flux of hydro-quantities $\vec{F}_{f,s}^h$ given by Eq.\eqref{Ff1},
the hydrodynamic distribution functions $g_0^{+,l,r}(\vec{x},\vec{v})$ become
\begin{equation}\label{glimit}
\begin{aligned}
g_0^{+,l,r}(\vec{x},\vec{v})=&g_0^{l,r}(\vec{x},\vec{v})-\tau
\left(\partial_t g_0^{l,r}(\vec{x},\vec{v})+\vec{v}\cdot \nabla_{\vec{x}}g_0^{l,r}(\vec{x},\vec{v})\right)+O(e^{-\Delta t/\tau})\\
=&f_{\text{NS}}^{l,r}+O(e^{-\Delta t/\tau}),
\end{aligned}
\end{equation}
where $f_{\text{NS}}^{l,r}$ is the local first order expansion in Chapman-Enskog asymptotic series.
Substituting Eq.\eqref{glimit} into Eq.\eqref{flux-w-ugks}, the total flux of the macroscopic variables in Eq.\eqref{WF}
converges to the flux of gas-kinetic scheme \cite{xu2001gas},
\begin{equation}\label{fluxlimit}
\vec{F}_{s}=\int_{t^n}^{t^{n+1}}\int \left[\frac{1}{\tau} \int_0^t e^{(t'-t)/\tau} g(\vec{x}'_s,t',\vec{v}) dt'+e^{-t/\tau} f_{\text{NS},0}(\vec{x}_s-\vec{v}t,\vec{v})\right]\vec{v}\cdot\vec{n}_s\vec{\psi}d\Xi dt+O(e^{-\Delta t/\tau}).
\end{equation}
For a well resolved flow region with $g_0^l=g_0^r$ and $\nabla_{\vec{x}}^l g=\nabla_{\vec{x}}^r g$, it can be derived from Eq.\eqref{fluxlimit} and Eq.\eqref{Fg1} that the total flux becomes
\begin{equation}
\vec{F}_{s}=\int_{t^n}^{t^{n+1}}\int \big[ g_0-\tau\left(\partial_t g_0+\vec{v}\cdot\nabla_{\vec{x}}g\right)\big] \vec{v}\cdot\vec{n}_s\vec{\psi}d\Xi dt,
\end{equation}
which gives consistent flux to the first order Chapman-Enskog expansion of kinetic equation for the NS solutions.
From the above analysis, it is concluded that the UGKWP method preserves the hydrodynamic Navier-Stokes equations in continuum regime for $\Delta t \gg \tau$.
In the continuum regime with $\Delta t \gg \tau$, for a fixed particle mass $m_p$, the number of sampled collisionless hydro-particles in cell $i$ is $e^{-\Delta t/\tau_i}\Omega_i \rho_i^h/m_p$. Therefore, the total simulation particle number $N_p$ in such regime decreases exponentially, $N_p\sim O(e^{-\Delta t/\tau})$, which means the computational cost of the UGKWP method becomes comparable to hydrodynamic NS solvers.
We define a numerical scheme as a `Multiscale Multi-efficiency Preserving' (MMP) \index{Multiscale multi-efficiency preserving method} scheme if the following two constraints are satisfied:
\begin{enumerate}
\item The scheme preserves all flow regime solutions, i.e. from collisionless regime to hydrodynamic Navier-Stokes regime and Euler regime, and the cell size and time step are not constrained to be less than the mean free path and collision time.
\item The computational efficiency of this scheme is comparable to that of highly efficient schemes in all flow regimes; for example, the computational cost, including the computational time and memory cost, is comparable to that of NS solvers in the continuum regime. At the same time, for highly non-equilibrium hypersonic flow, the efficiency approaches that of a purely particle method.
\end{enumerate}
It is shown in above analysis that UGKWP method is a multiscale multi-efficiency preserving method.
\subsection{Consistent sampling}\label{consistency}
For both UGKP and UGKWP methods, we need to sample particles from a given Maxwellian distribution.
For cell $\Omega_i$ with sampling quantities $\vec{W}_{s,i}=\left(\rho_{s,i},U_{s,i},V_{s,i},W_{s,i},\rho_{s,i}E_{s,i}\right)^T$,
hydro-particles are sampled from the Maxwellian distribution
\begin{equation}
f_{s,i}=\rho_{s,i}\left(\frac{\lambda_{s,i}}{\pi}\right)^{\frac{K+3}{2}}
\exp\left\{-\lambda_{s,i}\left[\left(\vec{v}-\vec{U}_{s,i}\right)^2+\vec{\xi}^2\right]\right\},
\end{equation}
where $\lambda_{s,i}=\frac{\rho_{s,i}}{2(\gamma-1)\rho_{s,i}e_{s,i}}$, $\rho_{s,i}e_{s,i}=\rho_{s,i}E_{s,i}-\frac12\rho_{s,i}\vec{U}_{s,i}^2$, and $\gamma=\frac{K+5}{K+3}$.
The sampled particles $P^s_k$, $k=1,...,N_s$, follow
\begin{equation}
\begin{aligned}
&m_p^s=\frac{\rho_{s,i}|\Omega_i|}{N_s}, \quad \vec{x}_{k}^s\sim U(\Omega_i), \quad e_k^s=e_{s,i}, \\
&\vec{v}_k^s=(-\ln (\vec{\eta}_1)/\lambda_{s,i})^{1/2}\cos(2\pi\vec{\eta}_2), \quad \vec{\eta}_{1,2}\sim U(0,1)^3,
\end{aligned}
\end{equation}
where $U(\Omega_i)$ is the uniform distribution on $\Omega_i$ and $U(0,1)^3$ is the uniform distribution on $(0,1)^3$.
A velocity transformation
\begin{equation}
\vec{v}_k^{s\prime}=b\left(\vec{v}_k^{s}-\vec{a}-\vec{U}_{s,i}\right)+\vec{U}_{s,i}
\end{equation}
is required to make the macroscopic quantities of the sampled particles $P_k^s$ consistent with the sampling quantities $\vec{W}_{s,i}$.
The parameters $\vec{a}$ and $b$ are solved from the consistent constraints
\begin{equation}
\left\{
\begin{aligned}
&\sum_k m_k^s \vec{v}_k^{s\prime}= \rho_{s,i}\vec{U}_{s,i}|\Omega_i|,\\
&\sum_k\left(\frac12 m_k^s \left(\vec{v}_k^{s\prime}\right)^2+m_k^se_k^s\right)=\rho_{s,i}E_{s,i}|\Omega_i|,
\end{aligned}
\right.
\end{equation}
which give
\begin{equation}
\left\{
\begin{aligned}
&\vec{a}=\frac{1}{N_s}\sum_k\vec{v}_{k}^s-\vec{U}_{s,i},\\
&b=\frac{-c_2\pm\sqrt{c_2^2-4c_1c_3}}{2c_1},
\end{aligned}
\right.
\end{equation}
where $c_1=\sum_k\frac12\left(\vec{v}_k^s-\vec{a}-\vec{U}_{s,i}\right)^2$,
$c_2=\sum_k\vec{U}_{s,i}\vec{v}_k^s-N_s(\vec{a}+\vec{U}_{s,i})\vec{U}_{s,i}$, and
$c_3=\frac12 N_s\vec{U}_{s,i}^2+\sum_k e_k^s-E_{s,i}$.
\subsection{Robustness and conservation}\label{correction}
Due to statistical noise, the density and temperature of the cell averaged macroscopic quantities $\vec{W}_i$ and hydro-quantities $\vec{W}_i^h$ may become negative.
Therefore two corrections are required. The first correction of
\begin{equation}\label{correct1}
\tilde{\rho}_i=\max\left(\rho_i,0\right),
\tilde{\rho}_i\tilde{E}_i=\max\left(\rho_iE_i,\frac12\rho_i\vec{U}_i^2\right)
\end{equation}
is put in the update of cell averaged macroscopic variables, and the second one
\begin{equation}\label{correct2}
\tilde{\rho}_i^h=\max\left(\rho_i^h,0\right),
\tilde{\rho}_i^h\tilde{E}_i^h=\max\left(\rho_i^hE_i^h,\frac12\rho_i^h(\vec{U}_i^h)^2\right)
\end{equation}
is put in the calculation of the hydro-quantities.
The UGKWP method is a finite volume scheme for the macroscopic variables. As long as Eq.\eqref{correct1} doesn't take effect, which means $\tilde{\rho}_i=\rho_i,\tilde{\rho}_i\tilde{E}_i=\rho_iE_i$, the mass, momentum, and energy conservation will be satisfied.
For all the numerical tests in this paper, Eq.\eqref{correct1} does not take effect at all.
It only provides a safeguard guarantee when it is needed.
\section{Numerical tests}\label{numericaltest}
In this section, five numerical tests are calculated to demonstrate the multiscale property, and to present the computational efficiency of the UGKWP method.
The test cases are one dimensional Sod shock tube, normal shock wave, two dimensional flow passing a cylinder,
lid-driven cavity flow, and flat plate boundary layer.
The solution of UGKWP method shows good agreement with the reference solution for both high speed and low speed flow in all flow regimes.
For hypersonic flow, the UGKWP method shows a much higher efficiency and lower memory cost than the conventional UGKS with a
direct discretization of particle velocity space.
In the near continuum flow regime, the UGKWP method shows a much higher efficiency and much lower statistical noise than conventional particle methods.
In the following numerical tests, the reference solution of kinetic equation is given by UGKS and the reference NS solution is given by GKS.
The code is sequential and the computation is carried out on a computer with Intel i7-8700K CPU, 64 GB memory.
\subsection{Sod shock tube problem}
We first calculate the Sod shock tube problem with different Knudsen numbers to show the performance of the UGKWP method in different flow regimes, and two sets of simulation particle numbers are used to study the statistical noise.
In this calculation, the dimensionless quantities are used. The computational domain is [-0.5,0.5], with initial condition
\begin{equation}\nonumber
(\rho, u, p)=\left\{
\begin{aligned}
&(1.0,0,1.0) \qquad x\le0,\\
&(0.125,0,0.1) \quad x>0.
\end{aligned}\right.
\end{equation}
The viscous coefficient is given as
\begin{equation}\label{vhs-vs1}
\mu=\mu_{ref}\left(\frac{T}{T_0}\right)^{\omega},
\end{equation}
with the temperature dependency index $\omega=0.81$, and the reference viscosity
\begin{equation}\label{vhs-vs2}
\mu_{ref}=\frac{15\sqrt{\pi}}{2(5-2\omega)(7-2\omega)}\mathrm{Kn}.
\end{equation}
The comparison between the UGKWP method and UGKS solution at $t=0.15$ is shown in Fig. \ref{sod1}-\ref{sod3}.
The solution in the rarefied regime with $\mathrm{Kn}=0.1$ is shown in Fig. \ref{sod1}; the solution of the UGKWP method with $10^{4}$ particles per cell agrees with the UGKS solution, and extremely large statistical noise can be observed when we reduce the number of particles to 10 per cell. The results show that the UGKWP method can capture the rarefied solution.
However, a sufficient number of simulation particles is required, or an averaging process needs to be carried out, in order to get a sufficiently accurate solution.
Next, we reduce the Knudsen number to $10^{-3}$. As shown in Fig. \ref{sod2}, the solution with a large number of simulation particles ($10^{4}$ per cell) agrees well with the UGKS solution, and the noise of the solution with a small number of simulation particles (10 particles per cell) is smaller than in the rarefied case with $\mathrm{Kn}=0.1$, especially in the upstream.
When we move to $\mathrm{Kn}=10^{-5}$, even with 10 simulation particles per cell, the UGKWP method agrees well with the UGKS solution. The Sod test shows that the UGKWP method can capture the flow physics in different flow regimes, and in the near continuum regime the statistical noise of the UGKWP method is much lower than that of conventional particle methods.
\subsection{Normal shock}
To demonstrate the capability of UGKWP method in capturing the highly non-equilibrium flow, the one dimensional shock wave is studied. The gaseous medium is argon, the viscous coefficient of which follows Eq.\eqref{vhs-vs1}-\eqref{vhs-vs2} with the temperature dependency index $\omega=0.81$.
In this calculation, the reference length is the upstream mean free path, and the computational domain is [-25,25] with 100 cells.
The upstream ($x\le0$) and downstream ($x>0$) states are connected by the Rankine--Hugoniot condition.
We calculate two test cases with upstream Mach number $\mathrm{M}=8$ and $\mathrm{M}=10$.
In order to reduce the statistical noise, $5\times10^{4}$ simulation particles are used in each cell.
As shown in Fig. \ref{shock}, the normalized solution of UGKWP method agrees well with the UGKS solution, which proves the ability of UGKWP method in capturing the non-equilibrium flow.
\subsection{Cylinder flow}
In this section, we calculate the supersonic argon gas flow passing over a circular cylinder at different Mach and Knudsen numbers.
For argon gas, the molecular mass is $m_0=6.63\times 10^{-26}$ kg; the molecular diameter $d=4.17\times10^{-10}$ m; and the specific heat ratio $\gamma=5/3$. The variable hard sphere (VHS) model is used to model the molecular interaction, and the viscosity follows
\begin{equation}
\mu=\frac{15\sqrt{\pi m k T_\infty}}{2\pi d^2(5-2\omega)(7-2\omega)}\left(\frac{T}{T_\infty}\right)^\omega,
\end{equation}
with the temperature dependency index $\omega=0.81$.
As shown in table \ref{cylinder-initial}, the incoming flow Mach number is chosen to be $5, 20, 30$, and the Knudsen number with respect to the cylinder radius is set as $1.0, 0.1, 10^{-4}$. The dimensionless quantities are used with respect to the reference length as the cylinder radius $L_{ref}=R$, the reference velocity $U_{ref}=\sqrt{2RT_\infty}$, the reference time $t_{ref}=L_{ref}/U_{ref}$, the reference density $\rho_{ref}= \rho_\infty$, and the reference temperature $T_{ref}=T_\infty$.
\begin{table}[ht]
\caption{Incoming flow condition for cylinder flow}
\centering
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
$\mathrm{Kn}_{\infty}$ & $n_{\infty}\mathrm{[particles/m^3]}$ & $\rho_{\infty}\mathrm{[Kg/m^3]}$ &
$T_{\infty}\mathrm{[K]}$ & $T_{w}\mathrm{[K]}$ & $U_{\infty}\mathrm{[m/s]}$ & $\mathrm{M}_{\infty}$ \\ \hline
\multirow{3}{*}{$10^{-4}$} & \multirow{3}{*}{$1.294\times10^{24}$} & \multirow{3}{*}{$8.585\times10^{-2}$} & \multirow{3}{*}{273} & \multirow{3}{*}{273} & 1538.794 & 5 \\ \cline{6-7}
& & & & & 6155.17 & 20 \\ \cline{6-7}
& & & & & 9232.76 & 30 \\ \hline
\multirow{3}{*}{0.1} & \multirow{3}{*}{$1.294\times10^{21}$} & \multirow{3}{*}{$8.585\times10^{-5}$} & \multirow{3}{*}{273} & \multirow{3}{*}{273} & 1538.794 & 5 \\ \cline{6-7}
& & & & & 6155.17 & 20 \\ \cline{6-7}
& & & & & 9232.76 & 30 \\ \hline
\multirow{3}{*}{1} & \multirow{3}{*}{$1.294\times10^{20}$} & \multirow{3}{*}{$8.585\times10^{-6}$} &
\multirow{3}{*}{273} & \multirow{3}{*}{273} & 1538.794 & 5 \\ \cline{6-7}
& & & & & 6155.17 & 20 \\ \cline{6-7}
& & & & & 9232.76 & 30 \\ \hline
\end{tabular}
\label{cylinder-initial}
\end{table}
We compare the results and computational efficiency of UGKWP method to UGKS/GKS.
For the case of $\mathrm{M}=5$ and $\mathrm{Kn}=0.1$, the physical domain of the UGKWP method is discretized by a mesh with $64\times64$ cells, and the mass of simulation particle is set $m_p=1.52\times10^{-3}$.
For UGKS, the same discretization in physical space is used and the velocity space is $[-10,10]\times[-10,10]$ discretized by $100\times100$ velocity points. The CFL condition for both UGKS and UGKWP method is set to be 0.9. Fig. \ref{cylinder11} shows the contours of steady state pressure, temperature, and velocity, where the flood is the UGKWP method solution and lines are the UGKS solution. The density, velocity, pressure, temperature profiles along the stagnation line are shown in Fig. \ref{cylinder12}, where the solution of UGKWP method is denoted by symbols and the solution of UGKS is shown in lines.
To get the steady state solution, the computational time for UGKS is 10.9 hours and the UGKWP method is 21.5 minutes (including the averaging procedure).
The memory cost for UGKS is 3.4 GB and for UGKWP method is 75 MB.
The computational time of UGKWP method is about 30 times faster than explicit UGKS,
and the memory cost is 46 times less than UGKS.
For the case of $\mathrm{M}=20$ and $\mathrm{Kn}=1.0$, the discretization of the physical domain is 64 cells along the azimuth direction, and 110 cells along the radial direction. For the UGKWP method, the mass of the simulation particle is set $m_p=1.0\times 10^{-3}$, and the velocity space for UGKS is $[-50,50]\times[-50,50]$ discretized by $200\times200$ velocity points. Fig. \ref{cylinder21} shows the contours of steady state pressure, temperature, and velocity, where the flood is the UGKWP method solution and lines are the UGKS solution. The density, velocity, pressure, temperature profiles along the stagnation line are shown in Fig. \ref{cylinder22}, where the solution of UGKWP method is denoted by symbols and the solution of UGKS is shown in lines.
The averaged number of simulation particle per cell is shown in Fig. \ref{cylinder-number}-(a).
To get the steady state solution, the computational time for UGKS is about 429 hours and the UGKWP method is 36.1 minutes (including the averaging procedure).
The memory cost for UGKS is 22.3 GB and for UGKWP method is less than 100 MB.
The computational time of UGKWP method is about 713 times faster than explicit UGKS,
and the memory cost is 228 times less than UGKS.
For the case of $\mathrm{M}=20$ and $\mathrm{Kn}=10^{-4}$, a discretization of $100\times150$ cells is used in physical space and the simulation particle mass is set $m_p=4.7\times 10^{-3}$. The solution of the UGKWP method is compared with the Navier-Stokes solution calculated by the gas-kinetic scheme, and the solution contour and profile along the stagnation line are shown in Fig. \ref{cylinder31} and \ref{cylinder32} respectively.
The averaged number of simulation particle per cell is shown in Fig. \ref{cylinder-number}-(b).
The simulation time for UGKWP method is 17.2 minutes which is comparable to GKS.
To demonstrate the capability of UGKWP method in the simulation of multiscale high Mach number flow, we calculate the cylinder flow at Mach number 30 and Knudsen number $\mathrm{Kn}=1.0,10^{-4}$. The solution contours are shown in Fig. \ref{cylinder4}-\ref{cylinder5}. The computational time for the rarefied case is around 40 minutes and for $\mathrm{Kn}=10^{-4}$ is around 20 minutes, both including the averaging procedure. The comparison between the computational cost between UGKWP method and UGKS is shown in table \ref{efficiency1}-\ref{efficiency2}.
\begin{table}[ht] \footnotesize
\centering
\begin{tabular}{ |c | c | c| c|c|c|}
\hline
$\mathrm{M}$ and $\mathrm{Kn}$ of cylinder flow & Time of UGKS &Time of UGKWP & Time ratio $\frac{\text{UGKS}}{\text{UGKWP}}$ \\
\hline
$\mathrm{M}=5$,\ $\mathrm{Kn}=0.1$& 10.9 hours & 21.5 minutes& 30 \\
\hline
$\mathrm{M}=20$,\ $\mathrm{Kn}=1$& 429 hours & 36.1 minutes & 713\\
\hline
\end{tabular}
\caption{Comparison of the computational time between the UGKWP method and UGKS}
\label{efficiency1}
\end{table}
\begin{table}[ht]\footnotesize
\centering
\begin{tabular}{ |c | c | c| c|c|c|}
\hline
$\mathrm{M}$ and $\mathrm{Kn}$ of cylinder flow & Memory of UGKS &Memory of UGKWP & Memory ratio $\frac{\text{UGKS}}{\text{UGKWP}}$ \\
\hline
$\mathrm{M}=5$,\ $\mathrm{Kn}=0.1$& 3.4 GB & 75 MB & 46 \\
\hline
$\mathrm{M}=20$,\ $\mathrm{Kn}=1$& 22.3GB & 100 MB&228\\
\hline
\end{tabular}
\caption{Comparison of the memory cost between the UGKWP method and UGKS}
\label{efficiency2}
\end{table}
\subsection{Cavity flow}
To show the capability of the UGKWP method in simulating the multiscale low speed flow, we calculate the lid-driven cavity flow at different Knudsen numbers $\mathrm{Kn}=1.0, 0.075, 1.42\times10^{-4}$.
The gaseous medium is assumed to consist of monatomic argon gas, which is modeled by the VHS model. The particle parameters of argon and the formulation of viscosity coefficient are the same as in the calculation of cylinder flow.
The wall temperature is kept $T_w=273$ K and the top lid is moving with a fixed velocity of $U_w=50$ m/s.
The dimensionless quantities are used with respect to the reference length as the cavity, the reference temperature as the initial gas temperature $T_{ref}=T_0$, the reference velocity $U_{ref}=\sqrt{2RT_0}$, the reference time $t_{ref}=L_{ref}/U_{ref}$, the reference density as the gas initial density $\rho_{ref}= \rho_0$.
For $\mathrm{Kn}=1.0$, $5000$ simulation particles per cell are used for the UGKWP method and UGKS uses $50\times50$ discrete velocity points. The computational time for UGKS is 14.4 hours, and for the UGKWP method is 20 hours
(including 1800 averaging steps). The memory cost for UGKS is 500 MB and for UGKWP method is 2.5 GB. The computational time of UGKWP method is 1.38 times slower than explicit UGKS, and the memory cost is 5 times larger than UGKS. For $\mathrm{Kn}=0.075$, the same numerical set up is used with a similar computational cost as the case of $\mathrm{Kn}=1.0$.
The solution of the UGKWP method is shown in Fig. \ref{cavity11}-\ref{cavity22}, compared to the UGKS solution. Fig. \ref{cavity11} and \ref{cavity21} show the density, velocity, and temperature contours of UGKWP method (flood) and UGKS (lines). For both $\mathrm{Kn}=1.0$ and $\mathrm{Kn}=0.075$, the density and velocity contours agree well between two methods, while the temperature shows relative large statistical noise for UGKWP method.
Fig. \ref{cavity12} and \ref{cavity22} show the velocity profiles along the vertical and horizontal lines, and good agreement can be observed between the UGKWP method and UGKS.
Next, we calculate the cavity flow at $\mathrm{Kn}=1.42 \times 10^{-4}$, i.e., $\mathrm{Re}=1000$.
In this calculation, the number of particles used for the UGKWP method is 100 per cell,
and UGKS uses $28\times28$ discrete velocity points.
The computational time for UGKS is 11 hours, and for UGKWP method is 38.7 minutes.
The memory cost for UGKS is 13 GB and for UGKWP method is 500 MB.
The computational time of the UGKWP method is 17 times faster than explicit UGKS, and the memory cost is 26 times less than UGKS. The velocity profiles of the UGKWP method along the vertical and horizontal lines agree well with the Navier-Stokes solution as shown in Fig. \ref{cavity32}.
For the low speed flow calculation, in the rarefied flow regime the UGKWP method is more expensive than the UGKS, and in the continuum flow, the UGKWP method is more efficient than the explicit UGKS, with a numerical cost close to GKS for the Navier-Stokes solution.
UGKS can use other acceleration techniques, such as implicit and multigrid \cite{zhu2016implicit,zhu2017implicit}, which improve the efficiency of UGKS by hundreds of times.
However, the memory cost of UGKS can hardly be reduced due to the discretization of the particle velocity space in all flow regimes.
\subsection{Boundary layer}
It is challenging for a particle method to calculate the Navier-Stokes solution under a cell size much larger than the mean free path and time step much larger than the collision time.
To show the ability of the UGKWP method in capturing the Navier-Stokes solution under such condition, we calculate the Navier-Stokes boundary layer.
A gas flow with density $\rho_0=1.0$ and temperature $T_0=5.56\times10^{-2}$ passes over a flat plate at speed $U_0=0.1$. The Reynolds number is set to be $\mathrm{Re}=10^{5}$, and the viscosity is fixed at $\mu=1.05\times10^{-4}$.
The computational domain is $[-44.16,112.75]\times[0,29.8]$, a rectangular mesh with $120\times30$ nonuniform grid points is used as shown in Fig. \ref{layer1}(a).
The CFL condition is chosen as 0.95. At steady state, the velocity profile of UGKWP method agrees well with the Blasius solution as shown in Fig. \ref{layer2}.
For this calculation, the simulation particle mass is $6.32\times10^{-18}$.
Since the time step is much larger than the local collision time, only about 1-2 particles will be stored in each cell, and the computational time is less than 2 minutes.
\section{Conclusion}\label{conclusion}
In this paper, a multiscale multi-efficiency preserving unified gas-kinetic wave-particle method is proposed under the UGKS framework.
The UGKWP method is highly efficient in the simulation of multiscale gas flows from hypersonic to low speed microflows.
In the UGKWP method, both probability density distribution and simulation particles are used to describe the gas particles,
and the simulation particles are sampled only for capturing local non-equilibrium caused by the particle free transport.
The evolution of microscopic quantities is coupled with the evolution of macroscopic quantities in the mesh size scale,
where the flow physics has been directly modeled.
The multiscale modeling or the multiple physical-numerical modeling requires the inclusion of numerical cell size and time step scale into the construction of numerical models instead of a direct discretization of partial differential equations.
UGKS and UGKWP method model gas evolution on the scales of cell size and time step.
According to the ratio between the time step and the particle collision time,
the schemes capture flow dynamics in all flow regimes efficiently.
The UGKWP method is a multi-efficiency preserving scheme, which means the computational cost of the scheme is on the scale of particle methods in the rarefied regime,
and comparable to hydrodynamic solvers in the continuum regime.
Specially, the UGKWP method converges to the gas-kinetic scheme in the continuum regime and does not suffer from stochastic noise.
Due to the implementation of simulation particles in UGKWP method, for hypersonic flows the computational cost has been reduced by hundreds of times in comparison with the UGKS with a direct discretization of particle velocity space.
The current method is based on the BGK-type model for the particle collision and more realistic collisional model can be included in UGKWP method as well.
The methodology of UGKWP method is important for the theoretical fluid dynamics study as well.
On the mesh size scale, even in the near continuum flow regime the use of particles can capture the non-equilibrium effect through the particle streaming or fluid element penetration when a cell size is much larger than the turbulent eddies, which cannot be described by the hydrodynamic equations with averaged flow variables only. The fundamental difficulties associated with the Navier-Stokes equations for the description of separation flow and turbulence may come from the continuum mechanics assumption for continuous connection of fluid elements without breakdown.
The direct modeling equations in UGKWP method release such a constraint and may help in the study of non-equilibrium turbulent flow.
The non-equilibrium transports in other system, such as chemical reaction, plasma, multiphase, and
radiation, can be solved efficiently by UGKWP method as well.
\section*{Acknowledgments}
The current research is supported by Hong Kong research grant council (16207715, 16206617)
and National Science Foundation of China (11772281, 91530319).
\section*{References}
\biboptions{numbers,sort&compress}
\begin{figure}
\caption{(a) Density, (b) velocity, (c) temperature, and (d) pressure profiles of Sod shock tube at $t=0.15$ with Knudsen number $\mathrm{Kn}
\label{sod1}
\end{figure}
\begin{figure}
\caption{(a) Density, (b) velocity, (b) temperature, and (d) pressure profiles of Sod test at $t=0.15$ with Knudsen number $\mathrm{Kn}
\label{sod2}
\end{figure}
\begin{figure}
\caption{(a) Density, (b) velocity, (b) temperature, and (d) pressure profiles of Sod test at $t=0.15$ with Knudsen number $\mathrm{Kn}
\label{sod3}
\end{figure}
\begin{figure}
\caption{Normalized density, velocity, and temperature profile of normal shock wave at $\mathrm{M}
\label{shock}
\end{figure}
\begin{figure}
\caption{(a) Pressure, (b) temperature, (c) x directional velocity, and (d) y directional velocity contour for $\mathrm{M}
\label{cylinder11}
\end{figure}
\begin{figure}
\caption{(a) Density, (b) x direction velocity, (c) pressure, (d) temperature profile along stagnation line for $\mathrm{M}
\label{cylinder12}
\end{figure}
\begin{figure}
\caption{(a) Pressure, (b) temperature, (c) x directional velocity, and (d) y directional velocity contour for $\mathrm{M}
\label{cylinder21}
\end{figure}
\begin{figure}
\caption{(a) Density, (b) x direction velocity, (c) pressure, (d) temperature profile along stagnation line for $\mathrm{M}
\label{cylinder22}
\end{figure}
\begin{figure}
\caption{(a) Pressure, (b) temperature, (c) x directional velocity, and (d) y directional velocity contour for $\mathrm{M}
\label{cylinder31}
\end{figure}
\begin{figure}
\caption{(a) Density, (b) x direction velocity, (c) pressure, (d) temperature profile along stagnation line for $\mathrm{M}
\label{cylinder32}
\end{figure}
\begin{figure}
\caption{Number of simulation particles per cell for the cylinder flow with Mach number 20: (a)Kn=1.0; (b)Kn=$1.0\times10^{-4}
\label{cylinder-number}
\end{figure}
\begin{figure}
\caption{(a) Pressure, (b) temperature, (c) x directional velocity, and (d) y directional velocity contour of UGKWP method for $\mathrm{M}
\label{cylinder4}
\end{figure}
\begin{figure}
\caption{(a) Pressure, (b) temperature, (c) x directional velocity, and (d) y directional velocity contour of UGKWP method for $\mathrm{M}
\label{cylinder5}
\end{figure}
\begin{figure}
\caption{(a) Density, (b) x directional velocity, (c) y directional velocity, and (d) temperature contour for the lid-driven cavity flow at $\mathrm{Kn}
\label{cavity11}
\end{figure}
\begin{figure}
\caption{Left figure shows x directional velocity along $x=0.5$, and right figure shows y directional velocity along $y=0.5$ for lid-driven cavity flow at $\mathrm{Kn}
\label{cavity12}
\end{figure}
\begin{figure}
\caption{(a) Density, (b) x directional velocity, (c) y directional velocity, and (d) temperature contour for the lid-driven cavity flow at $\mathrm{Kn}
\label{cavity21}
\end{figure}
\begin{figure}
\caption{Left figure shows x directional velocity along $x=0.5$, and right figure shows y directional velocity along $y=0.5$ for lid-driven cavity flow at $\mathrm{Kn}
\label{cavity22}
\end{figure}
\begin{figure}
\caption{Stream line and velocity contour of the lid-driven cavity flow at $\mathrm{Re}
\label{cavity31}
\end{figure}
\begin{figure}
\caption{Left figure shows x directional velocity along $x=0.5$, and right figure shows y directional velocity along $y=0.5$ for lid-driven cavity flow at $\mathrm{Re}
\label{cavity32}
\end{figure}
\begin{figure}
\caption{Laminar boundary layer computation using UGKWP method at $\mathrm{M}
\label{layer1}
\end{figure}
\begin{figure}
\caption{Velocity profile of UGKWP method comparing to Navier-Stokes reference solution. (a) U-velocity distribution at different locations; (b) V-velocity distribution at different locations. Symbols: solution of UGKWP method, lines: Blasius solution.}
\label{layer2}
\end{figure}
\end{document}
|
\begin{document}
\begin{abstract}
Let $\mathcal{F}$ be a coherent $\mathcal{O}_X$-module over a complex manifold $X$, and let $G$ be a vector bundle on $X$. We describe an explicit isomorphism between two different representations of the global $\Ext$ groups $\Ext^k(\mathcal{F},G)$. The first representation is given by the cohomology of a twisted complex in the sense of Toledo and Tong, and the second one is obtained from the Dolbeault complex associated with $G$. A key tool that we introduce for explicitly describing this isomorphism is a residue current associated with a twisted resolution of $\mathcal{F}$.
\end{abstract}
\maketitle
\section{Introduction}
Let $X$ be a complex manifold, and let $\mathfrak{U}$ be a Leray cover. If $G$ is a vector bundle over $X$, then the sheaf cohomology $H^k(X,G)$ can be represented as \v{C}ech cohomology $\check{H}^k(\mathfrak{U},G)$ or as Dolbeault cohomology $H^{0,k}(X,G)$. If $(\rho_\alpha)$ is a partition of unity subordinate to $\mathfrak{U}$, then there is an explicit isomorphism between these representations, $\check{H}^k(\mathfrak{U},G) \overset{\cong}{\to} H^{0,k}(X,G)$, given by
\begin{equation} \label{eq:cechDolbeault}
[(c_{\alpha_0\dots \alpha_k})] \mapsto
\left[ \sum_{(\alpha_0,\dots,\alpha_k)}
\bar{\partial}\rho_{\alpha_0} \wedge \dots \wedge \bar{\partial}\rho_{\alpha_{k-1}}
\rho_{\alpha_k} c_{\alpha_0 \dots \alpha_k} \right].
\end{equation}
One way to prove this is to use the partition of unity to construct an appropriate homotopy operator on the Dolbeault-\v{C}ech-complex,
cf., for example, the analogous case of de Rham cohomology in \cite{BT}*{\S II.9}.
Recall that $H^k(X,G) \cong \Ext^k(\mathcal{O}_X,G)$, so the above result could be viewed as an explicit isomorphism between different representations of $\Ext^k(\mathcal{O}_X,G)$. In this paper, we will generalize this result to provide an explicit isomorphism between different representations of $\Ext^k(\mathcal{F},G)$, where $\mathcal{F}$ is any coherent $\mathcal{O}_X$-module.
In \cite{TT1}, Toledo and Tong introduced the notion of a twisting cochain $(F,a)$, which gives a \emph{twisted complex} $(C^\bullet(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)),D^0,D')$. A particular twisting cochain is a \emph{twisted resolution} of $\mathcal{F}$,
and the authors proved that the $k$th cohomology of the total complex with differential $D = D^0 + D'$ of the twisted complex computes $\Ext^k(\mathcal{F},G)$, i.e.,
\[
H^k\left(
\bigoplus_{p+r=\bullet}
C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))
\right) \cong
\Ext^k(\mathcal{F},G).
\]
If $\mathcal{F} = \mathcal{O}_X$, then this twisted complex is just the ordinary \v{C}ech complex with respect to $G$.
If $\mathcal{F}$ has a locally free resolution $F$, then the cohomology of the total complex of the twisted complex is the hypercohomology
of $\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F,G)$ as defined in
for example \cite{GH}*{p. 446}.
Analogously, there is also a generalization of Dolbeault cohomology to represent $\Ext^k(\mathcal{F},G)$ as the $k$th cohomology of $\Hom(\mathcal{F},\mathcal{C}^{0,\bullet}(G))$.
The main result of this paper is to give an explicit isomorphism between these two representations of $\Ext^k(\mathcal{F},G)$. The key tool that we introduce for explicitly describing this isomorphism is a residue current $R$ associated with a twisted resolution $(F,a)$.
The residue current $R$ is a current with values in $C^\bullet(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,F))$, which we describe in Section~\ref{section:residue}.
This construction is a generalization of the residue current associated with a locally free resolution of a sheaf, as introduced by Andersson and Wulcan
in \cite{AW1}, which in turn is a generalization of the classical Coleff--Herrera product introduced in \cite{CH}.
\begin{theorem}
\label{thm:main}
Let $\mathcal{F}$ be a coherent $\mathcal{O}_X$-module, and let $G$ be a holomorphic vector bundle on $X$. Let $(F,a)$ be a twisted resolution of $\mathcal{F}$,
and let $R$ be the associated residue current. Then there is an isomorphism between the above mentioned representations of $\Ext^k(\mathcal{F},G)$:
\begin{equation}
\label{eq:twisted-iso}
H^k
\left(
\bigoplus_{p+r=\bullet}
C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))
\right) \overset{\cong}{\to}
H^k(\Hom(\mathcal{F},\mathcal{C}^{0,\bullet}(G)))
\end{equation}
given by
\begin{equation} \label{eq:twisted-iso-formula}
[\xi] \mapsto
\sum_j \left[ (\bar{\partial} v)^j (\xi R)^j \right],
\end{equation}
where $\xi R$ denotes the composition of $\xi$ and $R$ defined by \eqref{eq:pairing}, $(\xi R)^j$ denotes the component of \v{C}ech degree $j$,
cf., Section~\ref{ssect:notation},
$v$ is the operator defined in Section~\ref{ssection:vdef}, and $(\bar{\partial} v)^j$ denotes the composition $\bar{\partial}\circ v$
repeated $j$ times.
\end{theorem}
In case $\mathcal{F}$ is locally free, and one identifies $\mathcal{F}$ as a twisting cochain concentrated in degree $0$, then
the associated residue current $R$ equals the identity, and the isomorphism \eqref{eq:twisted-iso-formula} becomes
$\xi \mapsto (\bar{\partial} v)^k \xi$, which reduces to the isomorphism \eqref{eq:cechDolbeault}.
Theorem~\ref{thm:main} may be considered as a global version of a corresponding statement for sheaf Ext, $\mathop{\mathcal{E}\!\mathit{xt}}\nolimits^k(\mathcal{F},\omega)$, where an explicit isomorphism between different representations is provided by residue currents, see Proposition~\ref{prop:Exts}. This result is due to Andersson, \cite{And1}, generalizing earlier work by Dickenstein--Sessa, \cite{DS}, in the case of $\mathcal{F}=\mathcal{O}_Z$ being the structure sheaf of a complete intersection $Z$.
One situation in which the different realizations of Ext groups in Theorem~\ref{thm:main} appear is in connection with Serre duality, as originally proven for locally free sheaves, in \cite{Serre}, and extended to coherent $\mathcal{O}_X$-modules by work of Malgrange, \cite{MalSerre}. This version of Serre duality concerns a perfect pairing between Ext groups and sheaf cohomology, and for the two different realizations of Ext in Theorem~\ref{thm:main}, and corresponding realizations for sheaf cohomology, one may define two different such pairings. It turns out that the isomorphism \eqref{eq:twisted-iso} is compatible with these two realizations
of the pairing, as is elaborated on in Section~\ref{sect:serre}.
Twisted resolutions generalize locally free resolutions, and loosely speaking consist of local free resolutions glued
together in an appropriate way. In contrast to locally free resolutions one may always find globally a twisted resolution of
any coherent $\mathcal{O}_X$-module $\mathcal{F}$.
The residue current we introduce associated with a twisted resolution consists of one part which
is the residue currents as defined in \cite{AW1} associated with the local free resolutions that are glued together, and
additional parts so that the whole residue current has various desirable properties, cf.,
Proposition-Definition~\ref{prop:URdef} and Theorem~\ref{thm:R}.
This paper is organized as follows. We begin, in Section~\ref{section:twisting-cochains}, by giving a brief introduction to the notion of twisting cochains. Here we also make some small adaptations so that we can define currents associated with these objects. Residue currents are best described using the language of almost semi-meromorphic and pseudomeromorphic currents. In Section~\ref{section:pseudo}, we recall the necessary definitions and results about these currents that we shall need in this paper. In Section~\ref{section:residue} we define a residue current associated with a twisting cochain, and we prove that it satisfies certain properties, which we then use to prove our main theorem in Section~\ref{section:main-theorem}. Finally, as mentioned above, in Section~\ref{sect:serre}, we discuss how Theorem~\ref{thm:main} fits into the context of Serre duality.
\section{Twisting cochains}
\label{section:twisting-cochains}
Throughout this paper, $X$ will denote a complex manifold of dimension $n$, and $\mathfrak{U} = (\mathcal{U}_\alpha)$ will denote a covering of $X$ by Stein open sets.
We will use the notation $\mathcal{U}_{\alpha_0 \dots \alpha_p} \vcentcolon= \mathcal{U}_{\alpha_0} \cap \dots \cap \mathcal{U}_{\alpha_p}$.
In this section, we will mainly recall the relevant parts about twisting cochains, twisted resolutions and twisted complexes from \cite{TT1}.
This material is essentially the same as described in Sections 1 and 2 of \cite{TT1}.
In order to incorporate this theory with the theory of residue currents from \cite{AW1}, and get our sign convention consistent
with both \cite{TT1} and \cite{AW1}, we consider current-valued analogues of the objects described in \cite{TT1}.
Let $F \vcentcolon= (F_\alpha)$ and $G \vcentcolon= (G_\alpha)$ be families of bounded graded holomorphic vector bundles over $\mathcal{U}_\alpha$.
Recall that
\[
\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_\alpha,G_\beta) =
\bigoplus_j
\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_\alpha^{-j},G_\beta^{-j+r}).
\]
Let $\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_\alpha,G_\beta))$ denote the sheaf over $\mathcal{U}_{\alpha \beta}$ of $\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_\alpha,G_\beta)$-valued $(0,q)$-currents.
We define a bilinear pairing
\[
\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_\beta,G_\gamma)) \times
\mathcal{C}^{0,q'}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{r'}(E_\alpha,F_\beta)) \to
\mathcal{C}^{0,q+q'}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{r+r'}(E_\alpha,G_\gamma))
\]
over $\mathcal{U}_{\alpha \beta \gamma}$.
For decomposable sections $\eta \otimes f$ and $\tau \otimes g$, i.e.,
where $\eta$ and $\tau$ are $(0,q)$ and $(0,q')$-currents, respectively,
and $f$ and $g$ are sections of $\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_\beta,G_\gamma)$ and
$\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{r'}(E_\alpha,F_\beta)$, respectively,
the pairing is defined as
\begin{equation}
\label{eq:prod1}
(\eta \otimes f)(\tau \otimes g) \vcentcolon=
(-1)^{rq'}
(\eta \wedge \tau) \otimes fg,
\end{equation}
provided that either $\eta$ or $\tau$ is smooth.
This pairing is defined in a way consistent with the sign convention of the super structure
in \cite{AW1}*{Section 2}.
We will consider a sort of \v{C}ech cochains with coefficients in these sheaves. We define
\begin{equation}
\label{eq:cechCurrentHom}
C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))) \vcentcolon=
\prod_{(\alpha_0, \dots, \alpha_p)}
\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_{\alpha_p},G_{\alpha_0}))
({\mathcal{U}_{\alpha_0 \dots \alpha_p}}).
\end{equation}
For an element $f \in C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))$, we define its \emph{(total) degree} as $\deg f = p+q+r$,
and we call $p$ the \emph{\v{C}ech degree}, $q$ the \emph{current degree} and $r$ the \emph{Hom degree}.
In this paper the families of vector bundles that we shall consider will be concentrated in nonpositive degree. We use this convention to be consistent with the conventions in \cite{TT1}. In \cite{AW1}, complexes of vector bundles are considered to be concentrated in nonnegative degree, and the differential is assumed to be decreasing the degree, in contrast to \cite{TT1}.
We define a bilinear pairing
\begin{multline}
\label{eq:pairing}
C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))) \times
C^{p'}(\mathfrak{U},\mathcal{C}^{0,q'}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{r'}(E,F))) \\ \to
C^{p+p'}(\mathfrak{U},\mathcal{C}^{0,q+q'}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{r+r'}(E,G))),
\end{multline}
which maps $(f,g)$ to the product $fg$ defined by
\begin{equation}
\label{eq:prod2}
(fg)_{\alpha_0 \dots \alpha_{p+p'}} \vcentcolon=
(-1)^{(q+r)p'}
f_{\alpha_0 \dots \alpha_p} g_{\alpha_p \dots \alpha_{p+p'}},
\end{equation}
where the product on the right-hand side is defined by \eqref{eq:prod1}.
We will consider three differentials acting on $C^\bullet(\mathfrak{U},\mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)))$,
$D$, $\delta$ and $\bar{\partial}$, and the signs in \eqref{eq:prod1} and \eqref{eq:prod2} are chosen in a way that
makes all these operators into antiderivations with respect to the product \eqref{eq:prod2}.
Informally, one could consider the elements to have the ``\v{C}ech part'' to the left, the ``current part'' in the middle, and the ``$\mathop{\mathcal{H}\!\mathit{om}}\nolimits$ part'' to the right,
and these different parts anticommute according to the degree in the products \eqref{eq:prod1} and \eqref{eq:prod2}.
We will now describe how the $\bar{\partial}$-operator and an analogue of the \v{C}ech coboundary acts on $C^\bullet(\mathfrak{U},\mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)))$.
We let the $\bar{\partial}$-operator act as an operator of degree 1 on $C^\bullet(\mathfrak{U},\mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)))$ by
\[
(\bar{\partial} f )_{\alpha_0 \dots \alpha_p} \vcentcolon=
(-1)^p \bar{\partial} f_{\alpha_0 \dots \alpha_p}.
\]
With this definition we have that $\bar{\partial}(fg) = (\bar{\partial} f)g + (-1)^{\deg f} f (\bar{\partial} g)$, and as usual $\bar{\partial}^2 = 0$.
Next we define an operator of degree 1,
\[
\delta: C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))) \to
C^{p+1}(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))),
\]
by
\[
(\delta f)_{\alpha_0 \dots \alpha_{p+1}} \vcentcolon=
\sum_{k=1}^p (-1)^k
f_{\alpha_0 \dots \widehat{\alpha}_k \dots \alpha_{p+1}}
|_{\mathcal{U}_{\alpha_0 \dots \alpha_{p+1}}}.
\]
Note that $\delta$ is similar to the usual \v{C}ech coboundary, but in the sum, it is necessary to omit $f_{\alpha_1 \dots \alpha_{p+1}}$ and $f_{\alpha_0 \dots \alpha_p}$ since these do not belong to $\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_{\alpha_{p+1}},F_{\alpha_0})$. However, we still have that $\delta$ is a differential and an antiderivation with respect to the product \eqref{eq:prod2}, i.e., $\delta^2 = 0$, and
\[
\delta(fg) = (\delta f) g + (-1)^{\deg f} f (\delta g).
\]
We are now ready to define the notion of a twisting cochain. We define
\[
C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)) \vcentcolon=
\prod_{(\alpha_0, \dots, \alpha_p)}
\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_{\alpha_p},G_{\alpha_0})
({\mathcal{U}_{\alpha_0 \dots \alpha_p}}).
\]
Note that this is the subgroup of $\bar{\partial}$-closed elements of $C^p(\mathfrak{U},\mathcal{C}^{0,0}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))$.
This subgroup is the group of twisted \v{C}ech cochains considered in \cite{TT1}, and when we restrict to this subgroup,
the sign conventions here coincide with the ones in \cite{TT1}.
\begin{definition}
\label{def:twisting-cochain}
A \emph{twisting cochain} $a \in C^\bullet(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,F))$ is an element $a = \sum_{k \geq 0} a^k$, where $a^k \in C^k(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{1-k}(F,F))$,
such that
\begin{equation}
\label{eq:twisting-cochain}
\delta a + aa = 0,
\end{equation}
and $a_{\alpha \alpha}^1 = \id_{F_\alpha}$ for all $\alpha$. For simplicity we shall simply refer to the pair $(F,a)$ as a twisting cochain.
\end{definition}
Recall that a holomorphic vector bundle is defined by a 1-cocycle. A twisting cochain is a generalization of this. By \eqref{eq:twisting-cochain}, $a$ must satisfy
\begin{align}
&a_\alpha^0 a_\alpha^0 = 0 \label{eq:twisted0} \\
&a_\alpha^0 a_{\alpha \beta}^1 = a_{\alpha \beta}^1 a_\beta^0 \label{eq:twisted1} \\
&a_{\alpha \gamma}^1 - a_{\alpha \beta}^1 a_{\beta \gamma}^1 =
a_\alpha^0 a_{\alpha \beta \gamma}^2 + a_{\alpha \beta \gamma}^2 a_\gamma^0 \label{eq:twisted2}.
\end{align}
The first equation says that $(F_\alpha,a^0)$ is a complex, the second says that $a_{\alpha \beta}^1$ defines a chain map $(F_\beta|_{\mathcal{U}_{\alpha \beta}},a^0) \to (F_\alpha|_{\mathcal{U}_{\alpha \beta}},a^0)$, and the third says that, over $\mathcal{U}_{\alpha \beta \gamma}$, $a_{\alpha \gamma}^1$ and $a_{\alpha \beta}^1 a_{\beta \gamma}^1$ are chain homotopic, with the homotopy given by $a^2_{\alpha\beta\gamma}$. In particular, from the condition $a_{\alpha \alpha}^1 = \id_{F_\alpha}$, it follows that $a_{\alpha \beta}^1$ and $a_{\beta \alpha}^1$ are chain homotopy inverses to each other.
Thus for each $\alpha$ we have cohomology sheaves $\mathcal{H}^\bullet_{a_\alpha^0}(F_\alpha)$ over $\mathcal{U}_\alpha$, and over each intersection $\mathcal{U}_{\alpha \beta}$ we have an isomorphism $H(a_{\alpha \beta}^1): \mathcal{H}^\bullet_{a_\beta^0}(F_\beta)|_{\mathcal{U}_{\alpha \beta}} \to \mathcal{H}^\bullet_{a_\alpha^0}(F_\alpha)|_{\mathcal{U}_{\alpha \beta}}$ such that over each $\mathcal{U}_{\alpha \beta \gamma}$, $H(a_{\alpha \beta}^1) H(a_{\beta \gamma}^1) = H(a_{\alpha \gamma}^1)$. We denote by $\mathcal{H}_a^\bullet$ the sheaf that we obtain by gluing the sheaves $\mathcal{H}^\bullet_{a_\alpha^0}$ via these isomorphisms.
The twisting cochains that are of interest in this paper arise in the following way.
\begin{definition}[A twisted resolution of $\mathcal{F}$]
Let $\mathcal{F}$ be a coherent $\mathcal{O}_X$-module.
By the syzygy theorem one can find a cover $\mathfrak{U}$ such that for each $\alpha$ there exists a free resolution of $\mathcal{F}|_{\mathcal{U}_\alpha}$,
\[
\dots
\overset{a_\alpha^0}{\longrightarrow} F_\alpha^{-1}
\overset{a_\alpha^0}{\longrightarrow} F_\alpha^0
\longrightarrow \mathcal{F}|_{\mathcal{U}_\alpha}
\longrightarrow 0,
\]
of length at most $\dim X$. Over each intersection $\mathcal{U}_{\alpha \beta}$ one can find a chain map $a_{\alpha \beta}^1: (F_\beta|_{\mathcal{U}_{\alpha \beta}},a_\beta^0) \to (F_\alpha|_{\mathcal{U}_{\alpha \beta}},a_\alpha^0)$ that extends the identity morphism on $\mathcal{F}|_{\mathcal{U}_{\alpha \beta}}$, and which can be chosen to be the identity if $\alpha = \beta$. Since $a_{\alpha \gamma}^1$ and $a_{\alpha \beta}^1 a_{\beta \gamma}^1$ are chain maps $(F_\gamma|_{\mathcal{U}_{\alpha \beta \gamma}},a_\gamma^0) \to (F_\alpha|_{\mathcal{U}_{\alpha \beta \gamma}},a_\alpha^0)$ that extend the identity morphism on $\mathcal{F}|_{\mathcal{U}_{\alpha \beta \gamma}}$, there exists a chain homotopy $a_{\alpha \beta \gamma}^2$ between these maps. As explained in \cite{OTT}*{Section~1.3}, one can proceed inductively to construct a twisting cochain $a = \sum_k a^k$. Note that $\mathcal{H}_a^k = 0$ if $k > 0$ and $\mathcal{H}_a^0 \cong \mathcal{F}$. We shall refer to $(F,a)$ as a \emph{twisted resolution of $\mathcal{F}$}.
\end{definition}
\begin{example} \label{ex:globalRes}
Assume that $\mathcal{F}$ actually has a global locally free resolution $(E,\varphi)$ of finite length.
Then we may in the construction above always choose restrictions of the given resolution as the local resolutions,
i.e., the twisted resolution is defined by $F_\alpha \vcentcolon= E|_{\mathcal{U}_\alpha}$, $a = a^0+a^1$, where
$a^0_\alpha = \varphi|_{\mathcal{U}_\alpha}$ and $a^1_{\alpha\beta} = \id_{E|_{\mathcal{U}_{\alpha\beta}}}$.
Conversely, it follows by \eqref{eq:twisted2} that for any twisted resolution $(F,a)$ of $\mathcal{F}$ with $a^2=0$,
$a^1$ defines a $1$-cocycle for each $F^k_\bullet$, and may thus be used to define graded holomorphic
vector bundles, and by \eqref{eq:twisted0} and \eqref{eq:twisted1}, these graded vector bundles may be turned into a complex
with the differential defined by $a^0$.
\end{example}
Consider two twisting cochains $(F,a)$ and $(G,b)$. We define an operator $D$ of degree 1 on $C^\bullet(\mathfrak{U},\mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)))$,
\[
Df \vcentcolon= \delta f + bf - (-1)^{\deg f} fa.
\]
We have that $D^2=0$, and
\begin{equation} \label{eq:Dderivation}
D(fg) = (Df)g + (-1)^{\deg f} f (Dg).
\end{equation}
Note that if $(F,a)$ and $(G,b)$ are global complexes of vector bundles as in Example~\ref{ex:globalRes},
and $a^1=\id=b^1$, then the part $D^1$ of $D$ of \v{C}ech degree $1$, $D^1 f = \delta f + b^1 f - (-1)^{\deg f} f a^1$,
equals the usual \v{C}ech differential, i.e., $a^1$ and $b^1$ ``compensate'' for indices $k=0$ and $k=p+1$ that
are not included in the definition of $\delta$ above. In particular, in this situation,
$H^k(\bigoplus_{p+r=\bullet} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))$ equals the usual hypercohomology,
as in for example \cite{GH}*{p. 446}.
We have that $\bar{\partial} D = -D \bar{\partial}$. We combine $D$ and $\bar{\partial}$ into an operator $\nabla = D - \bar{\partial}$ of degree 1 on $C^\bullet(\mathfrak{U},\mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)))$. Clearly, we have that $\nabla^2 = 0$, and
\begin{equation} \label{eq:nablaDerivation}
\nabla(fg) = (\nabla f) g + (-1)^{\deg f} f (\nabla g).
\end{equation}
If $G$ is a vector bundle, then we can view it as a family of graded vector bundles concentrated in degree 0, $G = (G_\alpha)$, where $G_\alpha = G|_{\mathcal{U}_\alpha}$, and we have that a twisting cochain for $G$ is given by $b = b^1 = \id$.
We will tacitly do this kind of identification when for example $(F,a)$ is a twisting cochain and $G$ is a vector bundle,
and we consider groups like $C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))$ or $C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))$, as for example
in Theorem~\ref{thm:main}.
\subsection{Notation for parts of an element $f \in C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))$}
\label{ssect:notation}
We denote by $f_k^\ell$ the parts of $f$ that belong to
\[
\prod_{(\alpha_0, \dots, \alpha_p)} \mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_{\alpha_p}^{-\ell},G_{\alpha_0}^{-k}))
(\mathcal{U}_{\alpha_0 \dots \alpha_p}),
\]
i.e., when using both a superscript and a subscript, we pick out morphisms between bundles
in certain degrees. We will also use the notation \[f^\ell_\bullet \vcentcolon= \sum_k f^\ell_k.\]
We say that \emph{$f$ takes values in $\Hom(F^{-\ell},F^{-k})$} if $f=f^\ell_k$,
and that \emph{$f$ takes values in $\Hom(F^{-\ell},F)$} if $f=f^\ell_\bullet$.
We denote by $f^p$ the parts of $f$ that belong to
\[
\prod_{(\alpha_0, \dots, \alpha_p)} \mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_{\alpha_p},G_{\alpha_0}))
({\mathcal{U}_{\alpha_0 \dots \alpha_p}}),
\]
i.e., when using a single superscript, we pick out a certain \v{C}ech degree.
\section{Pseudomeromorphic and almost semi-meromorphic currents}
\label{section:pseudo}
We will use the language of residues of almost semi-meromorphic currents
as introduced in \cite{AW3} to describe the currents we study in this paper.
Let $s$ be a holomorphic section of a Hermitian holomorphic line bundle $L$ on $X$.
The \emph{principal value current} $[1/s]$ can be defined as $[1/s] \vcentcolon= \lim_{\epsilon \to 0} \chi(|s|^2/\epsilon)\frac{1}{s}$,
where $\chi : \mathbb{R}_{\geq 0} \to \mathbb{R}_{\geq 0}$ is a smooth cut-off function, i.e., $\chi(t) \equiv 0$ for $t \ll 1$ and $\chi(t) \equiv 1$ for $t \gg 1$.
A current is \emph{semi-meromorphic} if it is of the form $[\omega/s] \vcentcolon= \omega[1/s]$, where $\omega$ is a smooth form with values in $L$.
A current $a$ is \emph{almost semi-meromorphic} on $X$, written $a \in ASM(X)$, if there is a modification $\pi : X' \to X$ such that
$$a=\pi_*(\omega/s),$$ where $\omega/s$ is a semi-meromorphic current in $X'$.
The almost semi-meromorphic currents on $X$ form an algebra over
smooth forms.
Almost semi-meromorphic currents are special cases of so-called pseudomeromorphic currents, as introduced in \cite{AW2}.
The class of pseudomeromorphic currents is closed under multiplication with smooth forms and under $\bar{\partial}$.
One important property of pseudomeromorphic currents is that they satisfy the following \emph{dimension principle}.
\begin{proposition} \label{prop:dimPrinciple}
Let $T$ be a pseudomeromorphic $(*,q)$-current on $X$ with support on a subvariety $Z$.
If $\codim Z > q$, then $T = 0$.
\end{proposition}
Given a pseudomeromorphic current $T$ and a subvariety $V$, a restriction of $T$ to $X\setminus V$ was introduced in \cite{AW2},
which is a pseudomeromorphic current on $X$ defined by $\mathbf{1}_{X \setminus V} T \vcentcolon= \lim_{\epsilon \to 0} \chi(|F|^2/\epsilon) T$, where $\chi$
is a cut-off function as above, and $F$ is a section of a holomorphic vector bundle such that $V = \{ F = 0 \}$.
A pseudomeromorphic current $T$ on $X$ is said to have the standard extension property (SEP) if $\mathbf{1}_{X \setminus V} T = T$
for any subvariety $V$ of positive codimension.
It follows from the dimension principle and the fact that the restriction commutes with multiplication with smooth forms that
almost semi-meromorphic currents have the SEP. In particular, if $\alpha$ is a smooth form on $X \setminus V$, and $\alpha$ has
an extension as an almost semi-meromorphic current $a$ on $X$, then the extension is given by
\begin{equation} \label{eq:asmExtension}
a = \lim_{\epsilon \to 0} \chi(|F|^2/\epsilon) \alpha.
\end{equation}
Let $a$ be an almost semi-meromorphic current on $X$. Let $Z$ be the smallest subvariety of $X$ of positive codimension such that $a$
is smooth outside of $Z$. By \cite{AW3}*{Proposition~4.16}, $\mathbf{1}_{X \setminus Z} \bar{\partial} a$ is almost semi-meromorphic on $X$.
The \emph{residue} $R(a)$ of $a$ is defined by
\begin{equation*}
R(a) \vcentcolon= \bar{\partial} a - \mathbf{1}_{X \setminus Z}\bar{\partial} a.
\end{equation*}
Note that
\begin{equation} \label{eq:resSupport}
\supp R(a) \subseteq Z.
\end{equation}
Since $a$ is almost semi-meromorphic, and thus has the SEP, it follows
by \eqref{eq:asmExtension} that
\begin{equation}
\label{eq:residue}
R(a)=\lim_{\epsilon \to 0}
\left(\bar{\partial}(\chi_\epsilon a) - \chi_\epsilon \bar{\partial} a \right)
= \lim_{\epsilon \to 0} \bar{\partial}\chi_\epsilon \wedge a,
\end{equation}
where $\chi$ is as above, $F$ is a section of a vector bundle such that $\{ F = 0 \} \supseteq Z$, and $F \not\equiv 0$
and $\chi_\epsilon = \chi(|F|^2/\epsilon)$.
It follows directly from for example \eqref{eq:residue} that if $\psi$ is a smooth form, then
\begin{equation} \label{eq:residueSmooth}
R(\psi \wedge a) = (-1)^{\deg \psi} \psi\wedge R(a).
\end{equation}
For elements in \eqref{eq:cechCurrentHom}, we will say that they are almost semi-meromorphic and pseudomeromorphic, respectively, if their components are. Moreover, the residue of an element in \eqref{eq:cechCurrentHom} is defined as the residues of the components.
\section{A residue current associated with a twisted resolution}
\label{section:residue}
In \cite{AW1}, Andersson and Wulcan constructed a residue current from a locally free resolution of a sheaf of positive codimension. In this section we will generalize this construction to construct a residue current from a twisted resolution of a coherent $\mathcal{O}_X$-module.
Throughout this section, let $(F,a)$ be a twisted resolution of a coherent $\mathcal{O}_X$-module $\mathcal{F}$.
We will tacitly assume that the bundles $F_\alpha^k$ are equipped with Hermitian metrics.
Let $Z$ be the smallest subvariety of $X$ such that $\mathcal{F}|_{X \setminus Z}$ is a vector bundle.
For each $\alpha$, let $\mathcal{U}_\alpha' \vcentcolon= \mathcal{U}_\alpha \setminus Z$, and define the cover $\mathfrak{U}' \vcentcolon= (\mathcal{U}_\alpha')$ of $X \setminus Z$. We define an element $\sigma^0 \in C^0(\mathfrak{U}',\mathcal{C}^{0,0}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{-1}(F,F)))$ in the following way. For each $\alpha$, let $\sigma_\alpha^0$ be the minimal right-inverse of $a_\alpha^0$ on $\mathcal{U}_\alpha'$, i.e., the Moore--Penrose inverse, which can be defined by the properties $a_\alpha^0 \sigma_\alpha^0 a_\alpha^0 = a_\alpha^0$, $\sigma_\alpha^0|_{(\im a_\alpha^0)^\perp}= 0$, and $\im \sigma_\alpha^0 \perp \ker a_\alpha^0$. From the last two properties it follows that $(\sigma^0)^2 = 0$.
We write $a = a^0 + a'$, i.e., $a' = \sum_{k\geq 1} a^k$.
Define
\begin{equation} \label{eq:sigmadef}
\sigma \vcentcolon= \sigma^0(\id+a' \sigma^0)^{-1} = \sigma^0-\sigma^0 a'\sigma^0 + \sigma^0 a'\sigma^0a'\sigma^0 - \dots,
\end{equation}
where the sum in the right-hand side is finite since $a'\sigma^0$ has negative Hom degree, and thus is nilpotent since the complexes have finite length.
Throughout this section, $\id$ denotes the identity element on $F$ in $C^0(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^0(F,F))$ or $C^0(\mathfrak{U}',\mathop{\mathcal{H}\!\mathit{om}}\nolimits^0(F,F))$.
Since $(\sigma^0)^2 = 0$, it follows that $\sigma^2 = 0$.
\begin{propdef}
\label{prop:URdef}
Define
\begin{equation} \label{eq:udef}
u \vcentcolon= \sigma(\id-\bar{\partial}\sigma)^{-1} = \sigma+\sigma\bar{\partial}\sigma+\sigma(\bar{\partial}\sigma)^2+ \dots.
\end{equation}
Then
\begin{equation} \label{eq:Udef}
U \vcentcolon= \lim_{\epsilon \to 0} \chi_\epsilon u
\end{equation}
is an almost semi-meromorphic extension of $u$. Moreover,
\begin{equation} \label{eq:Rdef}
R \vcentcolon= \id - \nabla U
\end{equation}
is pseudomeromorphic and $\nabla$-closed, i.e., $\nabla R = 0$.
\end{propdef}
We shall refer to $R$ as the residue current associated with $(F,a)$.
Note that $\sigma^0$ has degree $-1$, and $a'$ has degree $1$, so
it follows by \eqref{eq:sigmadef}, \eqref{eq:udef}, and \eqref{eq:Udef}
that $\sigma$, $u$, and $U$ have degree $-1$.
Since $\nabla$ is an operator of degree $1$, it follows that $R$ has degree $0$.
\begin{proof}
On $\mathcal{U}_\alpha'$, $\sigma^0_\alpha$ coincides with the form $\sigma$ associated with $F_\alpha^\bullet$
defined in \cite{AW1}. As explained in \cite{AW3}*{Example 4.18}, this form has
an extension as an almost semi-meromorphic current on $\mathcal{U}_\alpha$. By \eqref{eq:sigmadef}, $\sigma$
is a sum of products of $\sigma^0$ and $a'$, and since the almost semi-meromorphic currents
form an algebra over smooth forms and may be restricted to open subsets, and $a'$ is holomorphic,
it follows that $\sigma$ has an almost semi-meromorphic extension.
In addition, by \cite{AW3}*{Proposition~4.16} (or the argument in \cite{AW3}*{Example 4.18}), $\bar{\partial}\sigma$ has an
almost semi-meromorphic extension, so by \eqref{eq:udef}, it follows that $u$ has an
almost semi-meromorphic extension. By \eqref{eq:asmExtension}, this extension $U$ is given by
\eqref{eq:Udef}.
We have that $U$ is almost semi-meromorphic, and thus pseudomeromorphic.
Thus $\nabla U$ is also pseudomeromorphic since the class of pseudomeromorphic currents is
preserved by $\bar{\partial}$, multiplication with smooth forms, and restrictions to open subsets.
The same then holds for $R$ by \eqref{eq:Rdef}.
Since $\nabla \id = 0$ and $\nabla^2=0$, it follows that $\nabla R = 0$.
\end{proof}
The main aim of this section is to prove that the residue current has the following properties.
\begin{theorem}
\label{thm:R}
Let $(F,a)$ be a twisted resolution of a coherent $\mathcal{O}_X$-module $\mathcal{F}$, and let $R$ be its associated residue current. Then
\begin{enumerate}[\normalfont(a)]
\item
$R$ takes values in $\Hom(F^0,F)$,
\item
$R_k^0 = 0$ for $k < \codim \mathcal{F}$,
\item
$R a^0 = 0$, and
\item
$\supp R \subseteq \supp \mathcal{F}$.
\end{enumerate}
\end{theorem}
\begin{remark}
For simplicity, we will only consider residue currents associated with a coherent $\mathcal{O}_X$-module,
since this assumption is crucial in Theorem~\ref{thm:R}, while with minor adaptations, one could in
Proposition-Definition~\ref{prop:URdef} consider residue currents associated with arbitrary twisting cochains,
not necessarily twisted resolutions of coherent $\mathcal{O}_X$-modules.
\end{remark}
For the proof of Theorem~\ref{thm:R} we will need the following result, which provides a useful decomposition of the residue current.
\begin{proposition} \label{prop:Rasm}
Let $R$ be the current defined by \eqref{eq:Rdef}. Then, $R$ can be decomposed as
\begin{equation*}
R = R(U) + R',
\end{equation*}
where $R(U)$ is the residue in the sense of \eqref{eq:residue} of the current $U$ defined by \eqref{eq:Udef},
and $R'$ is almost semi-meromorphic with values in $\Hom(F^0,F)$.
If $\codim \mathcal{F} > 0$, then $R' = 0$.
\end{proposition}
The proof of this proposition in turn relies on the following lemma, whose proof we postpone.
\begin{lemma} \label{lmaQ}
Let $Q \vcentcolon= \id -D\sigma$. Then $Q$ and $\nabla Q$ take values in
$\Hom(F^0,F)$ and
\begin{equation} \label{eq:nablau}
\nabla u = \id - Q+u\nabla Q.
\end{equation}
Furthermore, if $\codim \mathcal{F} > 0$, then $Q = 0$.
\end{lemma}
\begin{proof}[Proof of Proposition~\ref{prop:Rasm}]
On $X\setminus Z$, $U=u$, and consequently it follows by \eqref{eq:Rdef}
and \eqref{eq:nablau} that $R|_{X\setminus Z} = Q-u\nabla Q$.
Since almost semi-meromorphic currents have the SEP,
$R'$ must be the almost semi-meromorphic extension of $Q-u\nabla Q$, provided it exists.
Indeed, this extension exists, since by Proposition-Definition~\ref{prop:URdef}, $u$ has an extension
as an almost semi-meromorphic current, and by the same argument, the same holds for $Q$ and $\nabla Q$,
and consequently also for $Q-u\nabla Q$.
By \eqref{eq:asmExtension}, we thus have that
\begin{equation} \label{eq:Rprim}
R' \vcentcolon= \lim_{\epsilon \to 0} \chi_\epsilon(Q-u\nabla Q).
\end{equation}
By Lemma~\ref{lmaQ}, it then follows that $R'$ takes values in $\Hom(F^0,F)$.
Thus, by \eqref{eq:Udef}, \eqref{eq:Rdef}, \eqref{eq:nablau}, \eqref{eq:Rprim},
and \eqref{eq:residue}, it follows that
\[
R-R' = \lim_{\epsilon \to 0} \bar{\partial}\chi_\epsilon \wedge u = R(U).
\]
Finally, if $\codim \mathcal{F} > 0$, then $Q=0$ by the last part of Lemma~\ref{lmaQ},
and consequently, $R' = 0$ by \eqref{eq:Rprim}.
\end{proof}
In the proof of Theorem~\ref{thm:R}, we will make use of singularity subvarieties associated
with a twisted resolution, which is a straightforward generalization of the corresponding
subvarieties considered for a global resolution in for example \cite{AW1}*{Section~3}.
For a fixed $\alpha$, let $Z_\alpha^k \subseteq \mathcal{U}_\alpha$ denote the analytic set where $a_\alpha^0: F_\alpha^{-k} \to F_\alpha^{-(k-1)}$ does not have optimal rank.
The $Z_\alpha^k$ are independent of the choice of resolution,
since they coincide (locally) with the corresponding sets defined for any minimal resolution.
It follows that $Z^k = \cup_\alpha Z_\alpha^k$ is a subvariety of $X$.
Note that $Z^1=Z$, where $Z$ is defined above as the smallest subvariety such that $\mathcal{F}|_{X\setminus Z}$ is a vector bundle. By \cite{Eis}*{Theorem~20.9},
\[
\codim Z_\alpha^k \geq k,
\]
and by \cite{Eis}*{Corollary 20.12},
\[
Z_\alpha^{k+1} \subseteq Z_\alpha^k.
\]
It follows that
\begin{equation} \label{eq:codimZk}
\codim Z^k \geq k,
\end{equation}
and
\begin{equation} \label{eq:Zkinclusion}
Z^{k+1} \subseteq Z^k.
\end{equation}
Note that $(\sigma^0)^\ell_{\ell+1}$ is smooth outside of $Z^{\ell+1}$.
Thus, it follows by \eqref{eq:sigmadef} and \eqref{eq:Zkinclusion} that
\begin{equation} \label{eq:sigmaSmooth}
\sigma^\ell_\bullet \text{ is smooth outside of $Z^{\ell+1}$.}
\end{equation}
\begin{proof}[Proof of Theorem~\ref{thm:R}]
We begin by proving part (a), i.e., that $R$ takes values in $\Hom(F^0,F)$,
which is equivalent to proving that $R^\ell_\bullet = 0$ for $\ell \geq 1$.
By Proposition~\ref{prop:Rasm}, $R^\ell_\bullet = R(U)^\ell_\bullet$ for $\ell \geq 1$.
We will prove that $R(U)^\ell_\bullet = 0$ for $\ell=1$, the case $\ell>1$ follows in the same way.
By \eqref{eq:udef} and \eqref{eq:Udef}, it follows that $R(U)^1_\bullet = \sum_{m \geq 1} R([\sigma(\bar{\partial}\sigma)^{m-1}])^1_\bullet$. Here and throughout the proof we use the brackets to emphasize that we are considering the almost semi-meromorphic extension.
Thus, it suffices to prove that $R([\sigma(\bar{\partial}\sigma)^{m-1}])^1_\bullet = 0$ for $m \geq 1$, which we will prove by induction over $m$.
Applying $\bar{\partial}$ to $\sigma^2=0$ and using that $\sigma$ has degree $-1$, we get that $0 = \bar{\partial}(\sigma^2) = (\bar{\partial}\sigma)\sigma-\sigma\bar{\partial}\sigma$, so $\sigma(\bar{\partial}\sigma)^{m-1} = (\bar{\partial}\sigma)^{m-1} \sigma$,
and we may thus equivalently prove that
\begin{equation} \label{eq:R1Induction}
R([(\bar{\partial}\sigma)^{m-1}\sigma])^1_\bullet = 0
\end{equation}
for $m \geq 1$.
By \eqref{eq:sigmaSmooth}, $\sigma^1_\bullet$ is smooth outside of $Z^2$, so $\supp R([\sigma])^1_\bullet \subseteq Z^2$ by \eqref{eq:resSupport}.
Thus, \eqref{eq:R1Induction} holds in the basic case $m=1$ by the dimension principle, Proposition~\ref{prop:dimPrinciple}, and \eqref{eq:codimZk}
since $R([\sigma])^1_\bullet$ has bidegree $(0,1)$.
We may thus assume by induction that \eqref{eq:R1Induction} holds for $m$.
Note that since $\sigma^0$ takes values in $\bigoplus_\ell \Hom(F^{-\ell},F^{-(\ell+1)})$,
and $a'$ takes values in $\bigoplus_{\ell \leq k} \Hom(F^{-\ell},F^{-k})$, it follows that
$\sigma$ takes values in $\bigoplus_{\ell < k} \Hom(F^{-\ell},F^{-k})$.
In particular, $R([(\bar{\partial}\sigma)^{m-1}\sigma])^1_\bullet$ takes values in
$\bigoplus_{k \geq m+1} \Hom(F^{-1},F^{-k})$.
In view of \eqref{eq:sigmaSmooth} and \eqref{eq:residueSmooth}, we thus have outside of $Z^{m+2}$ that
\[ R([(\bar{\partial}\sigma)^m\sigma])^1_\bullet = \sum_{\ell \geq m+1} (\bar{\partial}\sigma)^\ell_\bullet R([(\bar{\partial}\sigma)^{m-1}\sigma])^1_\ell. \]
By the induction assumption, this current vanishes outside of $Z^{m+2}$, and since
$R([(\bar{\partial}\sigma)^m\sigma])^1_\bullet$ is a pseudomeromorphic $(0,m+1)$-current, with support on $Z^{m+2}$,
which has codimension $\geq m+2$ by \eqref{eq:codimZk}, it vanishes by the dimension principle.
We now prove part (c). By part (a), $R^\ell_\bullet=0$ for $\ell \geq 1$ and since $\nabla R = 0$ it follows that $R a^0 = 0$
since \[ 0 = (\nabla R)^1_\bullet = a R^1_\bullet + R^1_\bullet a^1 -R^0_\bullet a^0. \]
It remains to prove parts (b) and (d).
By Proposition~\ref{prop:Rasm}, $(R')^\ell_\bullet = 0$ for $\ell \geq 1$.
In addition, the conditions $(R')^0_k = 0$ for $k < \codim \mathcal{F}$
and that $R'$ has support on $\supp \mathcal{F}$ are non-trivial only when $\codim \mathcal{F}> 0$,
but then $R'=0$ by Proposition~\ref{prop:Rasm}, and hence these conditions are indeed trivially verified.
Thus, it remains to prove parts (b) and (d), assuming that $R=R(U)$.
To prove that $\supp R(U) \subseteq \supp \mathcal{F}$, we may without loss of generality
assume that $\codim \mathcal{F} > 0$, so that $\supp \mathcal{F}=Z=Z^1$.
Since $\sigma^0$ and consequently $\sigma$ and $U$ are smooth outside of $Z$,
it then follows by \eqref{eq:resSupport} that indeed $\supp R(U) \subseteq Z = \supp \mathcal{F}$.
Since $R(U)^0_k$ is a pseudomeromorphic $(0,k)$-current, with support on $Z$, it follows by the dimension
principle that $R(U)^0_k =0$ for $k < \codim \mathcal{F}$.
\end{proof}
Let us now return to the proof of Lemma~\ref{lmaQ}, which relies on the following technical lemma.
\begin{lemma} \label{lmaQ0}
Define $Q^0 \vcentcolon= \id-a^0\sigma^0-\sigma^0 a^0$.
Then $Q^0$ and $DQ^0$ take values in $\Hom(F^0,F)$,
\begin{equation} \label{eq:Q0a0}
Q^0 a^0 = 0, \quad DQ^0 a^0 = 0,
\end{equation}
and
\begin{equation} \label{eq:sigma0Q0}
\sigma = \sigma^0(D\sigma^0+Q^0)^{-1}.
\end{equation}
Furthermore, the element $Q = \id - D\sigma$ defined in Lemma~\ref{lmaQ} equals
\begin{equation} \label{eq:QQ0}
Q = Q^0-\sigma DQ^0.
\end{equation}
\end{lemma}
In the proofs of Lemma~\ref{lmaQ} and Lemma~\ref{lmaQ0},
we will use that if
\[
\alpha,\beta \in C^\bullet(\mathfrak{U}',\mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F,F))),
\]
and $\alpha$ takes values in $\Hom(F^0,F)$ and $\beta$ is nilpotent and takes values in $\bigoplus_{k \geq 1} \Hom(F,F^{-k})$, then
$\alpha\beta = 0$, and
\begin{equation} \label{eq:alphaidbeta}
\alpha(\id+\beta)^{-1} = \alpha(\id-\beta+\beta^2-\dots) = \alpha.
\end{equation}
\begin{proof}[Proof of Lemma~\ref{lmaQ0}]
It follows from the properties defining $\sigma^0$ that
\[
Q^0\in C^0(\mathfrak{U}',\mathcal{C}^{0,0}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F^0,F^0))),
\]
and it may alternatively be defined by $Q_\alpha^0 = \id_{F_\alpha^0}- (a_\alpha^0 \sigma_\alpha^0)^0_0$,
and it has the property that $Q^0 a^0 = 0$.
Since $a = a^0 + a'$, we thus have that
\begin{equation} \label{eq:Dsigma0}
D \sigma^0 =
a \sigma^0 + \sigma^0 a =
\id - Q^0 + a' \sigma^0 + \sigma^0 a'.
\end{equation}
Since $(\sigma^0)^2 = 0$, it follows from \eqref{eq:sigmadef} that \eqref{eq:sigma0Q0} holds.
Thus,
\begin{equation}
\begin{aligned} \label{eq:Dsigma}
D \sigma &=
D \sigma^0 (D \sigma^0 + Q^0)^{-1} - \sigma^0 D (D \sigma^0 + Q^0)^{-1} \\ &=
(D \sigma^0 + Q^0 - Q^0)(D \sigma^0 + Q^0)^{-1} - \sigma^0 D (D \sigma^0 + Q^0)^{-1} \\ &=
\id - Q^0 - \sigma^0 D (D \sigma^0 + Q^0)^{-1},
\end{aligned}
\end{equation}
where we have used \eqref{eq:Dderivation} in the first equality, and in the third equality,
we have used that $Q^0 (D\sigma^0+Q^0)^{-1} = Q^0$ by \eqref{eq:alphaidbeta} since $D\sigma^0+Q^0=\id+a'\sigma^0+\sigma^0 a'$ by \eqref{eq:Dsigma0}.
Since $Q^0$ takes values in $\Hom(F^0,F^0)$ and $Q^0 a^0 = 0$,
$D Q^0 = a' Q^0 - Q^0 a^1$, and in particular, $D Q^0$
takes values in $\Hom(F^0,F)$. Thus, using that $D\alpha^{-1}=-\alpha^{-1}(D\alpha) \alpha^{-1}$, we get that
\begin{equation}
\begin{aligned} \label{eq:DDsigma0Q0inv}
D (D \sigma^0 + Q^0)^{-1} &=
-(D \sigma^0 + Q^0)^{-1} D Q^0 (D \sigma^0 + Q^0)^{-1} \\ &=
-(D \sigma^0 + Q^0)^{-1} D Q^0,
\end{aligned}
\end{equation}
where we in the second equality used that $D Q^0 (D \sigma^0 + Q^0)^{-1} = D Q^0$,
for the same reasons as for $Q^0$ in the previous paragraph.
The formula \eqref{eq:QQ0} then follows by combining \eqref{eq:sigma0Q0}, \eqref{eq:Dsigma}
and \eqref{eq:DDsigma0Q0inv}.
\end{proof}
\begin{proof}[Proof of Lemma~\ref{lmaQ}]
Since $Q^0$ and $DQ^0$ take values in $\Hom(F^0,F)$, it follows by \eqref{eq:QQ0} that also $Q$ takes values
in $\Hom(F^0,F)$. By \eqref{eq:Q0a0} and \eqref{eq:QQ0} it follows that $Q a^0 = 0$,
and hence also $\nabla Q$ takes values in $\Hom(F^0, F)$.
It remains to prove \eqref{eq:nablau}.
Since by definition, $Q=\id-D\sigma$, we get that
\begin{equation} \label{eq:iddbarsigma}
\id-\bar{\partial}\sigma=\nabla\sigma+Q.
\end{equation}
Thus, by \eqref{eq:udef},
\begin{equation} \label{eq:uAlternative}
u = \sigma(\nabla\sigma+Q)^{-1}.
\end{equation}
It then follows that
\begin{align} \label{eq:nablasigmaQinv}
\nabla(\nabla \sigma + Q)^{-1} &=
-(\nabla \sigma + Q)^{-1} \nabla Q (\nabla \sigma + Q)^{-1} =
-(\nabla \sigma + Q)^{-1} \nabla Q,
\end{align}
where we in the second equality have used that $\nabla Q(\nabla\sigma+Q)^{-1}=\nabla Q$
by \eqref{eq:alphaidbeta} and \eqref{eq:iddbarsigma}.
Using \eqref{eq:uAlternative} and \eqref{eq:nablasigmaQinv}, we get that
\begin{align*}
\nabla u &=
\nabla \sigma (\nabla \sigma + Q)^{-1} -
\sigma \nabla(\nabla \sigma + Q)^{-1}
\\ &=
(\nabla \sigma + Q - Q)(\nabla \sigma + Q)^{-1} +
u \nabla Q
\\ &=
\id - Q + u \nabla Q,
\end{align*}
where we in the third equality have used that $Q(\nabla\sigma+Q)^{-1}=Q$ by
\eqref{eq:alphaidbeta} and \eqref{eq:iddbarsigma}.
\end{proof}
\begin{remark}
In the case that $\mathcal{F}$ has a global locally free resolution of finite length $(E,\varphi)$,
as in Example~\ref{ex:globalRes}, then $\sigma=\sigma^0$ since $(\sigma^0)^2=0$.
Assuming additionally that $\codim \mathcal{F} > 0$, the forms and currents $\sigma$, $u$, $U$
and $R$ then define global $\End(E)$-valued currents, which coincide with the ones
defined in \cite{AW1}, and the calculations here reduce to calculations similar to the ones
in \cite{AW1}*{Section~2}, since then $Q^0$, $Q$ and $R'$ vanish.
In \cite{AW1}, a way of handling the case when $\codim \mathcal{F} = 0$ is only mentioned briefly by a comment
in \cite{AW1}*{Corollary~2.4}, while the remainder of the paper is written under the assumption that
$\codim \mathcal{F} > 0$. This is a natural assumption in the context of that paper,
and it is not really specified how $R$ is defined in the case $\codim \mathcal{F} = 0$.
For us, it is natural to not have any assumptions on the codimension, so we have
made sure to provide a definition which works also in this case.
The main issue is that in \cite{AW1}*{Section~2}, the equality
which in our notation may be written as $a^0 \sigma^0 + \sigma^0 a^0 = \id$ is being used,
but this equality only holds when $\codim \mathcal{F} > 0$, i.e., when $Q^0=0$.
We then use different formulas to define $u$ and $R$ in \eqref{eq:udef} and \eqref{eq:Rdef}
than in \cite{AW1}. The formulas we use as a definition actually appear as alternative formulas
in \cite{AW1}, since they are equivalent when $\codim \mathcal{F} > 0$.
Our definitions are made so that $R$ gets the crucial properties
that it takes values in $\Hom(F^0,F)$ and is $\nabla$-cohomologous to the identity mapping.
In the case that $\codim \mathcal{F} = 0$, with a global resolution $(E,\varphi)$,
then $Q^0 \neq 0$, and using that $a^1$ is the identity, it follows that $DQ^0 = 0$,
so by \eqref{eq:QQ0}, $Q=Q^0$, which outside of $Z$ equals the orthogonal projection onto
$\im \varphi_1$. By Proposition~\ref{prop:Rasm} and \eqref{eq:Rprim}, it follows that
\[
R = R(U)+[Q^0]+[u\bar{\partial} Q^0],
\]
where we by $[Q^0]$ and $[u \bar{\partial} Q^0]$ denote the almost semi-meromorphic extensions.
The duality result \cite{AW1}*{Corollary~2.4} states that a section $\phi$ of $E_0$
belongs to $\im \varphi_1$ if and only if $\phi$ belongs generically to
$\im \varphi_1$ and $R(U)\phi = 0$.
Note that $[Q^0]$ is the only part of $R$ with values in $\Hom(F^0,F^0)$,
and that $[Q^0]\phi = 0$ if and only if $\phi$ belongs generically to $\im \varphi_1$,
and it holds that if $[Q^0] \phi = 0$, then $[u\bar{\partial} Q^0] \phi = 0$. Thus, with our definition of $R$,
it follows that \cite{AW1}*{Corollary~2.4} may be reformulated succinctly as:
$\phi$ belongs to $\im \varphi_1$ if and only if $R\phi = 0$.
\end{remark}
\begin{example}[Explicit description of $R^0$ and $R^1$]
Assume that $(F,a)$ is a twisted resolution of $\mathcal{F}$, and that $\codim \mathcal{F} > 0$.
We consider first the components $R^0_\alpha$ of \v{C}ech degree $0$.
Since $\sigma_\alpha = \sigma^0_\alpha$ equals the form $\sigma$ defined in \cite{AW1} associated with $(F_\alpha,a^0_\alpha)$
on $\mathcal{U}_\alpha$, and comparing the definitions in this section with the ones in \cite{AW1}, it follows that
$u_\alpha^0$, $U_\alpha^0$ and $R_\alpha^0$ equal the corresponding forms and currents $u$, $U$, and $R$ defined in \cite{AW1}*{Section~2}
associated with $(F_\alpha,a^0_\alpha)$.
We consider next the components $R^1_{\alpha\beta}$ of \v{C}ech degree $1$.
Using that $\nabla R = 0$ by Proposition-Definition~\ref{prop:URdef}, and considering a component of \v{C}ech degree $1$,
and using that $R a^0 = 0$, we obtain that
\begin{equation*}
0 = (\nabla R)_{\alpha \beta} =
a_\alpha^0 R_{\alpha \beta}^1 + a^1_{\alpha\beta} R^0_\beta -
R^0_\alpha a^1_{\alpha\beta} - \bar{\partial} R^1_{\alpha\beta},
\end{equation*}
i.e.,
\begin{equation} \label{eq:R1comparisonFormula}
R^0_\alpha a^1_{\alpha\beta} - a^1_{\alpha\beta} R^0_\beta = a^0_\alpha R^1_{\alpha\beta} - \bar{\partial} R^1_{\alpha\beta}.
\end{equation}
Note that $a^1_{\alpha\beta} : (F_\beta,a^0_\beta) \to (F_\alpha,a^0_\alpha)$ is a morphism of complexes, and \eqref{eq:R1comparisonFormula}
in fact coincides with the comparison formula from \cite{Lar} in this situation. Indeed, as explained above, $R^0_\alpha$ and $R^0_\beta$ are
the residue currents associated with $(F_\alpha,a^0_\alpha)$ and $(F_\beta,a^0_\beta)$, respectively.
Using that $(\sigma^0_\alpha)^2 = 0$, it follows that $\sigma^0_\alpha(\bar{\partial}\sigma^0_\alpha)^m\sigma^0_\alpha = 0$ for any $m \geq 1$,
and then a calculation using \eqref{eq:sigmadef} and \eqref{eq:udef} yields that if we let $u_\alpha^0 = \sigma^0_\alpha + \sigma^0_\alpha\bar{\partial}\sigma^0_\alpha+\dots$,
then $U_{\alpha\beta}^1 = u_\alpha^0 a^1_{\alpha\beta} u_\beta^0$. Since $R^1_{\alpha\beta} = R(U_{\alpha\beta}^1)$ by Proposition~\ref{prop:Rasm},
it follows that $R^1_{\alpha\beta}$ equals the current $M$ defined in \cite{Lar}*{(3.1)} associated with the morphism of complexes $a^1_{\alpha\beta}$,
and \eqref{eq:R1comparisonFormula} then coincides with the comparison formula \cite{Lar}*{Theorem~3.2} (see in particular \cite{Lar}*{(3.4)}).
\end{example}
\section{Twisted resolutions and residue currents}
\label{section:main-theorem}
Let $(F,a)$ be a twisted resolution of a coherent $\mathcal{O}_X$-module $\mathcal{F}$, and let $G$ be a holomorphic vector bundle. Note that the operator $D$ on $C^\bullet(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G))$ can be decomposed as $D = D^0 + D'$, where
\[
D^0: C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)) \to
C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{r+1}(F,G)),
\]
and
\[
D': C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)) \to
\bigoplus_{j \geq 1} C^{p+j}(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^{r-j+1}(F,G)).
\]
The triple $(C^\bullet(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)),D^0,D')$ is referred to as a \emph{twisted complex}, and it was shown in \cite{TT1}*{Theorem~2.9} that
\[
H^k\left(
\bigoplus_{p+r=\bullet}
C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))
\right) \cong
\Ext^k(\mathcal{F},G).
\]
Our objective now is to make this isomorphism explicit by representing $\Ext^k(\mathcal{F},G)$ as the $k$th cohomology of $\Hom(\mathcal{F},\mathcal{C}^{0,\bullet}(G))$ as explained in the introduction.
We will begin by defining the operator $v$ that appears in the statement of Theorem~\ref{thm:main}.
\subsection{The operator $v$} \label{ssection:vdef}
Let $(\rho_\alpha)$ be a partition of unity subordinate to $\mathfrak{U}$. We define an operator $v: C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))) \to C^{p-1}(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))$ as
\[
(v f)_{\alpha_0 \dots \alpha_{p-1}} \vcentcolon=
\sum_{\alpha} \rho_\alpha
f_{\alpha \alpha_0 \dots \alpha_{p-1}}
\]
for $p \geq 1$ and $vf \vcentcolon= 0$ otherwise. Here $\rho_\alpha f_{\alpha \alpha_0 \dots \alpha_{p-1}}$, which is defined on $\mathcal{U}_{\alpha \alpha_0 \dots \alpha_{p-1}}$, is extended by 0 to $\mathcal{U}_{\alpha_0 \dots \alpha_{p-1}}$.
A calculation yields that if $f \in C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))$, then
\begin{equation*}
Dvf = f - vDf
\end{equation*}
if $p \geq 1$ and $Dvf=0$ otherwise. Moreover, for $j \geq 1$, it follows by induction
and the fact that $D\bar{\partial}=-\bar{\partial} D$ that
\begin{equation*}
Dv(\bar{\partial} v)^j f =
(\bar{\partial} v)^j f + v(\bar{\partial} v)^{j-1} \bar{\partial} f -
v(\bar{\partial} v)^j Df
\end{equation*}
if $p \geq j+1$ and $D v(\bar{\partial} v)^j f=0$ otherwise.
By $(\bar{\partial} v)^j f$, we mean the composition $\bar{\partial}\circ v$ applied $j$ times to $f$.
Let now $f \in C^\bullet(\mathfrak{U},\mathcal{C}^{0,\bullet}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^\bullet(F,G)))$ be an arbitrary element, and as above we let $f^j$ denote the parts of $f$ of \v{C}ech degree $j$. By computing $Dv(f-f^0)$ and $Dv(\bar{\partial} v)^j \left(f - \sum_{\ell=0}^j f^\ell \right)$, we obtain the formulas
\begin{equation}
\label{eq:Dv1}
Dvf = f - vDf - f^0 + v D f^0,
\end{equation}
and
\begin{equation}
\label{eq:Dv2}
\begin{aligned}
Dv(\bar{\partial} v)^j f &=
(\bar{\partial} v)^j f + v(\bar{\partial} v)^{j-1} \bar{\partial} f -
v(\bar{\partial} v)^j Df
\\ &- (\bar{\partial} v)^j f^j - v(\bar{\partial} v)^{j-1} \bar{\partial} f^j +
\sum_{\ell=0}^j \left( v(\bar{\partial} v)^j D f^\ell \right)
\end{aligned}
\end{equation}
for any $j \geq 1$.
\subsection{Cohomology of graded complexes}
In the proof of Theorem~\ref{thm:main}, we will make repeated use of
a basic result about cohomology of certain graded complexes.
Graded complexes generalize double complexes, and we will make use
of this result both for double complexes and for twisted complexes.
Let $(A^{p,q},d_{p,q,r})$ be a \emph{graded complex} of abelian groups, i.e., for $p,q \in \mathbb{Z}$, $A^{p,q}$ are abelian groups,
and for any $r \in \mathbb{Z}$ there are morphisms
$d_{p,q,r} : A^{p,q} \to A^{p+r,q+1-r}$ such that if
\[ A^k \vcentcolon= \bigoplus_{p+q=k} A^{p,q}, \quad d_k \vcentcolon= \bigoplus_{\substack{p+q=k \\ r\in \mathbb{Z}}} \, d_{p,q,r} : A^k \to A^{k+1},\]
then $(A^\bullet,d)$ defines a complex.
We will always assume that $A^{p,q}=0$ for $p<0$ or $q<0$, and furthermore
that $d_{p,q,r} = 0$ for $r < 0$.
Note that a graded complex defines a filtered complex with the filtration given
by
\[
F^j A^k = \bigoplus_{\substack{p+q=k \\ q \geq j}} A^{p,q}.
\]
We write $d^0 = \bigoplus_{p,q} d_{p,q,0}$ and $d' = \bigoplus_{r \geq 1} d_{p,q,r}$.
Since $d_{p,q,r} = 0$ for $r<0$, it follows that $d=d^0+d'$ and
$(d^0)^2 = 0$, so for a fixed $p$, $(A^{p,\bullet},d^0)$ defines a complex.
\begin{proposition} \label{prop:cohomologyGradedComplex}
Let $(A^{p,q},d_{p,q,r})$ be a graded complex of abelian groups, such
that $A^{p,q}= 0$ if $p<0$ or $q<0$, and $d_{p,q,r} = 0$ for $r < 0$.
Assume furthermore that $H^q(A^{p,\bullet},d^0) = 0$ for $q>0$.
Then the inclusion $A^{k,0} \hookrightarrow A^k$ induces an isomorphism
\[
H^k(H^0(A^{\bullet,\bullet},d^0),d') \overset{\cong}{\to} H^k(A^\bullet,d).
\]
\end{proposition}
This follows from the general theory of spectral sequences associated with a filtered complex, see for example \cite{Gun3}*{Theorem C.8},
but may also be proven directly by arguments involving straightforward diagram chases.
A special case of a graded complex is a double complex $(A^{p,q},d',d'')$, i.e., when $d_{p,q,r} = 0$ for $r \neq 0,1$,
so $d^0=d'' : A^{p,q} \to A^{p,q+1}$, $d' : A^{p,q} \to A^{p+1,q}$ and $d'd'' = - d''d'$.
In this case, one may construct a new double complex with the same total complex by switching roles of the indices.
Applying Proposition~\ref{prop:cohomologyGradedComplex}, one obtains the following well-known consequence:
Let $(A^{p,q},d',d'')$ be a double complex and assume that
$H^p(A^{\bullet,q},d') = 0$ for $p > 0$ and that $H^q(A^{p,\bullet},d'') = 0$ for $q > 0$.
Then there are isomorphisms
\begin{equation} \label{eq:doubleComplexCohomologyIsomorphism}
H^k(H^0(A^{\bullet,\bullet},d''),d') \cong H^k(A^\bullet,d) \cong
H^k(H^0(A^{\bullet,\bullet},d'),d'').
\end{equation}
Given an element $[[x']] \in H^k(H^0(A^{\bullet,\bullet},d''),d')$, one may find an element $y \in A^{k-1}$ such that
\[ d y = x' - x'',\]
for some $x'' \in A^{0,k}$. Then, the composed isomorphism between the left-most and right-most group in \eqref{eq:doubleComplexCohomologyIsomorphism} is given by
\begin{equation} \label{eq:doubleComplexExplicitIsomorphism}
[[x']] \mapsto [[x'']].
\end{equation}
\subsection{Proof of Theorem~\ref{thm:main}}
In the proof of Theorem~\ref{thm:main} we will make use of the following
useful result, which is essentially a consequence of the stalkwise injectivity
of currents by Malgrange.
\begin{lemma} \label{lma:extCurrentsVanish}
Let $\mathcal{F}$ be a coherent $\mathcal{O}_X$-module, and let
$G$ be a locally free $\mathcal{O}_X$-module. Then
\[ \Ext^k(\mathcal{F},\mathcal{C}^{0,q}(G)) = 0 \]
for $k \geq 1$.
\end{lemma}
In particular, if $\mathcal{F}$ is coherent, then
$\mathcal{C}^{0,\bullet}(G)$ is an acyclic resolution of $G$
for the functor $\Hom(\mathcal{F},\bullet)$, so
\begin{equation}
\Ext^k(\mathcal{F},G) \cong H^k(\Hom(\mathcal{F},\mathcal{C}^{0,\bullet}(G))).
\end{equation}
\begin{proof}
By \cite{Mal}*{Theorem~VII.2.4}, $\mathcal{C}^{0,q}$ is stalkwise injective,
and consequently $\mathcal{C}^{0,q}(G)$ is as well.
Thus,
\[ \Ext^\ell_{\mathcal{O}_{X,x}}(\mathcal{F}_x,\mathcal{C}^{0,q}(G)_x) = 0 \]
for $\ell \geq 1$.
Since $\mathcal{F}$ is coherent,
\[
\mathop{\mathcal{E}\!\mathit{xt}}\nolimits^{\ell}(\mathcal{F},\mathcal{C}^{0,q}(G))_x \cong \Ext^\ell_{\mathcal{O}_{X,x}}(\mathcal{F}_x,\mathcal{C}^{0,q}(G)_x),
\]
cf., e.g., \cite{Har}*{Proposition~III.6.8}.
In addition, since $\mathcal{C}^{0,q}$ is fine, $\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F},\mathcal{C}^{0,q}(G))$ is as well,
so
\[ H^k(X,\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F},\mathcal{C}^{0,q}(G))) = 0. \]
To conclude, $H^r(X,\mathop{\mathcal{E}\!\mathit{xt}}\nolimits^s(\mathcal{F},\mathcal{C}^{0,q}(G))) = 0$ for $r+s=k$,
so $\Ext^k(\mathcal{F},\mathcal{C}^{0,q}(G)) = 0$ by the local to global
spectral sequence of $\Ext$.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:main}]
Consider the double complex
\[
E^{q,\ell} \vcentcolon=
\bigoplus_{p+r=\ell}
C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)))
\]
with differentials $D$ and $-\bar{\partial}$, and total differential $\nabla = D - \bar{\partial}$.
We have that
\begin{equation}
\begin{aligned} \label{eq:HqEql}
H^q(E^{\bullet,\ell}) &=
\bigoplus_{p+r=\ell} \prod_{(\alpha_0,\dots,\alpha_p)}
H^{0,q}(\mathcal{U}_{\alpha_0 \dots \alpha_p},
\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_{\alpha_p},G)) \\
&= \left\{\begin{array}{cc} 0 & \text{ for $q > 0$,} \\
\bigoplus_{p+r=\ell} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G)) & \text{ for $q=0$,}
\end{array}
\right.
\end{aligned}
\end{equation}
where the case $q > 0$ in the second equality follows by Cartan's Theorem B, since $\mathcal{U}_{\alpha_0 \dots \alpha_p}$ is Stein
and $\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F_{\alpha_p},G)$ is coherent.
We claim that
\begin{equation} \label{eq:HlEql}
H^\ell(E^{q,\bullet}) = \left\{\begin{array}{cc} 0 & \text{ for $\ell > 0$,} \\
\Hom(\mathcal{F}, \mathcal{C}^{0,q}(G)) & \text{ for $\ell=0$.}
\end{array}
\right.
\end{equation}
Thus, combining \eqref{eq:HqEql} and \eqref{eq:HlEql}, we obtain an isomorphism \eqref{eq:twisted-iso} by \eqref{eq:doubleComplexCohomologyIsomorphism}.
To prove \eqref{eq:HlEql} we fix $q$ and consider
\[
A^{p,r} \vcentcolon=
C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))) =
\prod_{(\alpha_0,\dots,\alpha_p)}
\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_{\alpha_p}^{-r}, \mathcal{C}^{0,q}(G))
(\mathcal{U}_{\alpha_0 \dots \alpha_p}),
\]
which provides a decomposition $E^{q,\ell} = \bigoplus_{p+r=\ell} A^{p,r}$.
By Lemma~\ref{lma:extCurrentsVanish} and the long exact sequence of $\Ext$,
for $\mathcal{U}$ open,
$\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\bullet,\mathcal{C}^{0,q}(G))(\mathcal{U})$
is an exact
functor on the category of coherent $\mathcal{O}_{\mathcal{U}}$-modules.
Thus,
\[
H^r(A^{p,\bullet}) \cong
\prod_{(\alpha_0,\dots,\alpha_p)}
H^r(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_{\alpha_p}^{-\bullet},\mathcal{C}^{0,q}(G))(\mathcal{U}_{\alpha_0 \dots \alpha_p})) = 0
\]
for $r > 0$. In addition,
\[
H^0(A^{p,\bullet}) = \prod_{(\alpha_0,\dots,\alpha_p)}
\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F}, \mathcal{C}^{0,q}(G))
(\mathcal{U}_{\alpha_0 \dots \alpha_p}).
\]
We may thus apply Proposition~\ref{prop:cohomologyGradedComplex} to compute $H^\ell(E^{q,\bullet})$.
A calculation yields that the differential induced by $D'$ on $H_{D^0}^0(A^{\bullet,\bullet}) \cong C^\bullet(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F},\mathcal{C}^{0,q}(G)))$
is just the ordinary \v{C}ech coboundary.
Thus
\[ H^\ell(E^{q,\bullet}) \cong H_{D'}^\ell(H_{D^0}^0(A^{\bullet,\bullet})) \cong \check{H}^\ell(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F}, \mathcal{C}^{0,q}(G))),\]
which vanishes for $\ell > 0$ since $\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F}, \mathcal{C}^{0,q}(G))$ is fine, and is isomorphic to $\Hom(\mathcal{F}, \mathcal{C}^{0,q}(G))$ for $\ell = 0$,
which proves \eqref{eq:HlEql}.
It remains to prove that the isomorphism is given by \eqref{eq:twisted-iso-formula}. To this end, let $\xi$ be a $D$-closed element of $\bigoplus_{p+r=k} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,G))$. Since $R$ takes values in $\Hom(F^0,F)$ by Theorem~\ref{thm:R},
and since $R$ has degree $0$, we have that
$\sum_j (\bar{\partial} v)^j (\xi R)^j$ defines an element of $H^0(E^{k,\bullet}) \cong \Hom(F^0,\mathcal{C}^{0,k}(G))$.
By \eqref{eq:doubleComplexExplicitIsomorphism}, it then remains to find a $\nabla$-potential
of $\xi-\sum_j (\bar{\partial} v)^j (\xi R)^j$. We set $w = v + v (\bar{\partial} v) + v (\bar{\partial} v)^2 + \dots$, and we claim that
\begin{equation} \label{eq:nablaPotential}
\nabla((-1)^k \xi U + w(\xi R)) =
\xi - \sum_j (\bar{\partial} v)^j (\xi R)^j,
\end{equation}
which thus proves \eqref{eq:twisted-iso-formula}.
To prove \eqref{eq:nablaPotential},
note first that since $\xi$ is $D$-closed and holomorphic, $\nabla \xi = 0$.
In addition, $R$ is $\nabla$-closed by Proposition-Definition~\ref{prop:URdef},
and thus it follows from \eqref{eq:nablaDerivation} that $\nabla (\xi R) = 0$, i.e., $D(\xi R) = \bar{\partial} (\xi R)$.
Since $R$ takes values in $\Hom(F^0,F)$ and $R a^0=0$ by Theorem~\ref{thm:R},
it follows that $D (\xi R)^j$ has \v{C}ech degree $j+1$, and hence for degree reasons it follows that $D (\xi R)^j = \bar{\partial} (\xi R)^{j+1}$.
Thus it follows from \eqref{eq:Dv1} that
\begin{equation} \label{eq:DvxiR1}
Dv (\xi R) =
\xi R - v \bar{\partial}(\xi R) - (\xi R)^0 +
v \bar{\partial} (\xi R)^1.
\end{equation}
Moreover, from \eqref{eq:Dv2} it follows that
\begin{equation}
\begin{aligned} \label{eq:DvxiR2}
Dv(\bar{\partial} v)^j (\xi R) &=
(\bar{\partial} v)^j (\xi R) +
v(\bar{\partial} v)^{j-1} \bar{\partial} (\xi R) -
v(\bar{\partial} v)^j \bar{\partial} (\xi R) \\ &-
(\bar{\partial} v)^j (\xi R)^j -
v(\bar{\partial} v)^{j-1} \bar{\partial} (\xi R)^j +
v(\bar{\partial} v)^j \bar{\partial} (\xi R)^{j+1}
\end{aligned}
\end{equation}
for $j \geq 1$, since
\[
\left( \sum_{\ell=0}^j v (\bar{\partial} v)^\ell D (\xi R)^\ell \right) =
\left( \sum_{\ell=0}^j v (\bar{\partial} v)^\ell \bar{\partial} (\xi R)^{\ell+1} \right) =
v (\bar{\partial} v)^j \bar{\partial} (\xi R)^{j+1}.
\]
Using \eqref{eq:DvxiR1} and \eqref{eq:DvxiR2}, it follows that we have a telescoping sum
\begin{equation} \label{eq:nablawxiR}
\nabla(w (\xi R)) = \xi R - \sum_j (\bar{\partial} v)^j (\xi R)^j.
\end{equation}
In addition, since $\nabla \xi = 0$ and $\deg \xi = k$, it follows by \eqref{eq:Rdef} that
$\nabla ((-1)^k \xi U) = \xi - \xi R$, and combining this with \eqref{eq:nablawxiR},
we obtain \eqref{eq:nablaPotential}.
\end{proof}
\subsection{Sheaf $\mathop{\mathcal{E}\!\mathit{xt}}\nolimits$ and residue currents}
We end this section with a brief discussion on how one can use residue currents to go between different representations of the sheaf $\mathop{\mathcal{E}\!\mathit{xt}}\nolimits^k(\mathcal{F},G)$. This case offers no novelty since this is basically just a matter of putting some of the results of \cite{And1} into a global context and making appropriate interpretations so that they fit into the framework of twisted resolutions.
As usual, we let $(F,a)$ be a twisting cochain associated with $\mathcal{F}$. We have that for each index $\alpha$,
$a$ induces a complex
\begin{equation}
\label{eq:Homscomplex}
0 \to \mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_\alpha^0,G) \to \mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_\alpha^{-1},G) \to \dots,
\end{equation}
where a section $\xi_\alpha$ of $\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_\alpha^{-k},G)$ is mapped to $\xi_\alpha a_\alpha^0$. It is well known that over each $\mathcal{U}_\alpha$, $\mathop{\mathcal{E}\!\mathit{xt}}\nolimits^k(\mathcal{F},G)$ can be computed as the $k$th cohomology of the complex \eqref{eq:Homscomplex}. Analogous to the discussion following Definition~\ref{def:twisting-cochain}, the cohomology sheaves $\mathcal{H}^k(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_\alpha^\bullet,G))$ can be glued together over each intersection $\mathcal{U}_{\alpha \beta}$ via the isomorphism induced by $a_{\alpha \beta}^1$, i.e.,
\begin{equation} \label{eq:Extsalphabeta}
[\xi_\alpha] \mapsto [\xi_\alpha a_{\alpha \beta}^1].
\end{equation}
For simplicity, let us in the following result identify $\mathop{\mathcal{E}\!\mathit{xt}}\nolimits^k(\mathcal{F},G)$ with this sheaf.
\begin{proposition}
\label{prop:Exts}
Let $\mathcal{F}$ be a coherent $\mathcal{O}_X$-module, and let $G$ be a holomorphic vector bundle. Let $(F,a)$ be a twisted resolution of $\mathcal{F}$, and let $R$ be the associated residue current. Then there is a well-defined isomorphism
\begin{equation} \label{eq:Extsiso}
\mathop{\mathcal{E}\!\mathit{xt}}\nolimits^k(\mathcal{F},G) \overset{\cong}{\to}
\mathcal{H}^k(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F},\mathcal{C}^{0,\bullet}(G))),
\end{equation}
which over each $\mathcal{U}_\alpha$ is given by
\begin{equation} \label{eq:ExtsisoFormula}
[\xi_\alpha] \mapsto [\xi_\alpha R_\alpha^0].
\end{equation}
\end{proposition}
\begin{proof}
From \cite{And1} it follows that for each $\alpha$ there is an isomorphism
\[
\mathcal{H}^k(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(F_\alpha^\bullet,G)) \overset{\cong}{\to}
\mathcal{H}^k(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F},\mathcal{C}^{0,\bullet}(G)))
\]
over $\mathcal{U}_\alpha$ given by \eqref{eq:ExtsisoFormula}.
We note that when we move to another open set $\mathcal{U}_\beta$, then the source of
\eqref{eq:Extsiso} is mapped via the isomorphism \eqref{eq:Extsalphabeta}.
In the target of \eqref{eq:Extsiso}, the implicit isomorphism $\mathcal{F} \cong F^0_\alpha/(\im a^0_\alpha)$ on $\mathcal{U}_\alpha$ is used,
and thus, when we move to another open set $\mathcal{U}_\beta$, then $[\xi_\alpha R^0_\alpha]$ is mapped to $[\xi_\alpha R^0_\alpha a^1_{\alpha\beta}]$,
so to prove that the isomorphism given by \eqref{eq:ExtsisoFormula} is well-defined, we should prove that
\begin{equation} \label{eq:ExtsisoCompatible}
[\xi_\alpha R_\alpha^0 a_{\alpha \beta}^1] =
[\xi_\alpha a_{\alpha \beta}^1 R_\beta^0].
\end{equation}
By \eqref{eq:R1comparisonFormula} and the fact that $\xi_\alpha a^0_\alpha = 0$,
we have that
\[
\xi_\alpha R_\alpha^0 a_{\alpha \beta}^1 -
\xi_\alpha a_{\alpha \beta}^1 R_\beta^0 =
\xi_\alpha a_\alpha^0 R_{\alpha \beta}^1 -
\xi_\alpha \bar{\partial} R_{\alpha \beta}^1 =
\pm \bar{\partial} (\xi_\alpha R_{\alpha \beta}^1),
\]
and hence we conclude that \eqref{eq:ExtsisoCompatible} holds.
\end{proof}
\section{The cup product and Serre duality}
\label{sect:serre}
One context in which global Ext groups appear is in the context of Serre duality for coherent $\mathcal{O}_X$-modules. For simplicity, we will in this section assume $X$ to be compact, but the discussion below also applies to non-compact $X$, provided one considers cohomology with compact support in one of the factors,
and impose appropriate Hausdorffness assumptions.
Let as before $\mathcal{F}$ be a coherent $\mathcal{O}_X$-module.
Recall that Serre duality states that there is a perfect pairing
\begin{equation}
\label{eq:SerrePairing}
H^{n-k}(X,\mathcal{F}) \times \Ext^k(\mathcal{F},\omega) \to \mathbb{C}.
\end{equation}
We now spell out how this pairing is realized in \cite{MalSerre}. We have that
\[
\mathcal{F} \longrightarrow \mathcal{F} \otimes \mathcal{E}^{0,0}
\overset{\bar{\partial}}{\longrightarrow} \mathcal{F} \otimes \mathcal{E}^{0,1}
\overset{\bar{\partial}}{\longrightarrow} \dots
\]
is a fine resolution of $\mathcal{F}$. Thus $H^k(X,\mathcal{F}) \cong H^k(\Hom(\mathcal{O}_X,\mathcal{F} \otimes \mathcal{E}^{0,\bullet}))$.
Here we have used the fact that for an $\mathcal{O}_X$-module $\mathcal{G}$, its global sections are given by $\Hom(\mathcal{O}_X,\mathcal{G})$,
a representation of global sections that fits well with the discussion below.
We also have an isomorphism $H^n(\Hom(\mathcal{O}_X,\mathcal{C}^{n,\bullet})) \cong \mathbb{C}$ via integration of currents.
The pairing \eqref{eq:SerrePairing} is then realized as a pairing
\begin{equation} \label{eq:serrePairing1}
H^{n-k}(\Hom(\mathcal{O}_X,\mathcal{E}^{0,\bullet} \otimes \mathcal{F})) \times
H^k(\Hom(\mathcal{F},\mathcal{C}^{n,\bullet})) \to
H^n(\Hom(\mathcal{O}_X,\mathcal{C}^{n,\bullet})),
\end{equation}
which is induced by combining the composition $\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{O}_X,\mathcal{F}) \times \mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{F},\mathcal{C}^{n,k}) \to \mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{O}_X,\mathcal{C}^{n,k})$
with the wedge product $\mathcal{E}^{0,n-k} \times \mathcal{C}^{n,k} \to \mathcal{C}^{n,n}$.
We will denote this pairing simply by $\wedge$.
On the other hand, it is also possible to realize the pairing \eqref{eq:SerrePairing} by representing the Ext groups as the cohomology of a twisted complex. Let $(F,a)$ be a twisted resolution of $\mathcal{F}$. Let $\omega$ denote the sheaf of holomorphic $n$-forms on $X$, and consider the following graded groups
\begin{equation}
\begin{aligned} \label{eq:ABCdef}
B^{q,\ell} &\vcentcolon= \bigoplus_{p+r=\ell} C^p(\mathfrak{U},\mathcal{E}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(\mathcal{O}_X,F))) \cong
\bigoplus_{p+r=\ell} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(\mathcal{O}_X,\mathcal{E}^{0,q}(F))), \\
E^{q,\ell} &\vcentcolon= \bigoplus_{p+r=\ell} C^p(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,\omega))) \cong
\bigoplus_{p+r=\ell} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,\mathcal{C}^{n,q})), \\
G^{q,\ell} &\vcentcolon= C^\ell(\mathfrak{U},\mathcal{C}^{0,q}(\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{O}_X,\omega))) \cong
C^\ell(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{O}_X,\mathcal{C}^{n,q})).
\end{aligned}
\end{equation}
We may identify $\mathcal{O}_X$ and $\omega$ with the corresponding twisting cochains concentrated in degree $0$.
In this way, $E^{q,\ell}$ and $G^{q,\ell}$ are groups of the form \eqref{eq:cechCurrentHom}.
In addition, $B^{q,\ell}$ may be viewed as a subgroup of groups of this form, by using the inclusion $\mathcal{E}^{0,q} \subseteq \mathcal{C}^{0,q}$.
Note that $B^{\bullet,\bullet}$, $E^{\bullet,\bullet}$, and $G^{\bullet,\bullet}$ are all double complexes with differentials $D$ and $-\bar{\partial}$.
Recall the calculations of $H^q(E^{\bullet,\ell})$, \eqref{eq:HqEql}, and $H^\ell(E^{q,\bullet})$, \eqref{eq:HlEql}, in the proof of Theorem~\ref{thm:main}.
Similar arguments yield similar expressions for the cohomology of $B^{\bullet,\bullet}$ and $G^{\bullet,\bullet}$,
where in the case of $B^{\bullet,\bullet}$, one may use that $\mathcal{E}^{0,q}$ is stalkwise flat and fine.
Hence, we may apply \eqref{eq:doubleComplexCohomologyIsomorphism}, which yields isomorphisms
\begin{equation}
\begin{aligned} \label{eq:cechDeRham}
H^k\left( \bigoplus_{p+r=\bullet} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(\mathcal{O}_X,F))\right) \overset{\cong}{\to} H^k( \Hom(\mathcal{O}_X,\mathcal{E}^{0,\bullet}(\mathcal{F}))), \\
H^k\left( \bigoplus_{p+r=\bullet} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,\omega))\right) \overset{\cong}{\to} H^k(\Hom(\mathcal{F},\mathcal{C}^{n,\bullet})), \\
H^k(C^{\bullet}(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits(\mathcal{O}_X,\omega))) \overset{\cong}{\to} H^k(\Hom(\mathcal{O}_X,\mathcal{C}^{n,\bullet})).
\end{aligned}
\end{equation}
Using the isomorphisms \eqref{eq:cechDeRham}, we can alternatively express \eqref{eq:serrePairing1} as a pairing
\begin{equation}
\begin{gathered} \label{eq:serrePairing3}
H^{n-k}\left(\bigoplus_{p+r=\bullet} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(\mathcal{O}_X,F))\right)\times H^k\left( \bigoplus_{p+r=\bullet} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,\omega))\right)
\\ \to H^n(C^\bullet(\mathfrak{U},\Hom(\mathcal{O}_X,\omega))).
\end{gathered}
\end{equation}
We will now show that it is given by the following cup product. Let $x$ and $y$ be $D$-closed elements of $\bigoplus_{p+r=n-k} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(\mathcal{O}_X,F))$ and $\bigoplus_{p+r=k} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,\omega))$ respectively. We define $[x] \smile [y]$ as $[xy]$, where $xy$ is the product \eqref{eq:prod2}. It is easy to see that this is well defined since if, for example, $x = D \alpha$, then $xy = D(\alpha y)$, and hence $[xy] = 0$. So the product is independent of the choice of representatives.
\begin{proposition}
Let $\mathbb{P}si$ denote the isomorphisms in \eqref{eq:cechDeRham},
and let
\[
[x] \in H^{n-k}\left(\bigoplus_{p+r=\bullet} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(\mathcal{O}_X,F))\right)
\text{ and } [y] \in H^k\left( \bigoplus_{p+r=\bullet} C^p(\mathfrak{U},\mathop{\mathcal{H}\!\mathit{om}}\nolimits^r(F,\omega))\right).
\]
Then
\begin{equation}
\Psi( [x] \smile [y] ) = \Psi([x]) \wedge \Psi([y]).
\end{equation}
\end{proposition}
\begin{proof}
Let $x'$ and $y'$ be representatives of $\Psi([x])$ and $\Psi([y])$. In view of \eqref{eq:doubleComplexCohomologyIsomorphism} and \eqref{eq:doubleComplexExplicitIsomorphism}, there exist elements $\alpha$ and $\beta$ such that $x - x' = \nabla \alpha$ and $y - y' = \nabla \beta$.
Using this, one readily verifies that
\begin{equation*}
x y - x' y' = \nabla((-1)^{\deg x} x \beta + \alpha y').
\end{equation*}
Thus $x y$ and $x' y'$ are $\nabla$-cohomologous and from this the statement follows.
\end{proof}
\begin{bibdiv}
\begin{biblist}
\bib{And1}{article}{
author={Andersson, Mats},
title={Coleff-Herrera currents, duality, and Noetherian operators},
journal={Bull. Soc. Math. France},
volume={139},
date={2011},
number={4},
pages={535--554},
}
\bib{AW1}{article}{
author={Andersson, Mats},
author={Wulcan, Elizabeth},
title={Residue currents with prescribed annihilator ideals},
journal={Ann. Sci. \'{E}cole Norm. Sup. (4)},
volume={40},
date={2007},
number={6},
pages={985--1007},
}
\bib{AW2}{article}{
author={Andersson, Mats},
author={Wulcan, Elizabeth},
title={Decomposition of residue currents},
journal={J. Reine Angew. Math.},
volume={638},
date={2010},
pages={103--118},
}
\bib{AW3}{article}{
author={Andersson, Mats},
author={Wulcan, Elizabeth},
title={Direct images of semi-meromorphic currents},
journal={Ann. Inst. Fourier (Grenoble)},
volume={68},
date={2018},
number={2},
pages={875--900},
}
\bib{BT}{book}{
author={Bott, Raoul},
author={Tu, Loring W.},
title={Differential forms in algebraic topology},
series={Graduate Texts in Mathematics},
volume={82},
publisher={Springer-Verlag, New York-Berlin},
date={1982},
}
\bib{CH}{book}{
author={Coleff, N. R.},
author={Herrera, M. E.},
title={Les courants r\'{e}siduels associ\'{e}s \`a une forme m\'{e}romorphe},
series={Lecture Notes in Mathematics},
volume={633},
publisher={Springer, Berlin},
date={1978},
pages={x+211},
isbn={3-540-08651-X},
}
\bib{DS}{article}{
author={Dickenstein, A.},
author={Sessa, C.},
title={Canonical representatives in moderate cohomology},
journal={Invent. Math.},
volume={80},
date={1985},
number={3},
pages={417--434},
}
\bib{Eis}{book}{
author={Eisenbud, David},
title={Commutative algebra},
series={Graduate Texts in Mathematics},
volume={150},
publisher={Springer-Verlag, New York},
date={1995},
pages={xvi+785},
isbn={0-387-94268-8},
isbn={0-387-94269-6},
}
\bib{GH}{book}{
author={Griffiths, Phillip},
author={Harris, Joseph},
title={Principles of algebraic geometry},
series={Wiley Classics Library},
publisher={John Wiley \& Sons, Inc., New York},
date={1994},
}
\bib{Gun3}{book}{
author={Gunning, Robert C.},
title={Introduction to holomorphic functions of several variables. Vol.
III},
series={The Wadsworth \& Brooks/Cole Mathematics Series},
publisher={Wadsworth \& Brooks/Cole Advanced Books \& Software, Monterey,
CA},
date={1990},
}
\bib{Har}{book}{
author={Hartshorne, Robin},
title={Algebraic geometry},
note={Graduate Texts in Mathematics, No. 52},
publisher={Springer-Verlag, New York-Heidelberg},
date={1977},
}
\bib{Lar}{article}{
author={L\"{a}rk\"{a}ng, Richard},
title={A comparison formula for residue currents},
journal={Math. Scand.},
volume={125},
date={2019},
number={1},
pages={39--66},
issn={0025-5521},
}
\bib{Mal}{book}{
author={Malgrange, B.},
title={Ideals of differentiable functions},
series={Tata Institute of Fundamental Research Studies in Mathematics,
No. 3},
publisher={Tata Institute of Fundamental Research, Bombay; Oxford
University Press, London},
date={1967},
}
\bib{MalSerre}{article}{
author={Malgrange, Bernard},
title={Syst\`emes diff\'{e}rentiels \`a coefficients constants},
conference={
title={S\'{e}minaire Bourbaki, 1964, Vol. 8},
},
book={
publisher={Soc. Math. France, Paris},
},
date={1995},
pages={Exp. No. 246, 79--89},
}
\bib{OTT}{article}{
author={O'Brian, Nigel R.},
author={Toledo, Domingo},
author={Tong, Yue Lin L.},
title={The trace map and characteristic classes for coherent sheaves},
journal={Amer. J. Math.},
volume={103},
date={1981},
number={2},
pages={225--252},
issn={0002-9327},
}
\bib{Serre}{article}{
author={Serre, Jean-Pierre},
title={Un th\'{e}or\`eme de dualit\'{e}},
journal={Comment. Math. Helv.},
volume={29},
date={1955},
pages={9--26},
}
\bib{TT1}{article}{
author={Toledo, Domingo},
author={Tong, Yue Lin L.},
title={Duality and intersection theory in complex manifolds. I},
journal={Math. Ann.},
volume={237},
date={1978},
number={1},
pages={41--77},
issn={0025-5831},
}
\end{biblist}
\end{bibdiv}
\end{document}
|
\begin{document}
\footnotetext[1]{Centre de Math\'ematiques Appliqu\'ees, UMR 7641, Ecole Polytechnique, France. \\
[email protected], [email protected] }
\footnotetext[2]{Ecole Normale Sup\'erieure CMLA,
61 Av. du Pr\'esident Wilson,
94235 Cachan Cedex, France. \\
[email protected]}
\footnotetext[3]{University of Florida,
Department of Electrical and Computer Engineering,
Gainesville, Florida. \\
[email protected], [email protected]}
\title{Diffusion approximations and control variates for MCMC}
\begin{abstract}
A new methodology is presented for the construction of control
variates to reduce the variance of additive functionals of Markov
Chain Monte Carlo (MCMC) samplers. Our control variates are defined
through the minimization of the asymptotic variance of the Langevin
diffusion over a family of functions, which can be seen as a quadratic risk
minimization procedure.
The use of these control variates is theoretically justified. We show that the asymptotic variances of some well-known MCMC algorithms, including the Random Walk Metropolis and the (Metropolis) Unadjusted/Adjusted Langevin Algorithm, are close to the asymptotic variance of the Langevin diffusion.
Several examples of Bayesian inference problems demonstrate that the corresponding reduction in the variance is significant.
\end{abstract}
\section{Introduction}
\label{sec:motivations-contrib}
Let $U : \mathbb{R}^d \to \mathbb{R}$ be a measurable function on $(\mathbb{R}^d, \mathcal{B}(\mathbb{R}^d))$ such that $\int_{\mathbb{R}^d} \mathrm{e}^{-U(x)} \mathrm{d} x <\infty$. This function is associated to a probability measure $\pi$ on $(\mathbb{R}^d, \mathcal{B}(\mathbb{R}^d))$ defined for all $\mathsf{A} \in\mathcal{B}(\mathbb{R}^d)$ by
\[ \pi(\mathsf{A}) := \int_{\mathsf{A}} \mathrm{e}^{-U(x)} \mathrm{d} x / \int_{\mathbb{R}^d}\mathrm{e}^{-U(x)} \mathrm{d} x \;. \]
We are interested in approximating $\pi(f) := \int_{\mathbb{R}^d} f(x) \pi(\mathrm{d} x)$, where $f$ is a $\pi$-integrable function.
The classical Monte Carlo solution to this problem is to simulate i.i.d.\ random variables $(X_k)_{k\in\mathbb{N}}$ with distribution~$\pi$, and then to estimate $\pi(f)$ by the sample mean
\begin{equation}
\label{eq:def-invpihatn}
\hat{\pi}_n(f)=n^{-1}\sum_{i=0}^{n-1}f(X_{i})\;.
\end{equation}
In most applications, sampling from $\pi$ is not an option. Markov Chain Monte Carlo (MCMC) methods
provide samples from a Markov chain $(X_k)_{k \in \mathbb{N}}$ with unique invariant probability $\pi$.
Under mild conditions \cite[Chapter 17]{bible}, the estimator $\hat{\pi}_n(f)$ defined by \eqref{eq:def-invpihatn} satisfies for any initial distribution a Central Limit Theorem (CLT)
\begin{equation}\label{eq:TCL-discret}
n^{-1/2} \sum_{k=0}^{n-1} \tilde{f}(X_k) \overset{\text{weakly}}{\underset{n\to+\infty}{\Longrightarrow}} \mathcal{N}(0, \varinf[\operatorname{d}](f)) \;,
\end{equation}
where $ \tilde{f}= f - \pi(f)$ and $\varinf[\operatorname{d}](f) \geq 0$ is referred to as the asymptotic variance associated to $f$ and $\mathcal{N}(m,\sigma^2)$ denotes a Gaussian distribution with mean $m$ and variance $\sigma^2$.
The aim of the present paper is to propose a new methodology to reduce
the asymptotic variance of a family of MCMC algorithms.
This method consists in
constructing suitable control variates, \textit{i.e.}~we consider a family of $\pi$-integrable
functions $\mathcal{H} \subset \set{h : \mathbb{R}^d \to \mathbb{R}}{\pi(h) = 0}$ and then
choose $h \in \mathcal{H}$ such that
$\varinf[\operatorname{d}](f+ h) \leq
\varinf[\operatorname{d}](f)$.
Reducing the variance of Monte Carlo estimators is a very active research domain: see e.g.~\cite[Chapter 4]{robert:casella:2004}, \cite[Section 2.3]{liu2008monte}, and \cite[Chapter 5]{rubinstein:kroese:2017} for an overview of the main methods---see also \Cref{subsec:bibliography}.
Analysis and motivation are based on the Langevin diffusion defined by
\begin{equation}\label{eq:SDE}
\mathrm{d} Y_t = - \nabla U (Y_t) \mathrm{d} t + \sqrt{2} \mathrm{d} B_t \;,
\end{equation}
where $(B_t)_{t\geq 0}$ is a $d$-dimensional Brownian motion. In the sequel, we assume that the Stochastic Differential Equation (SDE) \eqref{eq:SDE} has a unique strong solution $(Y_t)_{t\geq 0}$ for
every initial condition $x\in\mathbb{R}^d$.
Under appropriate conditions (see \cite{Bhattacharya1982Classical,cattiaux2012central}), $\pi$ is invariant for the Markov process $(Y_t)_{t \geq 0}$ and the following CLT holds:
\begin{equation} \label{eq:TCL-continu}
t^{-1/2} \int_{0}^t \tilde{f}(Y_s) \mathrm{d} s
\overset{\text{weakly}}{\underset{t\to+\infty}{\Longrightarrow}}
\mathcal{N}(0, \varinf(f)) \;.
\end{equation}
The main contribution of this paper is the introduction of a new method to compute control variates based on the expression of the asymptotic variance $\varinf(f)$ given in \eqref{eq:TCL-continu}.
For any twice continuously differentiable function $\varphi$, the differential generator acting on $\varphi$ is denoted by
\begin{equation}\label{eq:def-generator}
\mathscr{L} \varphi = - \ps{\nabla U}{\nabla \varphi} + \Delta \varphi \;.
\end{equation}
Under appropriate conditions on $\varphi$ and $\pi$, it may be shown that $\pi(\mathscr{L} \varphi) = 0$. This property suggests to consider the class of control functionals $\mathcal{H}= \set{h = \mathscr{L} g}{g \in \mathcal{G}}$ for the Langevin diffusion, where $\mathcal{G}$ is a family of ``smooth'' functions, and minimize over $\mathcal{H}$ the criterion
\begin{equation}
\label{eq:optimisation-criterion}
h \mapsto\varinf(f + h) \;.
\end{equation}
The use of control functionals $h \in \mathcal{H}$ has already been proposed in \cite{assaraf1999zero} with applications to quantum Monte Carlo calculations; improved schemes have been later considered in \cite{Mira2013,papamarkou2014} with applications to computational Bayesian inference.
Although $\mathcal{H}$ is a class of control functionals for the
Langevin diffusion, the choice of control variates minimizing the criterion
\eqref{eq:optimisation-criterion} for some MCMC algorithms is motivated by the fact that the asymptotic variance
$\varinf[\operatorname{d}](f)$, defined in \eqref{eq:TCL-discret} and associated to the Markov chains associated with these methods, is
(up to a scaling factor) a good approximation of the asymptotic
variance of the Langevin diffusion $\varinf(f)$ defined in
\eqref{eq:TCL-continu}.
The remainder of the paper is organized as follows.
In \Cref{sec:asymp-variance}, we present our methodology to minimize \eqref{eq:optimisation-criterion} and the construction of control variates for some MCMC algorithms. In \Cref{sec:asympt-expans-asympt}, we state our main result which guarantees that the asymptotic variance $\varinf[\operatorname{d}](f)$ defined in \eqref{eq:TCL-discret} and associated with a given MCMC method is close (up to a scaling factor) to the asymptotic variance of the Langevin diffusion $\varinf(f)$ defined in \eqref{eq:TCL-continu}.
We show that under appropriate conditions on $U$, the Metropolis Adjusted/Unadjusted Langevin Algorithm (MALA and ULA) and the Random Walk Metropolis (RWM) algorithm fit the framework of our methodology.
In \Cref{sec:application_cv}, Monte Carlo experiments illustrating the performance of our method are presented.
The proofs are postponed to \Cref{sec:proofs,sec:geom-ergodicity-mala} and to the Appendix.
\subsection*{Notation}
Let $\mathcal{B}(\mathbb{R}^d)$ denote the Borel $\sigma$-field of $\mathbb{R}^d$. Moreover, let $\operatorname{L}^1(\mu)$ be the set of $\mu$-integrable functions for $\mu$ a probability measure on $(\mathbb{R}^d, \mathcal{B}(\mathbb{R}^d))$. Further, $\mu(f)=\int_{\mathbb{R}^d} f(x) \mathrm{d} \mu(x)$ for any $f\in\operatorname{L}^1(\mu)$.
Given a Markov kernel $R$ on $\mathbb{R}^d$, for all $x\in\mathbb{R}^d$ and $f$ integrable under $R(x,\cdot)$, denote by $R f(x) = \int_{\mathbb{R}^d} f(y) R(x, \mathrm{d} y)$.
Let $V: \mathbb{R}^d \to \coint{1,\infty}$ be a measurable function.
The $V$-total variation distance between two probability measures $\mu$ and $\nu$ on $(\mathbb{R}^d, \mathcal{B}(\mathbb{R}^d))$ is defined as $\Vnorm[V]{\mu-\nu} = \sup_{\absolute{f} \leq V} \abs{\mu(f) - \nu(f)}$.
If $V = 1$, then $\Vnorm[V]{\cdot}$ is the total variation denoted by $\tvnorm{\cdot}$.
For a measurable function $f:\mathbb{R}^d\to\mathbb{R}$, define $\Vnorm{f} = \sup_{x\in\mathbb{R}^d} \absolute{f(x)} / V(x)$.
For $u,v\in\mathbb{R}^d$, define the scalar product $\ps{u}{v} = \sum_{i=1}^d u_i v_i$ and the Euclidean norm $\norm{u} = \ps{u}{u}^{1/2}$.
Denote by $\mathbb{S}(\mathbb{R}^d) = \defEns{u\in\mathbb{R}^d : \norm{u} = 1}$.
For $a,b\in\mathbb{R}$, denote by $a\vee b = \max(a,b)$, $a \wedge b = \min(a,b)$ and $a_{+} = a \vee 0$.
For $a\in\mathbb{R}_+$, $\lfloor a\rfloor$ and $\lceil a\rceil$ denote respectively the floor and ceiling functions evaluated at $a$.
We take the convention that for $n,p \in \mathbb{N}$ with $n < p$, $\sum_{p}^n =0$, $\prod_p^n = 1$ and $\defEns{p,\ldots,n} = \emptyset$.
Define for $t\in\mathbb{R}$, $\Phi(t) = (2\uppi)^{-1/2}\int_{-\infty}^t \mathrm{e}^{-r^2 / 2} \mathrm{d} r$ and $\bar{\Phi}(t) = 1 - \Phi(t)$.
In addition, $\boldsymbol{\varphi}$ stands for the $d$-dimensional standard Gaussian density, \textit{i.e.}~$\boldsymbol{\varphi}(z) = (2\uppi)^{-d/2} \mathrm{e}^{-\norm{z}^2 /2}$ for $z\in\mathbb{R}^d$.
For $k \in\mathbb{N}$, $m,m' \in\mathbb{N}^*$ and $\Omega,\Omega'$ two open sets of $\mathbb{R}^m, \mathbb{R}^{m'}$ respectively, denote by $C^k(\Omega, \Omega')$ the set of
$k$-times continuously differentiable functions. For $f \in
C^2(\mathbb{R}^d, \mathbb{R})$, denote by $\nabla f$ the gradient of $f$ and by $\Delta f$ the Laplacian of $f$.
For $k\in\mathbb{N}$ and $f \in C^k(\mathbb{R}^d, \mathbb{R})$, denote by $\operatorname{D}^i f$ the $i$-th order differential of $f$ for $i\in\defEns{0,\ldots,k}$.
For $x\in\mathbb{R}^d$ and $i\in\defEns{1,\ldots,k}$, define $\norm{\operatorname{D}^0 f(x)} = \absolute{f(x)}$, $\norm{ \operatorname{D}^i f (x)} = \sup_{u_1,\ldots,u_i \in\mathbb{S}(\mathbb{R}^d)} \operatorname{D}^i f(x)[u_1, \ldots, u_i]$.
For $k,p\in\mathbb{N}$ and $f\in C^{k}(\mathbb{R}^d,\mathbb{R})$, define the semi-norm
\begin{equation*}
\VnormEq[k,p]{f} = \sup_{x\in\mathbb{R}^d, \; i\in\{0,\ldots,k\}} \norm{\operatorname{D}^i f(x)} / (1+\norm[p]{x}) \;.
\end{equation*}
Define $\setpoly{k}(\mathbb{R}^d,\mathbb{R}) = \defEns{f\in C^{k}(\mathbb{R}^d,\mathbb{R}) : \inf_{p\in\mathbb{N}} \Vnorm[k,p]{f} < +\infty}$ and for any $f \in \setpoly{k}(\mathbb{R}^d,\mathbb{R})$, we consider the semi-norm
\begin{equation*}
\norm{f}_{k} = \norm{f}_{k,p} \text{ where } p = \min\{ q \in \mathbb{N} \, : \, \Vnorm[k,q]{f} < +\infty\} \;.
\end{equation*}
Finally, define
$\setpoly{\infty}(\mathbb{R}^d,\mathbb{R}) = \cap_{k \in \mathbb{N}} \setpoly{k}(\mathbb{R}^d,\mathbb{R})$.
\section{Langevin-based control variates for MCMC methods}
\label{sec:asymp-variance}
\subsection{Method}
We introduce in the following our methodology based on control variates for the Langevin diffusion. In order not to obscure the main ideas of this method, we present it informally. Results which justify rigorously the related derivations are postponed to \Cref{sec:asympt-expans-asympt}.
We consider a family of control functionals $\mathcal{G} \subset \setpoly{2}(\mathbb{R}^d,\mathbb{R})$.
There is a great flexibility in the choice of the family $\mathcal{G}$.
We illustrate our methodology through a simple example
\begin{equation}
\label{eq:definition-linear-control}
\mathcal{G}_{\mathrm{lin}}= \set{g=\ps{\theta}{\psi}}{\theta \in \Theta}
\text{ where } \psi= \{\psi_i\}_{i=1}^{p}, \psi_i \in \setpoly{2}(\mathbb{R}^d,\mathbb{R}), \; i \in \{1,\dots,p\} \;,
\end{equation}
with $\Theta \subset \mathbb{R}^{p}$,
but the method developed in this paper is by no means restricted to a linear parameterized family.
A key property of the Langevin diffusion which is the basis of our methodology is the following ``carré du champ'' property (see for example \cite[Section 1.6.2, formula 1.6.3]{bakry:gentil:ledoux:2014}): for all $g_1,g_2\in\setpoly{2}(\mathbb{R}^d,\mathbb{R})$,
\begin{equation}
\label{eq:carre-du-champ}
\pi\parenthese{ g_1 \mathscr{L} g_2} = \pi\parenthese{ g_2 \mathscr{L} g_1} = -\pi\parenthese{\ps{\nabla g_1}{\nabla g_2}} \;,
\end{equation}
which reflects in particular that $\mathscr{L}$ is a self-adjoint operator on a dense subspace of $\operatorname{L}^2(\pi)$, the Hilbert space of square integrable function w.r.t.\ $\pi$.
A straightforward consequence of \eqref{eq:carre-du-champ} (setting $g_1 = \operatorname{1}$) is that
$\pi(\mathscr{L} g) = 0$ for any function $g \in \setpoly{2}(\mathbb{R}^d,\mathbb{R})$. This observation implies that
$f$ and $f+ \mathscr{L} g$ have the same expectation with respect to $\pi$ for any $f \in \setpoly{2}(\mathbb{R}^d,\mathbb{R})$ and $g \in \setpoly{2}(\mathbb{R}^d,\mathbb{R})$.
Therefore, as emphasized in the introduction, if the CLT \eqref{eq:TCL-continu} holds, a relevant choice of control variate for the Langevin diffusion to estimate $f \in \setpoly{2}(\mathbb{R}^d,\mathbb{R})$, is $h^{\star} = \mathscr{L} g^{\star}$, where $g^{\star}$ is a minimizer of
\begin{equation}
\label{eq:fun_asympt_var_g_g}
g \mapsto\varinf(f + \mathscr{L} g) \;.
\end{equation}
In the following, we explain how this optimization problem can be practically solved.
It is shown in \cite{Bhattacharya1982Classical} (see also \cite{glynn1996} and \cite{cattiaux2012central}) that under appropriate conditions on $U$ and $f$, the solution $(Y_t)_{t \geq 0}$ of the Langevin diffusion \eqref{eq:SDE} satisfies the CLT \eqref{eq:TCL-continu} where the asymptotic variance is given by
\begin{equation}
\label{eq:asymptotic-variance}
\varinf(f) = 2 \pi\parentheseLigne{\hat{f} \{f-\pi(f)\} } \;,
\end{equation}
and $\hat{f} \in \setpoly{2}(\mathbb{R}^d,\mathbb{R})$ satisfies Poisson's equation:
\begin{equation}\label{eq:poisson-eq-langevin}
\mathscr{L} \hat{f} = - \tilde{f} \;, \quad \text{where $\tilde{f}= f - \pi(f)$} \;.
\end{equation}
Another expression for $\varinf(f)$ is, using \eqref{eq:carre-du-champ} and \eqref{eq:poisson-eq-langevin}:
\begin{equation}
\label{eq:key-relation-variance}
\varinf(f) = 2\pi(\hat{f} \tilde{f} )
= - 2\pi ( \hat{f} \mathscr{L} \hat{f} )
= 2 \pi ( \|\nabla \hat{f} \|^2 ) \;.
\end{equation}
Based on \eqref{eq:carre-du-champ}, \eqref{eq:asymptotic-variance} and \eqref{eq:key-relation-variance}, we see now how the minimization of \eqref{eq:fun_asympt_var_g_g} can be computed in practice. First, by definition \eqref{eq:poisson-eq-langevin}, for all $g \in \mathcal{G}$, $\hat{f}- g \in\setpoly{2}(\mathbb{R}^d,\mathbb{R})$ is a solution to the Poisson equation
\[
\mathscr{L} (\phif-g) = \pi(f + \mathscr{L} g) - (f + \mathscr{L} g) \;.
\]
Therefore, we get for all $g \in \mathcal{G}$, using $\pi(\mathscr{L} g) = 0$ and \eqref{eq:asymptotic-variance}
\begin{equation*}
\varinf(f + \mathscr{L} g)
= 2\pi\parenthese{(\hat{f} - g) \defEns{\tilde{f} + \mathscr{L} g}}
= 2\pi(\| \nabla \hat{f} - \nabla g \|^2)\;.
\end{equation*}
In addition, by \eqref{eq:carre-du-champ} and \eqref{eq:poisson-eq-langevin}, we get that $\pi(\phif \mathscr{L} g)= -\pi(\tilde{f} g)$, and we obtain using \eqref{eq:key-relation-variance} that
\begin{align}
\nonumber
\varinf(f+ \mathscr{L} g)
&= 2\pi ( \phif \tilde{f} ) - 2\pi(g \tilde{f} ) + 2\pi(\phif \mathscr{L} g) - 2\pi(g \mathscr{L} g) \\
\label{eq:key-expression}
&= 2\pi (\phif \tilde{f}) - 4\pi(g \tilde{f} ) + 2\pi( \| \nabla g\|^2) \;.
\end{align}
Minimizing the map \eqref{eq:fun_asympt_var_g_g} is equivalent to minimization of $g \mapsto - 4\pi(g \tilde{f}) + 2\pi( \| \nabla g\|^2)$. It means that we might actually minimize the function
$g \mapsto \varinf(f + \mathscr{L} g)$ \emph{without} computing the solution $\hat{f}$ of the Poisson equation, which is in general a computational bottleneck.
When $g_\theta = \ps{\theta}{\psi} \in \mathcal{G}_{\mathrm{lin}}$, then \eqref{eq:key-expression} may be rewritten as:
\begin{equation*}
\varinf(f + \mathscr{L} g_\theta) = 2 \theta^{\operatorname{T}} H \theta - 4\ps{\theta}{b} + \varinf(f) \;,
\end{equation*}
where $H \in \mathbb{R}^{p \times p}$ and $b$ are given for any $i,j \in \{1,\ldots,p\}$ by
\begin{equation*}
H_{ij} = \pi(\ps{\nabla \psi_i}{\nabla \psi_j}) \quad \text{and} \quad b_i= \pi(\psi_i \tilde{f}) \;.
\end{equation*}
Note that $H$ is by definition a symmetric
semi-positive definite matrix. If $(1, \psi_1,\ldots,\psi_p)$ are linearly independent in
$\setpoly{2}(\mathbb{R}^d,\mathbb{R})$, then $H$ is full rank and the minimizer of $\varinf(f + \mathscr{L} g_\theta)$ is given by
\begin{equation}
\label{eq:min-asymp-var-diffusion}
\thetastar = H^{-1} b \;.
\end{equation}
In conclusion, in addition to its theoretical interest, the Langevin diffusion \eqref{eq:SDE} is an attractive model because optimization of the asymptotic variance is greatly simplified. However, we are not advocating simulation of this diffusion in MCMC applications. The main contribution of this paper is to show that the optimal control variate for the diffusion remains nearly optimal for many standard MCMC algorithms.
One example is the Unadjusted Langevin Algorithm (ULA), the Euler
discretization scheme associated to the Langevin SDE \eqref{eq:SDE}:
\begin{equation*}
X_{k+1} = X_k - \gamma \nabla \pU(X_k) + \sqrt{2\gamma} Z_{k+1} \;,
\end{equation*}
where $\gamma>0$ is the step size and $(Z_k)_{k\in\mathbb{N}}$ is an i.i.d.~sequence
of standard Gaussian $d$-dimensional random vectors. The idea of using
the Markov chain $(X_k)_{k\in\mathbb{N}}$ to sample approximately from
$\pi$ has been first introduced in the physics literature by
\cite{parisi:1981} and popularized in the computational statistics
community by \cite{grenander:1983} and \cite{grenander:miller:1994}.
As shown below, other examples are the Metropolis Adjusted Langevin Algorithm (MALA) algorithm (for which an additional Metropolis-Hastings correction step is added) but also for MCMC algorithms which do not seem to be ``directly'' related to the Langevin diffusion, like the Random Walk Metropolis algorithm (RWM).
To deal with these different algorithms within the same theoretical framework, we consider a family of Markov kernels $\set{R_\gamma}{ \gamma \in \ocint{0,\bar{\gamma} }}$, parameterized by a scalar parameter $\gamma\in \ocint{0,\bar{\gamma} }$ where $\bar{\gamma} >0$.
For the ULA and MALA algorithm, $\gamma$ is the stepsize in the Euler discretization of the diffusion; for the RWM this is the variance of the random walk proposal.
For any initial probability $\xi$ on $(\mathbb{R}^d, \mathcal{B}(\mathbb{R}^d))$ and $\gamma\in \ocint{0,\bar{\gamma}}$, denote by $\mathbb{P}_{\xi, \gamma}$ and $\mathbb{E}_{\xi, \gamma}$ the probability and the expectation respectively on the canonical space of the Markov chain with initial probability $\xi$ and of transition kernel $R_\gamma$.
By convention, we set $\mathbb{E}_{x,\gamma} = \mathbb{E}_{\updelta_x,\gamma}$ for all $x\in\mathbb{R}^d$. We denote by $(X_k)_{k \geq 0}$ the canonical process.
It is assumed below that $\set{R_\gamma}{ \gamma \in \ocint{0,\bar{\gamma} }}$, $f$ and $\mathcal{G}$ satisfy the following assumptions. Roughly speaking, these conditions impose that for any $\gamma \in \ocint{0,\bar{\gamma}}$ and $g \in \mathcal{G}$, the discrete CLT \eqref{eq:TCL-discret} holds for the function $f + \mathscr{L} g$, and that the associated asymptotic variance $\sigma^2_{\infty,\gamma}(f+\mathscr{L} g)$ is sufficiently close to $\sigma_{\infty}(f + \mathscr{L} g)$ given by the continuous CLT \eqref{eq:SDE}, as $\gamma\downarrow 0^+$, so that control functionals for the Markov chain $(X_k)_{k \in\mathbb{N}}$ can be derived using the methodology we developed above for the Langevin diffusion.
\begin{enumerate}[label=(\Roman*)]
\item \label{item:assum-general-1}
For each $\gamma \in \ocint{0,\bar{\gamma} } $, $R_{\gamma}$
has an invariant probability distribution $\pig$ satisfying $\pi_\gamma(|f+ \mathscr{L} g|) < \infty$ for any $g \in \mathcal{G}$.
\item \label{item:assum-general-2}
For any $g \in \mathcal{G}$ and $\gamma \in \ocint{0,\bar{\gamma} } $,
\begin{equation}\label{eq:clt-Rkerg}
\sqrt{n}(\pihat(f+\mathscr{L} g) - \pi_\gamma(f+\mathscr{L} g))\overset{\text{weakly}}{\underset{n\to+\infty}{\Longrightarrow}}
\mathcal{N}(0, \varinf[\gamma](f+\mathscr{L} g))
\end{equation}
where $\pihat(f+\mathscr{L} g)$ is the sample mean (see \eqref{eq:def-invpihatn}),
and $\varinf[\gamma](f+\mathscr{L} g) \geq 0$ is the asymptotic variance (see \eqref{eq:TCL-discret}) relatively to $R_\gamma$.
\item \label{item:assum-general-3}
For any $g \in \mathcal{G}$, as $\gamma \downarrow 0^+$,
\begin{align}
\label{eq:approximation-loi-variance}
\gamma \varinf[\gamma](f+\mathscr{L} g) &= \varinf(f+\mathscr{L} g) + o(1) \;, \\
\label{eq:approximation-invpig-invpi}
\pig(f+\mathscr{L} g) &= \pi(f+\mathscr{L} g) + O(\gamma) \;,
\end{align}
where $\varinf(f+\mathscr{L} g)$ is defined in \eqref{eq:asymptotic-variance}.
\end{enumerate}
The verification that these assumptions are satisfied for the ULA, RWM and MALA algorithms (under appropriate technical conditions), in the case $f \in \setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$ and $\mathcal{G} \subset \setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$, is postponed to \Cref{sec:asympt-expans-asympt}.
The standard conditions \ref{item:assum-general-1}--\ref{item:assum-general-2} are in particular satisfied if, for any $\gamma \in \ocint{0,\bar{\gamma}}$, $R_\gamma$ is $V$-uniformly geometrically ergodic for some measurable function $V: \mathbb{R}^d \to \coint{1,+\infty}$, \textit{i.e.}\ it admits an invariant probability measure $\pi_\gamma$ such that $\pi_\gamma(V) < +\infty$ and
there exist $C_\gamma \geq 0$ and $\rho_\gamma \in \coint{0,1}$ such that for any probability measure $\xi$ on $(\mathbb{R}^d,\mathcal{B}(\mathbb{R}^d))$ and $n \in \mathbb{N}$,
\begin{equation*}
\label{eq:def_v_unif_ergo}
\Vnorm{\xi R^n_{\gamma} - \pi_{\gamma}} \leq C_\gamma \xi(V) \rho_\gamma^n \;,
\end{equation*}
(see e.g.~\cite{bible} or \cite{douc:moulines:priouret:soulier:2018}). Condition \ref{item:assum-general-3} requires a specific form of the dependence of $C_\gamma$ and $\rho_\gamma$ on $\gamma$.
Based on \ref{item:assum-general-1}--\ref{item:assum-general-3} and \eqref{eq:min-asymp-var-diffusion}, the estimator of $\pi(f)$ we suggest is given for $N,n,m\in\mathbb{N}^*$ by
\begin{equation}\label{eq:def-invpi-cv}
\picv_{N, n, m}(f) = \frac{1}{n}\sum_{k=N}^{n+N-1} \parenthese{f(X_k) + \mathscr{L} \hControlFuncOpt (X_k)} \;,
\end{equation}
where $N$ is the length of the burn-in period
and $\hControlFuncOpt \in \operatorname*{arg\,min}_{g \in \mathcal{G}} \EmpRisk(g)$ is a minimizer of the structural risk associated with \eqref{eq:key-expression}
\begin{equation}
\label{eq:empirical-risk-minimization}
\EmpRisk(g) = \frac{1}{m} \sum_{k=N}^{N+m-1} \left\{-2 g(\tilde{X}_k) \tilde{f}_m(\tilde{X}_k) + \| \nabla g(\tilde{X}_k) \|^2 \right\} \;,
\end{equation}
where $\tilde{f}_m(x)= f(x)-m^{-1} \sum_{k=N}^{N+m-1} f(\tilde{X}_k)$.
Here $(\tilde{X}_k)_{k\in\mathbb{N}}$ can be an independent copy of (or be identical to) the Markov chain $(X_k)_{k \in\mathbb{N}}$ and $m$ is the length of the sequence used to estimate the control variate.
In this article, we do not study to what extent minimizing the empirical asymptotic variance \eqref{eq:empirical-risk-minimization} leads to the minimization of the asymptotic variance of $\picv_{N, n, m}(f)$ \eqref{eq:def-invpi-cv} as $n\to+\infty$; such a problem has been tackled by \cite{Belomestny2018} in the i.i.d.~case.
To control the complexity of the class of functions $\mathcal{G}$, a penalty term may be added in \eqref{eq:empirical-risk-minimization}. The use of a penalty term to control the excess risk in the estimation of the control variate has been proposed and discussed in \cite{south:mira:drovandi:2018}.
Concerning the choice of $\mathcal{G}$, the simplest case is $\mathcal{G}_{\mathrm{lin}}$ defined by \eqref{eq:definition-linear-control}, corresponding to the parametric case, and it is by far the most popular approach.
It is possible to go one step further and adopt fully non-parametric approaches like kernel regression methods \cite{oates:girolami:chopin:2016} or neural networks \cite{zhu:wan:zhong:2018}.
If the control function is a linear combination of functions, $Equation nsuremath{\mathbb{C}}ontrolFunc_\theta = \ps{\theta}{\psi}$ where $\psi= \set{\psi_i}{1 \leq i \leq p}$, then the empirical risk Equation qref{eq:empirical-risk-minimization} may be expressed as
\begin{equation*}
\EmpRisk(\ControlFunc_\theta)= -2\ps{\theta}{b_m} + \ps{\theta}{H_m \theta} \;,
\end{equation*}
where for $1 \leq i,j \leq p$,
\begin{equation*}
[ b_m ]_i = \frac{1}{m} \sum_{k=N}^{N+m-1} \psi_i(\tilde{X}_k) \tilde{f}_m (\tilde{X}_k) \quad, \;
[H_m]_{ij} = \frac{1}{m} \sum_{k=N}^{N+m-1} \ps{\nabla \psi_i(\tilde{X}_k)}{\nabla \psi_j(\tilde{X}_k)} \, .
\end{equation*}
In this simple case, an optimizer is obtained in closed form
\begin{equation}
\label{eq:def-paramhat-n-star}
\theta_m^* = H_m^{+} b_m \;,
\end{equation}
where $H_m^{+}$ is the Moore-Penrose pseudoinverse of $H_m$.
\subsection{Comparison with other control variate methods for Monte Carlo simulation}
\label{subsec:bibliography}
The construction of control variates for MCMC and the related problem of approximating solutions of Poisson equations are very active fields of research. It is impossible to give credit for all the contributions undertaken in this area: see \cite{Dellaportas2012Control}, \cite{papamarkou2014}, \cite{oates:girolami:chopin:2016} and references therein for further background.
We survey in this section only the methods which are closely connected to our approach.
\cite{HendersonThesis} and \cite[Section 11.5]{meyn2008control} proposed control variates of the form $(\RKer - \operatorname{Id}) \ControlFunc_\theta$ where $\ControlFunc_\theta := \ps{\theta}{\psi}$ and $R$ is the Markov kernel associated to a Markov chain $(X_k)_{k \in \mathbb{N}}$ and
$\psi = (\psi_1, \ldots, \psi_p)$ are known $\pi$-integrable functions. The parameter $\theta\in\mathbb{R}^p$ is obtained by minimizing the asymptotic variance
\begin{equation}\label{eq:criterion-min-asymp-var}
\min_{\theta\in\mathbb{R}^{p}} \varinf[\operatorname{d}](f + (\RKer - \operatorname{Id}) \ControlFunc_\theta) = \min_{\theta\in\mathbb{R}^{p}} \pi\parenthese{\defEns{\hat{f}_{\operatorname{d}} - \ControlFunc_\theta}^2 - \defEns{\Rker(\hat{f}_{\operatorname{d}} - \ControlFunc_\theta)}^2} \;,
\end{equation}
where $\hat{f}_{\operatorname{d}}$ is the solution of the \textit{discrete} Poisson equation $(\RKer - \operatorname{Id}) \hat{f}_{\operatorname{d}} = -\tilde{f}$.
The method suggested in \cite[Section 11.5]{meyn2008control} to minimize \eqref{eq:criterion-min-asymp-var} requires estimates of the solution $\hat{f}_{\operatorname{d}}$ of the Poisson equation. Temporal Difference learning is a possible candidate, but this method is complex to implement and suffers from high variance.
\cite{Dellaportas2012Control} noticed that if $\Rker$ is reversible w.r.t.~$\pi$, it is possible to optimize the limiting variance \eqref{eq:criterion-min-asymp-var} without computing explicitly the Poisson solution $\hat{f}_{\operatorname{d}}$. This approach is of course closely related with our proposed method: the reversibility of the Markov kernel is replaced here by the self-adjointness of the generator of the Langevin diffusion which implies the reversibility of the semi-group.
Each of the algorithms in the aforementioned literature requires computation of $R\psi_i$ for each $i\in\defEns{1,\ldots,p}$, which is in general difficult except in very specific examples. In \cite{HendersonThesis,meyn2008control} this is addressed by restricting to kernels $R(x,\,\cdot\,)$ with finite support for each $x$. In \cite{Dellaportas2012Control} the authors consider mainly Gibbs samplers in their numerical examples.
Our methodology is also related to the Zero Variance method proposed by \cite{Mira2013,papamarkou2014,oates:girolami:chopin:2016,south:mira:drovandi:2018}, which uses $\mathscr{L} \ControlFunc$ as a control variate and chooses $\ControlFunc$ by minimizing $\pi( \{\tilde{f} +\mathscr{L} \ControlFunc\}^2)$.
A drawback of this method stems from the fact that the optimization criterion is theoretically justified if $(X_k)_{k\in\mathbb{N}}$ is i.i.d.\ and
might significantly differ from the asymptotic variance $\varinf[\gamma](f+\mathscr{L} \ControlFunc)$ defined in \eqref{eq:clt-Rkerg}.
We compare the two approaches in \Cref{sec:application_cv}.
\section{Asymptotic expansion for the asymptotic variance of MCMC algorithms}
\label{sec:asympt-expans-asympt}
In this Section, we provide conditions upon which the approximations
\eqref{eq:approximation-loi-variance}-\eqref{eq:approximation-invpig-invpi}
are satisfied for $f \in \setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$ and
$\ControlFuncSet \subset \setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$.
We first assume that the gradient of the potential is Lipschitz:
\begin{assumption}
\label{assumption:U-Sinfty}
$U\in\setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$ and $\nabla U$ is Lipschitz, \textit{i.e.}~there exists $L \geq 0$ such that for all $x,y\in\mathbb{R}^d$,
\begin{equation*}
\norm{\nabla U(x) - \nabla U(y)} \leq L \norm{x-y} \;.
\end{equation*}
\end{assumption}
Denote by $(P_t)_{t\geq 0}$ the semigroup associated to the SDE \eqref{eq:SDE} defined by $P_t f(x) = \expe{f(Y_t)}$ where $f$ is bounded measurable and $(Y_t)_{t\geq 0}$ is a solution of \eqref{eq:SDE} started at $x$. By construction, the target distribution $\pi$ is invariant for $(P_t)_{t\geq 0}$.
The
conditions we consider require that
$\{R_{\gamma} ,\: \, \gamma \in \ocint{0,\bar{\gamma}}\}$ is a family of
Markov kernels such that for any $\gamma \in \ocint{0,\bar{\gamma}}$, $R_{\gamma}$ approximates $P_{\gamma}$ in a sense specified below. Let $V:\mathbb{R}^d\to\coint{1,+\infty}$ be a
measurable function.
\begin{assumption}
\label{ass:geo_ergod}
\begin{enumerate}[label=(\roman*)]
\item \label{ass:geo_ergod_i}
For any $\gamma\in\ocint{0,\bar{\gamma}}$, $\Rkerg$ has a unique invariant distribution $\pig$.
\item \label{ass:geo_ergod_ii} There exists $c >0$ such that $ \liminf_{\norm{x} \to \infty}\{ V(x) \exp(-c\norm{x})\} > 0$,
$\pi(V) < +\infty$ and $ \sup_{\gamma \in \ocint{0,\bar{\gamma}}} \pi_{\gamma}(V) < +\infty$.
\item \label{ass:geo_ergod_iii} There exist $C>0$ and $\rho\in\coint{0,1}$ such that for all $x\in\mathbb{R}^d$,
\begin{align}
\label{eq:def-V-unif}
\text{for any $n\in\mathbb{N}$, $\gamma\in\ocint{0,\bar{\gamma}}$} \;, \qquad & \Vnorm{\updelta_x \RKer_\gamma^n - \pig} \leq C \rho^{n\gamma} V(x) \;, \\
\label{eq:def-V-unif_ii}
\text{ for any $t \geq 0$} \;, \qquad & \Vnorm{\updelta_x P_t - \pi} \leq C \rho^{t} V(x) \;.
\end{align}
\end{enumerate}
\end{assumption}
These conditions imply that the kernels $\Rker_\gamma$ are
$V$-uniformly geometrically ergodic ``uniformly'' with
respect to the parameter $\gamma \in \ocint{0,\bar{\gamma}}$ with a mixing
time going to infinity as the inverse of the stepsize $\gamma$ when
$\gamma \downarrow 0^+$. Note that the mixing time of $P_{\gamma}$ is also inversely proportional to $\gamma$ when $\gamma \downarrow 0^+$.
Under \Cref{assumption:U-Sinfty} and \Cref{ass:geo_ergod}, by \cite[Lemma 2.6]{kopec:2015}, there exists a solution $\phif \in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ to Poisson's equation \eqref{eq:poisson-eq-langevin} for any $f \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ which is given for any $x \in \mathbb{R}^d$ by
\begin{equation}
\label{eq:def_poisson_int}
\phif(x) = \int_{0}^{+\infty} P_t \tilde{f} (x) \mathrm{d} t \;.
\end{equation}
Moreover, \cite[Theorem 3.1]{cattiaux2012central} shows that, for any $f \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, $t^{-1/2} \int_{0}^t \tilde{f}(Y_s) \mathrm{d} s$ where $(Y_t)_{t \geq 0}$ is the solution of the Langevin SDE,
converges weakly to $\mathcal{N}(0,\sigma^2_{\infty}(f))$ where $\sigma_\infty^2(f)$ is given by \eqref{eq:asymptotic-variance}.
Note that the assumption \Cref{ass:geo_ergod} implies that for any $x\in\mathbb{R}^d$,
\begin{align}
\label{eq:discrete-drift-uniform-bound}
\text{ for any $\gamma\in\ocint{0,\bar{\gamma}}$, $n\in\mathbb{N}^*$} \;, \qquad & \Rkerg^n V(x) \leq C \rho^{n\gamma} V(x) + \sup_{\gamma \in \ocint{0,\bar{\gamma}}} \pi_{\gamma} (V)\;,\\
\nonumber
\text{ for any $t \geq 0$} \;, \qquad & P_t V(x) \leq C \rho^{t} V(x) + \pi (V)
\;.
\end{align}
We now introduce an assumption guaranteeing that the limit $\gamma^{-1} (\Rkerg - \operatorname{Id})$ as $\gamma \downarrow 0^+$ is equal to the infinitesimal generator of the Langevin diffusion defined, for a bounded measurable function $f$ and $x \in \mathbb{R}^d$, as $\mathscr{L} f(x) = \lim_{t \downarrow 0^+} \{(P_tf(x) - f(x))/t \}$, if the limit exists. This is a natural assumption if the semigroup of the Langevin diffusion evaluated at time $t=\gamma$, $P_\gamma$, and $\Rkerg$ are close as $\gamma \downarrow 0^+$.
\begin{assumption}
\label{ass:dev_generator_discrete}
There exist $\alpha\geq 1$ and a family of operators $(\mathscr{E}_{\step})_{\gamma\in\ocint{0,\bar{\gamma}}}$ with $\mathscr{E}_{\step} :\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R}) \to \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, such that for all $f\in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ and $\gamma\in\ocint{0,\bar{\gamma}}$,
\begin{equation*}
\Rkerg f = f + \gamma \mathscr{L} f + \gamma^{\alpha} \mathscr{E}_{\step} f \;.
\end{equation*}
In addition, there exists $\ke \in \mathbb{N}$, $\ke\geq 2$ such that for all $p \in \mathbb{N}$ there exist $q \in \mathbb{N}$ and $C \geq 0$ (depending only on $\ke$ and $p$) such that for any $f \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d, \mathbb{R})$,
\begin{equation*}
\sup_{\gamma \in \ocint{0,\bar{\gamma}}} \VnormEq[0,q]{\mathscr{E}_{\step} f} \leq C \VnormEq[\ke,p]{f} \;.
\end{equation*}
\end{assumption}
We show below that these conditions are satisfied for the Metropolis Adjusted /
Unadjusted Langevin Algorithm (MALA and ULA) algorithms (in which case
$\gamma$ is the stepsize in the Euler discretization of the Langevin
diffusion) and also by the Random Walk Metropolis algorithm (RWM) (in
which case $\gamma$ is the variance of the increment distribution).
We next give an upper bound on the difference between $\pig$ and $\pi$ which implies that \eqref{eq:approximation-invpig-invpi} holds. The proofs are postponed to \Cref{sec:proofs}.
\begin{proposition}
\label{item-thm-var-3}
Assume \Cref{assumption:U-Sinfty}, \Cref{ass:geo_ergod} and \Cref{ass:dev_generator_discrete} and let $p \in \mathbb{N}$. Then there exists $C < \infty$ such that for all $f\in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ and $\gamma\in\ocint{0,\bar{\gamma}}$,
\begin{equation*}
\absolute{\pig(f) - \pi(f)} \leq C \normLigne{f}_{\ke,p} \gamma^{\alpha-1} \;.
\end{equation*}
\end{proposition}
\begin{proof}
The proof is postponed to \Cref{subsec:proof:item-thm-var-3}.
\end{proof}
The next result which is the main theorem of this Section precisely formalizes \eqref{eq:approximation-loi-variance}.
\begin{theorem} \label{prop:dev-weak-error}
Assume \Cref{assumption:U-Sinfty}, \Cref{ass:geo_ergod} and \Cref{ass:dev_generator_discrete}.
Then, there exists $C\geq 0$ such that for all $f\in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, $\gamma\in\ocint{0,\bar{\gamma}}$, $x\in\mathbb{R}^d$, and $n\in\mathbb{N}^*$
\begin{multline*}
\absolute{\frac{\gamma}{n}\expeMarkov{x, \gamma}{\parenthese{\sum_{k=0}^{n-1} \defEns{ f(X_k) - \pig(f)}}^2} - \varinf(f)}\\ \leq C \normLigne[2]{f}_{\ke +2,p} \defEns{\gamma^{(\alpha-1) \wedge 1} + \fracaaa{V(x)}{n^{1/2}\gamma^{1/2}}} \;,
\end{multline*}
where $\varinf(f)$ is defined in \eqref{eq:asymptotic-variance}.
\end{theorem}
\begin{proof}
The proof is postponed to \Cref{subsec:proof-weak-error-dev}.
\end{proof}
We now consider the ULA algorithm.
The Markov kernel $Equation nsuremath{\mathbb{R}}ula$ associated to the ULA algorithm is given for $\gamma>0$, $x\in\mathbb{R}^d$ and $\mathsf{A} \in\mathcal{B}(\mathbb{R}^d)$ by
\begin{equation}\label{eq:def-kernel-ULA}
Equation nsuremath{\mathbb{R}}ula(x ,\mathsf{A}) =
\int_{\mathbb{R}^d} \mathbbm{1}_{\mathsf{A}}\parenthese{x- \gamma \nabla U(x) + \sqrt{2\gamma} z} \boldsymbol{\varphi}(z) \mathrm{d} z \;,
Equation nd{equation}
where $\boldsymbol{\varphi}$ is the $d$-dimensional standard Gaussian density $\boldsymbol{\varphi} : z \mapsto (2\uppi)^{-d/2} \mathrm{e}^{-\norm{z}^2}$.
Consider the following additional assumption.
\begin{assumption}
\label{ass:condition_MALA}
There exist $K_1 \geq 0$ and $m >0$ such that for any $x \not \in \ball{0}{K_1}$,
and $y \in \mathbb{R}^d$, $ \ps{\operatorname{D}^2 U(x) y }{y} \geq m \norm[2]{y}$. Moreover, there exists $M \geq 0$ such that for any $x \in \mathbb{R}^d$, $ \norm{ \operatorname{D}^3 U(x)} \leq M $.
\end{assumption}
\begin{proposition}\label{thm:geometric_ergodicity_ula}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}.
There exist $\bar{\gamma}>0$ and $V:\mathbb{R}^d\to\coint{1,+\infty}$ such that \Cref{ass:geo_ergod} is satisfied for the family of Markov kernels $\{\Rula \, : \, \gamma \in \ocint{0,\bar{\gamma}}\}$.
\end{proposition}
\begin{proof}
The proof follows from \cite[Theorem 14, Proposition 24]{debortoli2018back}. However, for completeness and since all the tools needed for the proof of this result are used in the study of MALA, the proof is given in \Cref{subsec:geom-ergodicity-ula}.
\end{proof}
\begin{remark}
Note that in fact \Cref{ass:geo_ergod} holds for ULA under milder conditions on
$U$ using the results obtained in \cite{eberle:2015,eberle2018quantitative,debortoli2018back}. For
example, if \Cref{assumption:U-Sinfty} holds and there exist
$a_1,a_2>0$ and $c \geq 0$ such that
\begin{equation}\label{eq:cond-vgeom-ula}
\ps{\nabla U(x)}{x} \geq a_1 \norm{x} + a_2 \norm[2]{\nabla U(x)} -c \;,
\end{equation}
\cite[Theorem 14, Proposition 24]{debortoli2018back} imply that \Cref{ass:geo_ergod} holds with $V(x) = \exp\{(a_1/8)(1+\norm[2]{x})^{1/2}\}$.
\end{remark}
We now establish \Cref{ass:dev_generator_discrete}. Let $\varphi\in\setpoly{\infty}(\mathbb{R}^d, \mathbb{R})$, $\bar{\gamma}>0$, $\gamma\in\ccint{0,\bar{\gamma}}$ and $x\in\mathbb{R}^d$. Denote by $X_1 = x - \gamma \nabla \pU(x) + \sqrt{2\gamma} Z$ where $Z$ is a standard $d$-dimensional Gaussian vector, the first step of ULA. A Taylor expansion of $\varphi(X_1)$ at $x$ and integration show that $\Rula \varphi(x) = \varphi(x) + \gamma \mathscr{L} \varphi(x) + \gamma^2 \genrula \varphi(x)$ where
\begin{multline}
\genrula \varphi(x) = \frac{1}{2} \operatorname{D}^2 \varphi(x)[\nabla \pU(x)^{\otimes 2}]
- \frac{1}{6} \gamma \operatorname{D}^3 \varphi(x)[\nabla \pU(x)^{\otimes 3}] \\
- \expe{\operatorname{D}^3 \varphi(x)[\nabla \pU(x), Z^{\otimes 2}]} \\
\label{eq:Eula-def}
+ \frac{1}{6} \int_0^1 (1-t)^3 \expe{\operatorname{D}^4 \varphi(x - t \gamma \nabla \pU(x) + t \sqrt{2\gamma} Z)[(-\sqrt{\gamma} \nabla \pU(x) + \sqrt{2} Z)^{\otimes 4}]} \mathrm{d} t \;.
\end{multline}
A simple application of the Lebesgue dominated convergence theorem implies then the following result.
\begin{lemma}\label{prop:ULA-dev-ergo}
Assume \Cref{assumption:U-Sinfty}. Then for any $\bar{\gamma} >0$, $\set{\Rula}{\gamma \in \ocint{0,\bar{\gamma}}}$ satisfies \Cref{ass:dev_generator_discrete} with $\alpha = 2$, $\mathscr{E}_{\step} = \genrula$ and $\ke = 4$.
\end{lemma}
We now examine the MALA algorithm.
The Markov kernel $Equation nsuremath{\mathbb{R}}mala$ of the MALA algorithm, see \cite{roberts:tweedie-Langevin:1996}, is given for $\gamma>0$, $x\in\mathbb{R}^d$, and $\mathsf{A} \in\mathcal{B}(\mathbb{R}^d)$ by
\begin{align}
\nonumber
&\Rmala(x ,\mathsf{A}) = \int_{\mathbb{R}^d} \mathbbm{1}_{\mathsf{A}}\parenthese{x- \gamma \nabla U(x) + \sqrt{2\gamma} z} \min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)}) \boldsymbol{\varphi}(z) \mathrm{d} z \\
\label{eq:def-kernel-MALA}
&\phantom{--}+ \updelta_x(\mathsf{A}) \int_{\mathbb{R}^d} \defEns{1- \min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)})} \boldsymbol{\varphi}(z) \mathrm{d} z \;,\\
\nonumber
& \tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z) = U(x - \gamma \nabla U(x) + \sqrt{2 \gamma}z) - U(x) \\
\label{eq:def-alpha-MALA}
& \phantom{--} + \frac{\norm[2]{z-(\gamma/2)^{1/2}\defEns{\nabla U(x) + \nabla U(x-\gamma \nabla U(x) + \sqrt{2\gamma} z)}} -\norm[2]{z}}{2}\;.
\end{align}
The analysis of the MALA algorithm is closely related to the study of the ULA algorithm. Indeed, the difference between the two Markov kernels can be expressed for any bounded measurable function $\phi:\mathbb{R}^d\to\mathbb{R}$ by
\begin{multline}
\label{eq:diff-rula-rmala}
\Rmala \phi(x) - \Rula \phi(x) = \int_{\mathbb{R}^d}\{\phi(x) - \phi(x-\gamma \nabla U(x) + \sqrt{2 \gamma} z)\} \\
\times \{1 - \min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)}) \} \boldsymbol{\varphi}(z) \mathrm{d} z \;.
\end{multline}
Since $1-\min(1,\mathrm{e}^{-t}) \leq \abs{t}$ for any $t \in \mathbb{R}$, properties of ULA can then be transferred to MALA from perturbation arguments achieved by a careful analysis of $\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}$ which is the content of the following result.
\begin{lemma}
\label{lem:bound_alpha_mala_1}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then, for any $\bar{\gamma} >0$, there exists $C_{1,\bar{\gamma}} < \infty$ such that for any $x,z \in \mathbb{R}^d$, $\gamma \in \ocint{0,\bar{\gamma}}$, it holds
\begin{equation*}
\abs{ \tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)} \leq C_{1,\bar{\gamma}} \gamma^{3/2}\{1+\norm[4]{z} + \norm[2]{x}\} \;.
\end{equation*}
\end{lemma}
\begin{proof}
The proof is postponed to \Cref{subsec:geom-ergodicity-mala}.
\end{proof}
A first easy consequence of \eqref{eq:diff-rula-rmala} using \eqref{eq:Eula-def} is that we get for any $\varphi\in\setpoly{\infty}(\mathbb{R}^d, \mathbb{R})$, $\bar{\gamma}>0$, $\gamma\in\ocint{0,\bar{\gamma}}$, $\Rmala\varphi = \varphi + \gamma \mathscr{L} \varphi + \gamma^2 \genrmala \varphi$,
with $\genrmala \varphi= \genrula \varphi + g_\stepenrmala \varphi$ and for any $x \in \mathbb{R}^d$,
\begin{multline*}
g_\stepenrmala \varphi(x) = \mathbb{E} \bigg[\gamma^{-3/2} \defEns{1-\min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,Z)})} \\
\times \defEns{\int_0^1 \ps{\nabla \varphi(x-t\gamma\nabla \pU(x) + t\sqrt{2\gamma}Z)}{\sqrt{\gamma} \nabla \pU(x) - \sqrt{2} Z} \mathrm{d} t} \bigg] \;,
\end{multline*}
where $Z$ is a $d$-dimensional standard Gaussian vector.
Note that by the Lebesgue dominated convergence theorem, for any $\varphi\in\setpoly{\infty}(\mathbb{R}^d, \mathbb{R})$, $\bar{\gamma}>0$, $\gamma\in\ocint{0,\bar{\gamma}}$, $ g_\stepenrmala \varphi$ is continuous. As a result and using \Cref{prop:ULA-dev-ergo} and \Cref{lem:bound_alpha_mala_1}, it follows that \Cref{ass:dev_generator_discrete} holds.
\begin{lemma}\label{prop:MALA-dev-ergo}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}.
Then for any $\bar{\gamma} >0$, $\{\Rmala \, : \, \gamma \in \ocint{0,\bar{\gamma}}\}$ satisfies \Cref{ass:dev_generator_discrete} with $\alpha = 2$, $\mathscr{E}_{\step} = \genrmala$ and $\ke = 4$.
\end{lemma}
We now turn to verifying \Cref{ass:geo_ergod}. Similarly to \Cref{prop:MALA-dev-ergo}, a key tool is the decomposition of $\Rmala$ given by \eqref{eq:diff-rula-rmala}.
\begin{proposition}\label{thm:geometric_ergodicity_mala}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}.
There exist $\bar{\gamma}>0$ and $V:\mathbb{R}^d\to\coint{1,+\infty}$ such that \Cref{ass:geo_ergod} is satisfied for the family of Markov kernels $\{\Rmala \, : \, \gamma \in \ocint{0,\bar{\gamma}}\}$.
\end{proposition}
\begin{proof}
The proof is postponed to \Cref{subsec:geom-ergodicity-mala}.
\end{proof}
We now turn to the analysis of the RWM algorithm. For $\gamma>0$, the Markov kernel $\Rrwm$ of the RWM algorithm with a Gaussian proposal of mean $0$ and variance $2\gamma$ is given for $x\in\mathbb{R}^d$ and $\mathsf{A} \in\mathcal{B}(\mathbb{R}^d)$ by
\begin{multline*}
\Rrwm(x,\mathsf{A}) = \int_{\mathbb{R}^d} \mathbbm{1}_{\mathsf{A}} (x + \sqrt{2 \gamma} z) \min(1,\mathrm{e}^{-\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)}) \boldsymbol{\varphi}(z) \mathrm{d} z \\
+ \updelta_x(\mathsf{A}) \defEns{1-\int_{\mathbb{R}^d} \min(1,\mathrm{e}^{-\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)}) \boldsymbol{\varphi}(z) \mathrm{d} z} \;,
\end{multline*}
where $\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z) = U(x+\sqrt{2 \gamma} z) - U(x)$. We first consider \Cref{ass:dev_generator_discrete} and adapt the proof of \cite[Lemma 1]{refId0}.
To this end, consider the following decomposition for any $\varphi \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$,
\begin{multline}
\label{eq:proof-RWM-1}
\Rrwm \varphi (x) - \varphi(x) = \expe{\varphi(x+\sqrt{2\gamma}Z) - \varphi(x)} \\
+ \expe{\parenthese{\min(1,\mathrm{e}^{-\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x, Z)}) -1} \defEns{\varphi(x+\sqrt{2\gamma}Z) - \varphi(x)}} \;,
\end{multline}
where $Z$ is a standard $d$-dimensional Gaussian vector. While the first term in this decomposition can be easily handled by a Taylor expansion, we rely on the following result for the second term. Define $\zeta_\step:\mathbb{R}^d \times \mathbb{R}^d \to \mathbb{R}$ for all $x,z\in\mathbb{R}^d$ and $\gamma\in\ocint{0,\bar{\gamma}}$ by,
\begin{equation*}
\zeta_\step(x,z) = 1 - \min\parenthese{1,\exp\defEns{-\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)}} - \sqrt{2\gamma} \ps{\nabla \pU(x)}{z}_{+} \;.
\end{equation*}
\begin{lemma}
\label{lem:approx_rwm}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then, for all $\gamma\in\ocint{0,\bar{\gamma}}$ and $x,z\in\mathbb{R}^d$,
\begin{equation*}
\abs{\zeta_\step(x,z)}
\leq \gamma \norm[2]{z} \defEnsLigne{L + 2\norm[2]{\nabla U(x)} + 4\gamma L^2 \norm[2]{z}} \;.
\end{equation*}
\end{lemma}
\begin{proof}
First, by a Taylor expansion and since $t \mapsto \max(0,t)$ is $1$-Lipschitz, we get for all $\gamma\in\ocint{0,\bar{\gamma}}$ and $x,z\in\mathbb{R}^d$
\begin{equation*}
\absolute{\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)_+ - \sqrt{2\gamma}\ps{\nabla \pU(x)}{z}_{+}} \leq L\gamma \norm{z}^2 \;,
\end{equation*}
where for any $a \in \mathbb{R}$, $a_+ = \max(0,a)$.
Using that for all $x,z\in\mathbb{R}^d$, \[ \min\{1,\exp(-\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z))\} = \exp(-\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)_+)\] and
\begin{equation*}
\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)_{+} - (1/2) \defEns{\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)_+}^2 \leq 1 - \mathrm{e}^{-\tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)_{+}} \leq \tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z)_+ \;,
\end{equation*}
concludes the proof.
\end{proof}
Let $\varphi \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$. Using a Taylor expansion, we get for all $x,z\in\mathbb{R}^d$,
\begin{align}
\nonumber
&\varphi(x+\sqrt{2\gamma}z) - \varphi(x) \\
&\quad = \sqrt{2\gamma}\ps{\nabla \varphi(x)}{z} + (2\gamma)\int_0^1 (1-t)
\nonumber
\operatorname{D}^2\varphi(x+t\sqrt{2\gamma}z)[z^{\otimes 2}] \mathrm{d} t \\
\nonumber
&\quad = \sqrt{2\gamma}\ps{\nabla \varphi(x)}{z} + \gamma \operatorname{D}^2\varphi(x)[z^{\otimes 2}] + (\sqrt{2}/3) \gamma^{3/2} \operatorname{D}^3\varphi(x)[z^{\otimes 3}] \\
\nonumber
&\phantom{-------------}+ (2/3)\gamma^2 \int_0^1 (1-t)^3 \operatorname{D}^4\varphi(x+t\sqrt{2\gamma}z)[z^{\otimes 4}] \mathrm{d} t \;.
\end{align}
In addition, since for any $x \in \mathbb{R}^d$,
\[ \expe{\ps{\nabla U(x)}{Z}_{+} \ps{\nabla \varphi(x)}{Z}} = (1/2) \ps{\nabla \pU(x)}{\nabla \varphi(x)} \;, \]
where $Z$ is a standard $d$-dimensional Gaussian vector, we get that by \eqref{eq:proof-RWM-1} and \Cref{lem:approx_rwm}, for any $\varphi\in\setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$ and $\gamma >0$, $\Rrwm\varphi = \varphi + \gamma\mathscr{L} \varphi +\gamma^{3/2}\genrrwm \varphi$ where for any $x \in \mathbb{R}^d$,
\begin{align*}
\nonumber
&\genrrwm\varphi(x) \\
\nonumber
&= -\mathbb{E} \bigg[
\int_0^1 (1-t)\operatorname{D}^2 \varphi(x+Z_t)[Z^{\otimes 2}] \mathrm{d} t
\defEns{2^{3/2}\ps{\nabla \pU(x)}{Z}_{+} + 2\gamma^{-1/2}\zeta_\step(x,Z)} \\
&\phantom{--}+\sqrt{2} \gamma^{-1}\zeta_\step(x,Z) \ps{\nabla \varphi(x)}{Z}
-(2/3)\sqrt{\gamma} \int_0^1 (1-t)^3 \operatorname{D}^4 \varphi (x+Z_t) [Z^{\otimes 4}] \mathrm{d} t
\bigg] \;,
\end{align*}
where $Z_t = t\sqrt{2\gamma}Z$. Then, since $\zeta_\step$ is continuous and using the Lebesgue dominated convergence theorem, we end up with the following result.
\begin{lemma}\label{prop:RWM-dev-ergo}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then for any $\bar{\gamma} >0$, $\set{\Rrwm}{\gamma \in \ocint{0,\bar{\gamma}}}$ satisfies \Cref{ass:dev_generator_discrete} with $\mathscr{E}_{\step} = \genrrwm$, $\alpha = 3/2$ and $\ke = 4$.
\end{lemma}
In \Cref{sec:additional-proofs}, we establish a similar result as \Cref{thm:geometric_ergodicity_ula} and \Cref{thm:geometric_ergodicity_mala} for the RWM algorithm.
\section{Numerical experiments}
\label{sec:application_cv}
In this Section, we compare numerically our methodology with the Zero Variance method suggested by \cite{Mira2013}, see \Cref{subsec:bibliography}, that consists in minimizing the marginal variance $\min_{\ControlFunc\in\ControlFuncSet} \pi(\{\tilde{f} + \mathscr{L} \ControlFunc\}^2)$ instead of the asymptotic variance $\min_{\ControlFunc\in\ControlFuncSet} \varinf(f + \mathscr{L} \ControlFunc)$. In \Cref{subsec:numerical-comparison-toy-examples}, we consider a one dimensional example where explicit calculations are possible. In \Cref{subsec:bayesian-examples-numeric}, we study Bayesian logistic and probit regressions.
The code used to run the experiments is available at \url{https://github.com/nbrosse/controlvariates}.
\subsection{One dimensional example}
\label{subsec:numerical-comparison-toy-examples}
We consider an equally weighted mixture of two Gaussian densities of means $(\mu_1, \mu_2) = (-1, 1)$ and variance $\sigma^2 = 1/2$, and a test function $f(x) = x + x^3 /2 + 3\sin(x)$. The derivative of the solution of the Poisson equation \eqref{eq:poisson-eq-langevin} is in such case analytically known: $\hat{f} '(x) = -(1/\pi(x))\int_{-\infty}^{x} \pi(t) \tilde{f}(t) \mathrm{d} t$, see \Cref{subsec:1-2d-numerics-practice} for a practical implementation.
We build a control variate $\ControlFunc_\theta \in \ControlFuncSet[\mathrm{lin}] = \defEns{\ps{\theta}{\psi} : \theta\in\mathbb{R}^p}$ where $\psi=(\psi_1,\ldots,\psi_p)$ are $p$ Gaussian kernels regularly spaced on $\ccint{-4, 4}$, \textit{i.e.}~for all $i\in\defEns{1,\ldots,p}$ and $x\in\mathbb{R}$
\begin{equation*}
\label{eq:def-basis-gaussian-kernels}
\psi_i(x) = (2\uppi)^{-1/2} \mathrm{e}^{-(x-\mu_i)^2 / 2} \;, \quad \text{where } \mu_i\in\ccint{-4,4} \;.
\end{equation*}
The optimal parameter $\thetastar\in\mathbb{R}^p$ minimizing the asymptotic variance $\varinf(f + \mathscr{L} \ControlFunc_\theta)$ can be explicitly computed according to \eqref{eq:min-asymp-var-diffusion}. For the Zero Variance estimator, the optimal parameter is given by
\begin{equation}\label{eq:paramzv}
\theta^{*}_{\operatorname{zv}} = - H_{\operatorname{zv}}^{-1} b_{\operatorname{zv}} \;,
\end{equation}
where for $1\leq i,j \leq p$, $[H_{\operatorname{zv}}]_{ij} = \pi(\ps{\mathscr{L} \psi_i}{\mathscr{L} \psi_j})$ and $[b_{\operatorname{zv}}]_i = \pi(\tilde{f} \mathscr{L} \psi_i)$.
$H_{\operatorname{zv}}$ is invertible if $(\mathscr{L} \psi_1,\ldots,\mathscr{L}\psi_p)$ are linearly independent in $\setpoly{2}(\mathbb{R}^d,\mathbb{R})$.
The asymptotic variance $\varinf(f + \mathscr{L} \ControlFunc_\theta)$ for the two different parameters, $\thetastar$ and $\theta^{*}_{\operatorname{zv}}$, is compared against the number of Gaussian kernels $p\in\defEns{4,\ldots,10}$ in \Cref{figure:resume_1d}.
Note that the asymptotic variance $\varinf(f)$ is $92.5$. We observe that the variance reduction is better for an even number $p$ of basis functions; when $p \geq 8$, the two methods achieve an almost identical large variance reduction. These results are supported by the plots of $\ControlFunc_\theta '$ and $\mathscr{L} \ControlFunc_\theta$ for $\theta\in\defEns{\thetastar, \theta^{*}_{\operatorname{zv}}}$ in \Cref{figure:resume_1d}, see also \Cref{subsec:1-2d-numerics-practice}.
\begin{figure}
\begin{center}
\includegraphics[scale=0.45]{resume_1d.pdf}
\end{center}
\caption{\label{figure:resume_1d} \textbf{Top Left.} Plot of the asymptotic variance $\varinf(f + \mathscr{L} \ControlFunc_\theta)$ for $\theta\in\defEns{\thetastar, \theta^{*}_{\operatorname{zv}}}$ and $p\in\defEns{4,\ldots,10}$.
\textbf{Top Right.} Autocovariance plots of ULA displaying $\omega^h_{N,n}(k)$ for $h=f +\mathscr{L}\ps{\theta}{\psi}$, $\theta\in\defEns{0,\thetastar,\theta^{*}_{\operatorname{zv}}}$ and $0\leq k < 100$.
\textbf{Bottom Left and Right.} Plots of $\ControlFunc_\theta '$ and $\mathscr{L} \ControlFunc_\theta$ for $\theta\in\defEns{\thetastar, \theta^{*}_{\operatorname{zv}}}$ and $p=6$.}
\end{figure}
We fix the number of basis functions $p=4$ and we now turn to the application to MCMC algorithms. We first define the sample mean with a burn-in period $N\in\mathbb{N}^*$ by
\begin{equation}\label{eq:def-invpihat-N-n}
\pihat[N, n](f) = \frac{1}{n}\sum_{k=N}^{N+n-1} f(X_k) \;,
\end{equation}
where $n\in\mathbb{N}^*$ is the number of samples. In this section, we consider the following estimators of $\pi(f)$: $\pihat[N,n](f + \mathscr{L} \ps{\thetastar}{\psi})$ and $\pihat[N,n](f + \mathscr{L} \ps{\theta^{*}_{\operatorname{zv}}}{\psi})$ where $\thetastar$ and $\theta^{*}_{\operatorname{zv}}$ are given in \eqref{eq:min-asymp-var-diffusion} and \eqref{eq:paramzv} respectively. In this simple one-dimensional example, the optimal parameters $\thetastar$ and $\theta^{*}_{\operatorname{zv}}$ are explicitly computable; the problem of estimating them in higher dimensional models is addressed numerically in \Cref{subsec:bayesian-examples-numeric}.
The sequence $(X_k)_{k\in\mathbb{N}}$ is generated by the ULA, MALA or RWM algorithms starting at $0$, with a step size $\gamma=10^{-2}$ for ULA and $\gamma=5\times 10^{-2}$ for RWM and MALA, a burn-in period $N=10^5$ and a number of samples $n=10^6$.
For a test function $h:\mathbb{R}\to\mathbb{R}$ ($h=f + \mathscr{L} \ps{\theta}{\psi}$, $\theta\in\defEns{0, \thetastar, \theta^{*}_{\operatorname{zv}}}$), we estimate the asymptotic variance $\varinf[\gamma](h)$ of $\pihat[N,n](h)$ by a spectral estimator $\sigS[h]$ with a Tukey-Hanning window, see \cite{flegal:2010}, given by
\begin{align}
\label{eq:def-sigS}
\sigS[h] &= \sum_{k=-(\floor{n^{1/2}} -1)}^{\floor{n^{1/2}} -1} \defEns{\frac{1}{2} + \frac{1}{2}\cos\parenthese{\frac{\uppi\absolute{k}}{\floor{n^{1/2}}}}} \omega^h_{N,n}(\absolute{k}) \;, \\
\nonumber
\omega^h_{N,n}(k) &= \frac{1}{n} \sum_{s=N}^{N+n-1-k} \defEns{h(X_{s}) - \hat{\pi}_{N,n}(h)} \defEns{h(X_{s+k}) - \hat{\pi}_{N,n}(h)} \;.
\end{align}
We compute the average of these estimators $\sigS[f+\mathscr{L} \ps{\theta}{\psi}]$, $\theta\in\defEns{0,\thetastar,\theta^{*}_{\operatorname{zv}}}$, over $10$ independent runs of the Monte Carlo algorithm (ULA, RWM or MALA), see \Cref{table:ula-asympt-var-1d}. We observe that minimizing the asymptotic variance improves upon the Zero Variance estimator.
\begin{table}
\centering
\begin{tabular}{|c|c|c|c|}
\hline
& $\gamma\sigS[f]$ & $\gamma\sigS[f+\mathscr{L} \ps{\theta^{*}_{\operatorname{zv}}}{\psi}]$ & $\gamma\sigS[f+\mathscr{L} \ps{\thetastar}{\psi}]$ \\
\hline
ULA & $82.06$ & $20.74$ & $5.33$ \\
\hline
RWM & $105.2$ & $28.19$ & $8.41$ \\
\hline
MALA & $93.27$ & $23.40$ & $5.00$ \\
\hline
\end{tabular}
\caption{Values of $\sigS[f+\mathscr{L} \ps{\theta}{\psi}]$, $\theta\in\defEns{0,\thetastar,\theta^{*}_{\operatorname{zv}}}$, rescaled by the step size $\gamma$.}\label{table:ula-asympt-var-1d}
\end{table}
A more detailed analysis can be carried out using the autocovariance plots that consist in displaying $\omega^h_{N,n}(k)$ for $h=f +\mathscr{L}\ps{\theta}{\psi}$, $\theta\in\defEns{0,\thetastar,\theta^{*}_{\operatorname{zv}}}$ and $0\leq k < 100$, see \Cref{figure:resume_1d}. The autocovariance plots for RWM and MALA are similar. By \cite[Theorem 21.2.11]{douc:moulines:priouret:soulier:2018}, the asymptotic variance $\varinf[\gamma](h)$ is the sum of the autocovariances:
\begin{equation*}
\varinf[\gamma](h) = \pig(\tilde{h}_\gamma^2) + 2 \sum_{k=1}^{+\infty} \pig(\tilde{h}_\gamma \Rkerg^k \tilde{h}_\gamma) \;, \quad \text{where} \; \tilde{h}_\gamma = h - \pig(h) \;.
\end{equation*}
The two methods are effective at reducing the autocovariances compared to the case without control variate. The zero variance estimator decreases the autocovariance at $k=0$ more than our method, which is indeed the objective of $\theta^{*}_{\operatorname{zv}}$, the minimizer of $\theta\mapsto\pi((\tilde{f} + \mathscr{L} \ps{\theta}{\psi})^2)$. Using $\theta = \thetastar$ lowers the tail of the autocovariances (for $k$ large enough) more effectively than $\theta=\theta^{*}_{\operatorname{zv}}$. This effect is predominant and explains the results of \Cref{table:ula-asympt-var-1d}.
\subsection{Bayesian logistic and probit regressions}
\label{subsec:bayesian-examples-numeric}
We illustrate the proposed control variates method on Bayesian logistic and probit regressions, see \cite[Chapter 16]{gelman2014bayesian}, \cite[Chapter 4]{marin2007bayesian}.
The examples and the data sets are taken from \cite{papamarkou2014}. Let $\mathsf{Y}=(\mathsf{Y}_1,\ldots,\mathsf{Y}_N)\in\defEns{0,1}^N$ be a vector of binary response variables, $x\in\mathbb{R}^d$ be the regression coefficients, and $\mathsf{Z}\in\mathbb{R}^{N \times d}$ be a design matrix.
The log-likelihood for the logistic and probit regressions are given respectively by
\begin{align*}
\logl{l}(\mathsf{Y} | x, \mathsf{Z}) & = \sum_{i=1}^{N} \defEns{\mathsf{Y}_i \mathsf{Z}_i^{\operatorname{T}} x - \ln\parenthese{1+\mathrm{e}^{\mathsf{Z}_i^{\operatorname{T}} x}}} \;, \\
\logl{p}(\mathsf{Y} | x, \mathsf{Z}) & = \sum_{i=1}^{N} \defEns{\mathsf{Y}_i \ln(\Phi(\mathsf{Z}_i^{\operatorname{T}} x)) + (1-\mathsf{Y}_i)\ln(\Phi(-\mathsf{Z}_i^{\operatorname{T}} x))} \;,
\end{align*}
where $\mathsf{Z}_i^{\operatorname{T}}$ is the $i^{\text{th}}$ row of $\mathsf{Z}$ for $i\in\defEns{1,\ldots,N}$.
For both models,
a Gaussian prior of mean $0$ and variance $\varsigma^2\operatorname{Id}$ is assumed for $x$ where $\varsigma^2=100$.
The unnormalized posterior probability distributions $\pib{l}$ and $\pib{p}$ for the logistic and probit regression models are defined for all $x\in\mathbb{R}^d$ by
\begin{align*}
\pib{l}(x | \mathsf{Y}, \mathsf{Z}) &\propto \exp\parenthese{-\Ub{l}(x)} \; \text{with} \quad \Ub{l}(x) = -\logl{l}(\mathsf{Y} | x, \mathsf{Z}) + (2\varsigma^2)^{-1}\norm[2]{x} \;,\\
\pib{p}(x | \mathsf{Y}, \mathsf{Z}) &\propto \exp\parenthese{-\Ub{p}(x)} \; \text{with} \quad \Ub{p}(x) = -\logl{p}(\mathsf{Y} | x, \mathsf{Z}) + (2\varsigma^2)^{-1}\norm[2]{x} \;.
\end{align*}
The following lemma enables us to check the assumptions on $\Ub{l}$ and $\Ub{p}$ required to apply \Cref{prop:dev-weak-error} for the ULA, MALA and RWM algorithms.
\begin{lemma}
\label{lemma:log-probit-assumptions}
$\Ub{l}$ and $\Ub{p}$ satisfy \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}.
\end{lemma}
\begin{proof}
The proof is postponed to \Cref{subsec:proof-log-probit-assumptions}.
\end{proof}
Following \cite[Section 2.1]{papamarkou2014}, we compare two bases for the construction of a control variate, based on first and second degree polynomials and denoted by $\psia = (\psia_1,\ldots, \psia_d)$ and $\psib = (\psib_1,\ldots,\psib_{d(d+3)/2})$ respectively, see \Cref{sec:suppl-probit-reg} for their definitions.
The estimators associated to $\psia$ and $\psib$ are referred to as CV-1 and CV-2, respectively.
For the ULA, MALA and RWM algorithms, we make a run of $n=10^6$ samples with a burn-in period of $10^5$ samples, started at the mode of the posterior. The step size is set equal to $10^{-2}$ for ULA and to $5 \times 10^{-2}$ for MALA and RWM: with these step sizes, the average acceptance ratio in the stationary regime is equal to 0.23 for RWM and 0.57 for MALA, see \cite{roberts:gelman:gilks:1997,roberts:rosenthal:1998}.
We consider $2d$ scalar test functions $\{f_k\}_{k=1}^{2d}$ defined for all $x\in\mathbb{R}^d$ and $k\in\{1,\ldots,d\}$ by $f_k(x) = x_k$ and $f_{k+d}(x) = x_k^2$.
Contrary to the one-dimensional case handled in \Cref{subsec:numerical-comparison-toy-examples}, the optimal parameters $\thetastar$ and $\theta^{*}_{\operatorname{zv}}$ corresponding to our method and to the zero variance estimator cannot be computed in closed form and must be estimated. We consider then the control variate estimator $\picv_{N,n,n}(f)$ defined in \eqref{eq:def-invpi-cv} where $m=n$ and $(\tilde{X}_k)_{k\in\mathbb{N}}$ is equal to $(X_k)_{k\in\mathbb{N}}$; $\thetastar$ is approximated by $\theta^*_n$ given in \eqref{eq:def-paramhat-n-star}.
For $k\in\{1,\ldots,2d\}$, we compute the empirical average $\pihat[N,n](f_k)$ defined in \eqref{eq:def-invpihat-N-n} and confront it to $\picv_{N,n,n}(f_k)$.
For comparison purposes, the zero-variance estimators of \cite{papamarkou2014} using the same bases of functions $\psia$, $\psib$ are also computed and are referred to as ZV-1 for $\psia$ and ZV-2 for $\psib$.
We run $100$ independent Markov chains for the ULA, MALA and RWM algorithms. The boxplots for the logistic example are displayed in \Cref{figure:log-1} for $x_1$ and $x_1^2$. Note the impressive decrease in the variance using the control variates for each algorithm ULA, MALA and RWM. It is worthwhile to note that for ULA, the bias $\absolute{\pi(f) - \pig(f)}$ is reduced dramatically using the CV-2 estimator. It can be explained by the fact that for $n$ large enough, $\ControlFunc_{\theta^*_n}= \ps{\theta^*_n}{\psib}$ approximates well the solution $\hat{f}$ of the Poisson equation $\mathscr{L} \hat{f} = -\tilde{f}$. We then get
\begin{equation*}
\pig(f) + \pig\parenthese{\mathscr{L} \ControlFunc_{\theta^*_n}} \approx \pig(f) - \pig\parenthese{\tilde{f}} = \pi(f) \;.
\end{equation*}
To have a more quantitative estimate of the variance reduction, we compute for each algorithm and test function $h\in\setpoly{}(\mathbb{R}^d, \mathbb{R})$, the spectral estimator $\sigS[h]$ defined in \eqref{eq:def-sigS} of the asymptotic variance.
The average of these estimators $\sigS[f+\mathscr{L}\ps{\theta}{\psi}]$ for $\theta\in\defEns{0,\theta^*_n, [\theta^{*}_{\operatorname{zv}}]_n}$ over the $100$ independent runs of the Markov chains for the logistic regression are reported in Table~\ref{table:1}.
$[\theta^{*}_{\operatorname{zv}}]_n$ is an empirical estimator of $\theta^{*}_{\operatorname{zv}}$, see \cite{papamarkou2014} for its construction.
The Variance Reduction Factor (VRF) is defined as the ratio of the asymptotic variances obtained by the ordinary empirical average and the control variate (or zero-variance) estimator. We again observe the considerable decrease of the asymptotic variances using control variates.
In this example, our approach produces slightly larger VRFs compared to the zero-variance estimators.
We obtain similar results for the probit regression;
see \Cref{sec:suppl-probit-reg}.
\begin{figure}
\begin{center}
\includegraphics[scale=0.45]{log-sb-2.pdf}
\end{center}
\caption{\label{figure:log-1} Boxplots of $x_1,x_1^2$ using the ULA, MALA and RWM algorithms for the logistic regression. The compared estimators are the ordinary empirical average (O), our estimator with a control variate \eqref{eq:def-invpi-cv} using first (CV-1) or second (CV-2) order polynomials for $\psi$, and the zero-variance estimators of \cite{papamarkou2014} using first (ZV-1) or second (ZV-2) order polynomial bases. The plots in the second column are close-ups for CV-2 and ZV-2. }
\end{figure}
\begin{table}
\begin{tabular}{c|c|c|c c|c c|c c|c c|}
\multicolumn{11}{c}{} \\
& & MCMC & \multicolumn{2}{c|}{CV-1-MCMC} & \multicolumn{2}{c|}{CV-2-MCMC}
& \multicolumn{2}{c|}{ZV-1-MCMC} & \multicolumn{2}{c|}{ZV-2-MCMC} \\
& & Var.\ & VRF & Var.\ & VRF & Var.\ & VRF & Var.\ & VRF & Var.\ \\
\hline
$x_1$ & ULA & 2 & 33 & 0.061 & 3.2e+03 & 6.2e-4 & 33 & 0.061 & 3e+03 & 6.6e-4 \\
& MALA & 0.41 & 33 & 0.012 & 2.6e+03 & 1.6e-4 & 30 & 0.014 & 2.5e+03 & 1.7e-4 \\
& RWM & 1.3 & 33 & 0.039 & 2.6e+03 & 4.9e-4 & 32 & 0.04 & 2.7e+03 & 4.8e-4 \\
\hline
$x_2$ & ULA & 10 & 57 & 0.18 & 8.1e+03 & 1.3e-3 & 53 & 0.19 & 7.4e+03 & 1.4e-3 \\
& MALA & 2.5 & 59 & 0.042 & 7.7e+03 & 3.2e-4 & 54 & 0.046 & 7.3e+03 & 3.4e-4 \\
& RWM & 5.6 & 52 & 0.11 & 5.6e+03 & 1.0e-3 & 50 & 0.11 & 5.6e+03 & 1.0e-3 \\
\hline
$x_3$ & ULA & 10 & 56 & 0.18 & 7.3e+03 & 1.4e-3 & 52 & 0.19 & 6.7e+03 & 1.0e-35 \\
&MALA & 2.4 & 58 & 0.041 & 6.8e+03 & 3.5e-4 & 52 & 0.045 & 6.5e+03 & 3.7e-4 \\
&RWM & 5.6 & 45 & 0.13 & 5.1e+03 & 1.0e-31 & 42 & 0.13 & 5.1e+03 & 1.0e-31 \\
\hline
$x_4$ & ULA & 13 & 26 & 0.5 & 3.9e+03 & 3.3e-3 & 22 & 0.59 & 3.4e+03 & 3.8e-3 \\
&MALA & 3.1 & 25 & 0.12 & 3.6e+03 & 8.7e-4 & 21 & 0.14 & 3.3e+03 & 9.5e-4 \\
&RWM & 7.5 & 19 & 0.4 & 2.5e+03 & 3.0e-3 & 18 & 0.43 & 2.4e+03 & 3.0e-31 \\
\hline
$x_1^2$ & ULA & 4.6 & 10 & 0.46 & 5.5e+02 & 8.4e-3 & 9.3 & 0.49 & 4.8e+02 & 9.5e-3 \\
&MALA & 0.98 & 9.6 & 0.1 & 4.6e+02 & 2.1e-3 & 8.6 & 0.11 & 4.2e+02 & 2.3e-3 \\
&RWM & 3 & 8.3 & 0.36 & 4.3e+02 & 6.9e-3 & 8 & 0.37 & 4.3e+02 & 6.9e-3 \\
\hline
$x_2^2$ & ULA & 29 & 11 & 2.6 & 5.2e+02 & 0.055 & 10 & 2.8 & 4.7e+02 & 0.062 \\
&MALA & 7 & 11 & 0.64 & 5.2e+02 & 0.013 & 10 & 0.68 & 4.8e+02 & 0.014 \\
&RWM & 16 & 9.1 & 1.8 & 4.4e+02 & 0.037 & 8.8 & 1.8 & 4.3e+02 & 0.037 \\
\hline
$x_3^2$ & ULA & 46 & 11 & 4.1 & 6.7e+02 & 0.069 & 10 & 4.5 & 5.9e+02 & 0.079 \\
&MALA & 11 & 11 & 0.97 & 6e+02 & 0.018 & 10 & 1 & 5.6e+02 & 0.019 \\
&RWM & 26 & 9 & 2.9 & 4.3e+02 & 0.061 & 8.6 & 3.1 & 4.2e+02 & 0.062 \\
\hline
$x_4^2$ & ULA & 5.1e+02 & 14 & 37 & 8.2e+02 & 0.62 & 12 & 43 & 6.9e+02 & 0.73 \\
&MALA & 1.2e+02 & 14 & 9 & 7.9e+02 & 0.15 & 12 & 10 & 7.1e+02 & 0.17 \\
&RWM & 2.9e+02 & 11 & 27 & 5.8e+02 & 0.51 & 10 & 29 & 5.6e+02 & 0.53 \\
\hline
\end{tabular}
\caption{Estimates of the asymptotic variances for ULA, MALA and RWM and each parameter $x_i$, $x_i^2$ for $i\in\{1,\ldots,d\}$, and of the variance reduction factor (VRF) on the example of the logistic regression.}
\label{table:1}
\end{table}
\section{Proofs of \Cref{item-thm-var-3} and \Cref{prop:dev-weak-error}}
\label{sec:proofs}
In the proof
the notation $A(\gamma,n,x,f) \lesssim B(\gamma,n,x,f)$ means that there exist $\bar{\gamma} > 0$, and $C < \infty$ such that for all $f \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, $\gamma \in \ocint{0,\bar{\gamma}}$, $x \in \mathbb{R}^d$, $n \in \mathbb{N}$, $A(\gamma,n,x,f) \leq C B(\gamma,n,x,f)$.
We preface the proofs by a technical result which follows from \cite[Lemma~2.6, Proposition~2.7]{kopec:2015} and \eqref{eq:def_poisson_int}, establishing the regularity of solutions of Poisson's equation.
\begin{proposition}
\label{prop:existence-sol-Poisson}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:geo_ergod} and let $k \in \mathbb{N}^*$.
For all $f\in\setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$, there exists $\phif\in\setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$ such that
$\mathscr{L} \phif = -\tilde{f}$, where $\tilde{f} = f- \pi(f)$ and $\mathscr{L}$ is the generator of the Langevin diffusion defined in \eqref{eq:def-generator}. In addition, for all $p\in\mathbb{N}$, there exist $C\geq 0$, $q\in\mathbb{N}$ such that for all $f\in\setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$, $\Vnorm[k,q]{\phif} \leq C \Vnorm[k,p]{f}$.
\end{proposition}
\subsection{Proof of \Cref{item-thm-var-3}}
\label{subsec:proof:item-thm-var-3}
Let $p \in \mathbb{N}$.
Under \Cref{assumption:U-Sinfty} and \Cref{ass:geo_ergod}, by \Cref{prop:existence-sol-Poisson}, there exists $q_1\in\mathbb{N}$ such that for all $f \in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, $\Vnorm[\ke,q_1]{\hat{f}} \leq C \Vnorm[\ke,p]{f}$, where $\mathscr{L} \hat{f} = -\tilde{f}$, $\tilde{f} = f-\pi(f)$. Under \Cref{ass:dev_generator_discrete}, we have for all $\gamma\in\ocint{0,\bar{\gamma}}$,
\begin{equation}\label{eq:proof-tp2}
\Rkerg \hat{f} = \hat{f} + \gamma \mathscr{L} \hat{f} + \gamma^{\alpha} \mathscr{E}_{\step} \hat{f} =
\hat{f} - \gamma \{ f - \pi(f) \} + \gamma^\alpha \mathscr{E}_{\step} \hat{f} \;.
\end{equation}
Integrating \eqref{eq:proof-tp2} w.r.t.~$\pig$, we obtain that $\pig(f) - \pi(f) = \gamma^{\alpha-1} \pig(\mathscr{E}_{\step} \hat{f})$.
Under \Cref{ass:dev_generator_discrete}, there exists $q_2 \in \mathbb{N}$ such that
$\VnormEqs[0, q_2]{\mathscr{E}_{\step} \hat{f}} \lesssim \VnormEqs[\ke,q_1]{\hat{f}}$.
By \Cref{ass:geo_ergod}, we get
$|\pig(\mathscr{E}_{\step} \hat{f})| \leq \pig(|\mathscr{E}_{\step} \hat{f}|) \lesssim \VnormEqs[0, q_2]{\mathscr{E}_{\step} \hat{f}}$, which concludes the proof.
\subsection{Proof of \Cref{prop:dev-weak-error}}
\label{subsec:proof-weak-error-dev}
The proof is divided into two parts. In the first part, which gathers \Cref{lemma:Sn-Sn2-discrete-chain}, \Cref{lemma:tech-step-n-dom} and \Cref{lem:bound_asympto_bias_generator}, we establish preliminary and technical results. In particular, we derive in \Cref{lemma:Sn-Sn2-discrete-chain} an elementary bound on the second order moment of the estimator $\pihat(f)$ defined in \eqref{eq:def-invpihatn},
where $(X_k)_{k\in\mathbb{N}}$ is a Markov chain of kernel $\Rkerg$. The arguments are based solely on the study of $\Rkerg$ and rely on \Cref{ass:geo_ergod}.
In the second part, using our preliminary results, the proof of \Cref{prop:dev-weak-error} is then derived.
\begin{lemma}\label{lemma:Sn-Sn2-discrete-chain}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:geo_ergod}.
Let $f: \mathbb{R}^d \to \mathbb{R}$ be such that $\VnormEq[V^{1/2}]{f} < +\infty$. For all $n\in\mathbb{N}^*$,
\begin{equation*}
\expeMarkov{x, \gamma}{\parenthese{\sum_{k=0}^{n-1} \defEns{f(X_k) - \pig(f)}}^2} \lesssim \gamma^{-1} \VnormEq[V^{1/2}]{f}^2 \defEns{n + \gamma^{-1} V(x)} \;.
\end{equation*}
\end{lemma}
\begin{proof}
Note that under \Cref{ass:geo_ergod}-\ref{ass:geo_ergod_iii}, by \cite[Definition D.3.1-(i)]{douc:moulines:priouret:soulier:2018} and the Jensen inequality,
\begin{equation}\label{eq:Vbeta-unif}
\Vnorm[V^{1/2}]{\updelta_x \RKer_\gamma^n - \pig} \lesssim \rho^{n\gamma/2} V^{1/2}(x) \;.
\end{equation}
We have for all $n\in\mathbb{N}^*$
\begin{multline}\label{eq:vbeta-unif-1}
\expeMarkov{x, \gamma}{\parenthese{\sum_{k=0}^{n-1} \defEns{f(X_k) - \pig(f)}}^2} \\
\lesssim \sum_{k=0}^{n-1} \sum_{s=0}^{n-1-k} \expeMarkov{x, \gamma}{\parenthese{f(X_k) - \pig(f)}\parenthese{f(X_{k+s}) - \pig(f)}} \;.
\end{multline}
For $k\in\defEns{0,\ldots,n-1}$ and $s\in\defEns{0,\ldots,n-1-k}$,
\begin{equation*}
\expeMarkov{x, \gamma}{\parenthese{f(X_k) - \pig(f)}\parenthese{f(X_{k+s}) - \pig(f)}}
= \expeMarkov{x, \gamma}{\parenthese{f(X_k) - \pig(f)}\parenthese{ \Rkerg^s f(X_{k}) - \pig(f)}} \;.
\end{equation*}
By \eqref{eq:Vbeta-unif}, we obtain
\begin{align*}
& \absolute{\expeMarkov{x, \gamma}{\parenthese{f(X_k) - \pig(f)}\parenthese{f(X_{k+s}) - \pig(f)}}} \\
& \phantom{---------}\lesssim \VnormEq[V^{1/2}]{f}\rho^{\gamma s/2}\expeMarkov{x, \gamma}{\absolute{f(X_k) - \pig(f)} V^{1/2}(X_k)} \\
& \phantom{---------}\lesssim \VnormEq[V^{1/2}]{f}^2\rho^{\gamma s/2}\expeMarkov{x, \gamma}{ V(X_k)} \;,
\end{align*}
using that $V\geq 1$ and $\absolute{f(x) - \pig(f)} \leq \Vnorm[V^{1/2}]{f}(V^{1/2}(x) + \bar{\pi})$ where $\bar{\pi} = \sup_{\gamma\in\ocint{0,\bar{\gamma}}} \pig(V) \lesssim 1$.
By \eqref{eq:discrete-drift-uniform-bound}, we get
\begin{equation*}
\absolute{\expeMarkov{x, \gamma}{\parenthese{f(X_k) - \pig(f)}\parenthese{f(X_{k+s}) - \pig(f)}}} \lesssim \VnormEq[V^{1/2}]{f}^2\rho^{\gamma s/2} \defEns{\rho^{k\gamma} V(x) + \bar{\pi}} \;.
\end{equation*}
Combining it with \eqref{eq:vbeta-unif-1}, we have
\begin{equation*}
\expeMarkov{x, \gamma}{\parenthese{\sum_{k=0}^{n-1} \defEns{f(X_k) - \pig(f)}}^2} \lesssim \frac{\VnormEq[V^{1/2}]{f}^2}{1-\rho^{\gamma/2}} \defEns{\frac{V(x)}{1-\rho^\gamma} + n\bar{\pi}} \;.
\end{equation*}
Using that $1-\rho^{\upbeta \gamma } \geq \upbeta \gamma \log(1/\rho) \rho^{\upbeta \gamma }$ for all $\upbeta\in\ocint{0,1}$ concludes the proof.
\end{proof}
Define for any $f : \mathbb{R}^d \to \mathbb{R}$, $x \in \mathbb{R}^d$ and
$\gamma \in \ocint{0,\bar{\gamma}}$, such that
$R_{\gamma}f^2(x) < +\infty$,
\begin{equation*}
\mmtilf{\gamma}(x) = \expeMarkov{x,\gamma}{\{f(X_1) - \Rkerg f(x)\}^2} \;.
\end{equation*}
\begin{lemma}\label{lemma:tech-step-n-dom}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:dev_generator_discrete}.
For all $\gamma \in \ocint{0,\bar{\gamma}}$ and $f\in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, $ \mmtilf{\gamma} \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ and in addition for all $p\in\mathbb{N}$ there exists $q \in \mathbb{N}$ such that for all $\gamma\in\ocint{0,\bar{\gamma}}$, $\Vnorm[0,q]{\mmtilf{\gamma}} \lesssim \gamma \Vnorm[\ke,p]{f}^2$.
\end{lemma}
\begin{proof}
Let $p\in\mathbb{N}$ and $f \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$.
By \Cref{ass:dev_generator_discrete}, for all $\gamma\in\ocint{0,\bar{\gamma}}$ and $x\in\mathbb{R}^d$,
\begin{align}
\nonumber
0 \leq \mmtilf{\gamma}(x) &= \expeMarkov{x,\gamma}{ \defEns{ f(X_1) - f(x) - \gamma \mathscr{L} f(x) - \gamma^\alpha \mathscr{E}_{\step} f(x)}^2} \\
\nonumber
&= \expeMarkov{x,\gamma}{ \defEns{f(X_1) - f(x)}^2} - \gamma^2 \defEns{\mathscr{L} f(x) + \gamma^{\alpha-1} \mathscr{E}_{\step} f(x)}^2 \; \\
\label{eq:tech-step-n-dom-1}
&\leq \expeMarkov{x,\gamma}{ \defEns{f(X_1) - f(x)}^2} \;.
\end{align}
Besides, for all $\gamma\in\ocint{0,\bar{\gamma}}$ and $x\in\mathbb{R}^d$,
\begin{align*}
&\expeMarkov{x,\gamma}{ \defEns{f(X_1) - f(x)}^2} = \expeMarkov{x,\gamma}{ f^2(X_1)} + f^2(x) - 2f(x) \expeMarkov{x,\gamma}{ f(X_1)} \\
&\phantom{----}= \gamma \mathscr{L} (f^2)(x) + \gamma^\alpha \mathscr{E}_{\step} (f^2)(x) - 2\gamma f(x) \mathscr{L} f(x) - 2 \gamma^\alpha f(x) \mathscr{E}_{\step} f(x) \\
&\phantom{----}= \gamma \defEns{ 2\norm[2]{\nabla f(x)} + \gamma^{\alpha -1} \parenthese{\mathscr{E}_{\step} (f^2)(x) - 2f(x) \mathscr{E}_{\step} f(x)}} \;.
\end{align*}
Then, combining this result and \eqref{eq:tech-step-n-dom-1}, under \Cref{ass:dev_generator_discrete}, $\mmtilf{\gamma} \in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ and since $\ke \geq 2$, there exists $q\in\mathbb{N}$ such that $\Vnorm[0,q]{\mmtilf{\gamma}} \lesssim \gamma \Vnorm[\ke,p]{f}^2$.
\end{proof}
\begin{lemma}
\label{lem:bound_asympto_bias_generator}
Assume \Cref{assumption:U-Sinfty}, \Cref{ass:geo_ergod} and \Cref{ass:dev_generator_discrete}. Then for any $p \in \mathbb{N}$,
\begin{align}
\label{eq:borne-invpig-invpi-1}
\absolute{\pig ( \phif \mathscr{L} \phif )- \pi ( \phif \mathscr{L} \phif )} &\lesssim \VnormEq[\ke+2,p]{f}^2 \gamma^{\alpha-1} \;, \\
\label{eq:borne-sigma-inf}
\varinf(f) = -2 \pi(\phif \mathscr{L} \phif) &\lesssim \VnormEq[2,p]{f}^2 \;,
\end{align}
where for any $f \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, $\phif$ is the solution of Poisson's equation \eqref{eq:poisson-eq-langevin} (see \Cref{prop:existence-sol-Poisson}).
\end{lemma}
\begin{proof}
Let $p \in \mathbb{N}$. By \Cref{prop:existence-sol-Poisson} and \Cref{assumption:U-Sinfty}, there
exists $q \in \mathbb{N}$ satisfying
\begin{equation}
\label{eq:1:lem:bound_asympto_bias_generator}
\Vnorm[\ke+2,q]{\phif} \lesssim \Vnorm[\ke+2,p]{f} \text{ and } \Vnorm[\ke+1,q]{U} \lesssim 1 \;.
\end{equation}
In addition, using \Cref{item-thm-var-3}, we have
\begin{equation*}
\absLigne{\pig ( \phif \mathscr{L} \phif )- \pi ( \phif \mathscr{L} \phif )} \lesssim \gamma^{\alpha-1} \Vnorm[\ke,3 q]{\phif \mathscr{L} \phif} \;.
\end{equation*}
Using that for any $k \in \mathbb{N}$ and $p_1,p_2 \in \mathbb{N}$, there exists $C_{k,p_1,p_2} \geq 0$ such that for
any $g_1,g_2 \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$,
$\Vnorm[k,p_1+p_2]{g_1 g_2} \leq C_{k,p_1,p_2} \Vnorm[k,p_1]{g_1} \Vnorm[k,p_2]{g_2}$ by the
general Leibniz rule, we get by definition of $\mathscr{L}$ \eqref{eq:def-generator},
\begin{equation*}
\absolute{\pig ( \phif \mathscr{L} \phif )- \pi ( \phif \mathscr{L} \phif )} \lesssim \gamma^{\alpha-1} \Vnorm[\ke,q]{\phif} \Vnorm[\ke,2q]{ \mathscr{L} \phif} \lesssim \gamma^{\alpha-1} \Vnorm[\ke+2,q]{\phif}^2 \Vnorm[\ke+1,q]{ U } \;.
\end{equation*}
The proof of \eqref{eq:borne-invpig-invpi-1} then follows from \eqref{eq:1:lem:bound_asympto_bias_generator}.
Similarly, by \Cref{ass:geo_ergod},
\begin{equation*}
\varinf(f) = -2 \pi(\phif \mathscr{L} \phif) \lesssim \Vnorm[0, 3q]{\phif \mathscr{L} \phif} \lesssim \Vnorm[0,q]{\phif} \Vnorm[0,2q]{ \mathscr{L} \phif} \lesssim \Vnorm[2,q]{\phif}^2 \Vnorm[1,q]{U} \;,
\end{equation*}
since $\Vnorm[1,q]{U} \leq \Vnorm[\ke+1,q]{U} \lesssim 1$. Using that $\Vnorm[2,q]{\phif} \leq \Vnorm[2,p]{f}$ concludes the proof of \eqref{eq:borne-sigma-inf}.
\end{proof}
\begin{proof}[Proof of \Cref{prop:dev-weak-error}]
Let $p \in \mathbb{N}$.
For any $f \in \mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$, let $\phif\in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ be the solution of Poisson's equation $\mathscr{L} \phif = -\tilde{f} $ (see \Cref{prop:existence-sol-Poisson}).
Using~\Cref{ass:dev_generator_discrete}, we get for all $\gamma\in\ocint{0,\bar{\gamma}}$,
\begin{equation}\label{eq:proof-dev-weak-1}
\Rker_\gamma \phif = \phif + \gamma \mathscr{L} \phif + \gamma^\alpha \mathscr{E}_{\step} \phif
= \phif - \gamma \{f-\pig(f)\} + \gamma^\alpha \mathscr{E}_{\step} \phif - \gamma \{ \pig(f)- \pi(f) \}\;,
\end{equation}
which implies that
\begin{multline}\label{eq:decompo-poisson-12}
\sum_{k=0}^{n-1} \defEns{f(X_k) - \pig(f)} = \frac{\phif(X_0) - \phif(X_n)}{\gamma} + \frac{1}{\gamma}\sum_{k=0}^{n-1} \defEns{\phif(X_{k+1}) - \Rker_\gamma \phif(X_k)} \\
+ \gamma^{\alpha-1}\sum_{k=0}^{n-1} \defEns{ \mathscr{E}_{\step} \phif(X_k) -\gamma^{1-\alpha}\parenthese{\pig(f)-\pi(f)} } \;.
\end{multline}
Consider the following decomposition based on \eqref{eq:decompo-poisson-12},
\begin{equation*}
n^{-1}\expeMarkov{x,\gamma}{\parenthese{\sum_{k=0}^{n-1} \defEns{f(X_k) - \pig(f)}}^2} = \sum_{i=1}^{4} A^{f}_i(x,n,\gamma) \;,
\end{equation*}
where,
\begin{align*}
&A^{f}_1(x,n,\gamma) \\
& \phantom{--}= \frac{\gamma^{2(\alpha-1)}}{n}\expeMarkov{x,\gamma}{\parenthese{\sum_{k=0}^{n-1} \defEns{ \mathscr{E}_{\step} \phif(X_k) -\gamma^{1-\alpha}\parenthese{\pig(f)-\pi(f)}}}^2} \;, \\
&A^{f}_2(x,n,\gamma) = (n \gamma^2 )^{-1} \expeMarkov{x,\gamma}{(\phif(X_0) - \phif(X_n))^2 } \; , \\
&A^{f}_3(x,n,\gamma) = (n \gamma^2)^{-1} \expeMarkov{x,\gamma}{\parenthese{\sum_{k=0}^{n-1} \phif(X_{k+1}) - \Rker_\gamma \phif(X_k)}^2} \;,
\end{align*}
and by the Cauchy--Schwarz inequality,
\begin{equation}\label{eq:A4-1}
(1/2) \absolute{A^{f}_4(x,n,\gamma)} \leq \sum_{1 \leq i < j \leq 3} A^{f}_i(x,n,\gamma)^{1/2} A^{f}_j(x,n,\gamma)^{1/2} \;.
\end{equation}
We now bound $\absolute{A_i^f(x,n,\gamma)}$ for each $i\in\defEns{1,\ldots,4}$.
By \Cref{prop:existence-sol-Poisson}, there exists $q_1\in\mathbb{N}$ such that
\begin{equation}
\label{eq:control_Poisson_proof_lem_prelim_bootstrap}
\Vnorm[\ke,q_1]{\phif} \lesssim \Vnorm[\ke,p]{f} \;,
\end{equation}
which combined with \Cref{ass:geo_ergod}-\ref{ass:geo_ergod_iii} and \eqref{eq:discrete-drift-uniform-bound} yields for all $n\in\mathbb{N}^*$,
\begin{equation}\label{eq:A1-1}
A^{f}_2(x,n,\gamma)\lesssim \Vnorm[V]{\phif^2} V(x) / (n\gamma^2) \lesssim \VnormEq[\ke,p]{f}^2 V(x) / (n\gamma^2) \;.
\end{equation}
For any $\gamma\in\ocint{0,\bar{\gamma}}$, by \eqref{eq:proof-dev-weak-1} and since $\mathscr{L} \phif = -\tilde{f} $, $\pig(\mathscr{E}_{\step} \phif) = \gamma^{1-\alpha}\{\pig(f)-\pi(f)\}$.
Under \Cref{ass:dev_generator_discrete}, there exists $q_3\in\mathbb{N}$ such that for all $\gamma \in \ocint{0,\bar{\gamma}}$,
$\VnormEqs[V^{1/2}]{\mathscr{E}_{\step} \phif} \lesssim \VnormEqs[0,q_3]{\mathscr{E}_{\step} \phif}\lesssim \VnormEqs[\ke,q_1]{ \phif} \lesssim \VnormEqs[\ke,p]{f}$ by \eqref{eq:control_Poisson_proof_lem_prelim_bootstrap}.
Hence, applying \Cref{lemma:Sn-Sn2-discrete-chain} and using $\alpha \geq 3/2$ yield
\begin{align}
\label{eq:A1-true}
A^{f}_1(x,n,\gamma)&\lesssim \frac{\gamma^{2(\alpha-1)}}{n} \frac{\VnormEqs[\ke,p]{f}^2}{\gamma}\parenthese{n + \frac{V(x)}{\gamma}} \\
\nonumber
&\lesssim
\VnormEqs[\ke,p]{f}^2\defEns{1 + V(x)/(n\gamma)} \;.
\end{align}
Since $(\sum_{k=0}^{n-1} \phif(X_{k+1}) - \Rker_\gamma \phif(X_k))_{n\in\mathbb{N}}$ is a $\mathbb{P}_{x,\gamma}$-square integrable martingale, we get that for all $n \in \mathbb{N}$,
\begin{equation}\label{eq:A2-1}
A^{f}_3(x,n,\gamma) = \gamma^{-2} \expeMarkov{x,\gamma}{n^{-1} \sum_{k=0}^{n-1} g_\step(X_k)} \;,
\end{equation}
where
\begin{equation}
\label{eq:def_tg_proof_lem_preli_bootstrap}
g_\step(x) = \expeMarkov{x,\gamma}{\{\phif(X_1) - \Rker_\gamma \phif(x)\}^2} \;.
\end{equation}
\Cref{lemma:tech-step-n-dom} shows that $g_\step\in\mathrm{C}^{\infty}_{\operatorname{poly}}(\mathbb{R}^d,\mathbb{R})$ and that there exists $q_2 \in \mathbb{N}$ such that $\Vnorm[V]{g_\step}\lesssim \Vnorm[0,q_2]{g_\step} \lesssim \gamma \Vnorm[\ke,q_1]{\phif}^2 \lesssim \gamma \Vnorm[\ke,p]{f}^2 $. Applying \eqref{eq:def-V-unif}, we get that for all $n\in\mathbb{N}^*$,
\begin{multline}\label{eq:A2-1-1}
\absolute{\expeMarkov{x,\gamma}{n^{-1} \sum_{k=0}^{n-1} g_\step(X_k)} - \pig(g_\step)} \\ \lesssim \VnormEq[V]{g_\step} (n\gamma)^{-1} V(x) \lesssim n^{-1} \VnormEq[\ke,p]{f}^2 V(x) \;.
\end{multline}
We now show that $\pig(g_\step)$ is approximately equal to $\gamma \varinf(f)$.
Observe that by \eqref{eq:def_tg_proof_lem_preli_bootstrap} and since $\pi_{\gamma}$ is invariant for $R_{\gamma}$, for any $\gamma \in \ocint{0,\bar{\gamma}}$,
\begin{align}
\nonumber
\pig(g_\step) &= \expeMarkov{\pig,\gamma}{\{\phif(X_1) - \Rker_\gamma \phif(X_0)\}^2} \\
\label{eq:A2-3}
&= \expeMarkov{\pig,\gamma}{\{\phif(X_1) - \phif(X_0)\}^2} - \expeMarkov{\pig,\gamma}{\{\phif(X_0) - \Rker_\gamma \phif(X_0)\}^2} \;.
\end{align}
Using that $\pig$ is the invariant distribution for $\Rker_\gamma$ again and \eqref{eq:proof-dev-weak-1}, we have for any $\gamma \in \ocint{0,\bar{\gamma}}$,
\begin{align}
\nonumber
\expeMarkov{\pig,\gamma}{\{\phif(X_1) - \phif(X_0)\}^2}
&= 2 \expeMarkov{\pig,\gamma}{\phif(X_0)\{\phif(X_0) -\Rker_\gamma \phif(X_0)\}} \\
\label{eq:temp-invpig-invpi}
&= -2 \gamma\pig ( \phif \mathscr{L} \phif ) - 2 \gamma^{\alpha} \pig (\phif \mathscr{E}_{\step}\phif) \;.
\end{align}
In the next step, we consider separately the cases $\pig = \pi$ and $\pig \ne\pi$.
If $\pi = \pig$, then
\begin{equation}
\label{eq:diff-invpig-invpi_0}
-\pig ( \phif \mathscr{L} \phif )=(1/2)\varinf(f) \;.
\end{equation}
If $\pig \neq \pi$, \Cref{lem:bound_asympto_bias_generator} shows that
\begin{align}
\label{eq:diff-invpig-invpi}
\absolute{\pig ( \phif \mathscr{L} \phif )+(1/2)\varinf(f)} &= \absolute{\pig ( \phif \mathscr{L} \phif )- \pi ( \phif \mathscr{L} \phif )} \\
\nonumber
&\lesssim \VnormEq[\ke+2,p]{f}^2 \gamma^{\alpha-1} \;.
\end{align}
Using \Cref{ass:dev_generator_discrete}, \eqref{eq:discrete-drift-uniform-bound} and $\absolute{\pig (\phif \mathscr{E}_{\step}\phif)} \lesssim \Vnorm[\ke,p]{f}^2$ in \eqref{eq:temp-invpig-invpi}, we obtain that
\begin{multline}
\label{eq:A2-4}
\Big|\expeMarkov{\pig,\gamma}{\{\phif(X_1) - \phif(X_0)\}^2} +2 \gamma\pig ( \phif \mathscr{L} \phif )\Big|
\\ = 2 \gamma^\alpha \Big| \pig(\phif \mathscr{E}_{\step} \phif)\Big| \lesssim \VnormEq[\ke,p]{f}^2 \gamma^{\alpha} \;.
\end{multline}
Similarly, using \Cref{ass:geo_ergod}-\ref{ass:geo_ergod_ii}, \eqref{eq:discrete-drift-uniform-bound}, \eqref{eq:proof-dev-weak-1}, \eqref{eq:def-generator}, \Cref{ass:dev_generator_discrete} and \eqref{eq:control_Poisson_proof_lem_prelim_bootstrap}, it holds since $\ke \geq 2$ that
\begin{equation*}
\expeMarkov{\pig,\gamma}{\{\phif(X_0) - \Rker_\gamma \phif(X_0)\}^2} \lesssim \Vnorm[\ke,q_1]{\phif}^2 \gamma^2 \lesssim \Vnorm[\ke,p]{f}^2 \gamma^2 \;.
\end{equation*}
Combining this result with \eqref{eq:diff-invpig-invpi_0} or \eqref{eq:diff-invpig-invpi} and \eqref{eq:A2-4} in \eqref{eq:A2-3} and using that $ \VnormEq[\ke,p]{f} \leq \VnormEq[\ke+2,p]{f}$, we obtain
\begin{equation*}
\absolute{\pig(g_\step) - \gamma \varinf(f)} \lesssim \VnormEq[\ke+2,p]{f}^2 \gamma^{\alpha \wedge 2} \;.
\end{equation*}
Plugging this inequality and \eqref{eq:A2-1-1} in \eqref{eq:A2-1},
we obtain for all $n\in\mathbb{N}^*$,
\begin{equation}\label{eq:A2-5}
\absolute{A^{f}_3(x,n,\gamma) - \gamma^{-1} \varinf(f)} \lesssim \VnormEq[\ke+2,p]{f}^2 \defEns{ \gamma^{(\alpha-2) \wedge 0} + (n\gamma^2)^{-1} V(x)} \;.
\end{equation}
Note that since $\alpha \geq 1$, by \eqref{eq:borne-sigma-inf} and \eqref{eq:A2-5},
\begin{equation*}
A^{f}_3(x,n,\gamma) \lesssim \VnormEq[\ke+2,p]{f}^2 \defEns{ \gamma^{-1} + (n\gamma^2)^{-1} V(x)} \;.
\end{equation*}
Combining it with \eqref{eq:A4-1}, \eqref{eq:A1-1} and \eqref{eq:A1-true} concludes the proof.
\end{proof}
\section{Geometric ergodicity for the ULA and MALA algorithms}
\label{sec:geom-ergodicity-mala}
In this Section, we show that \eqref{eq:def-V-unif} in \Cref{ass:geo_ergod} is satisfied for the families of Markov kernels $\{\Rula \, : \, \gamma \in \ocint{0,\bar{\gamma}}\}$ and $\{\Rmala \, : \, \gamma \in \ocint{0,\bar{\gamma}}\}$, with $\bar{\gamma} >0$, associated to the ULA and MALA algorithms (see \eqref{eq:def-kernel-ULA} and \eqref{eq:def-kernel-MALA}). Assume that there exist $V \in \mrC^2(\mathbb{R}^d,\coint{1,+\infty})$ and $a>0$ and $b \geq 0$ such that
\begin{equation}
\label{eq:drift_cont}
\mathscr{L} V \leq - a V + b \;.
\end{equation}
Then,
\cite[Theorem 2.2]{roberts:tweedie-Langevin:1996} and \cite[Theorem 4.5]{meyn:tweedie:1993:III} show that $\pi(V) < +\infty$ and \eqref{eq:def-V-unif_ii} is satisfied. It is standard to show that \eqref{eq:drift_cont} holds under \Cref{ass:condition_MALA} but this result is given below for completeness.
We begin the proof by two technical lemmas, \Cref{lem:quadratic_behaviour,lem:bounde_pertub_hessian}, which are used repeatedly throughout this Section.
In this Section, we assume without loss of generality that $\nabla U(0)=0$. Note that under \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}, $m \leq L$.
\begin{lemma}
\label{lem:quadratic_behaviour}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then there exists $K_2 \geq 0$ such that for any $x \not \in \ball{0}{K_2}$, $\ps{\nabla U(x)}{x} \geq (m/2) \norm[2]{x}$ and in particular $\norm{\nabla U(x)} \geq (m/2) \norm{x}$.
\end{lemma}
\begin{proof}
Using \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}, we have for any $x \in \mathbb{R}^d$, $\norm{x}\geq K_1$,
\begin{align*}
\ps{\nabla U(x)}{x}
&= \int_{0}^{K_1/\norm{x}} \operatorname{D}^2 U(t x ) [x^{\otimes 2}] \mathrm{d} t + \int_{K_1/\norm{x}} ^ 1 \operatorname{D}^2 U(t x ) [x^{\otimes 2}] \mathrm{d} t\\
& \geq m\norm[2]{x} \{1- K_1 (1 +L/m) / \norm{x} \}\;,
\end{align*}
which proves the first statement. The second statement is obvious.
\end{proof}
\begin{lemma}
\label{lem:bounde_pertub_hessian}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then, for any $t \in \ccint{0,1}$, $\gamma \in \ocint{0,1/(4L)}$ and $x,z \in \mathbb{R}^d$, $\norm{z} \leq \norm{x}/(4\sqrt{2\gamma})$, it holds
\begin{equation*}
\norm{x+t\{-\gamma \nabla U(x) + \sqrt{2\gamma}z \}} \geq \norm{x}/2 \;.
\end{equation*}
\end{lemma}
\begin{proof}
Let $t \in \ccint{0,1}$, $\gamma \in \ocint{0,1/(4L)}$ and $x,z \in \mathbb{R}^d$, $\norm{z} \leq \norm{x}/(4\sqrt{2\gamma})$.
Using the triangle inequality and \Cref{assumption:U-Sinfty}, we have, since $t \in \ccint{0,1}$,
\begin{equation*}
\norm{x+t\{-\gamma \nabla U(x) + \sqrt{2\gamma}z \}} \geq (1-\gamma L ) \norm{x} -\sqrt{2\gamma} \norm{z} \;.
\end{equation*}
The conclusion then follows from $\gamma \leq 1/(4L)$ and $\norm{z} \leq \norm{x}/(4\sqrt{2\gamma})$.
\end{proof}
We now show that \eqref{eq:drift_cont} holds.
\begin{proposition}
\label{propo:drift_cont}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then, for any $\eta \in \ocint{0, m/8}$, \eqref{eq:drift_cont} holds with $V=V_{\eta}$, $a=2\eta$ and
\begin{equation*}
b = 2\eta\exp\parenthese{\eta\defEns{K_2^2 \vee 4(d+1)/m}}\left[d +1 + (2\eta + L)\defEns{K_2^2 \vee 4(d+1)/m} \right] \;,
\end{equation*}
where $K_2$ is defined in \Cref{lem:quadratic_behaviour}.
\end{proposition}
\begin{proof}
Let $\eta \in \ocint{0, m/8}$. By \eqref{eq:def-generator}, for all $x\in\mathbb{R}^d$,
\begin{equation*}
\mathscr{L} V_{\eta}(x) / (2\eta V_{\eta}(x)) = - \ps{\nabla U(x)}{x} + d + 2\eta\norm[2]{x} \;.
\end{equation*}
By \Cref{lem:quadratic_behaviour}, for all $x\in\mathbb{R}^d$, $\norm{x}\geq \max(K_2, 2\sqrt{(d+1)/m})$,
\begin{equation*}
\mathscr{L} V_{\eta}(x) / (2\eta V_{\eta}(x)) \leq - \defEns{(m/2) - 2\eta} \norm[2]{x} + d \leq -1 \;,
\end{equation*}
which concludes the proof.
\end{proof}
Therefore, to check \Cref{ass:geo_ergod}, it remains to show that for any $\gamma \in \ocint{0,\bar{\gamma}}$, for $\bar{\gamma} >0$, $\Rula$ (resp.~$\Rmala$) has an invariant distribution $\pi_{\gamma}$ (resp.~$\pi$) and there exists $\bar{\eta} >0$ such that $\pi_{\gamma}(V_{\bar{\eta}}) < +\infty$ and \eqref{eq:def-V-unif} holds with $V=V_{\bar{\eta}}$.
To this end, we establish minorization and drift conditions on $\RKer_\gamma = \Rula$ and $\RKer_\gamma= \Rmala$, see e.g.~\cite[Chapter~19]{douc:moulines:priouret:soulier:2018}, with an explicit dependence with respect to the parameter $\gamma$.
More precisely, assume that
\begin{enumerate}[label=(\Roman*)]
\item\label{item:condition_ergo_I} there exist $\lambda\in\ooint{0,1}$ and $b<+\infty$ such that for all $\gamma\in\ocint{0,\bar{\gamma}}$,
\begin{equation}
\label{eq:def-discrete-drift}
\RKer_\gamma V_{\bar{\eta}} \leq \lambda^\gamma V_{\bar{\eta}} + \gamma b \;;
\end{equation}
\item\label{item:condition_ergo_II} there exists $\varepsilon\in\ocint{0,1}$ such that for all $\gamma\in\ocint{0,\bar{\gamma}}$ and $x,x'\in\defEnsLigne{V_{\bar{\eta}} \leq \widetilde{M}}$,
\begin{equation*}
\tvnorm{\Rker_\gamma^{\ceil{1/\gamma}}(x,\cdot) - \Rker_\gamma^{\ceil{1/\gamma}}(x',\cdot)} \leq 2(1 - \varepsilon) \;,
\end{equation*}
where
\begin{equation*}
\widetilde{M}>\parenthese{\frac{4b \lambda^{-\bar{\gamma}}}{\log(1/\lambda)}-1} \vee 1 \;.
\end{equation*}
\end{enumerate}
Then, \ref{item:condition_ergo_I} implies by \cite[Lemma 1]{durmus:moulines:2015} that for any $\gamma \in \ocint{0,\bar{\gamma}}$,
\begin{equation}\label{eq:def-discrete-drift_2}
\RKer_\gamma^{\ceil{1/\gamma}} V_{\bar{\eta}} \leq \lambda V_{\bar{\eta}} + b\lambda^{-\bar{\gamma}}/\log(1/\lambda) \;.
\end{equation}
Therefore, applying
\cite[Theorem~19.4.1]{douc:moulines:priouret:soulier:2018} to $\RKer^{\ceil{1/\gamma}}_{\gamma}$ for $\gamma \in \ocint{0,\bar{\gamma}}$ using \ref{item:condition_ergo_II} and \eqref{eq:def-discrete-drift_2}, it follows that \eqref{eq:def-V-unif} holds with $V= V_{\bar{\eta}}$ and $\pi_{\gamma}(V_{\bar{\eta}}) < +\infty$. Accordingly, it is enough to show that conditions \ref{item:condition_ergo_I} and \ref{item:condition_ergo_II} hold. This is achieved for ULA in \Cref{propo:super_lyap_ula} and \Cref{propo:small_set_ula} in \Cref{subsec:geom-ergodicity-ula}; relying on these results and the analysis of ULA, the Markov kernel of MALA is shown to fulfill \ref{item:condition_ergo_I} and \ref{item:condition_ergo_II} in \Cref{propo:lyap_mala_total} and \Cref{propo:small_set_mala} in \Cref{subsec:geom-ergodicity-mala}.
For ease of notation, we denote in this Section $\Rmala$ by $\Rkerg$ and $\Rula$ by $\Qgam$ for any $\gamma >0$.
\subsection{Geometric ergodicity for the ULA algorithm}
\label{subsec:geom-ergodicity-ula}
\begin{proposition}
\label{propo:small_set_ula}
Assume \Cref{assumption:U-Sinfty}. Then for any $\Rrm \geq 0$, $x,y \in \mathbb{R}^d$, $\norm{x}\vee \norm{y} \leq \Rrm$, and $\gamma \in \ocint{0,1/L}$, we have
\begin{equation*}
\tvnorm{\updelta_x \Qgam^{\ceil{1/\gamma}} - \updelta_y \Qgam^{\ceil{1/\gamma}}} \leq 2 (1-\varepsilon) \;,
\end{equation*}
with $\varepsilon = 2\mbox{\protect\boldmath$\Phi$}\parenthese{-(1+1/L)^{1/2}(3L)^{1/2}K}$.
\end{proposition}
\begin{proof}
By \Cref{assumption:U-Sinfty} for any $x,y \in \mathbb{R}^d$,
\[ \norm[2]{x-y-\gamma\{\nabla U(x) - \nabla U(y)\}} \leq (1+ \gamma \upkappa(\gamma)) \norm[2]{x-y} \;, \]
where $\upkappa(\gamma) = (2 L+L^2 \gamma)$. The proof follows from \cite[Corollary 5]{debortoli2018back}.
\end{proof}
\begin{proposition}
\label{propo:super_lyap_ula}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA} and let $\bar{\gamma} \in \ocint{0,m/(4L^2)}$. Then, for any $\gamma \in \ocint{0,\bar{\gamma}}$,
\begin{equation*}
Q_{\gamma} V_{\bar{\eta}}(x) \leq \exp\parenthese{-\bar{\eta} m \gamma \norm[2]{x}/4} V_{\bar{\eta}}(x) + b_{\bar{\eta}} \gamma \mathbbm{1}_{\ball{0}{K_3}}(x) \;,
\end{equation*}
where $\bar{\eta} = \min(m/16,(8\bar{\gamma})^{-1})$, $K_3 = \max(K_2,4\sqrt{d/m})$, and
\begin{equation}
\label{eq:coeffs_super_lyap_mala}
\begin{aligned}
b_{\bar{\eta}} &= \parentheseDeux{\bar{\eta} \defEns{ m/4+ (1+16\bar{\eta}\bar{\gamma})(4\bar{\eta} + 2 L + \bar{\gamma} L^2)} K^2_3 +4 \bar{\eta} d } \\
& \qquad \times \exp\parentheseDeux{\bar{\gamma}\bar{\eta}\defEns{m/4+ (1+16\bar{\eta}\bar{\gamma})(4\bar{\eta} + 2 L + \bar{\gamma} L^2)} K_3^2 + (d/2)\log(2)}\;.
\end{aligned}
\end{equation}
\end{proposition}
\begin{proof}
Let $\gamma \in \ocint{0,\bar{\gamma}}$.
First, note that for any $x \in \mathbb{R}^d$, we have
\begin{multline*}
\bar{\eta} \norm[2]{x-\gamma \nabla U(x) + \sqrt{2\gamma} z} -\norm[2]{z} /2 \\
= -\frac{1-4\bar{\eta}\gamma}{2} \norm[2]{z-\frac{ 2(2\gamma)^{1/2}\bar{\eta}}{1-4\bar{\eta}\gamma}\{x-\gamma\nabla U(x)\}} + \frac{\bar{\eta}}{1-4\bar{\eta}\gamma} \norm[2]{x- \gamma \nabla U(x)} \;,
\end{multline*}
which implies, since $1-4 \bar{\eta}\gamma > 0$, that
\begin{align}
\nonumber
\Qgam V_{\bar{\eta}}(x) & = (2\uppi)^{-d/2}\int_{\mathbb{R}^d} \exp\parenthese{ \bar{\eta} \norm[2]{x-\gamma \nabla U(x) + \sqrt{2\gamma} z} -\norm[2]{z} /2} \mathrm{d} z \\
\label{eq:1:propo:super_lyap_mala}
& = (1-4\bar{\eta}\gamma)^{-d/2} \exp\parenthese{ \bar{\eta}(1-4\bar{\eta}\gamma)^{-1}\norm[2]{x- \gamma \nabla U(x)}} \;.
\end{align}
We now distinguish the cases $\norm{x} \geq K_3$ and $\norm{x} < K_3$.
By \Cref{ass:condition_MALA} and \Cref{lem:quadratic_behaviour}, for any $x \in \mathbb{R}^d$, $\norm{x} \geq K_3 \geq K_2$, using that $\bar{\eta} \leq m/16$ and $\gamma \leq \bar{\gamma} \leq m/(4L^2)$, we have
\begin{multline*}
(1-4\bar{\eta}\gamma)^{-1} \norm[2]{x- \gamma \nabla U(x)} -\norm[2]{x}\\
\leq \gamma \norm[2]{x}(1-4\bar{\eta} \gamma)^{-1} \parenthese{4\bar{\eta} - m + \gamma L^2} \leq -\gamma (m/2) \norm[2]{x} (1-4\bar{\eta}\gamma)^{-1}\;.
\end{multline*}
Therefore, \eqref{eq:1:propo:super_lyap_mala} becomes
\begin{align*}
\Qgam V_{\bar{\eta}}(x)
& \leq \exp\parenthese{ -\gamma \bar{\eta} (m/2) (1-4\bar{\eta}\gamma)^{-1}\norm[2]{x} - (d/2)\log(1-4\bar{\eta}\gamma)} V_{\bar{\eta}}(x) \\
& \leq \exp\parenthese{ \gamma \bar{\eta}\{- (m/2) \norm[2]{x} + 4 d\}} V_{\bar{\eta}}(x) \;,
\end{align*}
where we have used for the last inequality that $-\log(1-t) \leq 2t$ for $t \in \ccint{0,1/2}$ and $4 \bar{\eta} \gamma \leq 1/2$. The proof of the statement then follows since $\norm{x} \geq K_3 \geq 4 \sqrt{d/m}$.
In the case $\norm{x}< K_3$, by \eqref{eq:1:propo:super_lyap_mala}, \Cref{assumption:U-Sinfty} and since $(1-t)^{-1} \leq 1+4t$ for $t\in\ccint{0,1/2}$, we obtain
\begin{align*}
(1-4\bar{\eta}\gamma)^{-1}\norm[2]{x- \gamma \nabla U(x)} - \norm[2]{x}
&\leq \gamma (1-4\bar{\eta}\gamma)^{-1}\{4\bar{\eta} + 2 L + \gamma L^2\}\norm[2]{x} \\
&\leq \gamma (1+16\bar{\eta}\gamma)\{4\bar{\eta} + 2 L + \gamma L^2\}\norm[2]{x} \;,
\end{align*}
which implies that
\begin{multline*}
\Qgam V_{\bar{\eta}}(x)/V_{\bar{\eta}}(x) \leq \mathrm{e}^{-\bar{\eta} m \gamma \norm[2]{x}/4} \\
+ \exp\parentheseDeux{ \gamma \bar{\eta} \defEns{ m/4+ (1+16\bar{\eta}\gamma)(4\bar{\eta} + 2 L + \gamma L^2)}\norm[2]{x} -(d/2)\log(1-4\bar{\eta}\gamma)} -1 \;.
\end{multline*}
The proof is then completed using that for any $t \geq 0$, $\mathrm{e}^{t} -1 \leq t \mathrm{e}^{t}$, for any $s \in \ccint{0,1/2}$, $-\log(1-s) \leq 2s$ and $4\bar{\eta}\gamma \leq 1/2$.
\end{proof}
\subsection{Geometric ergodicity for the MALA algorithm}
\label{subsec:geom-ergodicity-mala}
We first provide a decomposition in $\gamma$ of $\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}$ defined in \eqref{eq:def-alpha-MALA}. For any $x,z \in \mathbb{R}^d$, by \cite[Lemma 24]{durmus:moulines:saksman:2017}\footnote{Note that with the notation of \cite{durmus:moulines:saksman:2017}, MALA corresponds to HMC with only one leapfrog step and step size equal to $(2\gamma)^{1/2}$.}, we have that
\begin{equation}
\label{lem:durmus_moulines_saksman}
\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z) = \sum_{k=2}^6 \gamma^{k/2} A_{k,\gamma}(x,z) \;,
\end{equation}
where, setting $x_t = x+t\{-\gamma \nabla U(x) + \sqrt{2\gamma} z \}$,
\begin{align*}
& A_{2,\gamma}(x,z)= 2 \int_0^1 \operatorname{D}^2 U(x_t) [z^{\otimes 2}] (1/2-t) \mathrm{d} t \;, \\
& A_{3,\gamma}(x,z)= 2^{3/2} \int_{0}^1 \operatorname{D}^2 U(x_t) [z \otimes \nabla U(x)](t-1/4) \mathrm{d} t \;, \\
& A_{4,\gamma}(x,z)= - \int_{0}^1 \operatorname{D}^2 U(x_t)[ \nabla U(x)^{\otimes 2}] t \mathrm{d} t + (1/2) \norm[2]{ \int_{0}^1 \operatorname{D}^2 U(x_t) [z] \mathrm{d} t } \;, \\
& A_{5,\gamma}(x,z)= -(1/2)^{1/2}\ps{\int_{0}^1 \operatorname{D}^2 U(x_t) [\nabla U(x)] \mathrm{d} t }{ \int_{0}^1 \operatorname{D}^2 U(x_t) [z] \mathrm{d} t} \;, \\
& A_{6,\gamma}(x,z)= (1/4) \norm[2]{\int_{0}^1 \operatorname{D}^2 U(x_t) [\nabla U(x)] \mathrm{d} t } \;.
\end{align*}
\begin{proof}[Proof of \Cref{lem:bound_alpha_mala_1}]
Since $\int_{0}^1 \operatorname{D}^2 U(x) [z^{\otimes 2}](1/2-t) \mathrm{d} t = 0$, we get, setting $x_t = x + t \{-\gamma \nabla U(x) + \sqrt{2\gamma} z \}$,
\begin{multline}
\label{eq:decomposition-A-2}
A_{2,\gamma}(x,z) \\= \sqrt{\gamma} \iint_0^1 \operatorname{D}^3 U (s x_t + (1-s) x) \parentheseDeux{z^{\otimes 2} \otimes \{ -\gamma^{1/2} \nabla U(x) + \sqrt{2} z \}} (1/2-t) t \mathrm{d} s \mathrm{d} t \;.
\end{multline}
The proof follows from $\sup_{x \in \mathbb{R}^d} \norm{ \operatorname{D}^2 U(x)} \leq L $ and $\sup_{x \in \mathbb{R}^d} \norm{ \operatorname{D}^3 U(x)} \leq M $.
\end{proof}
\begin{lemma}
\label{lem:bound_alpha_mala_2}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then, for any $\bar{\gamma} \in \ocint{0, m^3/(4L^4)}$ there exists $C_{2,\bar{\gamma}} < \infty$ such that for any $\gamma \in \ocint{0,\bar{\gamma}}$, $x,z \in \mathbb{R}^d$ satisfying $\norm{x} \geq \max(2 K_1 , K_2)$ and $\norm{z} \leq \norm{x}/(4 \sqrt{2 \gamma})$, where $K_2$ is defined in \Cref{lem:quadratic_behaviour}, it holds
\begin{equation*}
\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z) \leq C_{2,\bar{\gamma}} \gamma \norm[2]{z}\{1+\norm[2]{z}\} \;.
\end{equation*}
\end{lemma}
\begin{proof}
Let $\gamma \in \ocint{0,\bar{\gamma}}$, $x,z \in \mathbb{R}^d$ satisfying $\norm{x} \geq \max(2 K_1 , K_2)$ and $\norm{z} \leq \norm{x}/(4 \sqrt{2 \gamma})$.
Using \eqref{lem:durmus_moulines_saksman}, we get, setting \[ A_{4,0,\gamma}(x,z)= \int_{0}^1 \operatorname{D}^2 U(x_t) [\nabla U(x)^{\otimes 2}] t \mathrm{d} t \;, \]
\begin{multline}
\label{eq:2}
\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z) \leq 2 \gamma A_{2,\gamma}(x,z) -\gamma^2 A_{4,0,\gamma}(x,z) \\+(2\gamma)^{3/2}L^2 \norm{z}\norm{x} + (\gamma^2/2) L^2 \norm[2]{z} + (\gamma^5/2)^{1/2} L^3 \norm{z}\norm{x} + (\gamma^3/4) L^4 \norm[2]{x} \;.
\end{multline}
By \Cref{ass:condition_MALA}, \Cref{lem:quadratic_behaviour} and \Cref{lem:bounde_pertub_hessian}, we get for any $x \in \mathbb{R}^d$, $\norm{x} \geq \max(2K_1 ,K_2)$,
\begin{equation}
\label{eq:3}
A_{4,0,\gamma}(x,z) \geq (m/2)^3\norm[2]{x} \;.
\end{equation}
Combining this result with \eqref{eq:decomposition-A-2} and \eqref{eq:3} in \eqref{eq:2}, we obtain, using $\gamma \leq \bar{\gamma} \leq m^3/(4L^4)$,
\begin{align*}
\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)
& \leq 2 \gamma M \defEns{\sqrt{2\gamma} \norm[3]{z} + \gamma L \norm[2]{z}\norm{x}} -\gamma^2(m^3/2^4) \norm[2]{x} \\
& \qquad +(2\gamma)^{3/2}L^2 \norm{z}\norm{x} + (\gamma^2/2) L^2 \norm[2]{z} + (\gamma^5/2)^{1/2} L^3 \norm{z}\norm{x}\;.
\end{align*}
Since for any $a, b \in \mathbb{R}^+$ and $\varepsilon > 0$, $ab \leq (\varepsilon/2) a^2 + 1/(2\varepsilon) b^2$, we obtain
\begin{align*}
& \tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z) \leq \gamma \norm[2]{z} \Big\{ 2^{1/2} L^2 \varepsilon^{-1} + (\gamma/2)L^2 + 2^{-3/2} \gamma^{3/2} L^3 \varepsilon^{-1} \\
&\qquad \qquad \qquad \qquad \qquad + (2^3\gamma)^{1/2} M \norm{z} + \gamma M L \varepsilon^{-1}\norm[2]{z} \Big\} \\
&\qquad \qquad \qquad + \norm[2]{x} \gamma^2 \parentheseDeux{\varepsilon\defEns{L M +2^{1/2} L^2 + 2^{-3/2} \bar{\gamma}^{1/2} L^3 } -m^3/2^4} \;.
\end{align*}
Choosing $\varepsilon = (m^3/2^4) \defEnsLigne{L M +2^{1/2} L^2 + 2^{-3/2} \bar{\gamma}^{1/2} L^3}^{-1}$ concludes the proof.
\end{proof}
\begin{lemma}
\label{propo:lyap_mala}
Assume \Cref{assumption:U-Sinfty}, \Cref{ass:condition_MALA} and let $\bar{\gamma}\in\ocint{0,m/(4L^2)}$. Then, for any $\gamma \in \ocint{0,\bar{\gamma}}$ and $x\in\mathbb{R}^d$,
\begin{equation*}
\int_{\mathbb{R}^d} \norm[2]{y} Q_{\gamma}(x,\mathrm{d} y) \leq \defEns{1-(m\gamma)/2} \norm[2]{x}+ \tilde{b} \gamma \mathbbm{1}_{\ball{0}{K_4}}(x) \;,
\end{equation*}
where $Q_{\gamma}$ is the Markov kernel of ULA defined in \eqref{eq:def-kernel-ULA},
\begin{equation*}
K_4 = \max\parenthese{K_2, 2\sqrt{(2d)/m}} \;, \quad
\tilde{b} = 2d + K_4^2 \parenthese{\bar{\gamma} L^2 + 2L + m/2} \;.
\end{equation*}
\end{lemma}
\begin{proof}
Let $\gamma\in\ocint{0,\bar{\gamma}}$ and $x\in\mathbb{R}^d$. By \Cref{assumption:U-Sinfty}, we have
\begin{equation*}
\int_{\mathbb{R}^d} \norm[2]{y} Q_{\gamma}(x,\mathrm{d} y) \leq
2\gamma d + \norm[2]{x}(1+\gamma^2 L^2) - 2\gamma\ps{\nabla U(x)}{x} \;.
\end{equation*}
We distinguish the cases $\norm{x} \geq K_4$ and $\norm{x} < K_4$.
If $\norm{x} \geq K_4 \geq K_2$, by \Cref{lem:quadratic_behaviour}, and since $\gamma \leq \bar{\gamma} \leq m/(4L^2)$, $\norm{x} \geq K_4 \geq 2\sqrt{(2d)/m}$,
\begin{align*}
\int_{\mathbb{R}^d} \norm[2]{y} Q_{\gamma}(x,\mathrm{d} y) &\leq
\norm[2]{x} \parentheseDeux{1-\gamma \defEns{m - \gamma L^2 - (2d)/\norm[2]{x}}} \\
&\leq \norm[2]{x} \defEns{1-\gamma m /2} \;.
\end{align*}
If $\norm{x} < K_4$, we obtain
\begin{equation*}
\int_{\mathbb{R}^d} \norm[2]{y} Q_{\gamma}(x,\mathrm{d} y) \leq
\norm[2]{x} \defEns{1-\gamma m /2} +
\gamma \norm[2]{x} \parenthese{\gamma L^2 + 2 L + m/2} + 2\gamma d \;,
\end{equation*}
which concludes the proof.
\end{proof}
\begin{lemma}
\label{lem:diff_tv_MALA_ULA}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA} and let $\bar{\gamma}\in\ocint{0,m/(4L^2)}$. Then, there exist $C_{3,\bar{\gamma}},C_{4,\bar{\gamma}}\geq 0$ such that for any $x \in \mathbb{R}^d$ and $\gamma \in \ocint{0,\bar{\gamma}}$, we have
\begin{align}
\label{eq:1:lem:diff_tv_MALA_ULA}
\tvnorm{\updelta_x \Qgam - \updelta_x \Rkerg} & \leq C_{3,\bar{\gamma}} \gamma^{3/2} (1+\norm[2]{x}) \;, \\
\label{eq:2:lem:diff_tv_MALA_ULA}
\tvnorm{\updelta_x \Qgam^{\ceil{1/\gamma}} - \updelta_x \Rkerg^{\ceil{1/\gamma}}} &\leq C_{4,\bar{\gamma}} \gamma^{1/2} (1+\norm[2]{x}) \;.
\end{align}
\end{lemma}
\begin{proof}
Let $x \in \mathbb{R}^d$ and $\gamma \in \ocint{0,\bar{\gamma}}$.
We first show that \eqref{eq:1:lem:diff_tv_MALA_ULA} holds and then use this result to prove \eqref{eq:2:lem:diff_tv_MALA_ULA}.
Let $f : \mathbb{R}^d \to \mathbb{R}$ be a bounded and measurable function. Then, by \eqref{eq:def-kernel-ULA} and \eqref{eq:def-kernel-MALA}, we have
\begin{align*}
&\abs{\Qgam f(x) - \Rkerg f(x)} \\
& \qquad = \Big| \int_{\mathbb{R}^d}\{f(x-\gamma \nabla U(x) + \sqrt{2 \gamma} z) - f(x)\} \\
&\phantom{----------} \times \{1 - \min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)}) \} \boldsymbol{\varphi}(z) \mathrm{d} z \Big| \\
& \qquad \leq 2 \norm{f}_{\infty} \int_{\mathbb{R}^d} \abs{1 - \min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)}) } \boldsymbol{\varphi}(z) \mathrm{d} z \\
&\qquad \leq 2 \norm{f}_{\infty} \int_{\mathbb{R}^d} \abs{\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)} \boldsymbol{\varphi}(z) \mathrm{d} z \;.
\end{align*}
The conclusion of \eqref{eq:1:lem:diff_tv_MALA_ULA} then follows from an application of \Cref{lem:bound_alpha_mala_1}.
We now turn to the proof of \eqref{eq:2:lem:diff_tv_MALA_ULA}. Consider the following decomposition
\begin{equation*}
\updelta_x \Qgam^{\ceil{1/\gamma}} - \updelta_x \Rkerg^{\ceil{1/\gamma}} = \sum_{k=0}^{\ceil{1/\gamma}-1} \updelta_x \Qgam^k \{\Qgam - \Rkerg\} \Rkerg^{\ceil{1/\gamma}-k-1} \;.
\end{equation*}
Therefore, using the triangle inequality, we obtain that
\begin{equation}
\label{eq:4}
\tvnorm{\updelta_x \Qgam^{\ceil{1/\gamma}} - \updelta_x \Rkerg^{\ceil{1/\gamma}}} \leq \sum_{k=0}^{\ceil{1/\gamma}-1} \tvnorm{ \updelta_x \Qgam^k \{\Rkerg - \Qgam\} \Rkerg^{\ceil{1/\gamma}-k-1}} \;.
\end{equation}
We now bound each term in the sum. Let $k \in \{0,\ldots,\ceil{1/\gamma}-1\}$ and $f : \mathbb{R}^d \to \mathbb{R}$ be a bounded and measurable function. By \eqref{eq:1:lem:diff_tv_MALA_ULA}, we obtain that
\[ \abs{ \updelta_x \{\Rkerg - \Qgam\} \Rkerg^{\ceil{1/\gamma}-k-1} f} \leq C_{3,\bar{\gamma}} \norm{f}_{\infty} \gamma^{3/2} \{1+\norm[2]{x}\} \;, \]
and therefore, using \Cref{propo:lyap_mala}, we get
\begin{equation*}
\abs{ \updelta_x \Qgam^k \{\Rkerg - \Qgam\} \Rkerg^{\ceil{1/\gamma}-k-1} f} \leq C_{3,\bar{\gamma}} \norm{f}_{\infty} \gamma^{3/2} \{1+(1-m\gamma/2)^k \norm[2]{x} + 2\tilde{b}/m \} \;.
\end{equation*}
Plugging this result in \eqref{eq:4}, we obtain
\begin{align*}
\tvnorm{\updelta_x \Qgam^{\ceil{1/\gamma}} - \updelta_x \Rkerg^{\ceil{1/\gamma}}} &\leq C_{3,\bar{\gamma}} \gamma^{3/2} \sum_{k=0}^{\ceil{1/\gamma}-1} \{1+(1-m\gamma/2)^k \norm[2]{x} + 2\tilde{b}/m \} \\
&\leq C_{3,\bar{\gamma}} \gamma^{1/2} \{1+2(\norm[2]{x}+\tilde{b})/m\} \;,
\end{align*}
which concludes the proof.
\end{proof}
\begin{proposition}
\label{propo:small_set_mala}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. Then for any $\Rrm \geq 0$ there exist $\bar{\gamma} > 0$ and $\varepsilon >0$ such that for any $x,y \in \mathbb{R}^d$, $\norm{x}\vee \norm{y} \leq \Rrm$, and $\gamma \in \ocint{0,\bar{\gamma}}$, we have
\begin{equation}
\label{eq:small_set_mala_propo}
\tvnorm{\updelta_x \Rkerg^{\ceil{1/\gamma}} - \updelta_y \Rkerg^{\ceil{1/\gamma}}} \leq 2 (1-\varepsilon) \;.
\end{equation}
\end{proposition}
\begin{proof}
First note that for any $x,y \in \mathbb{R}^d$, $\gamma >0$, by the triangle inequality, we obtain
\begin{multline}
\label{eq:decomposiiton_small_set_mala}
\tvnorm{\updelta_x \Rkerg^{\ceil{1/\gamma}} - \updelta_y \Rkerg^{\ceil{1/\gamma}}} \leq \tvnorm{\updelta_x \Rkerg^{\ceil{1/\gamma}} - \updelta_x \Qgam^{\ceil{1/\gamma}}}\\ + \tvnorm{\updelta_x \Qgam^{\ceil{1/\gamma}} - \updelta_y \Qgam^{\ceil{1/\gamma}}} + \tvnorm{\updelta_y \Rkerg^{\ceil{1/\gamma}} - \updelta_y \Qgam^{\ceil{1/\gamma}}} \;.
\end{multline}
We now give some bounds for each term on the right-hand side for any $x,y \in \mathbb{R}^d$, $\norm{x}\vee \norm{y} \leq \Rrm$ for a fixed $\Rrm \geq 0$ and $\gamma \leq 1/L$.
By \Cref{propo:small_set_ula}, there exists $\varepsilon_1>0$ such that for any $x,y \in \mathbb{R}^d$, $\norm{x}\vee \norm{y} \leq \Rrm$ and $\gamma \leq 1/L$,
\begin{equation}\label{eq:bound_small_ULA_proof_small_MALA}
\tvnorm{\updelta_x \Qgam^{\ceil{1/\gamma}} - \updelta_y \Qgam^{\ceil{1/\gamma}}} \leq
2(1-\varepsilon_1) \;.
\end{equation}
In addition, by \Cref{lem:diff_tv_MALA_ULA}, there exists $C\geq 0$ such that for any $\gamma \in \ocint{0,m/(4L^2)}$ and $z \in \mathbb{R}^d$, $\norm{z} \leq \Rrm$,
\begin{equation*}
\tvnorm{\updelta_z \Qgam^{\ceil{1/\gamma}} - \updelta_z \Rkerg^{\ceil{1/\gamma}}} \leq C \gamma^{1/2}(1+\Rrm^2) \;.
\end{equation*}
Combining this result with \eqref{eq:bound_small_ULA_proof_small_MALA} in \eqref{eq:decomposiiton_small_set_mala}, we obtain that for any $x,y\in \mathbb{R}^d$, $\norm{x}\vee\norm{y} \leq \Rrm$, $\gamma \in \ocint{0,m/(4L^2)}$,
\begin{equation*}
\tvnorm{\updelta_x \Rkerg^{\ceil{1/\gamma}} - \updelta_y \Rkerg^{\ceil{1/\gamma}}} \leq 2(1-\varepsilon_1) + 2 C\gamma^{1/2}(1+\Rrm^2) \;.
\end{equation*}
Therefore, we obtain that for any $x,y \in \mathbb{R}^d$, $\norm{x} \vee \norm{y} \leq \Rrm$, $\gamma \in \ocint{0,\bar{\gamma}}$, \eqref{eq:small_set_mala_propo} holds with $\varepsilon \leftarrow \varepsilon_1/2$ taking
\begin{equation*}
\bar{\gamma} = m/(4L^2) \wedge \parentheseDeux{\varepsilon_1^{2}\parenthese{2C(1+\Rrm^2)}^{-2}} \;.
\end{equation*}
\end{proof}
\begin{lemma}
\label{lem_tail_chi2}
Let $\bar{\gamma} >0$ and $\gamma \in \ocint{0,\bar{\gamma}}$. Then, for any $x \in \mathbb{R}^d$, $\norm{x} \geq 20\sqrt{2\bar{\gamma} d}$,
\begin{equation*}
\int_{\mathbb{R}^d \setminus \ball{0}{\norm{x}/(4\sqrt{2\gamma})}} \boldsymbol{\varphi}(z) \mathrm{d} z \leq \exp(-\norm{x}^2/(128\gamma)) \;.
\end{equation*}
\end{lemma}
\begin{proof}
Let $x>0$.
By \cite[Lemma 1]{laurent:massart:2000},
\begin{equation*}
\mathbb{P}(\norm[2]{Z} \geq 2\{\sqrt{d} + \sqrt{x}\}^2) \leq
\mathbb{P}(\norm[2]{Z} \geq d + 2 \sqrt{dx} + 2x) \leq
\mathrm{e}^{-x} \;,
\end{equation*}
where $Z$ is a $d$-dimensional standard Gaussian vector.
Setting $t=2\{\sqrt{d} + \sqrt{x}\}^2$, we obtain
\begin{equation*}
\mathbb{P}(\norm[2]{Z} \geq t) \leq \exp\parenthese{-\defEns{d + t/2 - \sqrt{2td}}} \;,
\end{equation*}
and for $\sqrt{t} \geq 5\sqrt{d}$, we get $\mathbb{P}(\norm{Z} \geq \sqrt{t}) \leq \mathrm{e}^{-t/4}$ which gives the result.
\end{proof}
\begin{proposition}
\label{propo:lyap_mala_total}
Assume \Cref{assumption:U-Sinfty} and \Cref{ass:condition_MALA}. There exist $\bar{\gamma}>0$, $\varpi>0$, and $K_5,\bar{b} \geq 0$ such that for any $\gamma \in \ocint{0,\bar{\gamma}}$ and $x\in\mathbb{R}^d$,
\begin{equation*}
R_{\gamma}V_{\bar{\eta}}(x) \leq (1-\varpi \gamma)V_{\bar{\eta}}(x)+ \bar{b} \gamma \mathbbm{1}_{\ball{0}{K_5}}(x) \;,
\end{equation*}
where $R_{\gamma}$ is the Markov kernel of MALA defined by \eqref{eq:def-kernel-MALA} and $\bar{\eta}$ is given by \eqref{eq:coeffs_super_lyap_mala}.
\end{proposition}
\begin{proof}
Let $\bar{\gamma}_1 = m/(4L^2)$. By \eqref{eq:diff-rula-rmala} and \Cref{propo:super_lyap_ula}, for any $\gamma \in \ocint{0,\bar{\gamma}_1}$ and $x \in \mathbb{R}^d$,
\begin{align*}
\Rkerg V_{\bar{\eta}}(x)
&\leq \Qgam V_{\bar{\eta}}(x) + V_{\bar{\eta}}(x)\int_{\mathbb{R}^d} \{1-\min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)})\} \boldsymbol{\varphi}(z) \mathrm{d} z\\
& \leq \mathrm{e}^{-\bar{\eta} m \gamma \norm[2]{x}/4} V_{\bar{\eta}}(x) + b_{\bar{\eta}} \gamma \mathbbm{1}_{\ball{0}{K_3}}(x) \\
&\phantom{----}+ V_{\bar{\eta}}(x)\int_{\mathbb{R}^d} \{1-\min(1,\mathrm{e}^{-\tau_\step^{\scriptscriptstyle{\operatorname{MALA}}}(x,z)})\} \boldsymbol{\varphi}(z) \mathrm{d} z \;,
\end{align*}
where $K_3$ and $b_{\bar{\eta}}$ are given in \eqref{eq:coeffs_super_lyap_mala}.
Let
\begin{equation*}
\bar{\gamma}_2 = \min\parenthese{1, \bar{\gamma}_1, m^3/(4L^4)} \;, \quad
\Rrm_1 = \max\parenthese{1, 2 K_1 , K_2, K_3, 20\sqrt{2\bar{\gamma}_2 d}} \;.
\end{equation*}
Then, by \Cref{lem:bound_alpha_mala_2} and \Cref{lem_tail_chi2}, there exists $C_{1}\geq 0$ such that for any $x \in \mathbb{R}^d$, $\norm{x} \geq \Rrm_1$ and
$\gamma \in \ocint{0,\bar{\gamma}_2}$,
\begin{align*}
\Rkerg V_{\bar{\eta}}(x) & \leq \mathrm{e}^{-\bar{\eta} m \gamma \norm[2]{x}/4} V_{\bar{\eta}}(x) + V_{\bar{\eta}}(x) \defEns{C_1 \gamma + \exp(-\norm[2]{x}/(128\gamma))} \\
& \leq \mathrm{e}^{-\bar{\eta} m \gamma \norm[2]{x}/4} V_{\bar{\eta}}(x) + V_{\bar{\eta}}(x) \defEns{C_1 \gamma + \exp(-1/(128\gamma))} \;.
\end{align*}
Using that there exists $C_2 \geq 0$ such that $\sup_{t \in \ooint{0,1}} \{t^{-1} \exp(-1/(128 t))\} \leq C_2$ we get for any $x \in \mathbb{R}^d$, $\norm{x} \geq \Rrm_1$, $\gamma \in \ocint{0,\bar{\gamma}_2}$,
\begin{equation*}
\Rkerg V_{\bar{\eta}}(x) \leq \mathrm{e}^{-\bar{\eta} m \gamma \norm[2]{x}/4} V_{\bar{\eta}}(x) + V_{\bar{\eta}}(x) \gamma \defEns{C_1 + C_2} \;.
\end{equation*}
Let
\begin{equation*}
\Rrm_2 = \max\parenthese{\Rrm_1, 4(C_1 + C_2)^{1/2} (\bar{\eta} m)^{-1/2}} \;, \quad
\bar{\gamma}_3 = \min\parenthese{\bar{\gamma}_2, 4\defEns{m \bar{\eta} \Rrm_2^2}^{-1}} \;.
\end{equation*}
Then, since for any $t \in \ccint{0,1}$, $\mathrm{e}^{-t} \leq 1-t/2$, we get for any $x \in \mathbb{R}^d$, $\norm{x} \geq \Rrm_2$, $\gamma \in \ocint{0,\bar{\gamma}_3}$,
\begin{align}
\nonumber
\Rkerg V_{\bar{\eta}}(x)& \leq \mathrm{e}^{-\bar{\eta} m \gamma \Rrm_2^2 /4} V_{\bar{\eta}}(x) + V_{\bar{\eta}}(x) \gamma \defEns{C_1 + C_2} \\
\nonumber
& \leq \parentheseDeux{1-\gamma\defEns{\bar{\eta} m \Rrm_{2}^2 /8 -C_1-C_2}} V_{\bar{\eta}}(x) \\
\label{eq:drift_mala_totla_2}
& \leq \defEns{1-\gamma \bar{\eta} m \Rrm_{2}^2 /16} V_{\bar{\eta}}(x) \;.
\end{align}
In addition, by \Cref{lem:bound_alpha_mala_1}, using that for any $t \in \mathbb{R}$, $1-\min(1,\mathrm{e}^{-t}) \leq \abs{t}$, there exists $C_3\geq 0$ such that for any $x \in \mathbb{R}^d$, $\norm{x} \leq \Rrm_2$ and $\gamma \in \ocint{0,\bar{\gamma}_3}$,
\begin{align*}
\Rkerg V_{\bar{\eta}}(x) & \leq V_{\bar{\eta}}(x) +b_{\bar{\eta}} \gamma \mathbbm{1}_{\ball{0}{K_3}}(x) + C_3 \gamma^{3/2} \int_{\mathbb{R}^d} \{1+\norm[2]{x} + \norm[4]{z}\} \boldsymbol{\varphi}(z) \mathrm{d} z \\
& \leq (1-\gamma \bar{\eta} m \Rrm_{2}^2 /16) V_{\bar{\eta}}(x) + \gamma \bar{\eta} m \Rrm_2^2 \mathrm{e}^{\bar{\eta} \Rrm_2^2} / 16 + \gamma b_{\bar{\eta}} \\
&\phantom{-------------}+ C_3 \gamma \bar{\gamma}_3^{1/2} \defEns{1 + \Rrm^2_2 + C_4} \;,
\end{align*}
where $C_4 = \int_{\mathbb{R}^d} \norm[4]{z} \boldsymbol{\varphi}(z) \mathrm{d} z$.
Combining this result and \eqref{eq:drift_mala_totla_2} completes the proof.
\end{proof}
\appendix
\section{Random Walk Metropolis (RWM) algorithm}
\label{sec:additional-proofs}
We show \eqref{eq:def-discrete-drift} for the RWM algorithm.
For that purpose, consider the following additional assumption on $U$.
\begin{assumptionS}\label{assumption:U-dom-drift-RWM}
There exist $\chi, \widetilde{K}>0$ such that for all $x\in\mathbb{R}^d$, $\norm{x} \geq \widetilde{K}$,
\begin{equation*}
\norm{\nabla \pU(x)} \geq \chi^{-1}\;, \quad
\norm{\operatorname{D}^3 U(x)} \leq \chi \norm{\operatorname{D}^2 U(x)}\; , \quad
\norm{\operatorname{D}^2 U(x)} \leq \chi \norm{\nabla \pU(x)}
\end{equation*}
and $\lim_{\norm{x}\to+\infty} \norm{\operatorname{D}^2 U(x)} / \norm[2]{\nabla \pU(x)} = 0$.
\end{assumptionS}
\begin{lemma}\label{prop:RWM-drift}
Assume that $U\in\setpoly{3}(\mathbb{R}^d,\mathbb{R})$ and \Cref{assumption:U-dom-drift-RWM}.
There exists $\bar{\gamma}>0$ such that for all $\gamma\in\ocint{0,\bar{\gamma}}$, $\Rrwm$ satisfies the drift condition \eqref{eq:def-discrete-drift} with $V=\exp(U/2)$.
\end{lemma}
The proof requires several intermediate results.
In the sequel, $C$ is a positive constant which can change from line to line but does not depend on $\gamma$. We first introduce some notation and state two technical lemmas.
For $M\in\mathbb{R}^{d \times d}$, denote by $\Vnorm[\text{F}]{M}$ the Frobenius norm of $M$.
For a set $A \subset \mathbb{R}^d$, define by $A^{\text{c}} = \mathbb{R}^d \setminus A$.
For all $x \in \mathbb{R}^{\tilde{d}}$ and $K >0$, we denote by $\bouled{x}{K}{\tilde{d}}$ (respectively $\boulefermeed{x}{K}{\tilde{d}}$), the open (respectively closed) ball centered at $x$ of radius $K$. When the dimension $d$ of the state space $\mathbb{R}^d$ is unambiguous, they are respectively denoted by $\boule{x}{K}$ and $\boulefermee{x}{K}$.
For all $x\in\mathbb{R}^d$ and $\gamma>0$, define the acceptance region
\begin{equation}
\label{eq:def-accept-region-rwm}
\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma} = \defEns{z\in\mathbb{R}^d : \tau^{\scriptscriptstyle{\operatorname{RWM}}}_{\gamma}(x,z) \leq 0} \;.
\end{equation}
For all $x\in\mathbb{R}^d$ and $\gamma>0$, define $G:\mathbb{R}_+\to\ccint{0,1}$ for all $t\geq 0$ by
\begin{equation}\label{eq:def-Grwm}
G(t) = 1/2 + 2\mathrm{e}^{t^2/2} \bar{\Phi}(t) - \mathrm{e}^{2t^2} \bar{\Phi}(2t) \;.
\end{equation}
\begin{lemma}\label{lemma:Grwm}
There exists $t_0>0$ such that for all $t\in\ccint{0,t_0}$, $G(t) \leq 1 - (t^2 /2)$ and the function $G$ is non-increasing.
\end{lemma}
\begin{proof}
We have for all $t\geq 0$,
\begin{equation}\label{eq:Grwm-derivative}
G'(t) = 2t\mathrm{e}^{t^2/2} \defEns{\bar{\Phi}(t) - 2\mathrm{e}^{(3t^2)/2} \bar{\Phi}(2t)}
\end{equation}
and $G'(0)=0$, $G''(0)=-1$ so there exists $t_0>0$ such that for all $t\in\ccint{0,t_0}$, $G(t) \leq 1 - (t^2 / 2)$, which is the first statement of the lemma. Regarding the second statement, by an integration by parts, we have for all $s>0$
\begin{equation*}
\bar{\Phi}(s) = \frac{\mathrm{e}^{-s^2/2}}{\sqrt{2\uppi}s} - \frac{1}{\sqrt{2\uppi}} \int_s^{+\infty} \frac{\mathrm{e}^{-u^2/2}}{u^2} \mathrm{d} u
\end{equation*}
and using a change of variables $u=v+t$, we get for all $t>0$
\begin{equation*}
\bar{\Phi}(t) - 2\mathrm{e}^{(3t^2)/2} \bar{\Phi}(2t) = \int_t^{+\infty} \defEns{\frac{2\mathrm{e}^{t(t-v)}}{(v+t)^2}-\frac{1}{v^2}} \frac{\mathrm{e}^{-v^2/2}}{\sqrt{2\uppi}} \mathrm{d} v \;.
\end{equation*}
We now show that $\bar{\Phi}(t) - 2\mathrm{e}^{(3t^2)/2} \bar{\Phi}(2t) \leq 0$ for all $t\geq 0$ which will finish the proof using \eqref{eq:Grwm-derivative}. We distinguish the case $t\geq 0.4$ and $t\in\ccint{0,0.4}$. For $t\geq 0.4$, define $h_t:\coint{t,+\infty}\to\mathbb{R}$ given for all $v\geq t$ by
\begin{equation*}
h_t(v) = 2\ln(1+t/v) - \ln(2) - t^2 + vt \;.
\end{equation*}
We show in the sequel that $h_t(v) \geq 0$ for all $v \geq t \geq 0.4$, which implies $\bar{\Phi}(t) - 2\mathrm{e}^{(3t^2)/2} \bar{\Phi}(2t) \leq 0$ for all $t\geq 0.4$. We have for all $v\geq t$
\begin{equation*}
h_{t}'(v) = t\defEns{-2/\{v(t+v)\} +1}
\end{equation*}
and $h_t$ is decreasing on $\ccint{t, v_{\operatorname{min}} \vee t}$ and increasing on $\coint{v_{\operatorname{min}} \vee t, +\infty}$ where $v_{\operatorname{min}}=(-t+\sqrt{t^2+8})/2$. Note that $v_{\operatorname{min}} \geq t$ is equivalent to $t \leq 1$ and for all $t\geq 1$, $h_t(t) = \ln(2)>0$. Define $\ell:\ocint{0,1}\to\mathbb{R}$ given for all $t\in\ocint{0,1}$ by
\begin{align*}
\ell(t) = h_t(v_{\operatorname{min}}) &= 2\ln\parenthese{\frac{\sqrt{t^2+8}+t}{\sqrt{t^2+8}-t}} -\ln(2) + (t/2)\parenthese{-3t+\sqrt{t^2+8}} \\
&= 5\ln(2) - 4\ln\parenthese{-t+\sqrt{t^2+8}} + (t/2)\parenthese{-3t+\sqrt{t^2+8}} \;.
\end{align*}
We have for all $t\in\ocint{0,1}$
\begin{equation*}
\ell'(t) = -3t+\sqrt{t^2+8} \geq 0 \;,
\end{equation*}
$\ell$ is non-decreasing and $\ell(0.4)>0$, which implies that for all $t\in\ccint{0.4,1}$ and $v\geq t$, $h_t(v) \geq 0$. Therefore, $G'(t) \leq 0$ for all $t\geq 0.4$.
For $t\in\ccint{0,0.4}$, we use the following lower and upper bounds by \cite[Theorems 1 and 2]{5963622} for all $s\geq 0$
\begin{equation*}
\frac{\sqrt{\mathrm{e}}}{3\sqrt{\uppi}} \mathrm{e}^{-(3/4)s^2} \leq \bar{\Phi}(s) \leq (1/2) \mathrm{e}^{-s^2/2}
\end{equation*}
and we get for all $t\in\ccint{0,0.4}$
\begin{equation*}
2\mathrm{e}^{(3t^2)/2} \bar{\Phi}(2t) - \bar{\Phi}(t) \geq \mathrm{e}^{-t^2/2} \defEns{\frac{2\sqrt{\mathrm{e}}}{3\sqrt{\uppi}}\mathrm{e}^{-t^2} - \frac{1}{2}} \;.
\end{equation*}
The right hand side is decreasing on $\ccint{0,0.4}$ and positive because \[ (2\sqrt{\mathrm{e}}\mathrm{e}^{-(0.4)^2})/(3\sqrt{\uppi}) - (1/2) \geq 0.02 \;, \]
which implies that $G'(t) \leq 0$ for all $t\in\ccint{0,0.4}$.
\end{proof}
\begin{lemma}\label{lemma:drift-RWM-borned2U}
Assume that $U\in\setpoly{3}(\mathbb{R}^d,\mathbb{R})$ and \Cref{assumption:U-dom-drift-RWM}. Let $x\in\mathbb{R}^d$, \linebreak $\norm{x} \geq \widetilde{M}$ and $K>0$. For all $\gamma>0$ and $z\in\boulefermee{0}{K}$, we have
\begin{multline*}
\norm{\operatorname{D}^2 U(x+\sqrt{2\gamma}z)} \leq \norm{\operatorname{D}^2 U(x)}\defEns{1+C(K)} \\
\text{ where } C(K) = (C \chi K)^{1/2} \gamma^{1/4} \mathrm{e}^{C \chi \sqrt{\gamma}K /2} \;.
\end{multline*}
\end{lemma}
\begin{proof}
Let $z\in\boulefermee{0}{K}$. Define $f:\ccint{0,1} \to \mathbb{R}^{d\times d}$ by $f(t)=\operatorname{D}^2 U(x+t\sqrt{2\gamma}z)-\operatorname{D}^2 U(x)$ for $t\in\ccint{0,1}$. We have
\begin{equation*}
\frac{\mathrm{d}}{\mathrm{d} t}\VnormEq[\text{F}]{f(t)}^2 = \ps{f(t)}{\operatorname{D}^3 U(x+t\sqrt{2\gamma}z) \cdot \sqrt{2\gamma} z}_{\text{F}}
\end{equation*}
where for $i,j\in\defEns{1,\ldots,d}$
\begin{equation*}
\parenthese{\operatorname{D}^3 U(x+t\sqrt{2\gamma}z) \cdot \sqrt{2\gamma} z}_{ij} = \sum_{k=1}^{d} \partial_{ijk} U(x+t\sqrt{2\gamma}z) \sqrt{2\gamma} z_k \;.
\end{equation*}
Using the equivalence of norms in finite dimension and \Cref{assumption:U-dom-drift-RWM}, we get
\begin{align*}
\absolute{\frac{\mathrm{d}}{\mathrm{d} t}\VnormEq[\text{F}]{f(t)}^2} &\leq C \VnormEq[\text{F}]{f(t)}\norm{\operatorname{D}^3 U(x+t\sqrt{2\gamma}z)} \sqrt{2\gamma} \norm{z} \\
&\leq C \chi \parenthese{\VnormEq[\text{F}]{f(t)}^2 + \norm[2]{\operatorname{D}^2 U(x)}}\sqrt{\gamma} \norm{z}
\end{align*}
which gives by Grönwall's inequality,
\begin{equation*}
\norm[2]{f(1)} \leq \norm[2]{\operatorname{D}^2 U(x)} \parenthese{\mathrm{e}^{C \chi \sqrt{\gamma}\norm{z}} -1} \;.
\end{equation*}
Using $(\mathrm{e}^{s} -1)^{1/2} \leq \sqrt{s} \mathrm{e}^{s/2}$ for all $s\geq 0$, we get the result.
\end{proof}
We now proceed to the proof of \Cref{prop:RWM-drift}. Note that we have for all $x\in\mathbb{R}^d$ and $\gamma>0$
\begin{multline}\label{eq:RWM-drift-1}
\frac{\Rrwm V(x)}{V(x)} = \int_{\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma}} \sqrt{\frac{\pi(x)}{\pi(x+\sqrt{2\gamma}z)}} \frac{\mathrm{e}^{-\norm[2]{z}/2}}{(2\uppi)^{d/2}} \mathrm{d} z \\
+ \int_{(\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma})^{\text{c}}} \defEns{1+\sqrt{\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}}-\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}} \frac{\mathrm{e}^{-\norm[2]{z}/2}}{(2\uppi)^{d/2}} \mathrm{d} z
\end{multline}
where $\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma}$ is defined in \eqref{eq:def-accept-region-rwm}.
\paragraph*{Intuition behind the proof}
Before giving the proof of the lemma, we sketch here the analysis of a simple case in one dimension where $U(x) = a \absolute{x}$ (with a proper regularization near $0$), $a>0$ and let $x>0$ be large enough. By \eqref{eq:RWM-drift-1}, we get
\begin{align*}
\frac{\Rrwm V(x)}{V(x)} &\approx \int_0^{+\infty} \mathrm{e}^{-a\sqrt{\gamma/2} z} \frac{\mathrm{e}^{-z^2/2}}{\sqrt{2\uppi}} \mathrm{d} z
\\
&\phantom{---}+ \int_0^{+\infty} \defEns{1+\mathrm{e}^{-a\sqrt{\gamma/2} z}-\mathrm{e}^{-a\sqrt{2\gamma} z /2}} \frac{\mathrm{e}^{-z^2/2}}{\sqrt{2\uppi}} \mathrm{d} z \\
&= (1/2) + 2\mathrm{e}^{a^2 \gamma /4} \bar{\Phi}(\sqrt{\gamma/2}a) - \mathrm{e}^{a^2 \gamma} \bar{\Phi}(\sqrt{2\gamma}a) \\
&= G(a\sqrt{\gamma/2}) \approx 1 - (\gamma a^2)/4 + O(\gamma^{3/2} a^3)
\end{align*}
and the expected contraction in $1-C \gamma$. The proof below is devoted to making this intuition rigorous and the main steps are a localization argument, a comparison to the one dimensional case and an upper bound on the remainder terms.
\begin{figure}
\centering
\begin{tikzpicture}[scale=3]
\draw [->] (-1.25,0) -- (1.25,0);
\draw (1.25,0) node[right] {$z_1$};
\draw [->] (0,-1.25) -- (0,1.25);
\draw (0,1.25) node[above] {$(z_2,\ldots,z_d)$};
\draw (0,0) circle (1);
\draw (60:1) -- (240:1);
\draw (120:1) -- (300:1);
\draw (70:1.1) node[above] {$\cone{0}{\thetag}$};
\draw (150:1.1) node[left] {$\boulefermee{0}{\rayrwm}$};
\draw (60:0.2) arc (60:90:0.2);
\draw (70:0.25) node[above] {$\thetag$};
\draw [dashed] (0,0.5) -| (0.866,0) node[below] {$b(z_{-1})$};
\draw [dashed] (0.288,0.5) -- (0.288,0) node[below] {$c(z_{-1})$};
\draw (100:1) .. controls (95:0.75) and (110:0.25) .. (0,0);
\draw (-70:1) .. controls (-90:0.5) and (-60:0.25) .. (0,0);
\draw (-70:1) node[below] {$\varphi(z_{-1})$};
\end{tikzpicture}
\caption{\label{fig-cone-brwm-phirwm-corwm} Figure illustrating the definitions of $\cone{0}{\thetag}$, $b(z_{-1})$, $c(z_{-1})$ and $\varphi(z_{-1})$.}
\end{figure}
In the sequel, let $x\in\mathbb{R}^d$, $\norm{x} \geq \widetilde{M}$ where $\widetilde{M}$ is given by \Cref{assumption:U-dom-drift-RWM}.
\paragraph*{Step 1: restriction to $\boulefermee{0}{\rayrwm}$}
Define for all $\gamma>0$
\begin{equation}\label{eq:def-rayrwm}
\rayrwm = \{8\log((1/\gamma) \vee 1) + 2d\log(2)\}^{1/2} \;.
\end{equation}
Let $Z$ be a standard $d$-dimensional Gaussian vector. By Markov's inequality and \eqref{eq:def-rayrwm}, we have
\begin{equation}\label{eq:RWM-drift-2}
\mathbb{P}\parenthese{\norm{Z} \geq \rayrwm} \leq \mathrm{e}^{-\rayrwm^2/4} \expe{\mathrm{e}^{\norm[2]{Z}/4}} \leq \exp\parenthese{-\frac{\rayrwm^2}{4} + \frac{d}{2}\log(2)} \leq \gamma^2 \;.
\end{equation}
Using $\pi(x)/\pi(x+\sqrt{2\gamma}z) \leq 1$ for $z\in\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma}$, \[ 1+\sqrt{\pi(x+\sqrt{2\gamma}z)/\pi(x)}-\pi(x+\sqrt{2\gamma}z)/\pi(x) \leq 5/4 \] for $z\in(\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma})^{\text{c}}$, \eqref{eq:RWM-drift-1} and \eqref{eq:RWM-drift-2}, we get
\begin{multline}\label{eq:RWM-drift-1-2}
\frac{\Rrwm V(x)}{V(x)} \leq (5/4)\gamma^2 + \int_{\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma}} \mathbbm{1}_{\boulefermee{0}{\rayrwm}}(z) \sqrt{\frac{\pi(x)}{\pi(x+\sqrt{2\gamma}z)}} \frac{\mathrm{e}^{-\norm[2]{z}/2}}{(2\uppi)^{d/2}} \mathrm{d} z \\
+ \int_{(\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma})^{\text{c}}} \mathbbm{1}_{\boulefermee{0}{\rayrwm}}(z) \defEns{1+\sqrt{\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}}-\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}} \frac{\mathrm{e}^{-\norm[2]{z}/2}}{(2\uppi)^{d/2}} \mathrm{d} z \;.
\end{multline}
\paragraph*{Step 2: splitting $\boulefermee{0}{\rayrwm}$ into $\boulefermee{0}{\rayrwm} \cap \mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma}$ and $\boulefermee{0}{\rayrwm} \cap (\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma})^{\text{c}}$}
In this paragraph, we introduce several geometric quantities illustrated with \Cref{fig-cone-brwm-phirwm-corwm}. Define $\bar{\gamma}>0$ by
\begin{equation}\label{eq:def-gambar}
\max\defEns{(C\chi\rayrwm[\bar{\gamma}])^{1/2}\bar{\gamma}^{1/4} \exp(C\chi\bar{\gamma}^{1/2}\rayrwm[\bar{\gamma}]/2), \; (3/2)\sqrt{2\bar{\gamma}}\rayrwm[\bar{\gamma}]\chi} = 1/2 \;,
\end{equation}
where $C$ is the positive constant given in \Cref{lemma:drift-RWM-borned2U}. Denote by
\begin{equation}\label{eq:def-crwm-1}
C_1 = (C\chi\rayrwm[\bar{\gamma}])^{1/2}\bar{\gamma}^{1/4} \exp(C\chi\bar{\gamma}^{1/2}\rayrwm[\bar{\gamma}]/2) \in \ccint{0,1/2} \;.
\end{equation}
Let $e_1(x) = \nabla \pU(x)/\norm{\nabla \pU(x)}$ and consider the decomposition $z=(z_1, \ldots, z_d)$ of $z$ in an orthonormal basis $(e_1(x), e_2(x),\ldots,e_d(x))$ of $\mathbb{R}^d$. For all $z\in\mathbb{R}^d$, denote by $z_{-1}=(z_2,\ldots,z_d)\in\mathbb{R}^{d-1}$.
For all $\gamma\in\ocint{0,\bar{\gamma}}$, define $\thetag\in\ccint{0,\uppi/4}$ by
\begin{equation}\label{eq:def-tan-thetag}
\tan \thetag = 2\sqrt{2\gamma} \rayrwm \frac{\norm{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}} \parenthese{1+C_1} \in\ccint{0,1} \;.
\end{equation}
Denote by
\[ \cone{0}{\thetag} = \defEns{z\in\mathbb{R}^d : \absolute{z_1} \leq (\tan\thetag) \norm{z_{-1}}} \;. \]
Define $b,c:\boulefermeed{0}{\rayrwm}{d-1} \to \mathbb{R}_+$ for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$ by
\begin{equation}\label{eq:def-brwm-corwm}
b(z_{-1}) = (\rayrwm^2 - \norm[2]{z_{-1}})^{1/2} \quad \text{and} \quad
c(z_{-1}) = (\tan\thetag)\norm{z_{-1}} \;.
\end{equation}
By \Cref{lemma:drift-RWM-borned2U} with $K=\rayrwm$, we have for all $z\in\boulefermee{0}{\rayrwm}$
\begin{equation}\label{eq:RWM-drift2-1}
\norm{\operatorname{D}^2 U(x+\sqrt{2\gamma}z)} \leq \norm{\operatorname{D}^2 U(x)}\parenthese{1+C_1} \;,
\end{equation}
where $C_1$ is given in \eqref{eq:def-crwm-1}. By Taylor's theorem, we have for all $z\in\boulefermee{0}{\rayrwm}$
\begin{equation}\label{eq:RWM-drift-taylor-U}
U(x+\sqrt{2\gamma}z) - U(x) = \sqrt{2\gamma} \norm{\nabla \pU(x)} z_1 + 2 \mathsf{r}_{\step}(z)
\end{equation}
where $\mathsf{r}_{\step}:\boulefermee{0}{\rayrwm}\to\mathbb{R}$ is defined for all $z\in\boulefermee{0}{\rayrwm}$ by
\begin{equation}\label{eq:def-rrwm}
\mathsf{r}_{\step}(z) = \gamma \int_0^1 (1-t) \operatorname{D}^2 U(x+t\sqrt{2\gamma}z)[z^{\otimes 2}] \mathrm{d} t \;.
\end{equation}
By \eqref{eq:def-tan-thetag}, \eqref{eq:RWM-drift2-1} and \eqref{eq:def-rrwm}, we have for all $z\in\boulefermee{0}{\rayrwm} \cap \cone{0}{\thetag}^{\text{c}}$
\begin{align}
\nonumber
4 \mathsf{r}_{\step}(z) &\leq 2\gamma \rayrwm \norm{\operatorname{D}^2U(x)} \parenthese{1+C_1} \parenthese{\absolute{z_1} + \norm{z_{-1}}} \\
\label{eq:driftRWM-rest-term-dom}
&\leq \sqrt{2\gamma} \norm{\nabla \pU(x)} (1/2) \tan \thetag \defEns{1+(\tan \thetag)^{-1}} \absolute{z_1} \leq \sqrt{2\gamma} \norm{\nabla \pU(x)} \absolute{z_1} \;.
\end{align}
By \eqref{eq:RWM-drift-taylor-U} and \eqref{eq:driftRWM-rest-term-dom}, we obtain for all $z\in\boulefermee{0}{\rayrwm} \cap \cone{0}{\thetag}^{\text{c}}$, $z\neq 0$,
\begin{equation}\label{eq:sign-areas-phirwm}
\defEns{U(x+\sqrt{2\gamma}z) - U(x)}z_1 > 0 \;.
\end{equation}
Moreover, by \Cref{assumption:U-dom-drift-RWM} and \eqref{eq:RWM-drift2-1}, we have for all $z\in\boulefermee{0}{\rayrwm}$
\begin{align*}
\ps{e_1(x)}{\nabla U(x+\sqrt{2\gamma}z)} - \norm{\nabla U(x)} &= \sqrt{2\gamma} \int_0^1 \operatorname{D}^2 U(x+t\sqrt{2\gamma}z) [z, e_1(x)] \mathrm{d} t \;,\\
\absolute{\ps{e_1(x)}{\nabla \pU(x+\sqrt{2\gamma}z)} - \norm{\nabla \pU(x)}} &\leq \sqrt{2\gamma} (1+C_1) \chi \rayrwm \norm{\nabla \pU(x)}
\end{align*}
and $\ps{e_1(x)}{\nabla \pU(x+\sqrt{2\gamma}z)} >0$.
By a version of the implicit function theorem given in \Cref{prop:implicit-function-thm}, there exists $\varphi : \boulefermeed{0}{\rayrwm}{d-1} \to \mathbb{R}$ continuous such that for all $\gamma\in\ocint{0,\bar{\gamma}}$,
\begin{multline}\label{eq:def-phirwm}
\defEns{z\in\boulefermee{0}{\rayrwm} : U(x+\sqrt{2\gamma}z) = U(x)} \\
= \defEns{\parenthese{\varphi(z_{-1}), z_{-1}} : z_{-1} \in\boulefermeed{0}{\rayrwm}{d-1}} \;.
\end{multline}
Combining \eqref{eq:sign-areas-phirwm} and \eqref{eq:def-phirwm}, we obtain for all $\gamma\in\ocint{0,\bar{\gamma}}$,
\begin{align}
\label{eq:phirwm-prop-1}
\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma} \cap \boulefermee{0}{\rayrwm} &= \defEns{z\in\boulefermee{0}{\rayrwm} : z_1 \leq \varphi(z_{-1})} \;,\\
\label{eq:phirwm-prop-2}
(\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma})^{\text{c}} \cap \boulefermee{0}{\rayrwm} &= \defEns{z\in\boulefermee{0}{\rayrwm} : z_1 \geq \varphi(z_{-1})} \;,
\end{align}
and for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$, $\absolute{\varphi(z_{-1})} \leq c(z_{-1})$.
These properties and definitions are summarized in \Cref{fig-cone-brwm-phirwm-corwm}.
\paragraph*{Step 3: intermediate upper bound on $\Rrwm V(x)/V(x)$}
Using \eqref{eq:RWM-drift-1-2} and the definitions of $b$ and $\varphi$, see \eqref{eq:def-brwm-corwm}, \eqref{eq:def-phirwm}, \eqref{eq:phirwm-prop-1} and \eqref{eq:phirwm-prop-2}, we have
\begin{equation}\label{eq:upper-bound-RrwmV-1}
\frac{\Rrwm V(x)}{V(x)} \leq (5/4) \gamma^2 + \int_{z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}} g_\step(z_{-1}) \frac{\mathrm{e}^{-\norm[2]{z_{-1}}/2}}{(2\uppi)^{(d-1)/2}} \mathrm{d} z_{-1}
\end{equation}
where $g_\step:\boulefermeed{0}{\rayrwm}{d-1}\to\mathbb{R}_+$ is defined for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$ by
\begin{multline*}
g_\step(z_{-1}) = \int_{-b(z_{-1})}^{(\varphi(z_{-1}) \vee -b(z_{-1})) \wedge b(z_{-1})} \sqrt{\frac{\pi(x)}{\pi(x+\sqrt{2\gamma}z)}} \frac{\mathrm{e}^{-z_{1}^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \\
+ \int_{(\varphi(z_{-1}) \vee -b(z_{-1})) \wedge b(z_{-1})}^{b(z_{-1})} \defEns{1+\sqrt{\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}}-\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;.
\end{multline*}
For all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$, we decompose $g_\step(z_{-1})$ in $g_\step(z_{-1}) = A_{1}(z_{-1}) + A_{2}(z_{-1})$ where $A_1(z_{-1})$ and $A_2(z_{-1})$ are defined by
\begin{align}
\nonumber
A_{1}(z_{-1}) & = \int_{-b(z_{-1})}^{(\varphi(z_{-1}) \vee -b(z_{-1})) \wedge 0} \sqrt{\frac{\pi(x)}{\pi(x+\sqrt{2\gamma}z)}} \frac{\mathrm{e}^{-z_{1}^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \\
\label{eq:def-RWMdrift-A1}
& + \int_{(\varphi(z_{-1}) \vee -b(z_{-1})) \wedge 0}^{0} \defEns{1+\sqrt{\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}}-\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;, \\
\nonumber
A_{2}(z_{-1}) & = \int_{0}^{(\varphi(z_{-1}) \vee 0) \wedge b(z_{-1})} \sqrt{\frac{\pi(x)}{\pi(x+\sqrt{2\gamma}z)}} \frac{\mathrm{e}^{-z_{1}^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \\
\label{eq:def-RWMdrift-A2}
& + \int_{(\varphi(z_{-1}) \vee 0) \wedge b(z_{-1})}^{b(z_{-1})} \defEns{1+\sqrt{\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}}-\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;.
\end{align}
Combining it with \eqref{eq:upper-bound-RrwmV-1}, we obtain
\begin{equation}\label{eq:upper-bound-RrwmV-1-2}
\frac{\Rrwm V(x)}{V(x)} \leq (5/4) \gamma^2 + \int_{z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}} \defEns{A_{1}(z_{-1}) + A_{2}(z_{-1})} \frac{\mathrm{e}^{-\norm[2]{z_{-1}}/2}}{(2\uppi)^{(d-1)/2}} \mathrm{d} z_{-1} \;.
\end{equation}
By \eqref{eq:RWM-drift-taylor-U} and \eqref{eq:def-RWMdrift-A1}, we have for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$
\begin{equation}\label{eq:A1z1}
A_1(z_{-1}) = A_{11}(z_{-1}) + A_{12}(z_{-1}) + A_{13}(z_{-1}) + A_{14}(z_{-1})
\end{equation}
where
\begin{align*}
A_{11}(z_{-1}) & = \int_{-b(z_{-1})}^{0} \mathrm{e}^{\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1} \frac{\mathrm{e}^{-z_{1}^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;, \\
A_{12}(z_{-1}) &= \int_{-b(z_{-1})}^{-b(z_{-1}) \vee -c(z_{-1})} \mathrm{e}^{\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1 + \mathsf{r}_{\step}(z)} \defEns{1 - \mathrm{e}^{-\mathsf{r}_{\step}(z)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;,\\
A_{13}(z_{-1}) &= \int_{-b(z_{-1}) \vee -c(z_{-1})}^{(\varphi(z_{-1}) \vee -b(z_{-1}))\wedge 0} \mathrm{e}^{\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1 + \mathsf{r}_{\step}(z)} \defEns{1 - \mathrm{e}^{-\mathsf{r}_{\step}(z)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;,\\
A_{14}(z_{-1}) &= \int_{(\varphi(z_{-1}) \vee -b(z_{-1})) \wedge 0}^0 \Bigg\{1+\sqrt{\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}}-\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}\\
&\phantom{------------------}-\mathrm{e}^{\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1}\Bigg\} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;.
\end{align*}
By \eqref{eq:RWM-drift-taylor-U} and \eqref{eq:def-RWMdrift-A2}, we have for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$
\begin{align}
\nonumber
A_2(z_{-1}) &= A_{21}(z_{-1}) + A_{22}(z_{-1}) + A_{23}(z_{-1}) + A_{24}(z_{-1}) + A_{25}(z_{-1}) \\
\nonumber
&+ \int_{0}^{(\varphi(z_{-1}) \vee 0) \wedge b(z_{-1})} \Bigg\{\sqrt{\frac{\pi(x)}{\pi(x+\sqrt{2\gamma}z)}}-1-\mathrm{e}^{-\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1} \\
\label{eq:A2z1-temp}
&\phantom{------------------}+\mathrm{e}^{-\sqrt{2\gamma} \norm{\nabla \pU(x)} z_1} \Bigg\} \frac{\mathrm{e}^{-z_{1}^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1
\end{align}
where
\begin{align*}
A_{21}(z_{-1}) & = \int_0^{b(z_{-1})} \defEns{1+\mathrm{e}^{-\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1}-\mathrm{e}^{-\sqrt{2\gamma} \norm{\nabla \pU(x)} z_1}} \frac{\mathrm{e}^{-z_{1}^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;, \\
A_{22}(z_{-1}) &= \int_{(\varphi(z_{-1}) \vee 0) \wedge b(z_{-1})}^{c(z_{-1}) \wedge b(z_{-1})} \mathrm{e}^{-\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1 - \mathsf{r}_{\step}(z)} \defEns{1 - \mathrm{e}^{\mathsf{r}_{\step}(z)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;,\\
A_{23}(z_{-1}) &= \int_{(\varphi(z_{-1}) \vee 0) \wedge b(z_{-1})}^{c(z_{-1}) \wedge b(z_{-1})} \mathrm{e}^{-\sqrt{2\gamma}\norm{\nabla \pU(x)} z_1} \defEns{1 - \mathrm{e}^{-2\mathsf{r}_{\step}(z)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;, \\
A_{24}(z_{-1}) &= \int_{c(z_{-1}) \wedge b(z_{-1})}^{b(z_{-1})} \mathrm{e}^{-\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1 - \mathsf{r}_{\step}(z)} \defEns{1 - \mathrm{e}^{\mathsf{r}_{\step}(z)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;,\\
A_{25}(z_{-1}) &= \int_{c(z_{-1}) \wedge b(z_{-1})}^{b(z_{-1})} \mathrm{e}^{-\sqrt{2\gamma}\norm{\nabla \pU(x)} z_1} \defEns{1 - \mathrm{e}^{-2\mathsf{r}_{\step}(z)}} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;.
\end{align*}
By \eqref{eq:phirwm-prop-1}, $\{\pi(x)/\pi(x+\sqrt{2\gamma}z)\}^{1/2} \leq 1$ for all $z_1\in\ccint{0,\varphi(z_{-1}) \vee 0}$. Hence, the last term in the right hand side of \eqref{eq:A2z1-temp} is nonpositive and we get
\begin{equation}\label{eq:A2z1}
A_2(z_{-1}) \leq A_{21}(z_{-1}) + A_{22}(z_{-1}) + A_{23}(z_{-1}) + A_{24}(z_{-1}) + A_{25}(z_{-1}) \;.
\end{equation}
Combining \eqref{eq:A1z1} and \eqref{eq:A2z1}, we obtain for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$
\begin{multline}\label{eq:RWM-drift-3}
A_1(z_{-1}) +A_2(z_{-1}) \leq A_{11}(z_{-1}) + A_{21}(z_{-1}) + A_{12}(z_{-1}) + A_{13}(z_{-1}) + A_{14}(z_{-1}) \\
+ A_{22}(z_{-1}) + A_{23}(z_{-1}) + A_{24}(z_{-1}) + A_{25}(z_{-1}) \;.
\end{multline}
\paragraph*{Step 4: upper bound on $A_1(z_{-1}) +A_2(z_{-1})$}
We upper bound each term in the right hand side of \eqref{eq:RWM-drift-3} and we first consider the terms $A_{11} + A_{21}$. Define $a:\ocint{0,\bar{\gamma}} \times \mathbb{R}^d \to \mathbb{R}_+$ for all $\gammatilde\in\ocint{0,\bar{\gamma}}$ and $\tilde{x}\in\mathbb{R}^d$, $\norm{\tilde{x}}\geq \widetilde{M}$ by
\begin{equation}\label{eq:def-arwm}
a(\gammatilde,\tilde{x}) = \sqrt{\gammatilde/2} \norm{\nabla \pU(\tilde{x})} \;.
\end{equation}
We have for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$,
\begin{equation}\label{eq:up-bound-Jz1}
A_{11}(z_{-1}) + A_{21}(z_{-1}) \leq G(a(\gamma,x))
\end{equation}
where $G$ is defined in \eqref{eq:def-Grwm}.
We now consider the remainder terms $A_{12}(z_{-1}), A_{13}(z_{-1}), A_{14}(z_{-1}), A_{22}(z_{-1})$, $A_{23}(z_{-1}),A_{24}(z_{-1})$ and $A_{25}(z_{-1})$ in \eqref{eq:RWM-drift-3}.
Let $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$. By definition of $c(z_{-1})$, see \eqref{eq:def-brwm-corwm}, we have for all $z_{1}\in\ccint{-b(z_{-1}), -c(z_{-1}) \vee -b(z_{-1})}$, $z\notin\cone{0}{\thetag}$, and by \eqref{eq:driftRWM-rest-term-dom}
\begin{equation*}
\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1 + \mathsf{r}_{\step}(z) \leq (1/2)\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1 \;.
\end{equation*}
Combining it with $1-\mathrm{e}^{s} \leq \absolute{s}$ for all $s\in\mathbb{R}$, \eqref{eq:RWM-drift2-1} and \eqref{eq:def-rrwm}, we get
\begin{equation*}
A_{12}(z_{-1}) \leq C \int_{-b(z_{-1})}^{-c(z_{-1}) \vee -b(z_{-1})} \mathrm{e}^{(1/2)\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1} \gamma \norm{\operatorname{D}^2 U(x)} \norm[2]{z} \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \;.
\end{equation*}
Considering the upper bound $\norm[2]{z} \leq \rayrwm^2$ or the decomposition $\norm[2]{z} = z_1^2 + \norm[2]{z_{-1}}$, we obtain
\begin{equation*}
A_{12}(z_{-1}) \leq C \gamma \norm{\operatorname{D}^2 U(x)} \min\defEns{\rayrwm^2 \mathrm{e}^{a(\gamma,x)^2/8} \bar{\Phi}(a(\gamma,x)/2), (\norm[2]{z_{-1}}+1)}
\end{equation*}
where $a(\gamma,x)$ is defined in \eqref{eq:def-arwm}, and using for all $t>0$, $\mathrm{e}^{t^2/8} \bar{\Phi}(t/2) \leq \sqrt{2}/(\sqrt{\uppi}t)$, we get
\begin{equation}\label{eq:up-bound-I1z1}
A_{12}(z_{-1}) \leq C \min\parenthese{\sqrt{\gamma} \rayrwm^2 \frac{\norm{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}}, (\norm[2]{z_{-1}}+1) \frac{\norm{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}^2}a(\gamma,x)^2} \;.
\end{equation}
Similarly, we have the same upper bound \eqref{eq:up-bound-I1z1} for $A_{24}(z_{-1})$ and $A_{25}(z_{-1})$.
Using for all $s\in\mathbb{R}$, $1-\mathrm{e}^{s} \leq \min(1,\absolute{s})$, $\pi(x)/\pi(x+\sqrt{2\gamma}z) \leq 1$ for $z\in\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma}$, \eqref{eq:def-tan-thetag}, \eqref{eq:def-brwm-corwm}, \eqref{eq:RWM-drift2-1}, \eqref{eq:RWM-drift-taylor-U}, \eqref{eq:def-rrwm} and \eqref{eq:phirwm-prop-1}, we have for all $z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}$,
\begin{align}
\nonumber
A_{13}(z_{-1}) & \leq \int_{-b(z_{-1}) \vee -c(z_{-1})}^{(\varphi(z_{-1}) \vee -b(z_{-1}))\wedge 0} \min(1,\absolute{\mathsf{r}_{\step}(z)}) \frac{\mathrm{e}^{-z_1^2/2}}{(2\uppi)^{1/2}} \mathrm{d} z_1 \\
\nonumber
& \leq c(z_{-1}) \min(1,C \norm{\operatorname{D}^2 U(x)} \gamma \rayrwm^2) \\
\nonumber
& \leq C \sqrt{\gamma} \rayrwm^2 \frac{\norm{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}} \min(1,C \norm{\operatorname{D}^2 U(x)} \gamma \rayrwm^2) \\
\label{eq:up-bound-I2z1}
& \leq C \min\parenthese{\sqrt{\gamma} \rayrwm^2 \frac{\norm{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}}, \sqrt{\gamma}\rayrwm^4 \frac{\norm[2]{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}^3} a(\gamma,x)^2} \;.
\end{align}
where $a(\gamma,x)$ is defined in \eqref{eq:def-arwm}. Similarly, we have the same upper bound \eqref{eq:up-bound-I2z1} for $A_{22}(z_{-1})$ and $A_{23}(z_{-1})$.
Concerning $A_{14}(z_{-1})$, note first that by definition of $\varphi(z_{-1})$, see \eqref{eq:def-phirwm}, \eqref{eq:phirwm-prop-1}, \eqref{eq:phirwm-prop-2}, and \eqref{eq:RWM-drift-taylor-U}, \eqref{eq:def-rrwm} we have for all $z_1\in\ccint{(\varphi(z_{-1}) \vee -b(z_{-1}))\wedge 0, 0}$
\begin{equation}\label{eq:rrwm-geq-nablaU}
2\mathsf{r}_{\step}(z) \geq \absolute{\sqrt{2\gamma} \norm{\nabla \pU(x)} z_1} \;.
\end{equation}
Using $1-\mathrm{e}^{s} \leq \absolute{s}$ for all $s\in\mathbb{R}$, $\sqrt{\pi(x+\sqrt{2\gamma}z)/\pi(x)} \leq 1$ for all $z\in(\mathbf{A}^{\scriptscriptstyle{\operatorname{RWM}}}_{x,\gamma})^{\text{c}}$, \eqref{eq:RWM-drift2-1}, \eqref{eq:def-rrwm} and \eqref{eq:rrwm-geq-nablaU}, we obtain
\begin{align*}
&\defEns{1-\mathrm{e}^{\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1}} +\sqrt{\frac{\pi(x+\sqrt{2\gamma}z)}{\pi(x)}} \defEns{1-\mathrm{e}^{-\sqrt{\gamma/2}\norm{\nabla \pU(x)} z_1 -\mathsf{r}_{\step}(z)}} \\
&\phantom{-------} \leq \min\parenthese{1, \sqrt{\gamma/2} \norm{\nabla \pU(x)} \absolute{z_1}} \\
&\phantom{------------}+ \min\parenthese{1, \absolute{\sqrt{\gamma/2} \norm{\nabla \pU(x)} z_1 + \mathsf{r}_{\step}(z)}} \\
&\phantom{-------} \leq C \min\parenthese{1, \gamma \norm{\operatorname{D}^2 U(x)} \rayrwm^2} \;.
\end{align*}
By \eqref{eq:def-tan-thetag}, \eqref{eq:def-brwm-corwm} and using $\absolute{\varphi(z_{-1})} \leq c(z_{-1})$, we obtain
\begin{equation}\label{eq:up-bound-I3z1}
A_{14}(z_{-1}) \leq C \min\parenthese{\sqrt{\gamma} \rayrwm^2 \frac{\norm{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}}, \sqrt{\gamma} \rayrwm^4 \frac{\norm[2]{\operatorname{D}^2 U(x)}}{\norm{\nabla \pU(x)}^3} a(\gamma,x)^2}
\end{equation}
where $a(\gamma,x)$ is defined in \eqref{eq:def-arwm}.
\paragraph*{Step 5: conclusion}
Let $\epsilon=(1/4)\min(1, t_0^2)$ where $t_0$ is defined in \Cref{lemma:Grwm}.
Let $\gammatilde>0$ be defined by $C \sqrt{\gammatilde}\rayrwm[\gammatilde]^2 \chi \max\parenthese{1, \rayrwm[\gammatilde]^2 \chi^2} = \epsilon$ where $C$ is the maximum of the positive constants given in \eqref{eq:up-bound-I1z1}, \eqref{eq:up-bound-I2z1} and \eqref{eq:up-bound-I3z1}.
Define then $\bar{\gamma}_1 = \bar{\gamma} \wedge \gammatilde \wedge t_0^2 \wedge \min(1,\chi^{2}/2)/10$ where $\bar{\gamma}$ is given in \eqref{eq:def-gambar}.
By \Cref{assumption:U-dom-drift-RWM}, there exists $\overline{M}\geq \widetilde{M}$ such that for all $x\in\mathbb{R}^d$, $\norm{x}\geq \overline{M}$, $C d \norm{\operatorname{D}^2 U(x)} / \norm[2]{\nabla \pU(x)} \leq \epsilon$, where $C$ is given in \eqref{eq:up-bound-I1z1}.
By \eqref{eq:up-bound-I1z1}, \eqref{eq:up-bound-I2z1} and \eqref{eq:up-bound-I3z1}, we have for all $x\in\mathbb{R}^d$, $\norm{x}\geq \overline{M}$ and $\gamma\in\ocint{0,\bar{\gamma}_1}$
\begin{align}
\nonumber
&\int_{z_{-1}\in\boulefermeed{x}{\rayrwm}{d-1}} \{A_{12}(z_{-1}) + A_{13}(z_{-1}) + A_{14}(z_{-1}) + A_{22}(z_{-1}) \\
\nonumber
&\phantom{------}+ A_{23}(z_{-1}) + A_{24}(z_{-1}) + A_{25}(z_{-1})\} \frac{\mathrm{e}^{-\norm[2]{z_{-1}}/2}}{(2\uppi)^{(d-1)/2}} \mathrm{d} z_{-1} \\
\label{eq:drift-remainder-terms}
&\phantom{-----------------}\leq \min(\epsilon, \epsilon a(\gamma,x)^2)
\end{align}
where $a(\gamma,x)$ is defined in \eqref{eq:def-arwm}. We consider now two cases:
\begin{itemize}
\item
if $a(\gamma,x)>t_0$, by \eqref{eq:RWM-drift-3}, \eqref{eq:up-bound-Jz1}, \eqref{eq:drift-remainder-terms} and \Cref{lemma:Grwm}, for all $x\in\mathbb{R}^d$, $\norm{x} \geq \overline{M}$, $\gamma\in\ocint{0,\bar{\gamma}_1}$
\begin{multline*}
\int_{z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}} \defEns{A_1(z_{-1}) + A_2(z_{-1})} \frac{\mathrm{e}^{-\norm[2]{z_{-1}}/2}}{(2\uppi)^{(d-1)/2}} \mathrm{d} z_{-1} \\
\leq 1 -(t_0^2/2) + \epsilon \leq 1 - (t_0^2/4) \leq 1 - (1/4) \gamma \;.
\end{multline*}
\item
if $a(\gamma,x)\in\ocint{0,t_0}$, by \eqref{eq:RWM-drift-3}, \eqref{eq:up-bound-Jz1}, \eqref{eq:drift-remainder-terms}, \Cref{lemma:Grwm} and \Cref{assumption:U-dom-drift-RWM}, for all $x\in\mathbb{R}^d$, $\norm{x} \geq \overline{M}$, $\gamma\in\ocint{0,\bar{\gamma}_1}$,
\begin{multline*}
\int_{z_{-1}\in\boulefermeed{0}{\rayrwm}{d-1}} \defEns{A_1(z_{-1}) + A_2(z_{-1})} \frac{\mathrm{e}^{-\norm[2]{z_{-1}}/2}}{(2\uppi)^{(d-1)/2}} \mathrm{d} z_{-1} \\
\leq 1 -(1/2-\epsilon)a(\gamma,x)^2 \leq 1 - \frac{\gamma\norm[2]{\nabla \pU(x)}}{8} \leq 1 - \frac{\chi^{-2} \gamma}{8} \;.
\end{multline*}
\end{itemize}
Combining it with \eqref{eq:upper-bound-RrwmV-1-2}, we obtain for all $x\in\mathbb{R}^d$, $\norm{x}\geq \overline{M}$, $\gamma\in\ocint{0,\bar{\gamma}_1}$,
\[ \Rrwm V(x) / V(x) \leq 1 - \min(1,\chi^{-2}/2)\gamma/8 \;. \]
Besides, denote by
\begin{equation*}
A = \sup_{y,\norm{y}\leq \overline{M}} \defEns{\frac{\mathscr{L} V(y)}{V(y)} + \bar{\gamma}_1^{1/2} \frac{\genrrwm V(y)}{V(y)}} \;.
\end{equation*}
By \Cref{prop:RWM-dev-ergo}, we have for all $x\in\mathbb{R}^d$, $\norm{x} \leq \overline{M}$, $\gamma\in\ocint{0,\bar{\gamma}_1}$, $\Rrwm V(x)/V(x) \leq 1 + \gamma A$. We get then for all $x\in\mathbb{R}^d$, $\gamma\in\ocint{0,\bar{\gamma}_1}$,
\begin{multline*}
\Rrwm V(x) \leq \parenthese{1 - \frac{\min(1,\chi^{-2}/2)\gamma}{8}} V(x) \\
+ \gamma\parenthese{A+\frac{\min(1,\chi^{-2}/2)}{8}} V(x) \mathbbm{1}\defEns{\norm{x} \leq \overline{M}}
\end{multline*}
which concludes the proof.
\subsection*{A version of the implicit function theorem}
The following proposition is taken from \cite[Theorem 7.21]{apostol1969calculus} and \cite[Theorem 6]{Border2013NotesOT}.
\begin{proposition}\label{prop:implicit-function-thm}
Let $\mathsf{K}$ be a compact metric space and $f:\mathbb{R} \times \mathsf{K} \to \mathbb{R}$ be a continuous function. Assume that there exist $M\geq m >0$ such that for all $z\in\mathsf{K}$, $x,y\in\mathbb{R}$, $x\neq y$,
\begin{equation}\label{eq:assum-implicit-func-thm}
m \leq \frac{f(x,z) - f(y,z)}{x - y} \leq M \;.
\end{equation}
Then, there exists a unique continuous function $\xi:\mathsf{K}\to\mathbb{R}$ satisfying for all $z\in\mathsf{K}$, $f(\xi(z), z) = 0$.
\end{proposition}
\begin{proof}
Denote by $\Csetfunction(\mathsf{K})$ the set of real continuous functions on $\mathsf{K}$. By standard arguments, $\Csetfunction(\mathsf{K})$ is complete under the uniform norm defined for all $g_1,g_2\in\Csetfunction(\mathsf{K})$ by $\Vnorm[\infty]{g_1-g_2} = \sup_{z\in\mathsf{K}} \norm{g_1(z) - g_2(z)}$. Define $\psi:\Csetfunction(\mathsf{K})\to\Csetfunction(\mathsf{K})$ for all $g\in\Csetfunction(\mathsf{K})$ and $z\in\mathsf{K}$ by
\begin{equation*}
\psi(g)(z) = g(z) - (1/M)f(g(z),z) \;.
\end{equation*}
By \eqref{eq:assum-implicit-func-thm}, we have for all $g,h\in\Csetfunction(\mathsf{K})$ and $z\in\mathsf{K}$,
\begin{equation*}
\absolute{\psi(g)(z) - \psi(h)(z)} \leq \defEns{1-(m/M)}\absolute{g(z) - h(z)}
\end{equation*}
and $\Vnorm[\infty]{\psi(g) - \psi(h)} \leq \{1-(m/M)\}\Vnorm[\infty]{g - h}$. $\psi$ is a contraction on $\Csetfunction(\mathsf{K})$ and has a unique fixed point $\xi$ in $\Csetfunction(\mathsf{K})$ which satisfies $f(\xi(z),z) = 0$ for all $z\in\mathsf{K}$.
\end{proof}
\section{Additional results for the numerical experiments}
\label{sec:additional-results-numeric}
\subsection{One dimensional example: from theory to practice}
\label{subsec:1-2d-numerics-practice}
We consider the setup of \Cref{subsec:numerical-comparison-toy-examples}. In order to be able to numerically integrate, we truncate the integrals to a finite interval $\ccint{-a, a}$ for $a>0$, \textit{i.e.}~we approximate $\pi(f)$, $\hat{f}'$, $\pi(\psi_i' \psi_j')$, $\pi(\tilde{f} \psi_i)$ for $1\leq i,j \leq p$ by
\begin{align*}
&\pi(f) \approx \int_{-a}^{a} f(t) \pi(t) \mathrm{d} t \;, \\
&\hat{f}'(x) \approx -(1/\pi(x))\int_{-a}^{x} \pi(t) \defEns{ f(t) - \int_{-a}^{a} f(u) \pi(u) \mathrm{d} u} \mathrm{d} t \;, \\
&\pi(\psi_i' \psi_j') \approx \int_{-a}^{a} \pi(t) \psi_i'(t) \psi_j'(t) \mathrm{d} t \;, \\
&\pi(\tilde{f} \psi_i) \approx \int_{-a}^{a} \defEns{ f(t) - \int_{-a}^{a} f(u) \pi(u) \mathrm{d} u} \psi_i(t) \mathrm{d} t \;.
\end{align*}
We consider several values for $a \in\defEns{3, 4, 5, 6}$ and we expect that when $\int_{-a}^{a} \pi(t) \mathrm{d} t$ is close to $1$, the truncation is a good approximation of the true quantity.
We are particularly interested in the value of the asymptotic variance of the Langevin diffusion $\varinf(f) = 2\pi(\hat{f}\tilde{f})$ and the optimal parameters $\thetastar$, $\theta^{*}_{\operatorname{zv}}$ defined in \eqref{eq:min-asymp-var-diffusion} and \eqref{eq:paramzv}. Approximations of these quantities are reported in \Cref{table:1d-truncation-var-param-zv-cv} for different truncation boundaries $a\in\defEns{3,4,5,6}$; concerning $\thetastar$ and $\theta^{*}_{\operatorname{zv}}$ which are $p$-dimensional vectors, we only list their first coordinate, $[\thetastar]_1$ and $[\theta^{*}_{\operatorname{zv}}]_1$. We observe that truncating the integrals to $a = 5$ is sufficient to obtain valid and stable results. It is coherent with the fact that most of the mass of $\pi$ is contained in this interval, see \Cref{figure:pi_1d}.
\begin{table}
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
$a$ & $3$ & $4$ & $5$ & $6$ \\
\hline
approx. of $\varinf(f)$ & $89.28$ & $92.41$ & $92.45$ & $92.45$ \\
\hline
approx. of $[\thetastar]_1$ & $-30.19$ & $-34.37$ & $-34.42$ & $-34.42$ \\
\hline
approx. of $[\theta^{*}_{\operatorname{zv}}]_1$ & $-27.70$ & $-28.57$ & $-28.56$ & $-28.56$ \\
\hline
\end{tabular}
\caption{Approximations of $\varinf(f)$, $[\thetastar]_1$ and $[\theta^{*}_{\operatorname{zv}}]_1$, function of the truncation boundary $a$.}\label{table:1d-truncation-var-param-zv-cv}
\end{table}
\begin{figure}
\begin{center}
\includegraphics[scale=0.4]{pi_1d.pdf}
\end{center}
\caption{\label{figure:pi_1d} Plot of $\pi$.}
\end{figure}
It is worth to point out that, although the quantities of interest to construct a control variate, \textit{i.e.}~$\varinf(f), \thetastar, \theta^{*}_{\operatorname{zv}}$, can be accurately estimated by truncating the integrals, others, like $\hat{f}'$, highly depend on the truncation boundary $a$. We plot in \Cref{figure:dpois_1d} several approximations of $\hat{f}'$, by truncating the integrals to $a\in\defEns{3,4,5,6}$. Note that by an integration by parts, $\lim_{x\to\pm\infty} \hat{f}'(x) / x^2 = C$, with $C>0$. These plots highlight that truncating the integrals has a significant impact on the approximation of $\hat{f}'$.
\begin{figure}
\begin{center}
\includegraphics[scale=0.4]{dpois_1d.pdf}
\end{center}
\caption{\label{figure:dpois_1d} Plots of $\hat{f}'$ for $a\in\defEns{3,4,5,6}$.}
\end{figure}
In \Cref{figure:approx_dpois_Lpois_1d_1,figure:approx_dpois_Lpois_1d_2}, we plot $\ControlFunc_\theta '$ and $\mathscr{L} \ControlFunc_\theta$ for $\theta\in\defEns{\thetastar, \theta^{*}_{\operatorname{zv}}}$ where $\ControlFunc_\theta = \ps{\theta}{\psi}$, $\psi = \defEns{\psi_i}_1^{p}$ are defined in \eqref{eq:def-basis-gaussian-kernels} and $p\in\defEns{4,\ldots,10}$. It illustrates that $\hat{f}'$ and $\tilde{f}$ are better approximated for even $p$; for $p\geq 8$, $\ControlFunc_{\thetastar} '$, $\ControlFunc_{\theta^{*}_{\operatorname{zv}}} '$ and $\mathscr{L} \ControlFunc_{\thetastar}$, $\mathscr{L} \ControlFunc_{\theta^{*}_{\operatorname{zv}}}$ are very close and the two methods obtain similar variance reductions.
\begin{figure}
\begin{center}
\includegraphics[scale=0.4]{approx_dpois_Lpois_1d_1.pdf}
\end{center}
\caption{\label{figure:approx_dpois_Lpois_1d_1} Plots of $\ControlFunc_\theta '$ and $\mathscr{L} \ControlFunc_\theta$ for $\theta\in\defEns{\thetastar, \theta^{*}_{\operatorname{zv}}}$ and $p\in\defEns{4,5,6}$.
}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[scale=0.3]{approx_dpois_Lpois_1d_2.pdf}
\end{center}
\caption{\label{figure:approx_dpois_Lpois_1d_2} Plots of $\ControlFunc_\theta '$ and $\mathscr{L} \ControlFunc_\theta$ for $\theta\in\defEns{\thetastar, \theta^{*}_{\operatorname{zv}}}$ and $p\in\defEns{7,\ldots,10}$.
}
\end{figure}
\subsection{Proof of \Cref{lemma:log-probit-assumptions}}
\label{subsec:proof-log-probit-assumptions}
We have for all $x\in\mathbb{R}^d$
\begin{align*}
\nabla \Ub{l}(x) & = - \mathsf{Z}^{\operatorname{T}} \mathsf{Y} + \sum_{i=1}^{N} \mathsf{Z}_i / (1+\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}) + x / \varsigma^2 \;, \\
\operatorname{D}^2 \Ub{l}(x) &= \sum_{i=1}^{N} \frac{\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}}{\parenthese{1+\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}}^2} \mathsf{Z}_i \mathsf{Z}_i^{\operatorname{T}} + \operatorname{Id} / \varsigma^2 \;, \\
\operatorname{D}^3 \Ub{l}(x) &= \sum_{i=1}^{N} \frac{\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}}{\parenthese{1+\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}}^2} \defEns{2\frac{\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}}{1+\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}}-1} \mathsf{Z}_i^{\otimes 3} \;.
\end{align*}
Using for all $i\in\defEns{1,\ldots,N}$ and $x\in\mathbb{R}^d$ that $0<\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x}/(1+\mathrm{e}^{-\mathsf{Z}_i^{\operatorname{T}} x})^2 \leq 1/4$, $\Ub{l}$ is strongly convex, gradient Lipschitz and satisfies \Cref{assumption:U-Sinfty}, \eqref{eq:cond-vgeom-ula}, \Cref{ass:condition_MALA} and \Cref{assumption:U-dom-drift-RWM}.
For $\Ub{p}$, define $h:\mathbb{R}\to\mathbb{R}_{-}$ for all $t\in\mathbb{R}$ by $h(t) = \ln(\Phi(t))$. We have for all $t\in\mathbb{R}$,
\begin{align*}
& h'(t) = \frac{\Phi'(t)}{\Phi(t)} \quad,\quad h''(t) = -\frac{\Phi'(t)}{\Phi(t)}\defEns{t + \frac{\Phi'(t)}{\Phi(t)}} \;,\\
& h^{(3)}(t) = \frac{\Phi'(t)}{\Phi(t)}\defEns{2\parenthese{\frac{\Phi'(t)}{\Phi(t)}}^2 + 3t\frac{\Phi'(t)}{\Phi(t)} + t^2 -1}
\end{align*}
and for all $x\in\mathbb{R}^d$
\begin{align*}
\nabla \Ub{p}(x) & = \sum_{i=1}^{N} \defEns{(1-\mathsf{Y}_i) h'(-\mathsf{Z}_i^{\operatorname{T}}x)-\mathsf{Y}_i h'(\mathsf{Z}_i^{\operatorname{T}}x)} \mathsf{Z}_i + x/\varsigma^2 \;, \\
\operatorname{D}^2 \Ub{p}(x) &= \sum_{i=1}^{N} \defEns{-(1-\mathsf{Y}_i) h''(-\mathsf{Z}_i^{\operatorname{T}}x)-\mathsf{Y}_i h''(\mathsf{Z}_i^{\operatorname{T}}x)} \mathsf{Z}_i \mathsf{Z}_i^{\operatorname{T}} + \operatorname{Id}/\varsigma^2 \;, \\
\operatorname{D}^3 \Ub{p}(x) &= \sum_{i=1}^{N} \defEns{(1-\mathsf{Y}_i) h^{(3)}(-\mathsf{Z}_i^{\operatorname{T}}x)-\mathsf{Y}_i h^{(3)}(\mathsf{Z}_i^{\operatorname{T}}x)} \mathsf{Z}_i^{\otimes 3} \;.
\end{align*}
By an integration by parts, we have for all $t<0$
\begin{equation*}
t + \frac{\Phi'(t)}{\Phi(t)} = -\frac{t}{\Phi(t)} \int_{-\infty}^{t}\frac{\mathrm{e}^{-s^2/2}}{\sqrt{2\uppi}s^2} \mathrm{d} s
\end{equation*}
and $t+\Phi'(t)/\Phi(t) \geq 0$ for all $t\in\mathbb{R}$. Let $t<0$ and $s=-t>0$. We have $\Phi(t) = \bar{\Phi}(s) = \operatorname{erfc}(s/\sqrt{2})/2$ where $\operatorname{erfc}:\mathbb{R}\to\mathbb{R}_+$ is the complementary error function defined for all $u\in\mathbb{R}$ by $\operatorname{erfc}(u) = (2/\sqrt{\uppi})\int_{u}^{+\infty} \mathrm{e}^{-v^2} \mathrm{d} v$. By \cite[Section 8.25, formula 8.254]{gradshteyn2014table}, we have the following asymptotic expansion for $s\to+\infty$
\begin{equation*}
\bar{\Phi}(s) = \frac{\mathrm{e}^{-s^2/2}}{\sqrt{2\uppi}s}\parenthese{1-s^{-2} + 3s^{-4} + O(s^{-6})} \;.
\end{equation*}
Using that $\Phi'(t) = (2\uppi)^{-1/2} \mathrm{e}^{-t^2/2}$ for all $t\in\mathbb{R}$, we get asymptotically for $t\to-\infty$ and $s=-t\to+\infty$,
\begin{equation}\label{eq:asymptotic-expansion-Phi}
\Phi'(t) / \Phi(t) = s \parenthese{1+s^{-2} - 2s^{-4} + O(s^{-6})}
\end{equation}
and $\lim_{t\to -\infty} h''(t) = -1$. There exists then $C>0$ such that for all $t\in\mathbb{R}$, $-C \leq h''(t) \leq 0$. $\Ub{p}$ is then strongly convex, gradient Lipschitz and satisfies \Cref{assumption:U-Sinfty} and \eqref{eq:cond-vgeom-ula}.
By \eqref{eq:asymptotic-expansion-Phi}, we have for $t\to-\infty$ and $s=-t\to+\infty$, $h^{(3)}(t) = O(s^{-1})$. $\Ub{p}$ satisfies then \Cref{ass:condition_MALA} and \Cref{assumption:U-dom-drift-RWM}.
\subsection{Additional results for the Bayesian logistic and probit regressions}
\label{sec:suppl-probit-reg}
We first define the basis of functions $\psia$ and $\psib$ based on first and second order polynomials respectively. Let $\psia = (\psia_1,\ldots, \psia_d)$ be given for $i\in\defEns{1,\ldots,d}$ and $x=(x_1,\ldots,x_d)\in\mathbb{R}^d$ by $\psia_i(x) = x_i$ and $\psib = (\psib_1,\ldots,\psib_{d(d+3)/2})$ be given for $x=(x_1,\ldots,x_d)\in\mathbb{R}^d$ by
\begin{align*}
\psib_k(x) & = x_k \quad \text{for} \; k\in\defEns{1,\ldots,d} \;, \quad
\psib_{k+d}(x) = x_k^2 \quad \text{for} \; k\in\defEns{1,\ldots,d} \;,\\
\psib_k(x) & = x_i x_j \quad \text{for} \; k=2d+(j-1)(d-j/2)+(i-j) \; \text{and all} \; 1 \leq j < i \leq d \;.
\end{align*}
$\psia$ and $\psib$ are in $\setpoly{\infty}(\mathbb{R}^d,\mathbb{R})$ and are linearly independent in $\mathrm{C}(\mathbb{R}^d,\mathbb{R})$.
We provide additional plots for the logistic regression, see \Cref{figure:log-1-add} and \Cref{figure:log-2}, and the results for the Bayesian probit regression presented in \Cref{sec:application_cv}, see Table~\ref{table:probit}, \Cref{figure:pro-1} and \Cref{figure:pro-2}. They are similar to the results obtained for the Bayesian logistic regression.
\begin{figure}
\hspace*{-1.5cm}\includegraphics[scale=0.5]{log-sb-1-1.pdf}
\caption{\label{figure:log-1-add} Boxplots of $x_1,x_2,x_3,x_4$ using the ULA, MALA and RWM algorithms for the logistic regression. The compared estimators are the ordinary empirical average (O), our estimator with a control variate \eqref{eq:def-invpi-cv} using first (CV-1) or second (CV-2) order polynomials for $\psi$, and the zero-variance estimator of \cite{papamarkou2014} using a first (ZV-1) or second (ZV-2) order polynomial basis. }
\end{figure}
\begin{figure}
\hspace*{-1.5cm}\includegraphics[scale=0.5]{log-sb-1-2.pdf}
\caption{\label{figure:log-2} Boxplots of $x_1^2,x_2^2,x_3^2,x_4^2$ using the ULA, MALA and RWM algorithms for the logistic regression. The compared estimators are the ordinary empirical average (O), our estimator with a control variate \eqref{eq:def-invpi-cv} using first (CV-1) or second (CV-2) order polynomials for $\psi$, and the zero-variance estimator of \cite{papamarkou2014} using a first (ZV-1) or second (ZV-2) order polynomial basis.}
\end{figure}
\begin{figure}
\hspace*{-1.5cm}\includegraphics[scale=0.5]{pro-sb-1-1.pdf}
\caption{\label{figure:pro-1} Boxplots of $x_1,x_2,x_3$ using the ULA, MALA and RWM algorithms for the probit regression. The compared estimators are the ordinary empirical average (O), our estimator with a control variate \eqref{eq:def-invpi-cv} using first (CV-1) or second (CV-2) order polynomials for $\psi$, and the zero-variance estimator of \cite{papamarkou2014} using a first (ZV-1) or second (ZV-2) order polynomial basis. }
\end{figure}
\begin{figure}
\hspace*{-1.5cm}\includegraphics[scale=0.5]{pro-sb-1-2.pdf}
\caption{\label{figure:pro-2} Boxplots of $x_1^2,x_2^2,x_3^2$ using the ULA, MALA and RWM algorithms for the probit regression. The compared estimators are the ordinary empirical average (O), our estimator with a control variate \eqref{eq:def-invpi-cv} using first (CV-1) or second (CV-2) order polynomials for $\psi$, and the zero-variance estimator of \cite{papamarkou2014} using a first (ZV-1) or second (ZV-2) order polynomial basis.}
\end{figure}
\begin{table}
{\small
\begin{tabular}{c|c|c|c c|c c|c c|c c|}
\multicolumn{11}{c}{} \\
& & MCMC & \multicolumn{2}{c|}{CV-1-MCMC} & \multicolumn{2}{c|}{CV-2-MCMC}
& \multicolumn{2}{c|}{ZV-1-MCMC} & \multicolumn{2}{c|}{ZV-2-MCMC} \\
& & Variance & VRF & Variance & VRF & Variance & VRF & Variance & VRF & Variance \\
\hline
$x_1$ & ULA & 2.1 & 24 & 0.089 & 2.9e+03 & 0.00073 & 20 & 0.11 & 2.7e+03 & 0.00078 \\
&MALA & 0.41 & 22 & 0.019 & 2.7e+03 & 0.00015 & 18 & 0.023 & 2.6e+03 & 0.00016 \\
&RWM & 1.2 & 23 & 0.05 & 2.2e+03 & 0.00054 & 21 & 0.056 & 2.2e+03 & 0.00053 \\
\hline
$x_2$ & ULA & 27 & 24 & 1.1 & 2.8e+03 & 0.0099 & 18 & 1.5 & 2.4e+03 & 0.011 \\
&MALA & 6.4 & 24 & 0.27 & 2.9e+03 & 0.0022 & 19 & 0.34 & 2.6e+03 & 0.0025 \\
&RWM & 13 & 18 & 0.72 & 1.8e+03 & 0.0073 & 16 & 0.81 & 1.8e+03 & 0.0075 \\
\hline
$x_3$ & ULA & 11 & 24 & 0.47 & 6.7e+03 & 0.0017 & 18 & 0.62 & 6.3e+03 & 0.0018 \\
&MALA & 2.6 & 23 & 0.11 & 7e+03 & 0.00037 & 18 & 0.14 & 6.8e+03 & 0.00038 \\
&RWM & 5.5 & 18 & 0.3 & 4.3e+03 & 0.0013 & 16 & 0.34 & 4.3e+03 & 0.0013 \\
\hline
$x_1^2$ & ULA & 0.75 & 3.5 & 0.22 & 1.6e+02 & 0.0048 & 2.8 & 0.26 & 1.3e+02 & 0.0057 \\
&MALA & 0.15 & 3.5 & 0.043 & 1.5e+02 & 0.001 & 2.8 & 0.053 & 1.3e+02 & 0.0011 \\
&RWM & 0.43 & 2.6 & 0.16 & 1.2e+02 & 0.0035 & 2.4 & 0.18 & 1.2e+02 & 0.0037 \\
\hline
$x_2^2$ &ULA & 4.7e+02 & 9.3 & 51 & 1.4e+03 & 0.33 & 7.5 & 63 & 1.2e+03 & 0.4 \\
&MALA & 1.1e+02 & 9.1 & 12 & 1.5e+03 & 0.073 & 7.6 & 14 & 1.3e+03 & 0.085 \\
&RWM & 2.2e+02 & 7.7 & 29 & 1e+03 & 0.22 & 6.9 & 33 & 9.8e+02 & 0.23 \\
\hline
$x_3^2$ & ULA & 1.1e+02 & 9.8 & 11 & 9.7e+02 & 0.11 & 7.9 & 14 & 7.9e+02 & 0.14 \\
&MALA & 24 & 9.7 & 2.5 & 9.8e+02 & 0.025 & 8.1 & 3 & 8.5e+02 & 0.029 \\
&RWM & 52 & 7.9 & 6.7 & 6.1e+02 & 0.086 & 7.1 & 7.4 & 5.9e+02 & 0.088 \\
\hline
\end{tabular}
}
\caption{\label{table:probit}Estimates of the asymptotic variances for ULA, MALA and RWM and each parameter $x_i$, $x_i^2$ for $i\in\{1,\ldots,d\}$, and of the variance reduction factor (VRF) on the example of the probit regression.}
\end{table}
\end{document}
|
\begin{document}
\def\ranglengle \langlengle{\ranglengle \langlengle}
\def\leavevmode\hbox{\small1\kern-3.8pt\normalsize1}{\leavevmode\hbox{\small1\kern-3.8pt\normalsize1}}
\def{\rm I\kern-.2emR}{{\rm I\kern-.2emR}}
\def{\rm tr}{{\rm tr}}
\def\cE{{\cal E}}
\def\cC{{\cal C}}
\def\cI{{\cal I}}
\def\cD{{\cal D}}
\def\cB{{\cal B}}
\def\cN{{\cal N}}
\def\cT{{\cal T}}
\def\cF{{\cal F}}
\def\cA{{\cal A}}
\def\cG{{\cal G}}
\def\cV{{\cal V}}
\def\cC{{\cal C}}
\def\rho^{\otimes n}{\rho^{\otimes n}}
\def^{\otimes n}{^{\otimes n}}
\def^{(n)}{^{(n)}}
\def^{(n)}p{^{(n)'}}
\def\tilde{\cal D}{\tilde{{\cal A}l D}}
\def\tilde{\cal N}{\tilde{{\cal A}l N}}
\def\tilde{\cal T}{\tilde{{\cal A}l T}}
\def\frac{I}{d}{\frac{I}{d}}
\def\frac{P}{\sqrt{d \tr P^2}}{\frac{P}{\sqrt{d {\rm tr} P^2}}}
\def\psi^{RQ}{\psi^{RQ}}
\def\rho^{RQ}{\rho^{RQ}}
\def\rho^{RQ}p{\rho^{RQ'}}
\def\ra{\rangle}
\def\la{\langle}
\def\{0,1\}{\{0,1\}}
\def\mbox{\rule[0pt]{1.5ex}{1.5ex}}{\mbox{\rule[0pt]{1.5ex}{1.5ex}}}
\def\noindent\hspace{2em}{\it Proof: }{\noindent\hspace{2em}{\it Proof: }}
\def\hspace*{\fill}~\QED\par\endtrivlist\unskip{\hspace*{\fill}~\mbox{\rule[0pt]{1.5ex}{1.5ex}}\par\endtrivlist\unskip}
\newcommand{\half}{\mbox{$\textstyle \frac{1}{2}$} }
\newcommand{\ket}[1]{| #1 \rangle}
\newcommand{\bra}[1]{\langle #1 |}
\newcommand{\proj}[1]{\ket{#1}\! \bra{#1}}
\newcommand{\outerp}[2]{\ket{#1}\! \bra{#2}}
\newcommand{\inner}[2]{ \langle #1 | #2 \rangle}
\newcommand{\melement}[2]{ \langle #1 | #2 | #1 \rangle}
\newcommand{\expect}[1]{\langle #1 \rangle}
\date{\today}
\title{A quantum circuit for OR}
\author{Howard Barnum$^{(1,3)}$, Herbert J. Bernstein$^{(1)}$,
Lee Spector$^{(2)}$}
\address{
$^{(1)}$School of Natural Science and
ISIS,
Hampshire College,
Amherst, MA 01002, USA \\
$^{(2)}$School of Cognitive Science, Hampshire College,
Amherst, MA 01002, USA \\
$^{(3)}$I.S.I. Foundation, Villa Gualino, Viale Settimio Severo 65,
Torino 10133, ITALY \\
email: {\tt\{hbarnum, hbernstein, lspector\}@hampshire.edu}
}
\maketitle
\begin{abstract}
We give the first quantum circuit
for computing $f(0)$ OR $f(1)$ more reliably
than is classically possible
with a single evaluation of the function.
OR therefore joins
XOR (i.e. parity, $f(0) \oplus f(1)$) to give the full
set of logical connectives (up to relabeling of inputs and outputs)
for which there is quantum speedup. The XOR algorithm is of fundamental
importance in quantum computation; our OR algorithm
(found with the aid of genetic programming), may
represent a new quantum computational effect, also useful as
a ``subroutine''.
\end{abstract}
\pacs{PACS: 03.67Lx}
\narrowtext
All digital computing can be built out of Boolean functions.
Suppose
a physical process
takes
the two orthogonal inputs $\ket{0},\ket{1}$ to outputs
$\ket{f(0)},\ket{f(1)}$, so that
$f$ is just a (classical) Boolean function from $\{0,1\}$ to $\{0,1\}$.
This
could be a quantum-coherent
computer subroutine, or the evolution of some other physical system we
are interested in \cite{Preskill99a}.
If the process is a unitary quantum evolution, and we can
prepare a desired
state as input, then quantum computation lets one
find out more about the function $f$ than if
we are restricted to evolution of classical states.
For instance, we can find out its parity $f(0)$ XOR $f(1)$.
The XOR circuit in \cite{Deutsch85a} was the first concrete
demonstration of quantum computation's greater-than-classical power;
the exact version in \cite{Cleve98a} is of
fundamental importance in its own right and
for its applications in more complex algorithms.
In this Letter, we complete the demonstration of quantum computation's
greater-than-classical power in this simple setting, by describing
circuits which
compute $f(0)$ OR $f(1)$ with one call to the subroutine $f$. While
these circuits, unlike the XOR circuit, may err, we show that their
performance is better than any possible classical circuit.
This and XOR are the only quantum speedups in this simple
domain. (NOR, AND, NAND, and NOT-XOR may also be sped up
but they, and the algorithms that speed them up, are isomorphic to
OR or XOR by simply
relabeling of inputs and/or outputs.)
The OR algorithm represents a new quantum computational effect,
which may have applications to more complex problems.
Indeed, our circuits
were derived as subroutines of a better-than-classical routine
(evolved
using genetic programming) to compute a more complex property, ``AND/OR,'' of
Boolean black-box functions of two variables ($f: \{0,1\}^2 \rightarrow \{0,1\}$).
OR and AND/OR form part of an infinite family of properties
(uniform binary AND/OR trees) whose quantum
complexity is still imperfectly understood, but which have great theoretical
and practical importance in computing, since they have
applications in game tree evaluation, logic
programming, theorem-proving, and many other areas, and their classical
query complexity is completely understood.
The quantum complexity of Boolean properties of black-box functions
has been studied in \cite{Beals98a,Buhrman98a,Buhrman99b}.
Here, we examine quantum gate
arrays for
certain properties of black box functions of one and two qubits.
Given an unknown function $f$ which may be called on
particular inputs or coherently on superpositions of them,
we wish to evaluate
a Boolean property $P$ of $f$.
We are interested in $p^e_{max}$,
the maximum over functions of the probability that
an algorithm evaluates $P(f)$ incorrectly, and $q_{max}$, the
maximum over functions of the expected number of function queries.
{\em Exact} algorithms have $p^e_{max}=0$;
{\em Las Vegas} algorithms are correct whenever they answer
$0$ or $1$, but may also answer ``don't know'' with $p \le 1/2$.
{\em Monte Carlo} algorithms
may err, but $p^e_{max} < 1/2$. The error is {\em one-sided}
if there is a value $x$ ($0$ or $1$) such that $p_e=0$ for $f$
such that $P(f)=x$; otherwise it is {\em two-sided}.
By $n$ repetitions (and majority
voting, for Monte Carlo), the latter two types may be made to
have exponentially small (in $n$)
probability of not giving the correct
answer.
Below, we use a definition in which
Las Vegas algorithms may have stochastic runtime, but
give correct answer with $p=1$;
DFP (described below) is an example.
By running it repeatedly until an answer is obtained,
the first type of Las Vegas algorithm may be converted
into one of the second
type
with expected running time greater by a constant factor.
Genetic programming (GP) \cite{Koza92a} evolves
a population of programs (in our case,
sequences of quantum gates) which
are randomly mutated, recombined with each other, and
preferentially selected for desired properties
by running (or simulating) the
programs on a sample of inputs.
The repertoire of gates used by our GP engines
(square brackets indicate qubit references,
parentheses real parameters) was:
${\tt HADAMARD [q]}$,
{\tt U-THETA[q](}$\theta${\tt)}$ := \cos\theta I + i \sin\theta \sigma_y$,
${\tt U2[q](\alpha, \theta, \phi, \psi}) :=
e^{i \alpha}
e^{-i\phi \sigma_z}
(\cos{\theta}I - i \sin{\theta} \sigma_y)
e^{-i\psi\sigma_z};~~{\tt CNOT[control, target]}$;
${\tt CPHASE[control,target](\alpha)}$
,
which multiplies each standard basis state by $e^{i\alpha}$
if it has $1$ in both control and target positions;
${\tt ORACLE [q_1,...q_n,q_{out}]}$,
which adds (mod 2) $f(q_1,...,q_n)$ to $q_{out}$, retaining $q_1,...q_n$
unchanged; and
{\tt MEASURE-0 [q]} and {\tt MEASURE-1 [q]}, which
measure qubit $q$ in the standard basis. The
{\tt MEASURE-x} gates terminate the computation, returning the value
$x$, if the measurement result $x$ is obtained; if the result $\overline{x}$
is obtained,
the state is projected onto the subspace with
$|\overline{x}\rangle$
for that qubit, and computation proceeds.
Allowing termination
conditional on intermediate results, as {\tt MEASURE} gates
do, makes the number of queries stochastic.
For Monte Carlo algorithms, this yields at best a constant speedup
over algorithms with a definite number of queries. Nevertheless, it
may yield more perspicuous algorithms, and
the constant speedup may be needed for better-than-classical
performance, especially in the small-$n$ regime.
For functions of binary
strings of length $d$, the AND/OR
problem is to evaluate a binary tree, having
AND at the root and $d$
layers of alternating
OR and AND as nodes, with a $(d + 1)$st layer of $n \equiv 2^d$
leaves consisting of
the values of the black-box function ordered by their input string
(viewed as a binary integer).
This and related problems have many applications,
for example in game-tree evaluation, dynamic programming,
waveform analysis, and theorem-proving.
Saks and Wigderson showed that ``depth-first pruning'' (DFP) is
the best classical Las Vegas algorithm for AND/OR
\cite{Saks86a}. DFP uses a routine {\tt eval(node)} which
returns the value of the node if it is a leaf,
and otherwise randomly chooses a
daughter of the node and calls itself on the subtree rooted
at that daughter. If this call returns a value for the subtree that
determines the value of the node (1 if it is an OR node, 0 if it
is an AND node), {\tt eval} returns the appropriate value;
otherwise it calls itself on the other highest-level
subtree of the node, and
returns the value of that subtree. DFP itself just calls {\tt eval(root)}.
Santha~\cite{Santha91a} showed, for
read-once Boolean functions (for which there
is a Boolean formula containing each variable at most once),
that
no classical Monte Carlo algorithm
with all error probabilities below $p$
can have expected queries $q < (1-xp)Q$, where $Q$ is the time taken by
the optimal Las Vegas algorithm, and $x=1,2$ as the error is one- or
two-sided. (It is not known whether a quantum analogue of this holds.)
This is just the trivial
speedup obtained by flipping a biased coin
to decide whether to do the optimal Las Vegas algorithm or output a
random bit (two-sided) or a zero (one-sided).
Thus a $q$-query quantum algorithm would
have to have
$p^e_{max} < \frac{1}{x}(1 - \frac{q}{Q})$ to be better-than-classical.
DFP has worst-case expected queries 3 for
depth-two AND/OR, so a one-query quantum algorithm would need
$p<1/3$ two-sided, $p<2/3$ one-sided to do better than classical.
There is no one-query, zero-error quantum strategy
for calculating OR for a black box Boolean function of one bit
\cite{Jozsa91a,Beals98a}.
DFP has expected queries $3/2$,
so a one-query quantum algorithm with
$p<1/6$ two-sided or $p<1/3$ one-sided would be better than classical.
Our OR circuit is shown in Fig. 2.
We use the
convention that
the leftmost qubit in a ket or string of kets
is qubit $0$, the next qubit $1$, and
so on.
Before the {\tt MEASURE-1} gate on qubit $0$, the state is:
\begin{equation}\label{eq: state before measurement}
(1/2) ~[~ \ket{0} (\ket{f_0} + \ket{f_1}) + \ket{1}(\ket{f_0} - \ket{f_1}) ~]\;.
\end{equation}
Thus the {\tt MEASURE-1} has $p=0$ for outcome $1$ if
$f_0 = f_1$ (even parity), due to destructive interference,
while if the parity is odd, it
has $p=1/2$ of correctly
yielding $1$ ($\ket{f_0}$ and $\ket{f_1}$ are orthogonal,
and do not interfere).
If the computation does not halt, the state becomes:
\begin{equation}
(1/2)\ket{0}(\ket{f_0} + \ket{f_1})\; .
\end{equation}
Its squared norm is the probability
that the {\tt MEASURE-1} gate yielded $0$.
First consider $\theta = 0$.
For the even parity cases, this term gives the correct answer,
while for the odd parity cases, it is equally likely
to give either answer; it contributes $1/4$ to $p_e$. Thus
$p^{00}_e=p_e^{11}=0$, and $p^{01}_e=p_e^{10}=1/4$.
The error is one-sided, so it is better than
classical ($p_e^{max} < 1/3$). (If we had not halted the computation
when the measurement of qubit $0$ yielded $0$,
and had measured qubit $1$ in the eigenbasis of $\sigma_x$
instead of the $\sigma_z$-eigenbasis
used in the $\theta=0$ version of our
algorithm, one sees from
(\ref{eq: state before measurement}) that a value of $0$ for the final
measurement means the value of the first measurement gives the parity,
while a value $1$ for the final measurement means the value of the first
measurement is noise. This is Deutsch's Las Vegas algorithm for
parity~\cite{Deutsch85a}.)
Our algorithm also outperforms attempts to use 2-alternative Grover
search to evaluate OR; despite that method being asymptotically
optimal for OR of many inputs, it does not perform better than classically
in this instance.
Adding an $X(\theta)$ before the final measurement gives:
$
p_e^{odd} = \bigl( \frac{c + s}{2} \bigr)^2\;,
~p_e^{even} = s^2\;.
$
Equating these gives a solution
$
c = \frac{-3}{\sqrt{10}}, s = \frac{1}{\sqrt{10}}\;
$
with $p_e= 0.1$ for all cases.
Since $p_e < 1/6$ two-sided with one query, this is also better than classical.
To see that the $\theta = 0$ array for OR minimizes
$p^e_{max}$ subject to the constraint
$p^e_{00}=p^e_{11}=0,$
consider the state
just
before the black-box function is queried:
\begin{eqnarray}
|\Psi \rangle = |\psi_{00}\rangle |00\rangle +
|\psi_{01}\rangle |01\rangle +
|\psi_{10}\rangle |10\rangle +
|\psi_{11}\rangle |11\rangle \;.
\end{eqnarray}
The right-hand ket in each term is a state of the two qubits
on which the function will be called, and
$\sum_{ij} \langle \psi_{ij} | \psi_{ij} \rangle = 1.$
After the query, the state is:
\begin{eqnarray}
|\psi_{00}\rangle |0f(0)\rangle +
|\psi_{01}\rangle |0\overline{f(0)}\rangle +
|\psi_{10}\rangle |1f(1)\rangle +
|\psi_{11}\rangle |1\overline{f(1)}\rangle \;. \nonumber
\end{eqnarray}
The four functions of one bit give
states $|0\rangle, \ket{1},\ket{2},\ket{3}$.
We have:
\begin{eqnarray}
\inner{0}{1}&=&\inner{\psi_{00}}{ \psi_{00}} +
\inner{\psi_{01}}{ \psi_{01}} + 2 {\rm Re}\inner{\psi_{10}}{\psi_{11}}\;. \\
\inner{0}{2}&=&\inner{\psi_{11}}{ \psi_{11}} +
\inner{\psi_{10}}{ \psi_{10}} + 2 {\rm Re}\inner{\psi_{00}}{\psi_{01}}\;. \\
\inner{0}{3}&=&2 {\rm Re}\inner{\psi_{00}}{\psi_{01}}
+ 2 {\rm Re}\inner{\psi_{10}}{\psi_{11}}\;.
\end{eqnarray}
Requiring $p^e_{11}=0$ and thus $\inner{0}{3} = 0$,
there are optimal algorithms which
measure $\proj{0}$ and $I- \proj{0}$.
For these,
$p_e^{max} = \max_{j=1,2,3}|\inner{0}{j}|^2$.
A simple calculation using the Schwarz inequality and
$\sum_{ij} \langle \psi_{ij} | \psi_{ij} \rangle = 1$ shows that
$p^e_{max}$ is minimized
where
$\inner{0}{2}=\inner{0}{3} = 1/2$, at
$p^e_{max}=1/4$.
For our algorithm
$\inner{0}{3}=\inner{1}{2}=0,$ while the other inner products are
$1/2$.
The states span a
3-d real subspace of the 4-d complex space
of two qubits.
They lie, evenly spaced,
on a cone with apex at the origin and opening angle $\pi/2$.
The rest of the algorithm measures three
orthogonal subspaces:
$\{\ket{10},\ket{11}\}$
(outcome $1$ for
{\tt MEASURE-1} 0 ; algorithm returns 1),
\{$|00\rangle$\} (outcome $0$ for final
measurement on qubit 1; algorithm returns 0),
and
\{$|01\rangle$\} (outcome $1$ for
final measurement on qubit 1; algorithm returns 1).
The outcome $1$ for the algorithm corresponds to the
3-d
subspace perpendicular to $|00\rangle$,
while the outcome $0$ corresponds to \{$\ket{00}$\}.
When {\tt MEASURE-1} 0 yields $1$, qubit 1
lies along
$|0\rangle - |1\rangle$, so within the 3-d space spanned by the
possible computer states
the outcome ``1'' for {\tt MEASURE-1} 0 involves one dimension,
and the result ``1'' for the algorithm two dimensions.
Our algorithm makes a further fine-grained measurement within these
two dimensions, but
we may avoid this
(which could affect
the results when the routine is called recursively)
by converting it into one
which measures a single qubit at the end.
Such an algorithm
is: {\tt HADAMARD 0 ;
ORACLE 0 1 ;
HADAMARD 0 ;
CHADAMARD 0 1 ;
CONTROLLED X-THETA 0 1 {$\theta$} ;
MEASURE-0 1 ;
MEASURE-1 1}.
The OR algorithm was derived by restriction from a larger algorithm
found via genetic programming.
This algorithm computes the
depth-two
case (AND/OR$_2$) of evaluating:
$$
(f(00) \vee f(01)) \wedge (f(10) \vee f(11))\;.
$$
With the number of oracle calls
fixed at one, and selection to minimize $p^e_{max}$,
GP yielded
the algorithm
reported in
\cite{Spector99a}, where more detail on the GP
engine used to evolve it may also be found.
Hand simplification and improvement yielded
the gate array of Fig.~1.
Here
{\tt X($\theta)$}
has the matrix ($c := \cos\theta, s := \sin\theta$, $\theta = .0749...$):
$$
\left[
\begin{array}{rr}
c & s \\
s & -c
\end{array}
\right].
$$
This algorithm has error probabilities constant on
orbits of the automorphism group, given in Table
I.
\vskip 10pt
\begin{center}
\begin{tabular}{|l|l|l|l|}
\hline
Orbit & $p_e$ & Orbit & $p_e$ \\
\hline
0 0 0 0 & .00560 & 0 1 0 1 & .28731 \\
0 0 0 1 & .28731 & 1 1 0 1 & .21269 \\
0 0 1 1 & .21269 & 1 1 1 1 & .00560 \\
\hline
\end{tabular}
\end{center}
\vskip 4pt
Table I: Error probabilities (to 5 digits)
for hand-tuned simplified AND/OR algorithm
\vskip 10pt
The {\em automorphism group} of a property $P$ consists of
those permutations $\sigma$ of its input variables
which leave its value
invariant for all assignments (all black-box functions).
AND/OR$_2$ has four input variables
$f_0 \equiv f(00), f_1\equiv f(01), f_2, f_3$; its automorphism
group is generated by
$(0 \leftrightarrow 1), (2 \leftrightarrow 3),
(0 \leftrightarrow 2, 1 \leftrightarrow 3)$.
$Aut(P)$ acts on functions via
$f^\sigma(x) := f(\sigma(x))$.
For AND/OR$_2$, the orbits
of this action may be labeled
by representative functions (written as strings
$f_0 f_1 f_2 f_3$):
$0000_1$,
$0001_4$,
$0011_2$,
$0101_4$,
$1101_4$,
$1111_1$.
Subscripts indicate the
number of functions in the orbit.
Our algorithm also has this automorphism group:
the outcome probabilities for all its measurement gates
are constant on orbits of the group.
It is better than classical,
since $p_e^{max} < 1/3$.
The structure of the algorithm suggests examining its restriction
to qubit 1, since the states with $0$ versus
$1$ input in qubit $0$ are still orthogonal when qubit $1$
is measured, so don't
interfere.
If we fix the input for qubit $0$ at $0$ and remove
qubit $0$ from the algorithm
we may consider the algorithm to
use only two qubits and
to apply to a new function $\tilde{f} := f(0 \cdot)$
defined by
$\tilde{f}(0) =
f(00)$ and $\tilde{f}(1) =
f(01)$.
Relabeling qubit $1$ as $0$, qubit $2$ as $1$ and
$\tilde{f}$ as $f$, we
get the circuit of Fig. 2 for computing
$f(0)$ OR $f(1)$.
Similarly, fix a value $x$ for qubit $1$ in the depth-two algorithm
and view it as an algorithm for AND operating on the one-bit function $f_x(y)$
given by $f(xy)$. This is not identical to the array
derived by applying De Morgan's law ({\tt NOT-(NOT-A OR NOT-B) $\equiv$ A
AND B}) to our OR algorithm, but its action on all black-box
functions is the same.
So the depth-two algorithm can be loosely viewed as a
``recursive'' application of the
depth-one algorithm (modified to give AND at the top level).
Loosely speaking,
it superposes values for qubit $0$ (as in the AND
algorithm), and
calls a ``function'' of qubit $1$;
which function depends on qubit $0$.
This is not quite accurate for two reasons.
First, the lower-level algorithm returns a value to
the upper level
only if the {\tt MEASURE-1 1}
does not halt the computation. If {\tt MEASURE-1 1}
halts the computation, the superposition
of qubit-$0$ values provides a random value for that bit;
the lower level algorithm is effectively
called on a randomly chosen marginal black-box function, and the
result returned as our algorithm's final output.
Second, if the lower-level ``function
call'' is not halted
the ``function'' of $i$ called by the
top-level AND routine
has
an $f(i \cdot)$-dependent $4 \times 4$
unitary matrix which is not of the
usual black-box form.
The state after the function call and second {\tt HADAMARD 1}
is:
\begin{eqnarray} \label{red house}
(1/2\sqrt{2})~[~ \ket{00} (\ket{f_{00}} + \ket{f_{01}})
+ \ket{01} (\ket{f_{00}} - \ket{f_{01}}) \nonumber \\
- \ket{10} (\ket{f_{10}} + \ket{f_{11}})
- \ket{11} (\ket{f_{10}} - \ket{f_{11}})~]~\;.
\end{eqnarray}
This is a superposition, with coefficients $1/\sqrt{2}$ and
$-1/\sqrt{2}$, of states of the form (\ref{eq: state before measurement})
of qubits 1 and 2, with the qubit 0
recording which marginal black box function, $f(0\cdot)$ or $
f(1\cdot)$, is involved.
When
$f(00) \ne f(01)$ or $f(10) \ne f(11)$, but not both;
(orbits
$1101_4$ and $0001_4$), the
{\tt MEASURE-1} has $p=1/4$ of halting the computation with
result $1$, an error if the orbit is $0001$.
If both subtrees have odd parity ($0101_4$
only), $p=1/2$ of halting and (correctly) yielding 1,
and if neither does
($0000$, $1111$, $0011_2$), $p=0$.
This measurement contributes $1/4$ to $p^e_{0001}$.
The state after the {\tt MEASURE-1} gate yields $0$ is
given by the terms with $\ket{00}$ and $\ket{01}$ in
(\ref{red house}).
Hadamarding qubit 0 yields:
\begin{eqnarray} \label{power of soul}
(1/4) & ~[~& |00\rangle \bigl(\ket{f_{00}} + \ket{f_{01}} - |f_{10}\rangle - |f_{11}\rangle
\bigr) \nonumber \\
& + &
|10\rangle \bigl(\ket{f_{00}} + \ket{f_{01}} + |f_{10}\rangle + |f_{11}\rangle \bigr)~]~
\;.
\end{eqnarray}
The {\tt MEASURE-0} gate will terminate the computation with the result $0$
with probability given by the squared norm of the first term in this state.
If measurement yields $1$, the final state
is given by the second term in (\ref{power of soul}).
Then the probability of the final measurement on qubit $2$
giving $1 (0)$ is
\begin{eqnarray}
(1/16) (n_{1(0)}(f))^2\;,
\end{eqnarray}
where $n_x(f)$ is the number of inputs on which $f$ takes
the value $x$.
The total error probabilities (with their sources) are:
\vskip 10pt
\begin{center}
\begin{tabular}{|l|lr|}
\hline
Orbit & Error probability & \\
\hline
0 0 0 0 & & 0 \\
\hline
0 0 0 1 & 1/4 ({\tt M-1 0}) + 1/16 ({\tt M-1 2}) = & 5/16 \\
\hline
0 0 1 1 & 1/4 ({\tt M-1 2}) & 1/4 \\
\hline
1 1 0 1 & 1/8 ({\tt M-0 1}) +
1/16 ({\tt M-0 2}) = & 3/16 \\
\hline
0 1 0 1 & 1/4 ({\tt M-0 2}) & 1/4 \\
\hline
1 1 1 1 & & 0 \\
\hline
\end{tabular}
\end{center}
\vskip 10pt
This is better than classical even without the final $X(\theta)$.
For
$0001$, $1101$, $0101$ the final measurement may give
either result. We may decrease $p^e_{max}$
(here $p^e_{0001}$) by
increasing the likelihood that the final measurement yields $0$,
at the cost of increasing $p^e_{0101}$ and $p^e_{1101}$ for which
$P(f)=1$, and also possibly $p^e_{0000}$ and
$p^e_{1111}$.
Thus, consider adding, before the measurement, an $X(\theta)$ gate
on
qubit 2.
We obtain
$p_{0001} = \left(\frac{3s - c}{4}\right)^2 + \frac{1}{4}$,
$p_{0101} = \left( \frac{c + s}{2}\right)^2\;.$
Equating these gives
$s^2 = (9 - 14\sqrt{2/5})/26,$ so
$\theta^* = 0.074909...$ at $p_e=p_{0001}=p_{0101} ~= 0.287315...$.
With the help of genetic programming, we found better-than-classical
quantum gate arrays for
the depth one and two cases of the family of properties of black-box
functions given by alternating binary AND/OR trees. These
circuits
could constitute small-$n$ instances of a scalable Monte Carlo algorithm for
this family of properties.
They are also small-$n$ instances of the
bounded-depth AND/OR tree problems whose
complexity was characterized in
\cite{Buhrman98a,Buhrman99b}.
The Grover-based algorithms therein achieve marked
speedups over classical means
in the large-$n$ regime, as the fan-in of the nodes in the
bounded-depth trees grows; their $n=2$ and $n=4$ instances
are not superclassical. In contrast, our arrays give
speedups for fan-in $2$. Since the uniform
binary tree problem has fan-in $2$ for all instances,
this suggests that aspects of our
gate arrays may prove useful in addressing this problem
even for large $n$.
If so, this would be another new quantum computational effect related
to the OR circuit. As it stands, OR joins XOR to complete the
set of logical properties of one-bit
black-box subroutines which can be quantum-mechanically
computed, with one subroutine
call,
more reliably than is classically possible.
\begin{acknowledgments}
Supported in part by the John D. and Catherine T. MacArthur Foundation's
MacArthur Chair program at Hampshire College, by NSF grant
\#PHY-9722614, and by a grant from the ISI Foundation,
Turin, Italy, and the Elsag-Bailey
corporation.
We thank H. Burhman, R. Cleve, M. Mosca, and
R. de Wolf for discussions.
\end{acknowledgments}
\end{document}
|
\begin{document}
\title{Matrix Multiplication Using Only Addition}
\numberofauthors{2}
\author{Daniel L. Cussen$^{\dag}$, Jeffrey D. Ullman$^{\sharp}$\\
\affaddr{\large $^{\dag}$Fgemm SPA,
$^{\sharp}$Stanford University}\\
}
\maketitle
\begin{abstract}
Matrix multiplication consumes a large fraction of the time taken in many machine-learning algorithms. Thus, accelerator chips that perform matrix multiplication faster than conventional processors or even GPU's are of increasing interest. In this paper, we demonstrate a method of performing matrix multiplication without a scalar multiplier circuit. In many cases of practical interest, only a single addition and a single on-chip copy operation are needed to replace a multiplication. It thus becomes possible to design a matrix-multiplier chip that, because it does not need time-, space- and energy-consuming multiplier circuits, can hold many more processors, and thus provide a net speedup.
\end{abstract}
\section{Introduction}
\label{intro-sect}
In this paper we show that when multiplying matrices, scalar multiplication is not really necessary and can be replaced by a surprisingly small number of additions. The advantage of performing matrix multiplication using only addition for arithmetic is that it then becomes feasible to build special-purpose chips with no multiplier circuits. Such chips will take up less space per on-chip processor, allowing more, but simpler, processors to be packed into a single chip. \cite{plasticine} is an example of an architecture that implements this strategy. Further, since a multiplier circuit can take significantly more time than addition or other typical machine operations, it is possible that the addition-only approach can be faster, even on conventional architectures. But there is little advantage if the number of additions needed to replace a single multiplication is large. Thus, we must show that in practice, very few additions are needed to replace a multiplication~-- fewer than one addition (plus a little more than one copy operation) in some cases of practical interest.
Before getting deep into the weeds, let us see the essential idea that the algorithm uses. Start with a vector of integers in some large range, say 1-to-$k$. We sort, eliminate duplicates, and take differences between consecutive numbers in the sorted order. The result is a new vector of integers, but they are much more constrained than the original list. Specifically, rather than simply being numbers in the range up to $k$, their {\em sum} is at most $k$. Thus, when we iterate the process of sorting, eliminating duplicates, and taking differences several times, the lists rapidly become much shorter than the original list. We can then multiply the original vector by any constant $c$ by recursively multiplying the vector of differences by $c$ and then obtaining the original vector by accumulating the differences after they have been multiplied by $c$.
\subsection{Motivation and Background}
\label{refs-subsect}
The rising importance of deep-learning and other machine-learning applications has made multiplication of large matrices take on a new importance. For example, backpropagation \cite{backprop} is essentially a sequence of matrix multiplications. At the same time, special-purpose chips or boards, such as \cite{plasticine} \cite{TPU}, are proliferating. We therefore offer a new approach to matrix multiplication that:
\begin{enumerate}
\item
Works for both sparse and dense matrices.
\item
Is more efficient, the larger the matrices are.
\item
Works better when the matrix elements require fewer bits, an important trend as people search for ways to make machine learning more efficient.
\item
Supports a chip design that avoids multiplication circuits, thus saving chip real estate and allowing more arithmetic units to be placed on one chip.
\item
Uses a very small number of additions in place of one scalar multiplication, thus offering an opportunity to speed up the computation, since multiplication can take significantly more time than addition.
\end{enumerate}
The search for algorithms that multiply $n$-by-$n$ matrices in less than the $O(n^3)$ time taken by the straightforward algorithm has been ongoing for more than 50 years, from Strassen \cite{strassen} at $O(n^{2.81})$ to the best known \cite{williams} at $O(n^{2.37})$. Unfortunately, all these algorithms to date, while they have better asymptotic running times than the obvious, have constant factors that make them unattractive, even for very large matrices, and they also assume the matrices are dense.
Our central thesis is that it makes sense to replace multiplication by a small number of additions. This point can only be justified if the time taken for multiplication significantly exceeds the time taken for addition. \cite{fog} is a recent examination of the time taken by all instructions on many of the most commonly used processors. The conclusion is that typically, integer multiplication has 3--6 times the latency of integer addition.\footnote{When we talk of floating-point operations, the difference is much less. The reason is that when multiplying, we have only to add exponents, while a floating-point addition requires that we align the mantissas according to the difference in the exponents. However, we are not proposing to replace floating-point multiplications by floating-point additions. We are proposing to replace the multiplication of mantissas, which are integers, by integer addition.} Thus, replacing multiplication by one or two additions looks promising.
The issue of how much space and energy a multiplier takes on a chip also argues that there are potential advantages to avoiding using a multiplier altogether. The faster multipliers involve complex circuits ({\em compressors}), where several of the partial products are combined not into a single sum, but into two or more integers that themselves must be combined further \cite{wikimult}. Even more space- and energy-consuming circuits allow {\em pipelined} multiplication, where the same circuit can be used to do several multiplications at once. Since it is then possible, in principle, to produce the result of one multiplication in the same time as it takes to perform one integer addition, the utility of our addition-only method depends on being able to fit several adders in the space that one pipelined multiplier takes, and/or use less energy with the adder-only approach.
There is another approach to addition-only matrix multiplication. One can use a table lookup to convert numbers to the log domain, add the logarithms, and convert back \cite{logs}. In this way, one multiplication is replaced by one addition. This method works, although its efficiency depends on how large the log and antilog tables are, which in turn depends on how precise we wish the result to be. And at any rate, the cost of the two lookups must be included in the total cost.
\subsection{Simplifying Assumptions}
\label{simp-subsect}
To describe simply the ideas behind our addition-based approach to matrix multiplication, we shall make several assumptions. First, we assume that we are multiplying $n$-by-$n$ matrices, and that the elements of these matrices are $b$-bit integers. Typically, $b$ would be 32 or 24, but even smaller integers are becoming more interesting. For example, the TPU (tensor-processing unit) \cite{TPU} is based on a 16-bit floating-point number, which has effectively a 12-bit mantissa, counting the implied leading 1. Note that if elements are single-precision floats, they have (effectively) a 24-bit mantissa, and it is only the mantissas that need to be multiplied. The exponent portions are added, and there may be a shift of a single position needed in the product of mantissas. Section~\ref{extensions-subsect} will show how the ideas presented here can be extended more broadly, including to matrices of arbitrary shapes and to sparse matrices.
We assume that our processors have a ``shift-and-add'' operation $\mathrm{add}(x,y,i)$. This operation shifts the integer $y$ left by $i$ positions and adds the result to $x$. We assume $y$ is a $b$-bit integer, $x$ can be as large as $2b-1$ bits, and $0\le i<b$. That is, the effect of this operation is
$$x~:=~x + 2^iy$$
In addition, we shall assume processors have available the operations needed to sort, eliminate duplicates, connect elements of an initial list to their representation in the sorted list (i.e., follow pointers), and take the first differences of a sorted list. However, since the total number of operations of these types needed in the proposed algorithm is $O(n^2\log n)$, and thus small compared with the total number of operations, we shall not count the operations of these types exactly.
\subsection{Russian-Peasants Multiplication}
\label{rpm-subsect}
Suppose we multiply $n$-by-$n$ matrices $P$ and $Q$ with elements $[p_{ik}]$ and $[q_{kj}]$, respectively, each of which is a $b$-bit integer. The product has in position $(i,j)$ the value $\sum_{k=1}^n p_{ik}\times q_{kj}$. We therefore have to take $n^3$ products of $b$-bit integers. Another $(n-1)n^2$ additions are needed to sum the terms that form each of the $n^2$ result elements, but we need those additions no matter how the multiplications are done. Thus, in discussions that follow, we shall not count these additions.
As a baseline for replacing multiplication by addition, we can always simulate a multiplication as a sequence of shift-and-add operations. This technique, which is often referred to as ``Russian-Peasants Multiplication,'' uses only addition and was apparently known as far back as ancient Egypt \cite{rpm}. The method is usually explained in decimal, but when numbers are expressed in binary, it is especially simple. Suppose we are multiplying $p\times q$. Think of $p$ as a bit string $p_{b-1}p_{b-2}\cdots p_0$. Starting with $x=0$, for each $p_i$ that is 1 we perform the shift-and-add operation $\mathrm{add}(x,q,i)$; the result will be $p\times q$.
We can thus replace $n^3$ multiplications by at most $bn^3$ additions. If elements of the matrices are chosen at random, then we would expect about half the bits to be 0, and therefore $bn^3/2$ is a better approximation to the number of required additions to simulate $n^3$ multiplications using the Russian-Peasants approach. However, in fact we can, in practical cases, do much better than $b$ or $b/2$ additions per multiplication.
\section{The Addition-Only Matrix-Mul\-ti\-plic\-a\-tion Algorithm}
\label{algo-sect}
We shall begin by describing the overall approach to matrix multiplication, followed by the details of the basic zero-multiplication algorithm. In Section~\ref{alignment-subsect}, we shall give an additional idea that can reduce the number of additions needed still further. Section~\ref{upper-sect} will prove that the algorithm offered does use significantly fewer additions than the Russian-Peasants approach in cases of practical interest.
\subsection{Overview of Matrix Multiplication}
\label{overview-subsect}
The goal is to multiply $n$-by-$n$ matrices $A$ and $B$. The elements of these matrices are $b$-bit integers. A common approach is to take $n$ outer products, each is the outer product of a column of $A$ and the corresponding row of $B$. That is, the $k$th outer product applies to column $[a_{1k},\ldots,a_{nk}]$ of $A$ and the row $[b_{k1},\ldots,b_{kn}]$ of $B$. The result of this outer product is $n^2$ scalar products $a_{ik}b_{kj}$ for all $i$ and $j$; we add this product to the element of the result that is in row $i$ and column $j$.
The additions that accumulate these products are part of any matrix-multiplication algorithm. We therefore do not count them when tallying additions made by the proposed method. That is, we are only counting the additions that we use to replace the $n^3$ scalar products that are needed to perform the $n$ outer products just described.
If we were only to multiply two $b$-bit scalars, we could make some small improvements to the Russian-Peasants approach in some cases, but the latter method is about as good as we can do. However, when matrices are large and the number of bits in their elements is small~-- exactly the common case for machine-learning applications~-- we can do significantly better by multiplying a vector of $n$ integers, each of $b$ bits, by a $b$-bit constant. We preprocess the vector in a way we shall describe, involving sorting, eliminating duplicates, and taking differences of successive numbers. Once preprocessed, we can multiply the vector by $n$ constants, and thus compute one of the $n$ outer products we need.
\subsection{Vector-Scalar Multiplication}
\label{vector-scalar-subsect}
The algorithm for multiplying a vector $[v_1,\ldots,v_n]$ by a scalar $c$ is suggested by Fig.~\ref{algoutline-fig}. We assume that each $v_i$ is a positive $b$-bit integer; signed integers can be handled by ignoring the sign until the products are created. The algorithm is recursive, in that it modifies the initial vector to produce a (typically) shorter vector $[d_1,\ldots,d_m]$. Not only is this vector shorter, but the sum of its elements is at most $2^b$. This constraint is much stronger than the constraint on the original vector~-- that each element be less than $2^b$.
\begin{figure}
\caption{Algorithm Outline}
\label{algoutline-fig}
\end{figure}
Either using Russian-Peasants multiplication as a base case, or by applying the same ideas recursively, we produce the vector-scalar product $[cd_1,\ldots,cd_m]$, which is then used to produce the desired output $[cv_1,\ldots,cv_n]$. The recursive part of the algorithm consists of the following steps.
\begin{enumerate}
\item
{\em Sort}: Begin by sorting the given vector $[v_1,\ldots,v_n]$ in a way that eliminates duplicates. The resulting sorted vector $[s_1,\ldots,s_m]$ has $s_1<s_2<\cdots<s_m$. It is necessary to create an array of pointers $[p_1,\ldots,p_n]$, where $p_i$ gives the unique value $j$ such that $v_i=s_j$. These pointers are suggested by the red, dotted lines in Fig.~\ref{algoutline-fig}.\footnote{Depending on how a chip is organized, it may be more efficient to store the array of pointers inverted. That is, for each $i$ there would be easy access to all the values of $j$ for which $v_j=s_i$.}
\item
{\em Differences}: Construct a new vector $[d_1,\ldots,d_m]$ giving the differences between successive elements of the sorted array, with an imaginary 0 preceding the first number. That is, $d_1=s_1$ and $d_i = s_i-s_{i-1}$ for $i=2,3,\ldots, m$.
\item
{\em Recursion}: Either by using Russian-Peasants multiplication as a base case, or using this algorithm recursively, produce the vector-scalar product $[cd_1,\ldots,cd_m]$.
\item
{\em Accumulate}: Compute the product of $c$ and the vector $[s_1,\ldots,s_m]$ by $cs_1=cd_1$ and $cs_i=cs_{i-1}+cd_i$ for $i=2,3,\ldots,m$. Note that we use $m-1$ additions here, and it is the only place, other than the recursive step, where additions are used.
\item
{\em Follow Pointers}: For each $v_i$, the pointer $p_i$ is that $j$ such that $v_i=s_j$. Therefore, $cv_i=cs_j$. We may thus copy the value $cs_j$ just computed at the previous step and make that be $cv_i$.
\end{enumerate}
Observe that while step~(1) requires $O(n\log n)$ time to sort, and step~(2) requires $O(n)$ time for subtractions, these are done only $n$ times each. Thus, the total time spent for these two steps is $O(n^2\log n)$ at most. It is only steps (4) and (5), each of which takes $O(n)$ time but is repeated $n^2$ times, that require $O(n^3)$ work. In particular, step~(4) is where the $O(n^3)$ additions occur, and step~(5) requires $O(n^3)$ copy operations.
\begin{example}
\label{pi-ex}
Suppose we start with the vector
$$V=[3,1,4,1,5,9]$$
When we sort and eliminate duplicates (Step~1), we get the vector $S=[1,3,4,5,9]$ and the vector $P=[2,1,3,1,4,5]$. That is, the first element of $P$ is 2 because the first element of $V$, which is 3, is in the second position of $S$. The second element of $P$ is 1, because the second element of $V$, which is 1, has moved to position 1 in $S$, and so on.
Next, take differences of $S$ (Step~2) to get $D=[1,2,1,1,4]$. That is, the first element of $D$ is the first element of $S$, i.e., 1. The second element of $D$ is the difference between the first and second elements of $S$, that is, $3-1=2$, and so on.
For the recursion, Step~3, we treat the vector $D$ as if it were $V$. We sort and eliminate duplicates, to get $[1,2,4]$. When we take differences, we have $[1,1,2]$. Sort and eliminate duplicates again, and we get $[1,2]$, whose difference vector is $[1,1]$. Now, when we sort and eliminate duplicates we have only the vector $[1]$. We can multiply this vector by any constant $c$ using Russian-Peasants multiplication.\footnote{In fact, multiplication by 1 is trivial, and we really do not need any operation at all in this special case. However, in rare cases, the limit of the recursion will be a vector whose length is 1 but whose one element is bigger than 1. It may also make sense to stop the recursion before the vector length reaches 1.}
Suppose that $c=5$; that is, we want to compute $5V$. Let us assume that recursively, we have managed to compute $D'=5D$, the vector $[5,10,5,5,20]$. We compute $S'=5S$, the vector $S'=[5,15,20,25,45]$ by accumulating the elements of $D'$ (Step~4). That is, the first element of $S'$ is the first element of $D'$, i.e. 5. The second element of $S'$ is the sum of the first element of $S'$ and the second element of $D'$, or $5+10=15$, and so on.
Lastly, we construct $V'=5V$ by applying the vector of pointers $P=[2,1,3,1,4,5]$ to the vector $S'=[5,15,20,25,45]$ (Step~5). The first element of $V'$ is the second element of $S'$, or 15. The second element of $V'$ is the first element of $S'$, or 5, and so on. The result is $V'=[15,5,20,5,25,45]$.
In practice, we would compute a complete outer product by multiplying $V$ by $n$ different constants, of which 5 was just an example. We use the same $D$ and $P$ vectors for each of the $n$ constants, so there is no duplication of work.
\end{example}
\subsection{How Many Additions?}
\label{count-subsect}
As mentioned, we shall later prove an upper bound on the number of additions needed by this algorithm; see Section~\ref{upper-sect}. But here, let us look at the intuition why we would expect relatively few additions to be needed. Intuitively, it is because when $n$ is large and $b$ is small, there can't be too many different differences, and therefore, as we recurse, the lengths of the vectors involved drop quickly.
\begin{example}
\label{count-ex}
Suppose we have a vector of $n=1000$ randomly chosen 12-bit integers. When we sort them, we expect to find some duplicates, but not many. So at the first level of recursion, we would have $m$, the length of the sorted list, close to 1000. But at the second level, we note that the vector of differences, $[d_1,\ldots,d_m]$, has almost 1000 elements that sum to at most $2^{12}=4096$. Thus, there cannot be many different values among these, and the value of $m$ at the second level will be quite small compared with 1000. In particular, the sum of the first 91 integers exceeds 4096, so 90 is an absolute upper bound on the number of second differences.
For the third level of recursion, there may be so few that it makes sense to use Russian Peasants on them, or we may recurse a third time, and get a still smaller list. But the work at levels after the second will be negligible no matter how we do it.
Thus, we require at most 1000 additions at Step~4 of the first level, and at most 90 additions at Step~4 of the second level. Subsequent levels require even fewer additions, so the total number of additions required at all levels will be just a little more than 1000. In comparison, if we used Russian Peasants only, we would expect around 6000 additions.
\end{example}
Of course, the count of additions as suggested by Example~\ref{count-ex} is for one vector-scalar multiplication. For the complete multiplication of $n$-by-$n$ matrices, we would have to do $n^2$ such operations, for a total of a little more than $n^3$ additions (assuming again that $n=1000$ and $b=12$). But those additions replace the $n^3$ multiplications that would be needed, so we are still claiming a little more than one addition replacing one multiplication.
\subsection{Running Time of the Complete Algorithm}
\label{running-time-subsect}
Remember that Figure~\ref{algoutline-fig} represents two different phases. The first phase, above the line, is done once for each row of the second matrix, i.e., $n$ times. Thus, even though we are sorting a list of length $n$, which takes $O(n\log n)$ time, the total time spent above the line is $O(n^2\log n)$. That cost can be neglected compared with the $O(n^3)$ running time of the entire algorithm. However, if we think in terms of chip design, we do have to include on chip the capability of doing the sorting and setting up the pointer array that is implied by Fig.~\ref{algoutline-fig}.
The operations below the line in Fig.~\ref{algoutline-fig} clearly take $O(n)$ time. The constant factor includes the number of additions needed to replace one multiplication. For each of the $n$ rows of the second matrix, we perform the operations below the line $n$ times, so the total time taken is $O(n^3)$ as it should be.
In this analysis, we have assumed a serial execution, which is not realistic or desirable if we are to design a special-purpose chip. Presumably we would design for parallel execution, for example implementing a parallel sort or processing several vector-scalar multiplications at the same time. However, $O(n^3)$ will still be the measure of the number of operations the chip will execute.
\section{Improvements to the Basic Algorithm}
\label{improvements-sect}
There are a number of modifications to the algorithm of Section~\ref{algo-sect} that improve one or more aspects. Here, we shall mention {\em alignment} as a technique to reduce the length of vectors involved. We can take advantage of zeros, ones, and duplicates in the columns of the first matrix. We also mention ways to increase the available parallelism. And we address extensions to nonsquare and sparse matrices.
\subsection{Alignment}
\label{alignment-subsect}
If elements $v$ and $w$ of a vector differ by a factor that is a power of 2, then when we multiply the vector by any constant $c$, the products $cv$ and $cw$ will also have a ratio that is the same power of 2. Therefore, we can obtain $cw$ from $cv$, or vice-versa, by shifting their binary representations. We can use this observation to treat $v$ and $w$ as if they were the same, if we make some small modifications to the basic algorithm of Fig.~\ref{algoutline-fig}.
\begin{enumerate}
\item
Before sorting $[v_1,\ldots,v_n]$, we shift each element $v_i$ right until it becomes an odd number (i.e., we drop 0's from the lower-order bits).
\item
In addition to the vector of pointers $[p_1,\ldots,p_n]$, we need another vector $H=[h_1,\ldots,h_n]$, where $h_i$ is the number of positions to the right that we have shifted $v_i$.
\item
When constructing the result vector $[cv_1,\ldots,cv_n]$, we construct $cv_i$ by first following the pointer $p_i$ and then shifting the result $h_i$ positions to the left. Alternatively, if we can shift and add in one step, we can perform this shifting when we add $cv_i/2^{h_i}$ to the element of the result matrix to which it belongs.
\end{enumerate}
\begin{example}
\label{alignment-ex}
Suppose the given vector is
$$V = [3,7,2,12,8,6]$$
When we divide out powers of two, we are left with
$$[3,7,1,3,1,3]$$
When we sort and eliminate duplicates, we have vector $S=[1,3,7]$. The vector of pointers is $P=[2,3,1,2,1,2]$. For instance, the first element of $V$, which is 3, appears in position 2 of $S$. The fourth element of $V$, which is 12, has also become 3 after removing factors of 2, so the fourth element of $P$ is also 2. The vector $H$ that records the number of positions shifted is $H=[0,0,1,2,3,1]$. For example, the 3 in $V$ is not shifted at all, while the 12 in $V$ has been shifted two positions to the right.
\end{example}
There are two advantages to this alignment step. First, it evidently reduces the number of elements of $V$ that are considered distinct. Thus, it reduces the length of the sorted list and the length of the vector of differences. But it has another, more subtle, effect. The elements of the sorted list are all odd. Therefore, all differences other than (perhaps) the first are even. Thus, when called recursively the first time, differences have at most $b-1$ bits after shifting right to eliminate trailing 0's.
\subsection{Modified Scalar Multiplication}
\label{column-simp-subsect}
We have described the algorithm as taking a column of the first matrix, and processing each of its $n$ values independently. However, if there are duplicates among those values, as will likely be the case, we should first eliminate duplicates. Further, we do nothing for those values that are 0, and for values that are 1, no multiplication is needed; we can take the original vector $V$ as the product.
We can also apply the trick of Section~\ref{alignment-subsect} to the columns of the first matrix. For instance, if 3 and 12 are both elements of a column, and we have computed $3V$, then we do not also need to compute $12V$; we can simply shift the values of the vector $3V$ two positions left.
\subsection{Parallelism}
\label{parallelism-subsect}
Ideally, as much of the circuitry on a chip should be active at any given time. The algorithm we have described is a serial algorithm, so it will not tend to keep things active. However, there are many opportunities to increase the parallelism in the chip. First, note that other than the circuit to sort, which itself can be parallelized in known ways, e.g. \cite{batcher}, most of the components needed are either registers to store the various vectors needed, or adder circuits. Moreover, as described in Fig.~\ref{algoutline-fig}, all the vectors shown are handled with a single adder circuit. As the accumulation of differences (step~4 in Section~\ref{vector-scalar-subsect}) appears serial, it is hard to parallelize this portion of the algorithm.\footnote{Strictly speaking, there is a parallel algorithm \cite{stone} for computing the accumulated sums of $n$ elements in $O(\log n)$ time, but this approach requires $O(n\log n)$ circuit components and takes $(n\log{n})/2$ additions, so it would negate the advantage of using adders in place of multipliers.}
We can multiply the vector $V$ by many different scalars $c$ at the same time, but we need registers to store intermediate results for each $c$. That change may thus speed up the time needed by a large factor, but it doesn't change the ratio of space needed for registers compared with space for adders. Likewise, we can process many different rows $V$ in parallel; that also speeds the process but doesn't change the space allocation.
There is one modification to the algorithm that {\em will} increase the ratio of adder space to register space. After sorting and eliminating duplicates, we can break the vector $S$ into several segments: one segment for the smallest values, another for the next smallest values, and so on. We can then process each segment independently, in parallel. That change allows us to use many adders at once to accumulate differences for a single vector $S$.
There is a downside to this approach: When we take differences within the segments, the same difference may occur in many different segments, so we may be doing the same work several times without realizing it. But because we create segments after sorting, the sum of all the differences among all the segments has not changed; it is still at most $2^b$. Thus, we can still expect a significant reduction in the total length of vectors after we take second differences. An example suggests what to expect.
\begin{example}
\label{segments-ex}
Let us reconsider the situation of Example~\ref{count-ex}, where we had 1000 12-bit integers and argued that there could be no more than 90 differences. After sorting, we might have fewer than 1000 elements, but let us assume the sorted vector $S$ still has 1000 elements. Suppose we divide $S$ into ten segments of 100 elements each and take differences within each segment. The sum of all those differences is still at most 4096.
Suppose that these differences divide as evenly as possible.\footnote{It can be shown that an even distribution is the worst case; i.e., it allows the largest number of differences.} Then each segment has differences totaling at most 410. Since the sum of the first 29 integers exceeds 410, there can be at most 28 differences in any of the ten segments, or a total of 280 differences. That number is much larger than the 90 differences that can occur if we do not divide $S$ into ten segments, but it is much smaller than 1000; i.e., we are still guaranteed to reduce significantly the total length of all the segments when we take differences.
\end{example}
A second possible approach is to divide a vector of length $n$ into $\sqrt{n}$ segments of length $\sqrt{n}$ each. Accumulate the sum within each segment. Then, accumulate the final sums of the segments, to get the value that must be added to each member of each segment. That is, we must add to each element of the $i$th segment the sum of the last elements in each of the segments 1 through $i-1$. This approach gives $\sqrt{n}$-fold parallelism, while requiring $2n+\sqrt{n}$ additions in place of the $n$ additions that would be needed to do a sequential accumulation of all $n$ elements.
\subsection{Extension to Other Matrix Forms}
\label{extensions-subsect}
Matrices of interest are rarely square, dense matrices of nonnegative integers. Here is a list of some of the extensions that can be made to the basic algorithm.
\subsubsection{Positive and Negative Integers}
We have assumed that all matrix elements are nonnegative integers. But a sign can be carried along with each element and not used until it is time to place a sign on each scalar product. The algorithm for choosing the correct sign should be obvious.
\subsubsection{Floating-Point Numbers}
Multiplication of floating-point numbers involves multiplying the mantissas and adding the exponents. Multiplication of the mantissas is an integer multiplication. Adding of exponents is, of course, an addition, one that is necessary no matter how we perform the multiplication.
\subsubsection{Nonsquare Matrices}
Suppose we need to multiply a matrix $A$ that is $n$ rows by $k$ columns times a matrix $B$ that is $k$ rows by $m$ columns. Then we need to take $k$ outer products, each of a column of $A$ times the corresponding row of $B$. Nothing about the algorithm described so far requires that $n=m$ or that either equals $k$. The only extension is that we need to decide whether to make the columns of $A$ or the rows of $B$ play the role of the vector $V$. Intuitively, the longer the vector, the fewer additions per element we need. Thus, the choice normally depends on picking columns of $A$ if $n>m$ and picking rows of $B$ otherwise. The only time that choice might not be better is if there is so much parallelism available that we can process more than $\min(n,m)$ scalars at once.
\subsubsection{Sparse Matrices}
There are two cases to consider here. If the matrices are represented in the ordinary manner, as arrays, then the first step, where we sort and eliminate duplicates, essentially eliminates all the 0's. We cannot do better, because we have to look at the entire matrices, regardless of how sparse they are.
In the second case, the matrices $A$ and $B$ are represented as a set of triples $(i,j,v)$ meaning that the element in row $i$ and column $j$ has the nonzero value $v$. We can assemble columns of $A$ or rows of $B$ by finding all the triples that belong to that row or column. The rows and columns are missing the 0 elements, but because we have location data for each element, we can then take the outer product of column $k$ of $A$ and row $k$ of $B$ by working with only the nonzero elements. Any product where one or both of the arguments is 0 would yield a 0 product and thus never influence the result anyway. The only significant modification to the algorithm is that along with the vector $V$ we need to have a parallel vector that gives, for each element, the row (if $V$ is a column of $A$) or the column (if $V$ is a row of $B$).
\section{Experimental Results}
\label{exper-sect}
Figure~\ref{results-fig} shows the lengths of the lists that result from the following experiments. For four values of $n$ ranging from one thousand to one million, we generated 100 lists of random 24-bit numbers. These lists were sorted and duplicates were eliminated. In some cases, we right-shifted (aligned) the numbers first to eliminate factors of 2. The lengths of the resulting sorted lists are shown in the column labeled (A); it and the following columns are averages rounded to the nearest integer.
\begin{figure}
\caption{Result of sorting and taking differences three times}
\label{results-fig}
\end{figure}
Column~(B) shows the lengths of the lists after taking differences and performing the same operations on the list of differences~-- align (if permitted), sort, and eliminate duplicates. Then, columns (C) and (D) represent the lengths of the lists that result after repeating this operation twice more. The last column gives the average number of additions that would be needed to multiply the initial vector of length $n$ by a scalar. To be precise, it is 12 times column~(D) (for the Russian-peasants multiplication of each element on the list of third differences), plus columns (A), (B), and (C), all divided by $n$.
\subsection{Intuition}
\label{intuition-subsect}
If we look at the first row of Fig.~\ref{results-fig}, we see a typical situation where the length of the vector, $n$, is much smaller than the number of possible integers. In this case, the chances that a random list of 1000 integers, each chosen from around 16 million 24-bit integers, would have even one duplicate is small. That remains true, even if we use alignment, which in effect divides integers into about 8 million groups, each corresponding to a 24-bit odd integer. Moreover, as we see from column~(B), even the differences between elements of the sorted list are almost all distinct. It is not until we take differences of the differences that we begin to see duplicates, as suggested by column~(C). By the time we take third differences, there are very few distinct numbers indeed, as seen in column~(D).
In contrast, let us look at the last two rows, where there are a million random integers initially, again chosen from the roughly 16 million possible 24-bit integers. Now there are good chances of seeing some duplication, and indeed we do. But the most significant effect is seen in column~(B), where the number of distinct differences is tiny, even if we do not align. The reason is that when we have a million numbers out of 16 million possibilities, the average difference in the sorted list is only 16. Surely, there will be some larger gaps, but the chances of a really large gap, say over 100, are small, and there cannot be too many of those large gaps, because the sum of all the gaps is at most $2^{24}$. As a result, the total work dealing with all the difference lists is negligible, and the vast majority of the work occurs computing the result of multiplying the list represented by column~(A). In fact, we require less than one addition per scalar multiplication, because the number of duplicates in the original list exceeds the length of all the difference lists.
The effect of alignment is perhaps more profound than might be expected, especially for the smaller values of $n$. Our first assumption might be that alignment doubles the number of duplicates. That is, the 16 million possible integers are grouped into 8 million groups, each corresponding to an odd number. That would appear to double the chances that a randomly chosen integer has appeared before on the initial list. But we are in fact beneficiaries of the ``class-size paradox.'' The groups are not all the same size. For example, the group to which the integer 3 belongs has 23 members~-- all the 24-bit integers that are all 0's except for two consecutive 1's somewhere. So a random integer tends to belong to a larger group, and further it will be a duplicate if any of the members of that group have appeared previously. For example, if we compare the last two lines, we see that if we do not align the one million integers, we find an average of 29,228 duplicates, while if we {\em do} align we average 82,460 duplicates, almost three times as many.
\section{An Upper Bound on Required Addition Steps}
\label{upper-sect}
The results of Section~\ref{exper-sect} are based on the assumption that vectors are chosen randomly and uniformly. Especially for long vectors, randomness forces difference lists to be small. However, in many applications it is not reasonable to assume uniformity in the distribution of values; there might well be many small values but also enough large values that the differences in the sorted list are mostly distinct.
We shall show that, regardless of the initial list, computing a scalar-vector product using only additions and the operations needed to sort, eliminate duplicates, align bit strings, and follow pointers, but not multiplication, requires only a small number of additions per element of the vector. The exact number of additions per element depends on the relationship between the length of the vector and the size of its elements, but it is certainly much less than the baseline of Section~\ref{rpm-subsect} (except perhaps in the uninteresting case of very small matrices).
Here is the intuition behind the proof. As we mentioned, when we have a list of integers, each no more than $k$, and we sort, eliminate duplicates, and take differences, the sum of those differences is at most $k$. The list of differences may have some small numbers, say numbers at most $x$~-- a value we shall select judiciously. But when we take differences a second time, there cannot be many differences among the small numbers, since their sum is at most $x$. There may also be some larger numbers, those bigger than $x$. However, there can be at most $k/x$ of those, since the sum of all the numbers is at most $k$. Thus, when taking differences a second time, there cannot be many differences among the large numbers either. These two observations let us put a strong upper bound on the lengths of the lists as we repeat the sort-eliminate duplicates-take differences process.
\subsection{Two Cost Functions}
\label{CD-subsect}
It is useful to define two mutually recursive cost functions, $C$ and $D$.
\begin{itemize}
\item
Define $C(n,k)$ to be the number of additions needed to multiply, by a constant, a vector of $n$ elements, each of which is a positive, odd integer no larger than $k$.\footnote{Note that $k$ is $2^b$ if we are talking about $b$-bit integers.}
\item
Define $D(n,k)$ to be the number of additions needed to multiply, by a constant, a vector of length $n$, whose elements are distinct, odd positive numbers that sum to $k$.
\end{itemize}
Observe that the significant difference between $C$ and $D$ is that in the former case, $k$ is a bound on each individual element, while in the latter case, $k$ bounds the sum of all the elements, and therefore represents a more limited class of vectors.
\subsection{Bounds on the Cost Functions}
\label{CD-bounds-subsect}
We can observe four rules that let us put bounds on $C(n,k)$ and $D(n,k)$ mutually.
{\bf Rule 1}: $C(n,k) \le D(n,k) + n$. This rule reflects the idea that we can align the given vector $[v_1,v_2,\ldots,v_n]$ of length $n$, sort, eliminate duplicates, and take differences of the resulting vector. We shall surely have a difference vector of length no greater than $n$, and the sum of all its elements cannot exceed $k$, the maximum possible element in the original vector. Recursively multiply this vector by a constant $c$. We can then compute the value of each $cv_i$ by accumulating the differences. This step uses at most $n$ shift-and-add additions. The shifting is necessary because some of the differences have had factors of 2 removed during the alignment process. The only other additions needed are in the recursive computation of the product of $c$ and the vector of differences. That cost is no greater than $D(n,k)$.
{\bf Rule 2}: $D(n,k) \le C(n,x) + D(k/x,k)$ for any $x$. This rule doesn't involve any operations, per se. Rather, it states that we can conceptually treat the vector of length $n$ as two vectors. The first consists of the ``front'' part~-- those elements less than or equal to $x$. The second is the ``back'' part~-- elements greater than $x$. For the front, we have an upper bound on each element; they can be no larger than $x$. We also have the condition from the definition of $D$, which is that the sum of these elements can be no greater than $k$, but we shall not use that. Presumably, the upper bound $x$ on each element will prove stronger. For the back part, we know that the sum of these elements is not greater than $k$. We also know that there can be no more than $k/x$ of these elements, since each is greater than $x$.
{\bf Rule 3}: $D(n,k) \le D(\sqrt{k},k)$. Each of the elements on a list is odd and the elements are distinct. Therefore, given that numbers sum to $k$, the longest the list can be is that $r$ such that $1+3+5+\cdots+(2r-1) \le k$. Since the sum of the first $r$ odd numbers is exactly $r^2$, it follows that $\sqrt{k}$ is actually an upper bound on the length of the list described by $D(n,k)$. Of course this observation is only valuable if $n>\sqrt{k}$, but the inequality of the rule is true regardless.
{\bf Rule 4}: $C(n,k)$ and $D(n,k)$ are each no larger than $n\log_2k$. This rule holds because we could always use the Russian-peasants algorithm if nothing better is available.
We can use these rules to prove the following theorem. It says, roughly, that the number of additions per element you need to multiply a vector of length $n$ by a constant, with elements no larger than $k$, is $\log_nk$.
\begin{theorem}
\label{main-th}
If
$$n \ge \Bigl(\frac{j+1}{2}\Bigr)k^{1/j}\log_2k$$
then $C(n,k) \le jn$.
\end{theorem}
\begin{proof}
Start with $C(n,k)$, but let us replace $k$ by $x_0$ for reasons that will become obvious. By Rule~1 we have
$$C(n,x_0) \le D(n,x_0) + n$$
Apply Rule~2 to give us
$$C(n,x_0) \le C(n,x_1) + D(\frac{x_0}{x_1},x_0) + n$$
We may alternate Rules 1 and 2 as many times as we like, ending with Rule~1, and introducing a new unknown $x_j$ each time we do, to get
$$C(n,x_0) \le D(n,x_i) + \sum_{j=0}^{i-1} D(\frac{x_j}{x_{j+1}} ,x_j) + (i+1)n$$
Next, apply Rule~3 to the first term on the right, which gives us
$$C(n,x_0) \le D(\sqrt{x_i}, x_i) + \sum_{j=0}^{i-1} D(\frac{x_j}{x_{j+1}} ,x_j) + (i+1)n$$
Now, we choose values for each of the $x_j$'s in order to make the terms equal. That is, pick $x_p = k^{(i+2-p)/(i+2)}$ for $p=0,1,\ldots,i$. In particular, $x_i = k^{2/(i+2)}$, so the first term $D(\sqrt{x_i},x_i)$ becomes $D(k^{1/(i+2)},k^{2/(i+2)})$. The summation $\sum_{j=0}^{i-1} D(\frac{x_j}{x_{j+1}} ,x_j)$ becomes $\sum_{j=0}^{i-1} D(k^{1/(i+2)}, k^{(i+2-j)/(i+2)})$. The first term $D(k^{1/(i+2)},k^{2/(i+2)})$ fits into this sum so we can write
$$C(n,k) \le \sum_{j=0}^i D(k^{1/(i+2)}, k^{(i+2-j)/(i+2)}) + (i+1)n$$
Note that on the left, we replaced $x_0$ by the original value $k$, for which it stood.
Finally, we use Rule~4 to bound each of the terms in the above sum. That gives
$$C(n,k) \le k^{1/(i+2)} \log_2k \sum_{j=0}^i \frac{i+2-j}{i+2} + (i+1)n$$
The summation is $1/(i+2)$ times $2+3+\cdots+(i+2)$. The latter is the sum of the first $i+2$ integers, although it is missing 1. That sum is therefore $\frac{(i+2)(i+3)}{2} -1$. We may drop the ``$-1$'' and conclude
$$C(n,k) \le \frac{i+3}{2}k^{1/(i+2)}\log_2k + (i+1)n$$
As long as the first term is at most $n$, we have $C(n,k) \le (i+2)n$.
To simplify, substitute $j$ for $i+2$. We can then assert that if $n \ge \frac{j+1}{2}k^{1/j}\log_2k$ then $C(n,k) \le jn$.
\end{proof}
\begin{example}
\label{proof-ex}
Suppose $k=2^{24}$. Then $2n$ additions suffice if $n \ge \frac{3}{2}\sqrt{k}\log_2k = 147{,}456$. Also, $3n$ additions suffice if $n \ge 2k^{1 / 3} \log_2k = 12{,}288$. One needs at most $4n$ additions if $n \ge \frac{5}{2} k^{1/4 } \log_2k = 3840$.
\end{example}
\begin{example}
\label{fib-ex}
The purpose of this example is to address concerns one may have regarding extreme cases where the process sort-elim\-inate-dupli\-cates-take-differ\-ences makes lit\-tle progress. Such cases exist, but they require that the size of the matrix be very small~-- comparable to the number of bits used to represent elements.
For instance, suppose we start with powers of 2. If we do not align, then when we take differences we get the original vector with the highest power of 2 removed. However, in this case, $n$ is equal to $\log_2k+1$, and the hypothesis of Theorem~\ref{main-th} cannot be satisfied except in trivial cases~-- specifically, for $k=1$ (i.e., a Boolean matrix), and the case $k=2$, $j=1$. Moreover, if we align, then all powers of 2 immediately become 1, and we actually need no additions at all, just shifts.
Another seemingly bad case is the Fibonacci numbers, where after sorting and taking differences we only lose the two highest values. For example, starting with the vector
$$[1,2,3,5,8,13,21]$$
when we take differences we get $[1,2,3,5,8]$. Here, alignment doesn't help. But we still have the constraint that $n$ must be logarithmic in $k$, although the base of the logarithm is now 1.61, approximately. It is still not possible to find nontrivial values for $n$, $k$, and $j$ to satisfy the hypothesis of Theorem~\ref{main-th}.
\end{example}
\section{Attribution}
Note: The algorithm described here was invented solely by Daniel Cussen. The proof of Section~\ref{upper-sect} is solely the work of Jeffrey Ullman.
\end{document}
|
\begin{document}
\title{The minimal dimensions of faithful representations for Heisenberg Lie superalgebras}
\begin{quotation}
\small\noindent \textbf{Abstract}: This paper aims to determine the minimal dimensions and super-dimensions of faithful representations for Heisenberg Lie superalgebras over an algebraically closed field of characteristic zero.
\noindent{\textbf{Keywords}}: Heisenberg Lie superalgebra; faithful representation; minimal dimension
\noindent{\textbf{MSC}}: 17B30, 17B10, 17B81
\end{quotation}
\setcounter{section}{0}
\section{Introduction}
Throughout $\mathbb{F}$ is an algebraically closed field of characteristic zero and all vector spaces and algebras are over $\mathbb{F}$ and of finite
dimensions.
Ado's theorem says that every finite dimensional Lie (super)algebra has a finite-dimensional faithful representation \cite{VK}. Let $\frak{g}$ be a Lie (super)algebra and write
$$
\mu(\mathfrak{g})=\min \{\dim V\mid V \mbox{ is a faithful $\mathfrak{g}$-module}\}.
$$
It is in general difficult to determine $\mu(\mathfrak{g})$. The earliest result is that $\mu(\mathfrak{g})=\lceil 2\sqrt{\dim \mathfrak{g}-1} \rceil$ for an abelian Lie algebra $\mathfrak{g}$, which is due to Schur for $\mathbb{F}=\mathbb{C}$ and to Jacobson for arbitrary $\mathbb{F}$ (see also \cite{Mirzakhani} for a simple proof due to Mirzakhani).
In 1998 Burde concluded that $\mu(\mathfrak{h}_{m})=m+2$ for Heisenberg Lie algebra $\mathfrak{h}_{m}$ of dimension $2m+1$ \cite{DB}.
In 2008 Burde and Moens established an explicit formula of $\mu(\mathfrak{g})$ for semi-simple and reductive Lie algebras \cite{DW}.
In 2009 Cagliero and Rojas obtained a formula $\mu(\mathfrak{h}_{m, p})$ for the current Heisenberg Lie algebra $\mathfrak{h}_{m, p}$ \cite{LN}. One can also find the formula $\mu(\mathfrak{J})$ for a Jordan algebra $\mathfrak{J}$ with the trivial multiplication \cite{WS}.
However, very little is known about the function $\mu$ for Lie superalgebras. In 2012 Liu and Wang determined
$\mu(\mathfrak{g})=\lceil 2\sqrt{\dim \mathfrak{g}} \rceil$ for any purely odd Lie superalgebra $\mathfrak{g}$ \cite{WS} and it remains open to determine $\mu(\mathfrak{g})$ for an abelian Lie superalgebra $\frak{g}$ with nontrivial even part.
In this paper, we shall determine the minimal (super-)dimensions of the faithful representations for Heisenberg Lie superalgebras.
A two-step nilpotent Lie superalgebra with $1$-dimensional center is called a Heisenberg Lie superalgebra. Then
Heisenberg Lie superalgebras split into the following two types according to the parities of their centers \cite{MGO}.
Write $\mathfrak{h}_{m, n}$ for the Heisenberg Lie superalgebra with 1-dimensional even center $\mathbb{F}z$, which has a $\mathbb{Z}_{2}$-homogeneous basis
\begin{eqnarray*}
(u_{1},\ldots,u_{m},v_{1},\ldots,v_{m};z\mid w_{1},\ldots,w_{n})
\end{eqnarray*}
with multiplication given by
$$[u_{i}, v_{i}]=-[v_{i}, u_{i}]=z=[w_{j}, w_{j}], \quad i=1,\ldots,m, j=1,\ldots,n,$$
the remaining brackets being zero. Hereafter $\mathbb{Z}_{2}=\{\bar{0}, \bar{1}\}$ is the group of order $2$.
Write $\mathfrak{h}_{n}$ for the Heisenberg Lie superalgebra with 1-dimensional odd center $\mathbb{F}z$, which has a $\mathbb{Z}_{2}$-homogeneous basis
\begin{eqnarray*}
(v_{1},\ldots,v_{n}\mid z; w_{1},\ldots,w_{n})
\end{eqnarray*}
with multiplication given by
$$[v_{i}, w_{i}]=z=-[w_{i}, v_{i}], i=1,\ldots,n,$$
the remaining brackets being zero.
Both $\mathfrak{h}_{m, n}$ and $\mathfrak{h}_{n}$ are nilpotent.
Note that $\mathfrak{h}_{m, 0}$ is a Heisenberg Lie algebra and $\mathfrak{h}_{0, n}$ is isomorphic to the Heisenberg Lie superalgebra considered in \cite[p.18]{VK}, whose even part coincides with its 1-dimensional center.
However, the Heisenberg Lie superalgebras with odd centers, $\mathfrak{h}_{n}$, have no analogs in Lie algebras. We should also mention that Hegazi studied representations of the Heisenberg Lie superalgebras of even center, $\mathfrak{h}_{m, n}$, and tried to find a finite-dimensional faithful representation of $\mathfrak{h}_{m, n}$ \cite[\S 3]{Hegazi}.
Throughout this paper, subalgebras and (sub)modules of Lie superalgebras are assumed to be $\mathbb{Z}_2$-graded. Hereafter we write $\frak{g}$ for $\mathfrak{h}_{m,n}$ or $\mathfrak{h}_{n}$. A main result of this paper is that
\begin{eqnarray*}
\mu(\frak{g})=\left\{\begin{array}{ll}
m+\lceil n/2\rceil+2 & \frak{g}=\frak{h}_{m,n}\\
n+2 &\frak{g}=\frak{h}_{n}.
\end{array}\right.
\end{eqnarray*}
To formulate the super-dimensions of the faithful representations, write for $i\in \{0,1\}$,
$$
\mu_{i}(\mathfrak{g})=\min \{\dim V_{\bar{i}}\mid \mbox{$V$ is a faithful $\mathfrak{g}$-module}\};
$$
$$
\mu_{i}^{*}(\mathfrak{g})=\min \{\dim V\mid \mbox{$V$ is a faithful $\mathfrak{g}$-module with } \dim V_{\bar{i}}=\mu_{i}(\mathfrak{g}) \}.
$$
In this paper we also determine the values $\mu_{i}(\mathfrak{g})$ and $\mu_{i}^{*}(\mathfrak{g})$.
\section{Minimal dimensions}
Since Engel's theorem holds for Lie superalgebras, as in the Lie algebra case \cite[Lemma 1]{DB}, we have
\begin{lemma}\label{lem1}
Let $L$ be a nilpotent Lie superalgebra with a $1$-dimensional center $\mathbb{F}z $.
Then a representation $\lambda: L\rightarrow \mathfrak{gl}(V)$ is faithful if and only if $z$ acts nontrivially.
\end{lemma}
\begin{proof} The ``only if" part is obvious. Suppose $z$ acts nontrivially.
If $\ker(\lambda)\neq 0$, then Engel's theorem ensures that $\ker(\lambda)$ contains a nonzero element killed by $L$ and hence $\ker(\lambda)$ contains the center $\mathbb{F}z$, showing that $\lambda(z)=0,$
a contradiction.
\end{proof}
Let
$$
\zeta(\frak{g})=\max\{\dim \frak{a}\mid \mbox{$\frak{a}$ is an abelian subalgebra of $\frak{g}$ not containing the center of $\frak{g}$}\}.
$$
Let $\sqrt{-1}$ denote a fixed root of the equation $x^{2}=-1$ in $\mathbb{F}$. We have
\begin{lemma}\label{lem2}
Let $\mathfrak{a}$ be an abelian subalgebra of $\mathfrak{g}$ not containing $z$ and having dimension $\zeta(\frak{g})$.
Then
\begin{itemize}
\item for $\mathfrak{g}=\mathfrak{h}_{m, n}$, the super-dimension $(\dim \mathfrak{a}_{\bar{0}},\dim \mathfrak{a}_{\bar{1}})$ must be $(m,\lfloor n/2 \rfloor);$
\item for $\mathfrak{g}=\mathfrak{h}_{n}$, the super-dimension $(\dim \mathfrak{a}_{\bar{0}},\dim \mathfrak{a}_{\bar{1}})$ has $n+1$ possibilities:
$$ (i,n-i),\quad i=0, \ldots, n.$$
\end{itemize}
In particular,
\begin{eqnarray*}
\zeta(\frak{g})=\left\{\begin{array}{ll}
m+\lfloor n/2 \rfloor & \frak{g}=\frak{h}_{m,n}\\
n &\frak{g}=\frak{h}_{n}.
\end{array}\right.
\end{eqnarray*}
\end{lemma}
\begin{proof}
Since $\mathfrak{a}$ does not contain the center $\mathbb{F}z$, there is a $\mathbb{Z}_{2}$-graded subspace $\frak{k}$ containing $\frak{a}$ such that
$\mathfrak{g}=\mathfrak{k}\oplus \mathbb{F}z$.
Let $B:\mathfrak{k}\times \mathfrak{k}\rightarrow \mathbb{F}$ be the form determined by $[x, y]=B(x, y)z$ for all $x, y\in \mathfrak{k}$.
It is clear that $B$ is bilinear and non-degenerate.
Since $\mathfrak{a}$ is abelian,
$B(x, y)=0$ for all $x, y\in \mathfrak{a}$.
Therefore, $\mathfrak{a}$ is a $B$-isotropic subspace of $\mathfrak{k}$. It follows that
$\dim \mathfrak{a}\leq \frac{\dim \mathfrak{k}}{2}=\frac{\dim \mathfrak{g}-1}{2}$.
Suppose $\mathfrak{g}=\frak{h}_{m,n}.$ Then $\dim \mathfrak{a}\leq m+\lfloor n/2 \rfloor$. Let $\mathfrak{b}$ be the subspace spanned by
$$ u_{1},u_{2},\ldots,u_{m},w_{1}+\sqrt{-1}w_{2},w_{3}+\sqrt{-1}w_{4},\ldots,w_{n-1}+\sqrt{-1}w_{n}$$ if $n$ is even
and by
$$u_{1},u_{2},\ldots,u_{m},w_{1}+\sqrt{-1}w_{2},w_{3}+\sqrt{-1}w_{4},\ldots,w_{n-2}+\sqrt{-1}w_{n-1}$$ if $n$ is odd.
One can check that $\mathfrak{b}$ is an abelian subalgebra of dimension $m+\lfloor n/2 \rfloor$ and $\mathfrak{b}$ does not contain $z$.
Hence, $\zeta(\frak{g})=\dim \mathfrak{a}=m+\lfloor n/2 \rfloor$.
Clearly,
$\mathfrak{a}_{\bar{0}}$ is a $B$-isotropic subspace of $\mathfrak{k}_{\bar{0}}$ and
$\mathfrak{a}_{\bar{1}}$ is a $B$-isotropic subspace of $\mathfrak{k}_{\bar{1}}$.
Since $B|_{\mathfrak{k}_{\bar{0}}\times \mathfrak{k}_{\bar{0}}}$ and $B|_{\mathfrak{k}_{\bar{1}}\times \mathfrak{k}_{\bar{1}}}$ are non-degenerate, we have $\dim \mathfrak{a}_{\bar{0}}\leq m$,
$\dim \mathfrak{a}_{\bar{1}}\leq \lfloor n/2 \rfloor$.
Note that $\dim \mathfrak{a}=m+\lfloor n/2 \rfloor$. It follows that $\dim \mathfrak{a}_{\bar{0}}=m$, $\dim \mathfrak{a}_{\bar{1}}=\lfloor n/2 \rfloor$.
Suppose $\mathfrak{g}=\frak{h}_{n}.$ Then $\dim \mathfrak{a}\leq n$.
Let $\frak{b}'$ be the subspace spanned by
$v_{1},v_{2},\ldots,v_{n}.$
Clearly, $\frak{b}'$ is an abelian subalgebra of dimension $n$ of $\mathfrak{h}_{n}$ and $\frak{b}'$ does not contain $z$.
Hence, $\zeta(\frak{g})=\dim \mathfrak{a}=n.$
From the definition of $\mathfrak{h}_{n}$, one may easily find abelian subalgebras not containing $z$ and having the indicated super-dimension $(i,n-i)$ with
$ i=0, \ldots, n.$
\end{proof}
\begin{lemma}\label{lem2047}
Let $V$ be a faithful $\mathfrak{g}$-module. Then there exists a nonzero homogeneous element $v_{0}$ in $V$ such that $zv_{0}\neq0$. Moreover, let $\rho_{v_{0}}$ be the linear mapping defined by
\begin{eqnarray*}
\rho_{v_{0}}: \mathfrak{g} \longrightarrow V,\quad x\longmapsto xv_{0}
\end{eqnarray*}
and let $\mathfrak{a}=\ker(\rho_{v_{0}})$ and $V_0=\mathrm{im}(\rho_{v_{0}})$.
Then $\mathfrak{a}$ is an abelian subalgebra not containing $z$ and
if $\dim \mathfrak{a}=\zeta(\frak{g})$, then $v_{0}\notin V_0$.
\end{lemma}
\begin{proof} Lemma \ref{lem1} ensures that there exists a nonzero homogeneous element $v_{0}$ in $V$ such that $zv_{0}\neq0$. It follows that $\mathfrak{a}$ does not contain $z$.
Since $\rho_{v_{0}}$ is homogeneous, $\mathfrak{a}$ is a $\mathbb{Z}_{2}$-graded subspace of $\mathfrak{g}$.
For $x, y\in \mathfrak{a}$, it is obvious that
$[x, y]\in \mathfrak{a}\cap \mathbb{F}z=0$ and it follows that $\mathfrak{a}$ is an abelian subalgebra.
Suppose $\dim \mathfrak{a}=\zeta(\frak{g})$. Assume on the contrary that $v_{0}\in V_0$. Then there exists an $x\in \mathfrak{g}_{\bar{0}}$ such that $xv_{0}=v_{0}$,
since $v_{0}$ is a nonzero homogeneous element of $V$.
Clearly, $(\mathfrak{h}_{m, n})_{\bar{0}}$ is a solvable Lie algebra.
Since $[u_{i}, v_{i}]=z$,
by Lie's theorem, $z$ acts nilpotently on $V$. For $\mathfrak{h}_{n}$, $z$ is odd.
Therefore, $x\notin \mathbb{F}z$. Moreover, it is clear that $x\notin \frak{a}$.
Then by the maximality of $\mathfrak{a}$, we have $[x, \mathfrak{a}]\not=0$. There must be some $y\in \mathfrak{a}$ such that $[x, y]=z$.
Since $x\in \mathfrak{g}_{\bar{0}}$, we have
\begin{equation*}\label{eq21}
zv_{0}=[x, y]v_{0}
=x(yv_{0})-y(xv_{0})=0,
\end{equation*}
using that $yv_{0}=0$ and $xv_{0}=v_{0}$.
This is a contradiction. Hence $v_{0}\notin V_0$.
\end{proof}
\begin{proposition}\label{proposition2}
Let $\frak{g}=\mathfrak{h}_{m, n}$ or $\mathfrak{h}_{n}$. Then
\begin{eqnarray*}
\mu(\frak{g})\geq \dim \mathfrak{g}-\zeta(\frak{g})+1.
\end{eqnarray*}
That is,
\begin{itemize}
\item
$\mu(\mathfrak{h}_{m, n})\geq m+\lceil n/2 \rceil+2;$
\item $\mu(\mathfrak{h}_{n})\geq n+2.$
\end{itemize}
\end{proposition}
\begin{proof}
Assume that $\lambda: \mathfrak{g}\rightarrow \mathfrak{gl}(V)$ is a faithful representation. Let $v_{0}, \mathfrak{a}, V_0$ be as in Lemma \ref{lem2047}.
By Lemmas \ref{lem2} and \ref{lem2047}, we have
$$
\dim V\geq \dim V_0=\dim \mathfrak{g}-\dim \mathfrak{a}\geq \dim \mathfrak{g}-\zeta(\frak{g}).
$$
If $\dim V_0\geq \dim \mathfrak{g}-\zeta(\frak{g})+1$, we are done.
Suppose $\dim V_0= \dim \mathfrak{g}-\zeta(\frak{g}).$ Then $\dim \mathfrak{a}=\zeta(\frak{g})$.
By Lemma \ref{lem2047}, we have $v_{0}\notin V_0$.
Therefore,
$$\dim V\geq \dim V_0+1= \dim \mathfrak{g}-\zeta(\frak{g})+1.$$
That is,
$\mu(\mathfrak{h}_{m, n})\geq m+\lceil n/2 \rceil+2;$
$\mu(\mathfrak{h}_{n})\geq n+2.$
\end{proof}
\begin{theorem}\label{theorem1} We have
\begin{eqnarray*}
\mu(\frak{g})=\left\{\begin{array}{ll}
m+\lceil n/2\rceil+2 & \frak{g}=\frak{h}_{m,n}\\
n+2 &\frak{g}=\frak{h}_{n}.
\end{array}\right.
\end{eqnarray*}
\end{theorem}
\begin{proof}
By Proposition \ref{proposition2}, it is enough to establish a faithful representation of the desired dimension for $\frak{g}$.
Consider the even linear mapping
$$\pi: \mathfrak{h}_{m, n}\longrightarrow \mathfrak{gl}(m+2\mid\lceil n/2 \rceil)$$
given by
\begin{eqnarray*}
&&\pi(u_{i})=e_{1,i+1},\quad
\pi(v_{i})=e_{i+1,m+2},\quad \pi(z)=e_{1,m+2},\\
&&\pi(w_{2k-1})=\frac{1}{2}e_{m+2+k,m+2}+ e_{1,m+2+k},\\
&&\pi(w_{2k})=\frac{\sqrt{-1}}{2}e_{m+2+k,m+2}- \sqrt{-1}e_{1,m+2+k},
\end{eqnarray*}
where $1\leq i\leq m, 1\leq 2k, 2k-1\leq n.$
Under $\pi$,
an element of $\frak{h}_{m,n},$
\begin{eqnarray}\label{eq1013w}
\sum^{m}_{i=1}a_{i}u_{i}+\sum^{m}_{i=1}b_{i}v_{i}+cz+\sum^{n}_{j=1}d_{j}w_{j}\quad (\mbox{$a_{i}, b_{i}, c, d_{j}\in \mathbb{F}$})
\end{eqnarray}
is presented as
\begin{equation}\label{eq12}
\left(\begin{array}{cccccc|cccc}
0&a_{1}&a_{2}&\cdots&a_{m}&c &d_{1,2}&d_{3,4}&\cdots&d_{n-1,n}\\
& & & & &b_{1} & \\
& & & & &b_{2} & \\
& & & & &\vdots & \\
& & & & &b_{m} & \\
& & & & &0 & \\
\hline
& & & & &\widetilde{d}_{1,2} & \\
& & & & &\widetilde{d}_{3,4} & \\
& & & & &\vdots & \\
& & & & &\widetilde{d}_{n-1,n}& \\
\end{array}\right)\quad (\mbox{$n$ even})
\end{equation}
or
\begin{equation}\label{eq13}
\left(\begin{array}{cccccc|ccccc}
0&a_{1}&a_{2}&\cdots&a_{m}&c&d_{1,2}&d_{3,4}&\cdots&d_{n-2,n-1}&d_{n}\\
& & & & &b_{1}& \\
& & & & &b_{2}& \\
& & & & &\vdots& \\
& & & & &b_{m}& \\
& & & & &0& \\\hline
& & & & &\widetilde{d}_{1,2}& \\
& & & & &\widetilde{d}_{3,4}& \\
& & & & &\vdots& \\
& & & & &\widetilde{d}_{n-2,n-1}& \\
& & & & &\frac{1}{2}d_{n}& \\
\end{array}\right)\quad (\mbox{$n$ odd}),
\end{equation}
where $d_{i,i+1}=d_{i}-\sqrt{-1}d_{i+1}$, $\widetilde{d}_{i,i+1}=\frac{1}{2}(d_{i}+\sqrt{-1}d_{i+1}).$
It is routine to verify that $\pi$ is a faithful representation of dimension $ m+\lceil n/2\rceil+2$.
Let us consider the even linear mapping
$$\pi':\mathfrak{h}_{n}\longrightarrow \mathfrak{gl}(n+1\mid 1)$$
given by
\begin{eqnarray*}
&&\pi'(v_{i})=e_{1,i+1},\\
&&\pi'(z)=e_{1,n+2},\quad
\pi'(w_{i})=e_{i+1,n+2},
\end{eqnarray*}
where $1\leq i\leq n.$
Under $\pi'$,
an element of $\frak{h}_{n}$,
\begin{eqnarray}\label{eq1057w}
\sum^{n}_{i=1}a_{i}v_{i}+cz+\sum^{n}_{i=1}b_{i}w_{i}\quad (\mbox{$a_{i}, c, b_{i}\in \mathbb{F}$})
\end{eqnarray}
is presented as
\begin{equation*}\label{eq14}
\left(\begin{array}{ccccc|c}
0&a_{1}&a_{2}&\cdots&a_{n}&c\\
& & & & &b_{1}\\
& & & & &b_{2}\\
& & & & &\vdots \\
& & & & &b_{n} \\ \hline
& & & & &0 \\
\end{array}\right).
\end{equation*}
It is routine to verify that $\pi'$ is a faithful representation of dimension $n+2$.
\end{proof}
\section{Super-dimensions}
In this section we discuss the super-dimensions of the faithful representations for Heisenberg Lie superalgebras. We first establish a technical lemma, for which we shall use a result due to Burde \cite{DB}: the formula $\mu(L)$ for Heisenberg Lie algebras.
\begin{lemma}\label{proposition5}
Let $V$ be a faithful module of $\mathfrak{h}_{m,n}$. Let $v_0$ be as in Lemma \ref{lem2047}.
If $v_{0}$ is even, then $\dim V_{\bar{0}}\geq m+2$; if $v_{0}$ is odd, then $\dim V_{\bar{1}}\geq m+2$.
\end{lemma}
\begin{proof}Note that $(\mathfrak{h}_{m, n})_{\bar{0}}$ is a Heisenberg Lie algebra.
Obviously, $V_{\bar{0}}$ is a module of the Lie algebra $(\mathfrak{h}_{m, n})_{\bar{0}}$.
If $v_{0}$ is even, then $v_{0}\in V_{\bar{0}}$. Since $zv_{0}\neq 0$,
$V_{\bar{0}}$ is a faithful module of $(\mathfrak{h}_{m, n})_{\bar{0}}$ by Lemma \ref{lem1}. According to the minimal dimensions of faithful representations for Heisenberg Lie algebras \cite{DB}, we have
$\dim V_{\bar{0}}\geq \mu((\mathfrak{h}_{m, n})_{\bar{0}})=m+2$.
Similarly, if $v_{0}$ is odd, then $V_{\bar{1}}$ is a faithful module of $(\mathfrak{h}_{m, n})_{\bar{0}}$ and hence $\dim V_{\bar{1}}\geq m+2$.
\end{proof}
\begin{theorem}\label{corollary4}
Suppose $V$ is a faithful $\mathfrak{g}$-module of the minimal dimension $\mu(\mathfrak{g})$. Then
\begin{itemize}
\item For $\mathfrak{h}_{m, n}$, the super-dimension $(\dim V_{\bar{0}},\dim V_{\bar{1}})$ has $2$ possibilities:
$$\mbox{$(m+2,\lceil n/2 \rceil)$, $(\lceil n/2 \rceil,m+2)$;}$$
\item For $\mathfrak{h}_{n}$, the super-dimension $(\dim V_{\bar{0}},\dim V_{\bar{1}})$ has $n+1$ possibilities:
$$ (i+1,n-i+1),\quad i=0, \ldots, n.$$
\end{itemize}
\end{theorem}
\begin{proof}
Let $v_{0}, \mathfrak{a}, V_0$ be as in Lemma \ref{lem2047}. Since $\mathfrak{a}$ does not contain the center $\mathbb{F}z$, there exists a subalgebra $\mathfrak{a}'$ containing $z$ such that $\mathfrak{g}=\mathfrak{a}\oplus \mathfrak{a}'.$
Since $\dim V=\dim \mathfrak{g}-\zeta(\frak{g})+1$ and $\dim \mathfrak{a}\leq \zeta(\frak{g})$,
we have $\dim \mathfrak{g}-\zeta(\frak{g}) \leq \dim V_0 \leq \dim \mathfrak{g}-\zeta(\frak{g})+1$. It is enough to consider the following two cases.
\\
\noindent\textit{Case 1}: $\dim V_0= \dim \mathfrak{g}-\zeta(\frak{g})$. Then $\dim \mathfrak{a}= \zeta(\frak{g})$
and Lemma \ref{lem2047} yields $v_{0}\notin V_0$. Then we have $\dim \mathfrak{a}'= \dim \mathfrak{g}-\zeta(\frak{g})$.
Since $\dim V= \dim \mathfrak{g}-\zeta(\frak{g})+1$, it is easy to see that $V$ has an $\mathbb{F}$-basis
\begin{eqnarray}\label{eqlc1533}
\{v_{0}, xv_{0}\mid \mbox{$x$ runs over a homogeneous basis of $\mathfrak{a'}$}\}.
\end{eqnarray}
For $\frak{g}=\frak{h}_{m,n}$, by Lemma \ref{lem2} we have $\dim \mathfrak{a}_{\bar{0}}=m$ and $\dim \mathfrak{a}_{\bar{1}}=\lfloor n/2 \rfloor$.
Hence, $\dim \mathfrak{a}'_{\bar{0}}=m+1$,
$\dim \mathfrak{a}'_{\bar{1}}=\lceil n/2 \rceil$.
By (\ref{eqlc1533}), if $v_{0}\in V_{\bar{0}}$ then $\dim V_{\bar{0}}= m+2$ and $\dim V_{\bar{1}}= \lceil n/2 \rceil$;
if $v_{0}\in V_{\bar{1}}$, then $\dim V_{\bar{0}}= \lceil n/2 \rceil$ and $\dim V_{\bar{1}}= m+2 $.
For $\frak{g}=\frak{h}_{n}$, by Lemma \ref{lem2}, $\dim \mathfrak{a}_{\bar{0}}=i$ and $\dim \mathfrak{a}_{\bar{1}}=n-i,$ $ i=0, \ldots, n.$
Hence, $\dim \mathfrak{a}'_{\bar{0}}=i$ and $\dim \mathfrak{a}'_{\bar{1}}=n+1-i,$ $ i=0, \ldots, n.$
Therefore we have $\dim V_{\bar{0}}= i+1 $ and $\dim V_{\bar{1}} =n+1-i$, where $i=0,\ldots, n$.
\\
\noindent\textit{Case 2}: $\dim V_0=\dim \mathfrak{g}-\zeta(\frak{g})+1$. Then $\dim \mathfrak{a}= \zeta(\frak{g})-1$ and
$\dim \mathfrak{a}'= \dim \mathfrak{g}-\zeta(\frak{g})+1$.
Since $\dim V= \dim \mathfrak{g}-\zeta(\frak{g})+1$, one sees that $V$ has an $\mathbb{F}$-basis
\begin{eqnarray}\label{mwjxe1}
\{xv_{0}\mid \mbox{$x$ runs over a homogeneous basis of $\mathfrak{a}'$}\}.
\end{eqnarray}
For $\frak{g}=\frak{h}_{m,n}$,
clearly, $\dim \mathfrak{a}'_{\bar{0}}=m+i$ and
$\dim \mathfrak{a}'_{\bar{1}}=\lceil n/2 \rceil+2-i$ for some $i\in \{1, 2\}$.
By (\ref{mwjxe1}), if $v_{0}\in V_{\bar{0}}$, then $\dim V_{\bar{0}}=m+i$ and
$\dim V_{\bar{1}}=\lceil n/2 \rceil+2-i$; if $v_{0}\in V_{\bar{1}}$,
then $\dim V_{\bar{0}}=\lceil n/2 \rceil+2-i$ and
$\dim V_{\bar{1}}=m+i$ for some $i\in \{1, 2\}$.
By Lemma \ref{proposition5}, it must be $i=2$.
For $\frak{g}=\frak{h}_{n}$, then $\dim \mathfrak{a}=n-1$. Clearly, $\dim \mathfrak{a'}=n+2$, $\dim \mathfrak{a}'_{\bar{0}}=i+1$ and $\dim \mathfrak{a}'_{\bar{1}}=n+1-i,$ $ i=0, \ldots, n-1$. Therefore, we have either $\dim V_{\bar{0}}= i+1 $ and $\dim V_{\bar{1}} =n+1-i$, or
$\dim V_{\bar{0}}=n+1-i $ and $\dim V_{\bar{1}} = i+1$, for some $i\in \{0,\ldots, n-1\}$.
\\
Up to now, we have shown that:
\begin{itemize}
\item For $\mathfrak{h}_{m, n}$, the super-dimension $(\dim V_{\bar{0}},\dim V_{\bar{1}})$ has at most $2$ possibilities:
$$\mbox{$(m+2,\lceil n/2 \rceil)$, $(\lceil n/2 \rceil,m+2)$;}$$
\item For $\mathfrak{h}_{n}$, the super-dimension $(\dim V_{\bar{0}},\dim V_{\bar{1}})$ has at most $n+1$ possibilities:
$$ (i+1,n-i+1),\quad i=0, \ldots, n.$$
\end{itemize}
Next let us realize the faithful representations of the super-dimensions indicated above.
For $\mathfrak{h}_{m, n}$, (\ref{eq12}) and (\ref{eq13}) give a minimal faithful representation of $\mathfrak{h}_{m, n}$ with super-dimension $(m+2, \lceil n/2 \rceil)$.
Consider the even linear mapping
$$\pi: \mathfrak{h}_{m, n}\longrightarrow \mathfrak{gl}(\lceil n/2 \rceil\mid m+2)$$
given by
\begin{eqnarray*}
&&\pi(u_{i})=e_{\lceil n/2 \rceil+1,\lceil n/2 \rceil+i+1},\quad
\pi(v_{i})=e_{\lceil n/2 \rceil+i+1,\lceil n/2 \rceil+m+2},\quad \pi(z)=e_{\lceil n/2 \rceil+1,\lceil n/2 \rceil+m+2},\\
&&\pi(w_{2k-1})=\frac{1}{2}e_{k,\lceil n/2 \rceil+m+2}+ e_{\lceil n/2 \rceil+1,k}, \quad\pi(w_{2k})=\frac{\sqrt{-1}}{2}e_{k,\lceil n/2 \rceil+m+2}- \sqrt{-1}e_{\lceil n/2 \rceil+1,k},
\end{eqnarray*}
where $1\leq i\leq m, 1\leq 2k, 2k-1\leq n.$
Under $\pi$, an element of form (\ref{eq1013w})
is presented as
\begin{equation}\label{eq61}
\left(\begin{array}{cccc|cccccc}
& & & & & & & & & \widetilde{d}_{1,2} \\
& & & & & & & & & \widetilde{d}_{3,4} \\
& & & & & & & & & \vdots \\
& & & & & & & & & \widetilde{d}_{n-1,n} \\ \hline
d_{1,2} &d_{3,4}&\cdots &d_{n-1,n} & 0 &a_{1}&a_{2}&\cdots&a_{m}&c \\
& & & & & & & & &b_{1} \\
& & & & & & & & & \vdots \\
& & & & & & & & &b_{m} \\
& & & & & & & & & 0\\
& & & & & & & & &
\end{array}\right)\quad (\mbox{$n$ even})
\end{equation}
or
\begin{equation}\label{eq62}
\left(\begin{array}{ccccc|ccccc}
& & & & & & & & & \widetilde{d}_{1,2} \\
& & & & & & & & &\widetilde{d}_{3,4} \\
& & & & & & & & &\vdots \\
& & & & & & & & & \widetilde{d}_{n-1,n} \\
&&&&&&&&& \frac{1}{2}d_{n} \\ \hline
d_{1,2} &d_{3,4}&\cdots &d_{n-1,n}&d_{n} & 0 &a_{1}&\cdots&a_{m}&c \\
& & & & & & & & &b_{1} \\
&&&&&&&&& b_{2}\\
& & & & & & & & & \vdots \\
& & & & & & & & &b_{m} \\
& & & & & & & & & 0
\end{array}\right)\quad (\mbox{$n$ odd}),
\end{equation}
where $d_{i,j}=d_{i}-\sqrt{-1}d_{j}$, $\widetilde{d}_{i,j}=\frac{1}{2}(d_{i}+\sqrt{-1}d_{j}).$
It is routine to verify that $\pi$ is a faithful representation with super-dimension $(\lceil n/2\rceil, m+2)$.
For $0\leq r\leq n$, let us consider the even linear mapping
$$\pi':\mathfrak{h}_{n}\longrightarrow \mathfrak{gl}(r+1\mid n-r+1)$$
given by
\begin{eqnarray*}
&&\pi'(v_{i})=e_{1,i+1}, \quad
\pi'(v_{j})=-e_{j+1,n+2}, \\
&&\pi'(z)=e_{1,n+2},\quad
\pi'(w_{k})=e_{k+1,n+2}, \quad
\pi'(w_{l})=e_{1,l+1},
\end{eqnarray*}
where $1\leq i, k\leq r$ and $r+1\leq j,l\leq n.$
Under $\pi'$, an element (\ref{eq1057w}) of $\mathfrak{h}_{n}$ is presented as
\begin{equation}\label{eq71}
\left(\begin{array}{cccc|cccc}
0&a_{1}&\cdots&a_{r}&b_{r+1}&\cdots&b_{n}&c\\
& & & & & & &b_{1}\\
& & & & & & &\vdots \\
& & & & & & &b_{r} \\ \hline
& & & & & & &-a_{r+1}\\
& & & & & & &\vdots \\
& & & & & & &-a_{n} \\
& & & & & & &0
\end{array}\right).
\end{equation}
It is routine to verify that $\pi'$ is a faithful representation with super-dimension $(r+1, n-r+1)$ for all $r= 0,\ldots, n$.
\end{proof}
Recall that for $i\in \{0,1\}$,
$$
\mu_{i}(\mathfrak{g})=\min \{\dim V_{\bar{i}}\mid \mbox{$V$ is a faithful $\mathfrak{g}$-module}\},
$$
$$
\mu_{i}^{*}(\mathfrak{g})=\min \{\dim V\mid \mbox{$V$ is a faithful $\mathfrak{g}$-module with } \dim V_{\bar{i}}=\mu_{i}(\mathfrak{g})\}.
$$
\begin{theorem}\label{corollary5} We have
\begin{eqnarray*}
\mu_{0}(\frak{g})=\mu_{1}(\frak{g})=\left\{\begin{array}{ll}
\min\{m+2, \lceil n/2 \rceil\} & \frak{g}=\frak{h}_{m,n}\\
1 &\frak{g}=\frak{h}_{n}
\end{array}\right.
\end{eqnarray*}
and
\begin{eqnarray*}
\mu_{0}^{*}(\mathfrak{g})=\mu_{1}^{*}(\mathfrak{g})=\left\{\begin{array}{ll}
m+\lceil n/2 \rceil+2 & \frak{g}=\frak{h}_{m,n}\\
n+2 &\frak{g}=\frak{h}_{n}.
\end{array}\right.
\end{eqnarray*}
\end{theorem}
\begin{proof}
Let $(\lambda,V)$ be a faithful representation of $\mathfrak{g}$.
Evidently,
\begin{equation}\label{10}
\mu_{0}^{*}(\mathfrak{g})\geq \mu(\mathfrak{g}), \quad \mu_{1}^{*}(\mathfrak{g})\geq \mu(\mathfrak{g}).
\end{equation}
Keep the notations in Lemma \ref{lem2047}. As in the proof of Theorem \ref{corollary4}, there exists a subalgebra $\mathfrak{a}'$ containing $z$ such that $\mathfrak{g}=\mathfrak{a}\oplus \mathfrak{a}'.$
By Lemma \ref{lem2}, $\dim \mathfrak{a}'\geq \dim \mathfrak{g}-\zeta(\mathfrak{g})$.
Hence, by Lemma \ref{lem2047}, if $v_{0}$ is even, then $\dim V_{\bar{1}}\geq \dim \mathfrak{a}'_{\bar{1}}$;
if $v_{0}$ is odd, then $\dim V_{\bar{0}}\geq \dim \mathfrak{a}'_{\bar{1}}$.
Let $\mathfrak{g}=\mathfrak{h}_{m, n}$. By Lemma \ref{lem2}, we have $\dim \mathfrak{a}'_{\bar{1}}\geq \lceil n/2 \rceil$. So,
if $v_{0}$ is even, then $\dim V_{\bar{1}}\geq \lceil n/2 \rceil$; if $v_{0}$ is odd, then $\dim V_{\bar{0}}\geq \lceil n/2 \rceil$.
By Lemma \ref{proposition5}, if $v_{0}$ is even, then $\dim V_{\bar{0}}\geq m+2$; if $v_{0}$ is odd, then $\dim V_{\bar{1}}\geq m+2$.
Therefore, $\dim V_{\bar{0}}\geq \min\{m+2, \lceil n/2 \rceil\}$ and $\dim V_{\bar{1}}\geq \min\{m+2, \lceil n/2 \rceil\}$.
Since (\ref{eq12}) and (\ref{eq13}) define a faithful representation
of $\mathfrak{h}_{m, n}$ with super-dimension $(m+2, \lceil n/2 \rceil)$,
and (\ref{eq61}) and (\ref{eq62}) define a faithful representation
of $\mathfrak{h}_{m, n}$ with super-dimension $(\lceil n/2 \rceil, m+2)$,
we have
$$\mu_{0}(\frak{h}_{m,n})=\mu_{1}(\frak{h}_{m,n})=\min\{m+2, \lceil n/2 \rceil\}.
$$
It follows from (\ref{10}) that
$$\mu_{0}^{*}({\mathfrak{h}_{m, n}})=\mu_{1}^{*}({\mathfrak{h}_{m, n}})=m+\lceil n/2 \rceil+2.
$$
Let $\mathfrak{g}=\mathfrak{h}_{n}$. By Lemma \ref{lem2}, we have $\dim \mathfrak{a}'_{\bar{1}}\geq 1$.
So, if $v_{0}$ is even, then $\dim V_{\bar{1}}\geq 1$; if $v_{0}$ is odd, then $\dim V_{\bar{0}}\geq 1$.
On the other hand, if $v_{0}$ is even, then $v_{0}\in V_{\bar{0}}$ and $\dim V_{\bar{0}}\geq 1$;
if $v_{0}$ is odd, then $v_{0}\in V_{\bar{1}}$ and $\dim V_{\bar{1}}\geq 1$.
Then by (\ref{eq71}), we have
$$\mu_{0}(\mathfrak{h}_{n})=\mu_{1}(\mathfrak{h}_{n})=1.$$
It follows from (\ref{10}) that
$$\mu_{0}^{*}(\mathfrak{h}_{n})=\mu_{1}^{*}(\mathfrak{h}_{n})=n+2.$$
\end{proof}
\begin{remark} Let $L$ be a Lie superalgebra and $\Pi$ the parity functor of the category of $\mathbb{Z}_{2}$-graded vector spaces. It is well known that if $V$ is an $L$-module, then so is $\Pi(V)$ with respect to the original module action. Therefore, in general we have
$$
\mu_{0}(L)=\mu_{1}(L), \quad \mu_{0}^{*}(L)=\mu_{1}^{*}(L).
$$
This fact may be used to shorten the proofs of Theorems \ref{corollary4} and \ref{corollary5}.
\end{remark}
\noindent \textbf{Acknowledgements}
The authors are grateful to the anonymous referee for his/her valuable comments and helpful suggestions. The first author was supported by the NSF of China (11171055, 11471090) and the NSF of HLJ Province, China (A201412, JC201004).
\end{document}
|
\begin{document}
\begin{abstract}
We describe explicitly the cohomology of the total complex of certain diagrams of invertible sheaves on normal toric varieties. These diagrams, called wheels, arise in the study of toric singularities associated to dimer models. Our main tool describes the generators in a family of syzygy modules associated to the wheel in terms of walks in a family of graphs.
\end{abstract}
\title{Cohomology of wheels on toric varieties}
\section{Introduction}
A standard tool in homological algebra is to study a finitely generated module over a ring in terms of a free resolution, or more generally, a coherent sheaf on a variety in terms of a resolution by locally free sheaves. Conversely, given a complex $T^\bullet$ of locally free sheaves on a variety $X$, it is natural to ask whether the cohomology of the complex is nonzero in one degree only, say $k\in \ensuremath{\mathbb{Z}}$, in which case $T^\bullet$ is quasi-isomorphic to the pure sheaf $H^k(T^\bullet)[-k]$. In particular, it is important to have an explicit understanding of the cohomology sheaves of a complex of locally free sheaves. Our main result achieves this for a class of four-term complexes of locally free sheaves on normal toric varieties.
Our motivation comes from the study of derived categories of toric varieties associated to consistent dimer model algebras (see Bocklandt--Craw--Quintero-V\'{e}lez~\cite[Section~2.4]{BCQ12} for a brief introduction). The best-known example of a consistent dimer model algebra is the skew group algebra $\ensuremath{\mathbb{C}}[x,y,z]*G$ for a finite abelian subgroup $G\subset \operatorname{SL}(3,\ensuremath{\mathbb{C}})$, in which case the relevant toric variety is the $G$-Hilbert scheme $X=\ensuremath{G}\operatorname{op}eratorname{-Hilb}(\ensuremath{\mathbb{C}}^3)$ introduced by Nakamura~\cite{Nakamura01}. In their study of the equivalence of derived categories induced by the universal family on the $G$-Hilbert scheme, Cautis--Logvinenko~\cite{CL09} describes explicitly the cohomology sheaves of certain four-term complexes $T^\bullet$ on $X$ and hence shows that with only one exception, every such complex is quasi-isomorphic to a pure sheaf $H^k(T^\bullet)[-k]$ for $k=0,1$ (see also Cautis--Craw--Logvinenko~\cite{CCL12}). Our main result (see Theorem~\ref{thm:mainintro} below) can be applied to a broader class of four-term complexes, including those arising in the study of the derived equivalences induced by the universal family of fine moduli spaces $X$ associated to any consistent dimer model algebra. As an application, joint work with Raf Bocklandt~\cite{BCQ12} establishes the dimer model analogue of the Cautis-Logvinenko result, namely, that for a special choice of moduli space generalising the $G$-Hilbert scheme, all but one of the four-term complexes $T^\bullet$ on $X$ obtained from the derived equivalence is quasi-isomorphic to a pure sheaf $H^k(T^\bullet)[-k]$ for $k=0,1$.
The complexes $T^\bullet$ that we consider in this paper are four-term complexes of the form
\begin{equation}
\operatorname{op}eratorname{lab}el{eqn:Tbullet}
L \xlongrightarrow{d^3} \bigoplus_{j=1}^m L_{j,{j+1}} \xlongrightarrow{d^2} \bigoplus_{j=1}^m L_j \xlongrightarrow{d^1} L
\end{equation}
for some $m\geq 2$, where $L$, $L_{j,j+1}$ and $L_{j}$ ($1\leq j\leq m$) are invertible sheaves on any normal toric variety $X$, where each differential is equivariant with respect to the torus-action on $X$, and where the right-hand copy of $L$ lies in degree zero. Assume in addition that for $1\leq j\leq m$, the restriction of the differential $d^2$ to the summand $L_{j,j+1}$ has image in $L_j\operatorname{op}lus L_{j+1}$ (with indices modulo $m$). This means that if we separate vertically the summands in the terms of $T^\bullet$ and hence break the matrices defining the differentials into their constituent maps between summands, the complex can be presented as a diagram of the form
\begin{equation}
\label{eqn:diagram}
\begin{split}
\centering
\psset{unit=0.45cm}
\begin{pspicture}(0,-1)(25,13.7)
\cnodeput*(0,6){A}{$L$}
\cnodeput*(8,12){B}{$L_{1,2}$}
\cnodeput*(8,9){C}{$L_{2,3}$}
\cnodeput*(8,6){D}{$L_{3,4}$}
\cnodeput*(8,3.2){S}{$\vdots$}
\cnodeput*(8,0){E}{$L_{m,1}$}
\cnodeput*(18,12){F}{$L_{1}$}
\cnodeput*(18,9){G}{$L_{2}$}
\cnodeput*(18,6){H}{$L_{3}$}
\cnodeput*(18,3.2){T}{$\vdots$}
\cnodeput*(18,0){I}{$L_{m}$}
\cnodeput*(26,6){J}{$L.$}
\psset{nodesep=1pt}
\ncline{->}{A}{B}\lput*{:U}(0.6){$\scriptstyle{D_{1,2}}$}
\ncline{->}{A}{C}\lput*{:U}(0.6){$\scriptstyle{D_{2,3}}$}
\ncline{->}{A}{D}\lput*{:U}(0.6){$\scriptstyle{D_{3,4}}$}
\ncline{->}{A}{E}\lput*{:U}(0.6){$\scriptstyle{D_{m,1}}$}
\ncline{->}{B}{F}\lput*{:U}(0.4){$\scriptstyle{D^2_1}$}
\ncline{->}{B}{G}\lput*{:U}(0.4){$\scriptstyle{D^1_2}$}
\ncline{->}{C}{G}\lput*{:U}(0.4){$\scriptstyle{D^3_2}$}
\ncline{->}{C}{H}\lput*{:U}(0.4){$\scriptstyle{D^2_3}$}
\ncline{->}{D}{H}\lput*{:U}(0.4){$\scriptstyle{D^4_3}$}
\ncline{->}{E}{I}\lput*{:U}(0.4){$\scriptstyle{D^1_m}$}
\nccurve[angleA=-40,angleB=140]{->}{E}{F}\lput*{:U}(0.4){$\scriptscriptstyle{D^m_1}$}
\ncline{->}{F}{J}\lput*{:U}(0.4){$\scriptstyle{D^{1}}$}
\ncline{->}{G}{J}\lput*{:U}(0.4){$\scriptstyle{D^{2}}$}
\ncline{->}{H}{J}\lput*{:U}(0.4){$\scriptstyle{D^{3}}$}
\ncline{->}{I}{J}\lput*{:U}(0.4){$\scriptstyle{D^{m}}$}
\end{pspicture}
\end{split}
\end{equation}
The maps between invertible sheaves in this diagram are multiplication by a torus-invariant section of an invertible sheaf on $X$. We illustrate this and fix notation by writing on each arrow in diagram \eqref{eqn:diagram} the Cartier divisor of zeros of the corresponding section so, for example, the effective divisor $D^1_{2}\in H^0(L_2\otimes L_{1,2}^{-1})\cong \operatorname{Hom}(L_{1,2},L_2)$ denotes the Cartier divisor of zeros of the section that defines the map from $L_{1,2}$ to $L_2$. This diagram can be represented equally well in a planar picture that is reminiscent of a bicycle wheel (see Figure~\ref{fig:wheel} in Section~\ref{sec:cohomologyWheels}), and we refer to any such four-term complex $T^\bullet$ as a `wheel' on $X$.
To state our main result we choose once and for all a rather special order on the set of transpositions of $m$ letters (see Section~\ref{sec:syzygies}), giving $\tau_1= (\mu_1,\nu_1),\dots, \tau_n=(\mu_n,\nu_n)$ where $n= \binom{m}{2}$ and $\mu_k<\nu_k$ for $1\leq k\leq n$. In addition, for every index $1\leq k\leq n$ we define a subscheme $Z_k\subset X$ to be the scheme-theoretic intersection of certain torus-invariant divisors in $X$. To be more precise, let $\mathscr{D}:=\{D_{\lambda}\}_{\lambda\in \Lambda}$ be a set of torus-invariant divisors in $X$. Define the greatest common divisor and the least common multiple of the set $\mathscr{D}$ to be the torus-invariant divisors
\[
\gcd(\mathscr{D}) = \max \{ D \mid D_\lambda-D\geq 0 \;\forall \;\lambda\in \Lambda\}\quad \text{and}\quad \operatorname{lcm}(\mathscr{D}) = \min \{ D \mid D-D_\lambda\geq 0 \;\forall \;\lambda\in \Lambda\}
\]
respectively; here max/min means choose the maximal/minimal values for the coefficients of each prime divisor in the expression for $D$. Define subschemes $Z_k \subset X$ for $1 \leq k \leq n$ in terms of the Cartier divisors labelling the arrows in diagram \eqref{eqn:diagram} as follows:
\begin{enumerate}
\item[\ensuremath{(\mathrm{i})}] for $1\leq k\leq m$, define $Z_k$ to be the scheme-theoretic intersection of $\gcd(D_{k+1}^k,D^{k+1}_k)$ and the divisor $\operatorname{lcm}\big(D^1,\dots,D^m,\gcd(D_{k+2}^{k+1},D^{k+2}_{k+1}),\dots,\gcd(D_{1}^{m},D^{1}_{m})\big)-\operatorname{lcm}(D^k,D^{k+1})$;
\item[\ensuremath{(\mathrm{ii})}] for $m+1 \leq k \leq 2m-3$, define $Z_k$ to be the scheme-theoretic intersection of the divisors $\operatorname{lcm}(D^1,D^{\nu_k},D^{\nu_k+1},\dots,D^m)-\operatorname{lcm}(D^{1},D^{\nu_k})$ and $\operatorname{lcm}(D^1,D^{\nu_k-1},D^{\nu_k})-\operatorname{lcm}(D^{1},D^{\nu_k})$;
\item[\ensuremath{(\mathrm{iii})}] for $2m-2\leq k \leq n$, define $Z_k$ to be the scheme-theoretic intersection of the divisors $\operatorname{lcm}(D^{\mu},D^{\mu_k},D^{\nu_k})-\operatorname{lcm}(D^{\mu_k},D^{\nu_k})$ for $\mu \in \{1, \dots, \mu_k-1\}\cup\{\nu_k-1\}$.
\end{enumerate}
The subschemes $Z_k\subset X$ are torus-invariant, though some (possibly all) may be empty, see Example~\ref{exa:hex} for an explicit calculation.
\begin{theorem}
\label{thm:mainintro}
Let $X$ be a normal toric variety and let $T^\bullet$ be the complex from \eqref{eqn:Tbullet}, with differentials determined by the Cartier divisors shown in \eqref{eqn:diagram}. Then:
\begin{enumerate}
\item[(1)]$H^0(T^{\bullet}) \cong \mathscr{O}_Z \otimes L$ where $Z$ is the scheme-theoretic intersection of $D^1,\dots,D^m;$
\item[(2)]$H^{-1}(T^{\bullet})$ has an $n$-step filtration
\[
\operatorname{im}(d^2)=F^0 \subseteq F^1\subseteq \cdots \subseteq F^{n-1}\subseteq F^n=\ker(d^1)
\]
where, for $1\leq k \leq n$ and for the permutation $\tau_k=(\mu_k,\nu_k)$, we have
\begin{equation}
\label{eqn:sheafquotient1intro}
F^k/F^{k-1}\cong \mathscr{O}_{Z_k} \otimes L_{\mu_k}\otimes
L_{\nu_k}\otimes L^{-1}(\gcd(D^{\mu_k},D^{\nu_k}));
\end{equation}
\item[(3)]$H^{-2}(T^{\bullet}) \cong \mathscr{O}_D \otimes L(D)$ where $D=\gcd(D_{1,2},D_{2,3},\dots,D_{m,1});$
\item[(4)]$H^{-3}(T^{\bullet})\cong 0$.
\end{enumerate}
\end{theorem}
To prove Theorem~\ref{thm:mainintro} we lift the complex $T^\bullet$ to a complex of $\operatorname{Cl}(X)$-graded $S$-modules using the functor of Cox~\cite{Cox95}, where $\operatorname{Cl}(X)$ and $S$ denote the class group and Cox ring of $X$ respectively. Explicitly, if $S(L)$ denotes the free $S$-module with generator in degree $L\in \operatorname{Cl}(X)$, then $T^\bullet$ can be lifted to the complex
\begin{equation}
\label{eqn:complexSmods1}
S(L) \xlongrightarrow{\varphi^3} \bigoplus_{j=1}^m S(L_{j,{j+1}}) \xlongrightarrow{\varphi^2} \bigoplus_{j=1}^m S(L_j) \xlongrightarrow{\varphi^{1}} S(L).
\end{equation}
This translates the problem to one from commutative algebra. The lion's share of the effort in proving Theorem~\ref{thm:mainintro} goes into proving part (2). For this, the image of $\varphi^2$ is generated by elements $\gbf{\alpha}_1,\dots, \gbf{\alpha}_m$, and our chosen order on the set of transpositions on $m$ letters determines an order on the generators $\gbf{\beta}_1,\dots, \gbf{\beta}_n$ of $\ker(\varphi^1)$ which in turn defines a filtration
\[
\operatorname{im}(\varphi^2) = F^0 \subseteq F^1\subseteq F^2\subseteq \cdots \subseteq F^{n-1}\subseteq F^n=\ker(\varphi^1).
\]
We give a presentation for each successive quotient $F^k/F^{k-1}$ as a cyclic $\operatorname{Cl}(X)$-graded $S$-module of the form $(S/I_k)(L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1}(\gcd(D^{\mu_k},D^{\nu_k})))$ for some monomial ideal $I_k$ whose generators are defined via the Cartier divisors $D^1,\dots, D^m$ labelling the right-hand arrows in the diagram \eqref{eqn:diagram} illustrating the wheel (see Proposition~\ref{prop:filtration2}). This calculation can be performed in any given example using Macaulay2~\cite{M2}, but we present a unified description for all $1\leq k\leq n$. (Warning: M2 may choose an order on the generators $\gbf{\beta}_1,\dots, \gbf{\beta}_n$ that differs from ours, see Remark~\ref{rem:hex}.)
Our main tool, which may be of independent interest, is a description of the syzygy module of $\ker(\varphi^1)$ in terms of walks in the complete graph $\Gamma$ on $m$ vertices. In fact, for each $1\leq k\leq n$ we introduce a subgraph $\Gamma_k$ of $\Gamma$ that enables us to describe uniformly the module of syzygies $\operatorname{syz}(F^k)$ in terms of certain walks in $\Gamma_k$. To state the result, recall that a circuit in $\Gamma_k$ is a closed walk that does not pass through any vertex twice. It is straightforward to associate a syzygy to every such circuit (see Lemma~\ref{lem:syzygy}). A circuit is said to be minimal if it admits no chords (see \eqref{eqn:splitting}). We prove the following result (see Theorem~\ref{thm:main1}).
\begin{theorem}
\label{thm:intro}
For $m\leq k\leq n$, the module $\operatorname{syz}(F^{k})$ is generated by the set of syzygies associated to the minimal circuits of $\Gamma_k$.
\end{theorem}
\noindent The precise description of the syzygies from Theorem~\ref{thm:intro} allows us to read off directly a set of monomial generators for each ideal $I_k$, and this feeds into the proof of Theorem~\ref{thm:mainintro} above. Generating sets for toric ideals arising from graphs were studied by Hibi--Ohsugi~\cite{OhsugiHibi}, and some of the graph-theoretic tools that we use here were also employed there. Properties of $\ensuremath{\Bbbk}$-algebras arising from graphs have also been studied widely by Villarreal, see for example \cite{Villarreal}.
Our main result was motivated by the statement of Cautis--Logvinenko~\cite[Lemma~3.1]{CL09} which asserts that in the special case $m=3$, a version of Theorem~\ref{thm:mainintro} holds for the complex $T^\bullet$ from \eqref{eqn:Tbullet} arising from a diagram \eqref{eqn:diagram} on an arbitrary smooth separated scheme. However, this is not true in general: the assertion \cite[Proof of Lemma 3.1(2)]{CL09} that certain elements $\beta_1, \beta_2, \beta_3$ generate $\ker(d^1)$ may fail if the maps from diagram \eqref{eqn:diagram} are not monomial maps.
\begin{example}
\label{ex:counterexample}
For a counterexample in the notation of \emph{loc.cit.} (we write the signs explicitly), suppose the maps $L_1\to L$, $L_2\to L$ and $L_3\to L$ from \eqref{eqn:diagram} are defined locally near a point $p\in X$ as multiplication by $f_1:=x, f_2:=x+y, f_3:=y\in \mathscr{O}_{X,p}$. Then $(1,-1,1)$ lies in $\ker(d^1)$, but it does not lie in the submodule generated by $\beta_1 = (f_2,-f_1,0), \beta_2 = (-f_3,0,f_1), \beta_3= (0,f_3,-f_2)$.
\end{example}
The assumption in Theorem~\ref{thm:mainintro} that $X$ is toric and the maps from \eqref{eqn:diagram} are torus-equivariant ensures that each map arises from multiplication by a monomial in the Cox ring of $X$, in which case standard Gr\"{o}bner theory shows that analogous elements $\gbf{\beta}_1, \gbf{\beta}_2, \gbf{\beta}_3$ generate the appropriate kernel (see Lemma~\ref{lem:1.1}). Under these additional assumptions, Remark~\ref{rem:CautisLogvinenko} explains how the statement of Cautis--Logvinenko~\cite[Lemma~3.1]{CL09} can be recovered as a special case of Theorem~\ref{thm:mainintro} when $X$ is smooth. The main results of both Cautis--Logvinenko~\cite{CL09} and Cautis--Craw--Logvinenko~\cite{CCL12} require the statement of \cite[Lemma~3.1]{CL09} only when $X$ is a smooth toric variety and the maps from \eqref{eqn:diagram} are torus-equivariant, so Theorem~\ref{thm:mainintro} holds at the level of generality required for both of those papers.
In fact, Theorem~\ref{thm:mainintro} provides a unified description of the sheaves \eqref{eqn:sheafquotient1intro} in the filtration on $H^{-1}(T^{\bullet})$ even for $m=3$, improving slightly on the statement from \cite[Lemma~3.1]{CL09}. More generally, for $m>3$, the schemes $Z_k$ ($1\leq k\leq n$) divide naturally into three families determined by the intervals \ensuremath{(\mathrm{i})}\ $1\leq k\leq m$; \ensuremath{(\mathrm{ii})}\ $m+1 \leq k \leq 2m-3$; and \ensuremath{(\mathrm{iii})}\ $2m-2\leq k \leq n$, leading to a more involved filtration in this case. That the statement is considerably more complicated for $m>3$ stems from the simple fact that any pair of vertices of a triangle are adjacent, while the same statement is not true for a polygon with $m>3$ vertices.
\noindent\textbf{Acknowledgements.}
Thanks to Raf Bocklandt for generating Example~\ref{exa:hex} and to Sonja Petrovic for comments on an earlier version of this paper. Thanks also to the anonymous referees for many helpful remarks. Our results owe much to experiments made with Macaulay2~\cite{M2}. Both authors were supported by EPSRC grant EP/G004048/1.
\section{Syzygies from walks in a complete graph}
\label{sec:syzygies}
Let $S=\ensuremath{\Bbbk}[x_1,\dots, x_d]$ be a polynomial ring over a field $\ensuremath{\Bbbk}$ and let $f^1, \dots, f^m\in S$ be monomials for some $m\geq 2$. Consider the free $S$-module with basis $\textbf{e}_1,\dots, \textbf{e}_m$ and define an $S$-module homomorphism $\varphi\colon \bigoplus_{\mu=1}^m S\mathbf{e}_\mu \longrightarrow S$ by setting $\varphi(\mathbf{e}_{\mu})=f^{\mu}$ for $1\leq \mu\leq m$. For every pair of indices $1 \leq \mu < \nu \leq m$ we define monomials $f^{\mu,\nu}=\operatorname{op}eratorname{lcm}(f^{\mu},f^{\nu})$ and set
\begin{equation}
\label{eqn:Beta}
\gbf{\beta}_{(\mu,\nu)}= \frac{f^{\mu,\nu}}{f^{\nu}}\mathbf{e}_{\nu}-\frac{f^{\mu,\nu}}{f^{\mu}}\mathbf{e}_{\mu}.
\end{equation}
The module of syzygies of $M:=\langle f^1,\dots, f^m\rangle$ is defined to be the $S$-module $\operatorname{syz}(M):= \ker(\varphi)$. The following result is well known; see for example Eisenbud~\cite[Lemma~15.1]{Eisenbud95}.
\begin{lemma}
\label{lem:1.1}
The kernel of $\varphi$ is generated by the elements $\gbf{\beta}_{(\mu,\nu)}$ for $1 \leq \mu < \nu \leq m$.
\end{lemma}
It is convenient to order the set $\{ (\mu,\nu) \mid 1 \leq \mu < \nu \leq m \}$ of transpositions of $m$ letters. First list the transpositions of adjacent letters $\tau_j=(j,j+1)$ for $1 \leq j \leq m-1$. Set $\tau_m=(1,m)$, then list all remaining transpositions that involve $1$ as $\tau_j=(1,j-m+2)$ for $m+1 \leq j \leq 2m-3$, and finally list all remaining transpositions lexicographically, so $\tau_i=(\mu_i,\nu_i)$ precedes $\tau_j=(\mu_j,\nu_j)$ if and only if $\mu_i < \mu_j$ or $\mu_i=\mu_j$ and $\nu_i < \nu_j$. We may therefore list the generators of $\ker(\varphi)$ from Lemma~\ref{lem:1.1} by setting $\gbf{\beta}_j :=\gbf{\beta}_{(\mu_j,\nu_j)}$ for all $1 \leq j \leq n$, where $n= \binom{m}{2}$. This choice of order enables us to define for each $1 \leq k \leq n$ an $S$-module
$$
F^k=\langle \gbf{\beta}_1,\dots, \gbf{\beta}_{k} \rangle.
$$
Our primary goal is to provide for each $1\leq k\leq n$ an explicit set of generators for the module of syzygies $\operatorname{syz}(F^k)$ that encodes the relations between $\gbf{\beta}_1,\dots, \gbf{\beta}_{k}$. Recall that this module is defined to be the kernel of the surjective $S$-module homomorphism
\[
\psi\colon \bigoplus_{j=1}^k S\gbf{\varepsilon}_j \longrightarrow F^k
\]
satisfying $\psi(\gbf{\varepsilon}_{j})= \gbf{\beta}_j$ for $1\leq j\leq k$. We compute this module directly for $1\leq k\leq m$.
\begin{lemma}
\label{lem:cyclic}
The $S$-module $\operatorname{syz}(F^k)$ is the zero module for $1\leq k \leq m-1$, and it is a free module of rank one for $k=m$.
\end{lemma}
\begin{proof}
Our choice of order on transpositions ensures that for $1\leq k\leq m-1$, there can be no relations between $\gbf{\beta}_1,\dots, \gbf{\beta}_{k}$. For $k=m$, let $\gbf{\sigma} = \sum_{j=1}^m s_j \gbf{\varepsilon}_{j}$ be a syzygy on $\gbf{\beta}_1,\dots, \gbf{\beta}_{m}$ where $s_1,\dots,s_m\in S$. By comparing coefficients of each $\mathbf{e}_{i}$ in the expression
\[
0 = \psi(\gbf{\sigma})= s_m \bigg( \frac{f^{1,m}}{f^{m}}\mathbf{e}_{m}-\frac{f^{1,m}}{f^{1}}\mathbf{e}_{1} \bigg) +
\sum_{j=1}^{m-1} s_j \bigg( \frac{f^{j,j+1}}{f^{j+1}}\mathbf{e}_{j+1}-\frac{f^{j,j+1}}{f^{j}}\mathbf{e}_{j} \bigg)
\]
we obtain the following equations
\begin{equation}
\label{eqn:comparecoeffs}
s_1f^{1,2} = s_2f^{2,3} = \cdots = s_{m-1}f^{m-1,m} = - s_m f^{1,m}.
\end{equation}
It's easy to see (or see Lemma~\ref{lem:syzygy} below for a proof) that the element
\begin{equation}
\label{eqn:sigma0}
\gbf{\sigma}_0:= -\frac{\operatorname{lcm}(f^1,\dots,f^m)}{f^{1,m}} \gbf{\varepsilon}_m + \sum_{j=1}^{m-1} \frac{\operatorname{lcm}(f^1,\dots,f^m)}{f^{j,j+1}} \gbf{\varepsilon}_j
\end{equation}
is a syzygy. Moreover, equations \eqref{eqn:comparecoeffs} imply that
\[
\gbf{\sigma} = \frac{s_1f^{1,2}}{\operatorname{lcm}(f^1,\dots,f^m)}\gbf{\sigma}_0,
\]
so $\operatorname{syz}(F^m)$ is the free $S$-module with basis $\gbf{\sigma}_0$.
\end{proof}
We study the module $\operatorname{syz}(F^k)$ for $m+1\leq k\leq n$ by studying walks in a graph. Let $\Gamma$ be the complete graph on $m$ vertices, with vertex set $\{1,2,\dots,m\}$. Assign an orientation to each edge $e=(\mu,\nu)$ by directing it from $\mu$ to $\nu$ if $\mu < \nu$. Regard every such edge as being labelled by the corresponding generator $\gbf{\beta}_{(\mu,\nu)}$ of $\ker(\varphi)$. The order on the generators $\gbf{\beta}_1,\dots, \gbf{\beta}_n$ introduced above determines an order on the set of edges $e_1,\dots, e_n$ of $\Gamma$. A {\it walk $\gamma$ of length} $\ell$ in $\Gamma$ is a walk in the undirected graph that traverses precisely $\ell$ edges. Every such walk is characterised by the sequence of vertices $\gamma=(\mu_1,\mu_2,\dots, \mu_{\ell+1})$ in $\Gamma$ that it touches. A walk $\gamma$ is \emph{closed} if $\mu_1=\mu_{\ell+1}$, and a {\it circuit} is a closed walk for which $\mu_1,\dots, \mu_\ell$ are distinct. Each circuit $\gamma$ defines uniquely a subgraph of $\Gamma$, and we let $\operatorname{supp}(\gamma)$ denote its set of edges. Given a circuit $\gamma$ and an edge $e\in \operatorname{supp}(\gamma)$, set $\operatorname{sign}_\gamma(e) = +1$ if $\gamma$ traverses $e$ according to the orientation in $\Gamma$, and set $\operatorname{sign}_\gamma(e) = -1$ if $\gamma$ traverses $e$ against orientation.
\begin{figure}
\caption{Directed graph $\Gamma$ illustrating the generators $\gbf{\beta}_1,\dots,\gbf{\beta}_n$ of $\ker(\varphi)$}
\end{figure}
Given that the elements $\gbf{\beta}_j$ for $1\leq j\leq n$ correspond to edges in $\Gamma$, we may index the basis elements $\gbf{\varepsilon}_j$ for $1\leq j\leq n$ by edges $e_1,\dots, e_n$ in $\Gamma$. Thus, for the edge $e=e_j$ for $1\leq j\leq n$, we write $\gbf{\varepsilon}_e:=\gbf{\varepsilon}_j$. For any vertices $\mu_1, \dots, \mu_{\ell+1}$ in $\Gamma$, set
\[
f^{\mu_1,\dots,\mu_{\ell+1}}=\operatorname{lcm}(f^{\mu_{1}}, \dots, f^{\mu_{\ell+1}}).
\]
For a walk $\gamma=(\mu_1,\mu_2,\dots, \mu_{\ell+1})$ in $\Gamma$ we define the monomial $f^\gamma:= f^{\mu_1,\dots,\mu_{\ell+1}}$. In particular, for an edge $e$ in $\Gamma$ joining vertex $\mu$ to $\nu$, we obtain $f^e=f^{\mu,\nu}$.
\begin{lemma}
\label{lem:syzygy}
For any circuit $\gamma$ of length at least three in $\Gamma$, the vector
$$
\gbf{\sigma}_{\gamma}= \sum_{e\in \operatorname{supp}(\gamma)} \operatorname{sign}_\gamma(e) \frac{f^{\gamma}}{f^e} \gbf{\varepsilon}_{e}
$$
is a syzygy on $\gbf{\beta}_1,\dots, \gbf{\beta}_{n}$.
\end{lemma}
\begin{proof}
If $\gamma$ has length two then $\gbf{\sigma}_{\gamma}=\gbf{\varepsilon}_e-\gbf{\varepsilon}_e=0$ which is not in fact a syzygy by definition. For any circuit $\gamma$ of length at least three we must show that
\[
\psi(\gbf{\sigma}_{\gamma})=\sum_{e\in \operatorname{supp}(\gamma)} \operatorname{sign}_\gamma(e) \frac{f^{\gamma}}{f^e}\gbf{\beta}_{e} = 0.
\]
For an edge $e$ that $\gamma$ traverses in the direction from vertex $\mu$ to vertex $\mu'$, we have that
$$
\operatorname{sign}_\gamma(e) \frac{f^{\gamma}}{f^e}\gbf{\beta}_{e}= \frac{f^{\gamma}}{f^e}\bigg(\frac{f^{e}}{f^{\mu'}}
\mathbf{e}_{\mu'}-\frac{f^{e}}{f^{\mu}}\mathbf{e}_{\mu}\bigg) = \frac{f^{\gamma}}{f^{\mu'}}
\mathbf{e}_{\mu'}-\frac{f^{\gamma}}{f^{\mu}}\mathbf{e}_{\mu}.
$$
The sum of all such terms over $e\in \operatorname{supp}(\gamma)$ collapses as a telescoping series since $\gamma$ is closed.
\end{proof}
For $1\leq k\leq n$, let $\Gamma_k$ denote the spanning subgraph of $\Gamma$ that has vertex set $\{1,\dots, m\}$, and which includes only the first $k$ edges of $\Gamma$ (see Figure~\ref{fig:mainproof1}(a) below for the case $k=m+3$). Clearly $\Gamma=\Gamma_n$. Let $\gamma=(\mu_1,\dots,\mu_\ell,\mu_1)$ be a circuit in $\Gamma_k$ for some $k$. A \emph{chord} of $\gamma$ in $\Gamma_k$ is any edge of the form $c=(\mu_r,\mu_s)$ for some $1 \leq r < s \leq \ell$ that does not lie in $\operatorname{supp}(\gamma)$. Every such chord $c$ splits $\gamma$ into two circuits:
\begin{equation}
\label{eqn:splitting}
\gamma_1=(\mu_{r},\dots,\mu_{s},\mu_{r}) \quad \text{and} \quad\gamma_2=(\mu_{1},\dots,\mu_{r},\mu_{s},\dots,\mu_{\ell},\mu_{1}).
\end{equation}
A circuit must have length at least four if it is to admit a chord. We define a {\it minimal circuit} of $\Gamma_k$ to be a circuit of length at least three that has no chords.
\begin{lemma}
\label{lem:chord}
Let $\gamma$ be a circuit in $\Gamma_k$ admitting a chord in $\Gamma_k$ that splits $\gamma$ into circuits $\gamma_1$ and $\gamma_2$ as in \eqref{eqn:splitting}. Then the syzygy $\gbf{\sigma}_{\gamma}$ is contained in the module generated by $\gbf{\sigma}_{\gamma_1}$ and $\gbf{\sigma}_{\gamma_2}$.
\end{lemma}
\begin{proof}
Let $c$ be the chord. For $i=1,2$, let $\gamma_i\setminus c$ denote the walk obtained from $\gamma_i$ by removing the edge $c$. Since $\operatorname{sign}_{\gamma_1}(c)=-\operatorname{sign}_{\gamma_2}(c)$ we may rewrite
\begin{eqnarray*}
\gbf{\sigma}_{\gamma} & = & \operatorname{sign}_{\gamma_1}(c)\frac{f^\gamma}{f^c}\gbf{\varepsilon}_c + \operatorname{sign}_{\gamma_2}(c) \frac{f^\gamma}{f^c}\gbf{\varepsilon}_c + \sum_{e\in \operatorname{supp}(\gamma)} \operatorname{sign}_\gamma(e) \frac{f^{\gamma}}{f^{e}} \gbf{\varepsilon}_{e} \\
& = & \operatorname{sign}_{\gamma_1}(c) \frac{f^{\gamma}}{f^{c}} \gbf{\varepsilon}_{c} + \!\!\sum_{e\in \operatorname{supp}(\gamma_1\setminus c)} \operatorname{sign}_{\gamma_1}(e) \frac{f^{\gamma}}{f^{e}} \gbf{\varepsilon}_{e} + \operatorname{sign}_{\gamma_2}(c) \frac{f^{\gamma}}{f^{c}} \gbf{\varepsilon}_{c} + \!\!\sum_{e\in \operatorname{supp}(\gamma_2\setminus c)} \operatorname{sign}_{\gamma_2}(e) \frac{f^{\gamma}}{f^{e}} \gbf{\varepsilon}_{e} \\
& = & \frac{f^\gamma}{f^{\gamma_1}} \gbf{\sigma}_{\gamma_1} + \frac{f^\gamma}{f^{\gamma_2}} \gbf{\sigma}_{\gamma_2}.
\end{eqnarray*}
It remains to note that $f^{\gamma_1}= f^{\mu_{r},\dots,\mu_{s}}$ divides $f^\gamma=f^{\mu_1,\dots,\mu_\ell}$, and similarly, $f^{\gamma_2}$ divides $f^\gamma$.
\end{proof}
We are now in a position to establish the main result of this section.
\begin{theorem}
\label{thm:main1}
For $1\leq k\leq n$, the $S$-module $\operatorname{syz}(F^k)$ is generated by the syzygies $\gbf{\sigma}_{\gamma}$, where $\gamma$ is a minimal circuit of $\Gamma_k$.
\end{theorem}
\begin{proof}
We distinguish three cases. The first case, in which $1\leq k \leq m-1$, is straightforward: the graph $\Gamma_k$ admits no circuits and $\operatorname{syz}(F^k)=0$ by Lemma~\ref{lem:cyclic}, so the result holds.
We prove the second case, in which $m\leq k\leq 2m-3$, by induction. For $k=m$, Lemma~\ref{lem:cyclic} shows that the $S$-module $\operatorname{syz}(F^m)$ is free with basis $\gbf{\sigma}_0$ from \eqref{eqn:sigma0}. The syzygy $\gbf{\sigma}_{\gamma_0}$ associated to the unique minimal circuit $\gamma_0 = (1,2,\dots,m,1)$ in $\Gamma_m$ coincides with $\gbf{\sigma}_0$, so the statement holds for $k=m$. Assume the statement for $\Gamma_{k-1}$ for any $m+1\leq k \leq 2m-3$, and let
\[
\gbf{\sigma} = \sum_{j=1}^k s_j \gbf{\varepsilon}_{j}
\]
be a syzygy on $\gbf{\beta}_1,\dots, \gbf{\beta}_{k}$ where $s_1,\dots,s_k\in S$.
As a first step we reduce to the case in which the coefficients satisfy $s_j=0$ for $k-m+2\leq j\leq m$ (these indices determine the edges to the left of $\gbf{\beta}_k$ in Figure~\ref{fig:mainproof1}(a)).
\begin{figure}
\caption{The graph $\Gamma_k$ for $m\leq k\leq 2m-3$ illustrated for $k=m+3$}\label{fig:mainproof1}
\end{figure}
Indeed, suppose otherwise, so $s_i\neq 0$ for some $k-m+2\leq i\leq m$. By comparing the coefficient of $\mathbf{e}_\mu$ for each index $k-m+3\leq \mu \leq m$ in the equation
\[
0 = \psi(\gbf{\sigma})= \sum_{j=1}^{k} s_j \bigg(\frac{f^{\mu_j,\nu_j}}{f^{\nu_j}}\mathbf{e}_{\nu_j}-\frac{f^{\mu_j,\nu_j}}{f^{\mu_j}}\mathbf{e}_{\mu_j}\bigg),
\]
we obtain a collection of equations
\begin{equation}
\label{eqn:comparecoeffstwo}
s_{k-m+2}f^{k-m+2,k-m+3} = s_{k-m+3}f^{k-m+3,k-m+4} = \cdots = s_{m-1}f^{m-1,m} = - s_m f^{1,m}
\end{equation}
which imply that $s_j\neq 0$ for all $k-m+2\leq j\leq m$. As illustrated in Figure~\ref{fig:mainproof1}(b) for $k=m+3$, the circuit $\gamma_1 := (1,k-m+2,k-m+3, \dots, m, 1)$ is minimal in $\Gamma_k$, and it determines both the monomial $f^{\gamma_1}= f^{1, k-m+2,k-m+3,\dots,m}$ and the syzygy
\begin{equation}
\label{eqn:sigma1}
\gbf{\sigma}_{\gamma_1} = -\frac{f^{\gamma_1}}{f^{1,m}}\gbf{\varepsilon}_m + \frac{f^{\gamma_1}}{f^{1,k-m+2}}\gbf{\varepsilon}_k + \sum_{j=k-m+2}^{m-1} \frac{f^{\gamma_1}}{f^{j,j+1}}\gbf{\varepsilon}_j.
\end{equation}
Equations \eqref{eqn:comparecoeffstwo} and the fact that $s_m\neq 0$ imply that $f^{\gamma_1}$ divides $s_m f^{1,m}$, and a straightforward computation shows that
\[
\gbf{\sigma}_1:= \gbf{\sigma} + \frac{s_mf^{1,m}}{f^{\gamma_1}} \gbf{\sigma}_{\gamma_1} = \bigg(s_k+\frac{s_mf^{1,m}}{f^{1,k-m+2}}\bigg)\gbf{\varepsilon}_k + \sum_{j=1}^{k-m+1} s_j\gbf{\varepsilon}_j + \sum_{j=m+1}^{k-1} s_j\gbf{\varepsilon}_j.
\]
In particular, if we expand $\gbf{\sigma}_1 = \sum_{j=1}^k t_j \gbf{\varepsilon}_{j}$ for
$t_1,\dots,t_k\in S$, then $t_j=0$ for $k-m+2\leq j\leq m$, and it suffices to prove the result for $\gbf{\sigma}_1$ as claimed.
The second step is to repeat the above, comparing the coefficient of $\mathbf{e}_{k-m+2}$ in the equation $\psi(\gbf{\sigma}_1)=0$, and since $t_{k-m+2}=0$ we obtain
\begin{equation}
\label{eqn:comparecoeffsthree}
t_{k-m+1}f^{k-m+1,k-m+2} + t_{k}f^{1,k-m+2} = 0.
\end{equation}
If $t_k\neq 0$ then the minimal circuit $\gamma_2 := (1,k-m+2, k-m+1, 1)$ in $\Gamma_k$ from Figure~\ref{fig:mainproof1}(b) determines both the monomial $f^{\gamma_2}= f^{1, k-m+1,k-m+2}$ and the syzygy
\begin{equation}
\label{eqn:sigma2}
\gbf{\sigma}_{\gamma_2} = \frac{f^{\gamma_2}}{f^{1,k-m+2}}\gbf{\varepsilon}_k - \frac{f^{\gamma_2}}{f^{k-m+1,k-m+2}}\gbf{\varepsilon}_{k-m+1} - \frac{f^{\gamma_2}}{f^{1,k-m+1}}\gbf{\varepsilon}_{k-1}.
\end{equation}
Equation \eqref{eqn:comparecoeffsthree} implies that $f^{\gamma_2}$ divides $t_{k}f^{1,k-m+2}$ and again, a straightforward computation, this time using equation \eqref{eqn:comparecoeffsthree}, shows that the coefficients of both $\gbf{\varepsilon}_k$ and $\gbf{\varepsilon}_{k-m+1}$ in the syzygy
\[
\gbf{\sigma}_2:= \gbf{\sigma}_1 - \frac{t_k f^{1,k-m+2}}{f^{\gamma_2}} \gbf{\sigma}_{\gamma_2}
\]
are zero. This means that $\gbf{\sigma}_2\in \operatorname{syz}(F^{k-1})$, and we deduce from the inductive hypothesis that $\gbf{\sigma}_2$ is generated by the elements $\gbf{\sigma}_{\gamma}$ associated to minimal circuits $\gamma$ in $\Gamma_{k-1}$.
Among all minimal circuits in $\Gamma_{k-1}$, only $\gamma = (1,k-m+1, k-m+2,\dots, m, 1)$ is not minimal in $\Gamma_{k}$; indeed, the edge labelled $\gbf{\beta}_k$ is a chord. However, this edge splits $\gamma$ into the circuits $\gamma_1, \gamma_2$ defined earlier in the current proof that \emph{are} minimal in $\Gamma_k$, and Lemma~\ref{lem:chord} writes $\gbf{\sigma}_\gamma$ as an $S$-linear combination of $\gbf{\sigma}_{\gamma_1}$ and $\gbf{\sigma}_{\gamma_2}$. Thus, the syzygy $\gbf{\sigma}_2$, and hence both $\gbf{\sigma}_1$ and $\gbf{\sigma}$, are generated by the elements $\gbf{\sigma}_{\gamma}$ associated to minimal circuits $\gamma$ in $\Gamma_{k}$. This completes the proof for $m\leq k\leq 2m-3$.
Finally, consider $2m-2\leq k\leq n$. Given any monomial order on $S$, let $>$ denote the term over position order on the free $S$-module $\bigoplus_{\mu=1}^m S\textbf{e}_\mu$, that is, $>$ is the monomial order defined for $g, g'\in S$ and $1\leq \mu, \nu\leq m$ by taking $g' \mathbf{e}_{\nu}>g \mathbf{e}_{\mu}$ if and only if $g' f^{\nu} >g f^{\mu}$ with respect to the monomial order on $S$, or $g' f^{\nu} =g f^{\mu}$ and $\nu > \mu$. It follows that for $1 \leq j \leq k$, the leading term of $\gbf{\beta}_j$ with respect to this order is $f^{\mu_j,\nu_j}/f^{\nu_j} \mathbf{e}_{\nu_j}$. This implies that the \emph{S-vectors of critical pairs} are the elements
$$
\mathrm{S}(\gbf{\beta}_{i},\gbf{\beta}_{j})=\frac{f^{\mu_i,\mu_j,\nu_j}}{f^{\mu_j,\nu_j}}\gbf{\beta}_{j}-\frac{f^{\mu_i,\mu_j,\nu_j}}{f^{\mu_i,\nu_j}}\gbf{\beta}_{i}
$$
arising from all elements in $\mathbb{B}_k:=\{ (i,j) \mid 1 \leq i < j \leq k, \nu_{i}=\nu_{j}\}$ (see Kreuzer--Robbiano~\cite[Definition~2.5.1]{KR00}). Substituting \eqref{eqn:Beta} into every S-vector ensures that the leading terms cancel by definition. Since any critical pair $(i,j)$ corresponds to a pair of directed edges $(\mu_i,\nu_j)$ and $(\mu_j,\nu_j)$ in $\Gamma_k$,
\begin{figure}
\caption{Minimal circuit in $\Gamma_k$ for $2m-2\leq k\leq n$ where $i<j$.}\label{fig:triangle}
\end{figure}
the S-vector can then be written as a multiple of the generator $\gbf{\beta}_{(\mu_i,\mu_j)}$ corresponding to the third directed edge from Figure~\ref{fig:triangle}. Indeed, if we choose the index $1\leq h\leq k$ so that $\gbf{\beta}_h = \gbf{\beta}_{(\mu_i,\mu_j)}$, then we compute explicitly that the `standard expressions' are
\[
\mathrm{S}(\gbf{\beta}_{i},\gbf{\beta}_{j})=-\frac{f^{\mu_i, \mu_j, \nu_i}}{f^{\mu_i,\mu_j}}\gbf{\beta}_{h}.
\]
Moreover, we deduce from Buchberger's Criterion \cite[Theorem 15.8]{Eisenbud95} that $\gbf{\beta}_1,\dots, \gbf{\beta}_k$ are a Gr\"{o}bner basis of $F^k$. Every standard expression determines a syzygy, namely
\begin{equation}
\label{eqn:sigmaij}
\gbf{\sigma}_{(i,j)}=\frac{f^{\mu_i,\mu_j,\nu_j}}{f^{\mu_j,\nu_j}}\gbf{\varepsilon}_{j}-\frac{f^{\mu_i,\mu_j,\nu_j}}{f^{\mu_i,\nu_j}}\gbf{\varepsilon}_{i} + \frac{f^{\mu_i, \mu_j, \nu_i}}{f^{\mu_i,\mu_j}}\gbf{\varepsilon}_{h}.
\end{equation}
Schreyer's theorem~\cite[Theorem 15.10]{Eisenbud95} implies that the set of syzygies $\{\gbf{\sigma}_{(i,j)} \mid (i,j)\in \mathbb{B}_k\}$ is a system of generators for $\operatorname{syz}(F^k)$. Let $\gamma(i,j):=(\mu_i, \mu_j, \nu_j,\mu_i)$ denote the circuit in $\Gamma_k$ obtained by traversing the edges labelled $\gbf{\beta}_h$, $\gbf{\beta}_j$ according to orientation followed by the edge labelled $\gbf{\beta}_i$ against orientation (see Figure~\ref{fig:triangle}). Then $\gbf{\sigma}_{(i,j)}$ coincides with the syzygy $\gbf{\sigma}_{\gamma(i,j)}$ from Lemma~\ref{lem:syzygy}, and the result is a consequence of the following lemma.
\end{proof}
\begin{lemma}
\label{lem:mincircuits}
For $2m-3\leq k\leq n$, the minimal circuits in the graph $\Gamma_k$ are precisely those of the form $\gamma(i,j)= (\mu_i, \mu_j, \nu_j,\mu_i)$ arising from pairs $(i,j)$ in $\mathbb{B}_k=\{ (i,j) \mid 1 \leq i < j \leq k, \nu_{i}=\nu_{j}\}$.
\end{lemma}
\begin{proof}
We proceed by induction. Let $\gamma$ be a minimal circuit in $\Gamma_{2m-3}$ that is not of the form $\gamma(i,j)$ for any $(i,j)\in\mathbb{B}_{2m-3}$. Since $\gamma$ is a circuit, it must traverse an edge $e$ of the subgraph $\Gamma_m$, and since $\gamma\neq \gamma(i,j)$, either the edge that follows $e$ in $\gamma$, or that preceding $e$ in $\gamma$, must lie in $\Gamma_m$. In either case, $\gamma$ traverses two edges from $\Gamma_m$ that share a common vertex $\mu$. The special nature of $\Gamma_{2m-3}$ then forces the edge $(1,\mu)$ to be a chord of $\gamma$, a contradiction. Assume now that the result holds for $\Gamma_{k-1}$ and let $\gamma$ be a minimal circuit in $\Gamma_{k}$ that is not of the form $\gamma(i,j)$ for any $(i,j)\in\mathbb{B}_k$. If the edge $e_k=(\mu_k,\nu_k)$ does not lie in $\operatorname{supp}(\gamma)$ then the result holds by induction, so we suppose otherwise. Let $e$ be the unique edge in $\operatorname{supp}(\gamma)\setminus \{e_k\}$ that has $\nu_k$ as a vertex. There are three cases:
\begin{enumerate}
\item[\ensuremath{(\mathrm{i})}] $e=(\nu_k-1,\nu_k)$, in which case $(\mu_k,\nu_{k}-1)$ is a chord because $\gamma\neq \gamma(\nu_{k}-1,k)$;
\item[\ensuremath{(\mathrm{ii})}] $e=(\nu_k,\nu_k+1)$, in which case $\gamma$ must pass through a vertex of the form $1\leq \mu \leq \mu_k$ since it is a circuit, but then $(\mu,\nu_k)$ is a chord;
\item[\ensuremath{(\mathrm{iii})}] $e=(\mu,\nu_k)$ for some $1\leq \mu < \mu_k$. Since $\gamma\neq \gamma(j,k)$ for any $j< k$, the circuit $\gamma$ must pass through another vertex of the form $1\leq \mu^\prime < \mu_k$, but then $(\mu^\prime,\nu_k)$ is a chord.
\end{enumerate}
Thus, the minimal circuit $\gamma$ cannot exist.
\end{proof}
\begin{remark}
\begin{enumerate}
\item If for $2m-2\leq k\leq n$ we draw the vertices of $\Gamma_k$ spaced evenly around a circle centred at the origin in $\ensuremath{\mathbb{R}}^2$, then each minimal circuit $\gamma$ has length three and hence determines a triangle as in Figure~\ref{fig:triangle}. In the spirit of the Taylor resolution of a monomial ideal (see, for example, Bayer--Peeva--Sturmfels \cite{BPS98}), the triangle can be viewed as a 2-cell that defines $f^{\mu_i,\mu_j,\nu_j}$, and the edges are 1-cells defining $f^{\mu_i,\mu_j}, f^{\mu_i,\nu_j}$ and $f^{\mu_j,\nu_j}$. The coefficients of the syzygy $\gbf{\sigma}_{(i,j)}$ are then simply the quotients of the monomial for the 2-cell divided by the monomial for the corresponding 1-cell. An analogous statement holds for $m\leq k\leq 2m-3$, where the syzygies $\gbf{\sigma}_0$ and $\gbf{\sigma}_1$ from the proofs of Lemma~\ref{lem:cyclic} and Theorem~\ref{thm:main1} respectively define polygons with more than three sides.
\item We emphasise that our choice of order on the set of transpositions of $m$ letters is imposed on us by the geometry: the filtration in Proposition~\ref{prop:filtration2} below requires that the $S$-module $F^k$ contains $F^0$ for $1\leq k\leq n$. Without this constraint one could choose an alternative order in which each minimal circuit of $\Gamma_k$ for $m \leq k \leq n$ determines a triangle, leading to a more unified proof of Theorem~\ref{thm:main1}. Indeed, since $f^1,\dots, f^m$ are monomials, the modules $\operatorname{syz}(F^k)$ can be read off directly from the Taylor resolution for $1\leq k\leq m$.
\end{enumerate}
\end{remark}
As an application of Theorem~\ref{thm:main1}, we introduce a filtration of the $S$-module $\ker(\varphi)=\operatorname{syz}(M)$ that feeds into the proof of our main result. For $1\leq k\leq n$, the $S$-modules $F^k$ define a filtration
\[
0\subseteq F^1\subseteq F^2\subseteq \cdots \subseteq F^{n-1}\subseteq F^n=\operatorname{syz}(M)
\]
in which the successive quotients are cyclic $S$-modules
\begin{equation}
\label{eqn:betakquotient}
\frac{F^k}{F^{k-1}} \cong \frac{\langle \gbf{\beta}_k \rangle}{\langle\gbf{\beta}_1,\dots, \gbf{\beta}_{k-1} \rangle \cap \langle \gbf{\beta}_k \rangle}.
\end{equation}
The next result gives an explicit description of these quotient modules.
\begin{proposition}
\label{prop:filtration1}
For each $1\leq k\leq n$, the quotient $F^k/F^{k-1}$ is isomorphic to the cyclic $S$-module $S/I_k$, where the monomial ideal $I_k$ depends on $k$ as follows:
\begin{enumerate}
\item[\ensuremath{(\mathrm{i})}] for $1\leq k\leq m-1$, the ideal $I_k$ is the zero ideal;
\item[\ensuremath{(\mathrm{ii})}] for $k=m$, the ideal $I_k$ is principal with generator $f^{1,\dots,m}/f^{1,m}$;
\item[\ensuremath{(\mathrm{iii})}] for $m+1\leq k\leq 2m-3$, the ideal is
\[
I_k=\bigg\langle \frac{f^{1,k-m+2,k-m+3,\dots, m}}{f^{1,k-m+2}}, \frac{f^{1,k-m+1,k-m+2}}{f^{1,k-m+2}}\bigg\rangle;
\]
\item[\ensuremath{(\mathrm{iv})}] for $2m-2 \leq k\leq n$, the corresponding transposition is $\tau_k=(\mu_k,\nu_k)$, and the ideal is
\[
I_k = \bigg\langle \frac{f^{\mu,\mu_k,\nu_k}}{f^{\mu_k,\nu_k}} \: \bigg\vert \: \mu\in \{1, \dots, \mu_k-1\}\cup\{\nu_k-1\}\bigg\rangle.
\]
\end{enumerate}
\end{proposition}
\begin{proof}
For $1 \leq k \leq n$, let $\{\gbf{\sigma}_1,\dots, \gbf{\sigma}_r\}$ be a set of generators for the $S$-module $\operatorname{syz}(F^k)$. If we write $\gbf{\sigma}_{\nu}=\sum_{j=1}^k s_{\nu j}\gbf{\varepsilon}_j$ with $s_{\nu 1},\dots,s_{\nu k} \in S$ for $1 \leq \nu \leq r$, then \cite[Proposition~3.2.3]{KR00} implies that the coefficients $s_{1k},\dots, s_{rk}$ of $\gbf{\varepsilon}_k$ give the generators $s_{1k}\gbf{\beta}_k,\dots, s_{rk}\gbf{\beta}_k$ of the $S$-module $\langle\gbf{\beta}_1,\dots, \gbf{\beta}_{k-1} \rangle \cap \langle\gbf{\beta}_k \rangle$, so we obtain
\[
\frac{F^k}{F^{k-1}} \cong \frac{S}{\langle s_{1k},\dots, s_{rk} \rangle}.
\]
It remains to compute $I_k:=\langle s_{1k},\dots, s_{rk} \rangle$. Parts \ensuremath{(\mathrm{i})}\ and \ensuremath{(\mathrm{ii})}\ now follow from Lemma~\ref{lem:cyclic} and equation \eqref{eqn:sigma0}. For part \ensuremath{(\mathrm{iii})}, the proof of Theorem~\ref{thm:main1} shows that the only minimal circuits $\gamma$ in $\Gamma_k$ with $m+1\leq k\leq 2m-3$ for which the associated syzygy $\gbf{\sigma}_\gamma$ has a nonzero coefficient for $\gbf{\varepsilon}_k$ are $\gamma_1 := (1,k-m+2,k-m+3, \dots, m, 1)$ and $\gamma_2 := (1,k-m+2, k-m+1, 1)$. These nonzero coefficients are presented in equations \eqref{eqn:sigma1} and \eqref{eqn:sigma2}, namely
\[
\frac{f^{\gamma_1}}{f^{1,k-m+2}} = \frac{f^{1,k-m+2,k-m+3,\dots, m}}{f^{1,k-m+2}}\quad\text{and}\quad \frac{f^{\gamma_2}}{f^{1,k-m+2}}= \frac{f^{1,k-m+1,k-m+2}}{f^{1,k-m+2}}.
\]
For part \ensuremath{(\mathrm{iv})}, we deduce from Theorem~\ref{thm:main1} and Lemma~\ref{lem:mincircuits} that $\operatorname{syz}(F^k)$ is generated by the syzygies $\gbf{\sigma}_{(i,j)} = \gbf{\sigma}_{\gamma(i,j)}$ associated to pairs $(i,j)\in \mathbb{B}_k$. By equation \eqref{eqn:sigmaij}, such syzygies have a nonzero coefficient of $\gbf{\varepsilon}_k$ if and only if $(i,j)=(i,k)$ for those $1\leq i < k$ satisfying $\nu_i=\nu_k$. The $i$th edge $(\mu_i,\nu_i)$ in $\Gamma_k$ has $\nu_i=\nu_k$ if and only if $\mu_i \in \{1, \dots, \mu_k-1\}\cup\{\nu_k-1\}$, that is, we must consider all pairs of the form $(\mu,\nu_k)$ for $\mu\in \{1, \dots, \mu_k-1\}\cup\{\nu_k-1\}$. Equation \eqref{eqn:sigmaij} shows that the coefficient of $\gbf{\varepsilon}_k$ in this case is $f^{\mu,\mu_k,\nu_k}/f^{\mu_k,\nu_k}$ as required.
\end{proof}
\begin{remark}
The generators of $I_k$ listed in Proposition~\ref{prop:filtration1} need not be minimal for $m+1\leq k\leq n$. For example (though not the simplest), a straightforward calculation for the module $M$ over $S=\ensuremath{\Bbbk}[x_1,\dots, x_7]$ with generators
\[
f^1=x_1x_6,\; f^2= x_1x_2x_7,\; f^3=x_2x_3, \; f^4=x_3x_4, \; f^5=x_4x_5x_7, \; f^6= x_5x_6
\]
gives $I_{k}=S$ for $k=9,10,12,13$. Thus, $I_k$ is principal even though this ideal is listed as having more than one generator in Proposition~\ref{prop:filtration1}.
\end{remark}
\section{Cohomology of wheels on toric varieties}
\label{sec:cohomologyWheels}
Let $X$ be a normal variety over $\ensuremath{\Bbbk}$. The divisor class group $\operatorname{Cl}(X)$ is defined to be the group of linear equivalence classes of Weil divisors on $X$. Since $X$ is normal, two divisors $D$ and $D^\prime$ are linearly equivalent if and only if the associated rank-one reflexive sheaves $\mathscr{O}_X(D)$ and $\mathscr{O}_X(D^\prime)$ are isomorphic. We may therefore identify elements of the class group of $X$ with (isomorphism classes of) sheaves of the form $\mathscr{O}_X(D)$. In particular, for a Cartier divisor $D$ on $X$ defining an invertible sheaf $L:=\mathscr{O}_X(D)$, we sometimes write $L\in \operatorname{Cl}(X)$.
Let $X$ be a normal toric variety over $\ensuremath{\Bbbk}$ defined by a fan $\Sigma$ in the real vector space $N\otimes_\ensuremath{\mathbb{Z}} \ensuremath{\mathbb{R}}$ with underlying lattice $N$ of rank $n$. Write $\Sigma(1)$ for the set of one-dimensional cones in $\Sigma$, set $d:=\vert \Sigma(1)\vert$, and let $v_\rho\in N$ denote the primitive lattice point on the cone $\rho$. Each $\rho\in \Sigma(1)$ determines a torus-invariant Weil divisor $D_\rho$ in $X$, and we let $\ensuremath{\mathbb{Z}}^d$ denote the free abelian group of torus-invariant Weil divisors. Assume that $X$ has no torus factors. The map $\deg\colon \ensuremath{\mathbb{Z}}^d\to \operatorname{Cl}(X)$ sending $D$ to the sheaf $\mathscr{O}_X(D)$ fits into a short exact sequence of abelian groups
\[
0 \xlongrightarrow{} M \xlongrightarrow{\operatorname{div}} \ensuremath{\mathbb{Z}}^d \xlongrightarrow{\deg} \operatorname{Cl}(X)\xlongrightarrow{} 0,
\]
where $M$ is the lattice dual to $N$ and where $m\in M$ maps to $\operatorname{div}(m)=\sum_{\rho\in \Sigma(1)} \langle m,v_\rho\rangle D_\rho$. The restriction of the map $\deg\colon \ensuremath{\mathbb{Z}}^d\to \operatorname{Cl}(X)$ to the subsemigroup $\ensuremath{\mathbb{N}}^d$ defines a $\operatorname{Cl}(X)$-grading of the \emph{Cox ring} of $X$ which is the semigroup ring $S:= \ensuremath{\Bbbk}[x_1,\dots,x_d]$ of $\ensuremath{\mathbb{N}}^d$. Explicitly, the degree of a monomial $\prod_{\rho\in \Sigma(1)} x_\rho^{a_\rho}\in S$ is $\mathscr{O}_X(\sum_{\rho\in \Sigma(1)} a_\rho D_\rho)\in \operatorname{Cl}(X)$. Armed with this $\operatorname{Cl}(X)$-grading of the ring $S$, Cox~\cite[Proposition~3.1]{Cox95} introduced an exact covariant functor
\begin{equation}
\label{eq:Cox}
\{\operatorname{Cl}(X)\text{-graded }S\text{-modules}\} \longrightarrow \{\text{quasicoherent }\mathscr{O}_X\text{-modules}\} \; :\; F\longmapsto \widetilde{F}
\end{equation}
from the category of $\operatorname{Cl}(X)$-graded $S$-modules to the category of quasi-coherent sheaves on $X$, and Musta{\c{t}}{\u{a}}~\cite[Theorem~1.1]{Mustata02} subsequently showed that the functor is essentially surjective, i.e., that every quasi-coherent sheaf (up to isomorphism) on $X$ lies in the image of this functor. If $X$ is smooth, two such graded modules determine isomorphic sheaves if and only if they agree up to saturation by Cox's irrelevant ideal $B = (\prod_{\rho\not\subset \sigma} x_\rho \mid \sigma\in \Sigma)$, but we do not use this fact (until Remark~\ref{rem:hex}). The important point for us is that the functor enables us to lift a complex of quasi-coherent sheaves on $X$ to obtain a complex of $\operatorname{Cl}(X)$-graded $S$-modules which we can study, and then push down again to the original complex of sheaves.
As described in the introduction, our primary motivation is to study four-term complexes $T^\bullet$ on $X$ of the form
\eqref{eqn:Tbullet} for some integer $m \geq 2$. In fact, we take as the primary object of study the corresponding diagram of torus-equivariant maps between invertible sheaves on $X$:
\begin{equation}
\label{eqn:diagrammaintext}
\begin{split}
\centering
\psset{unit=0.45cm}
\begin{pspicture}(0,-1)(25,13.7)
\cnodeput*(0,6){A}{$L$}
\cnodeput*(8,12){B}{$L_{1,2}$}
\cnodeput*(8,9){C}{$L_{2,3}$}
\cnodeput*(8,6){D}{$L_{3,4}$}
\cnodeput*(8,3.2){S}{$\vdots$}
\cnodeput*(8,0){E}{$L_{m,1}$}
\cnodeput*(18,12){F}{$L_{1}$}
\cnodeput*(18,9){G}{$L_{2}$}
\cnodeput*(18,6){H}{$L_{3}$}
\cnodeput*(18,3.2){T}{$\vdots$}
\cnodeput*(18,0){I}{$L_{m}$}
\cnodeput*(26,6){J}{$L.$}
\psset{nodesep=1pt}
\ncline{->}{A}{B}\lput*{:U}(0.6){$\scriptstyle{D_{1,2}}$}
\ncline{->}{A}{C}\lput*{:U}(0.6){$\scriptstyle{D_{2,3}}$}
\ncline{->}{A}{D}\lput*{:U}(0.6){$\scriptstyle{D_{3,4}}$}
\ncline{->}{A}{E}\lput*{:U}(0.6){$\scriptstyle{D_{m,1}}$}
\ncline{->}{B}{F}\lput*{:U}(0.4){$\scriptstyle{D^2_1}$}
\ncline{->}{B}{G}\lput*{:U}(0.4){$\scriptstyle{D^1_2}$}
\ncline{->}{C}{G}\lput*{:U}(0.4){$\scriptstyle{D^3_2}$}
\ncline{->}{C}{H}\lput*{:U}(0.4){$\scriptstyle{D^2_3}$}
\ncline{->}{D}{H}\lput*{:U}(0.4){$\scriptstyle{D^4_3}$}
\ncline{->}{E}{I}\lput*{:U}(0.4){$\scriptstyle{D^1_m}$}
\nccurve[angleA=-40,angleB=140]{->}{E}{F}\lput*{:U}(0.4){$\scriptscriptstyle{D^m_1}$}
\ncline{->}{F}{J}\lput*{:U}(0.4){$\scriptstyle{D^{1}}$}
\ncline{->}{G}{J}\lput*{:U}(0.4){$\scriptstyle{D^{2}}$}
\ncline{->}{H}{J}\lput*{:U}(0.4){$\scriptstyle{D^{3}}$}
\ncline{->}{I}{J}\lput*{:U}(0.4){$\scriptstyle{D^{m}}$}
\end{pspicture}
\end{split}
\end{equation}
\noindent Every torus-equivariant map is multiplication by a torus-invariant section of an invertible sheaf on $X$, and we illustrate on each arrow the torus-invariant Cartier divisor of zeros of the corresponding section. Thus, for example, the effective divisor $D^1_{2}\in H^0(L_2\otimes L_{1,2}^{-1})\cong \operatorname{Hom}(L_{1,2},L_2)$ denotes the Cartier divisor of zeros of the section that defines the map from $L_{1,2}$ to $L_2$. One can think of any such diagram as a representation of a quiver (arising as the skeleton of a three-dimensional rhombic polyhedron) in the category of invertible sheaves on $X$.
Throughout, we impose relations on this quiver, whereby each of the two-dimensional rhombic faces of this quiver forms a commutative square, i.e.
\begin{align}
D^j_{j+1}+D^{j+1} & = D_j^{j+1}+D^j , \label{eqn:relations1}\\
D^{j-1}_j+D_{j-1,j} & = D_j^{j+1}+D_{j,j+1},\label{eqn:relations2}
\end{align}
for $1\leq j\leq m$ (working modulo $m$, with indices in the range $1,\dots, m$). We now describe how a diagram of the form \eqref{eqn:diagrammaintext} gives rise to a complex of $\operatorname{Cl}(X)$-graded $S$-modules precisely when \eqref{eqn:relations1} and \eqref{eqn:relations2} hold. Indeed, let $S(L)$ denote the free $S$-module with generator $\textbf{e}_L$ in degree $L$, and for $1\leq j\leq m$ let $S(L_j)$ and $S(L_{j,j+1})$ denote the free $S$-modules with generators $\textbf{e}_j$ in degree $L_j$ and $\textbf{e}_{j,j+1}$ in degree $L_{j,j+1}$ respectively. In addition, let $f^j$, $f^j_{j+1}$, $f^{j+1}_j$, $f_{j,j+1}$ denote the monomials in the Cox ring $S$ whose divisors of zeroes are the torus-invariant Cartier divisors $D^j$, $D^j_{j+1}$, $D^{j+1}_{j}$, $D_{j,j+1}$ from \eqref{eqn:diagrammaintext}. Consider the sequence of $\operatorname{Cl}(X)$-graded $S$-modules
\begin{equation}
\label{eqn:complexSmods}
S(L) \xlongrightarrow{\varphi^3} \bigoplus_{j=1}^m S(L_{j,{j+1}}) \xlongrightarrow{\varphi^2} \bigoplus_{j=1}^m S(L_j) \xlongrightarrow{\varphi^{1}} S(L),
\end{equation}
with maps
\[
\varphi^{3}(\textbf{e}_{L}) = \sum_{j=1}^m f_{j,j+1} \textbf{e}_{j,j+1}, \quad
\varphi^{2}(\textbf{e}_{j,j+1}) = f^j_{j+1}\textbf{e}_{j+1} - f^{j+1}_j \textbf{e}_j,\quad
\varphi^{1}(\textbf{e}_j) = f^j\textbf{e}_L.
\]
We claim that the sequence \eqref{eqn:complexSmods} is a complex if and only if the relations \eqref{eqn:relations1} and \eqref{eqn:relations2} hold. Indeed, \eqref{eqn:complexSmods} is a complex if and only if we have
\[
(\varphi^2 \circ \varphi^3)(\textbf{e}_{L})=0\quad \text{and}\quad (\varphi^1 \circ \varphi^2)(\textbf{e}_{j,j+1})=0 \text{ for }1\leq j\leq m,
\]
which is the case if and only if $f^j_{j+1}f^{j+1}-f^{j+1}_j f^j=0$ and $f^{j-1}_j f_{j-1,j}-f^{j+1}_j f_{j,j+1}=0$ for all $1\leq j\leq m$, and these equations hold if and only if \eqref{eqn:relations1} and \eqref{eqn:relations2} hold for all $1\leq j\leq m$. In summary, the diagram \eqref{eqn:diagrammaintext} of invertible sheaves in which the relations \eqref{eqn:relations1} and \eqref{eqn:relations2} hold determines a complex of $\operatorname{Cl}(X)$-graded $S$-modules of the form \eqref{eqn:complexSmods}. Conversely, to any complex of the form \eqref{eqn:complexSmods}, one can reverse this procedure to obtain a diagram \eqref{eqn:diagrammaintext} of invertible sheaves on $X$ in which the relations \eqref{eqn:relations1} and \eqref{eqn:relations2} hold.
Applying the exact functor \eqref{eq:Cox} to the complex \eqref{eqn:complexSmods} of $\operatorname{Cl}(X)$-graded $S$-modules determines a complex $T^\bullet$ of locally free sheaves on $X$ of the form
$$
L \xlongrightarrow{d^3} \bigoplus_{j=1}^m L_{j,{j+1}} \xlongrightarrow{d^2} \bigoplus_{j=1}^m L_j \xlongrightarrow{d^1} L,
$$
where each differential is torus-equivariant, and where the right-hand copy of $L$ lies in degree zero. Moreover, for each $1\leq j\leq m$ the restriction of the differential $d^2$ to the summand $L_{j,j+1}$ has image in $L_j\oplus L_{j+1}$ (with indices modulo $m$). This is the \emph{total chain complex} $T^{\bullet}$ of the diagram \eqref{eqn:diagrammaintext}. The complexes studied by Cautis--Logvinenko~\cite{CL09}, Cautis--Craw--Logvinenko~\cite{CCL12} and Bocklandt--Craw--Quintero-V\'{e}lez~\cite{BCQ12} that motivated our main result all take this form. The invertible sheaves at the left and right of diagram \eqref{eqn:diagrammaintext} coincide, so the sheaves and the maps between them in diagram \eqref{eqn:diagrammaintext} can be represented equally well in a planar picture as in Figure~\ref{fig:wheel}; we call this the \emph{wheel} of invertible sheaves on $X$.
\begin{figure}
\caption{Wheel of invertible sheaves on $X$}
\end{figure}
We now use the results of the previous section to compute the cohomology of the complex $T^{\bullet}$. For this purpose, we first note that the map $\varphi^1$ is of the form considered in the preceding section, so we may list the generators of its kernel in a sequence $\gbf{\beta}_1, \dots,\gbf{\beta}_n$ with $n=\binom{m}{2}$. We also list the generators of the image of $\varphi^2$ as
\[
\gbf{\alpha}_j:= f^j_{j+1}\textbf{e}_{j+1} - f^{j+1}_j \textbf{e}_j
\]
for $1\leq j\leq m$. The next proposition is central to the main result of this paper.
\begin{proposition}
\label{prop:filtration2}
The $S$-modules
\[
F^k = \begin{cases} \langle \gbf{\beta}_1, \dots,\gbf{\beta}_k,\gbf{\alpha}_{k+1}, \dots,\gbf{\alpha}_m\rangle & \mbox{for } 1\leq k \leq m, \\ \langle \gbf{\beta}_1, \dots,\gbf{\beta}_m,\gbf{\beta}_{m+1}, \dots,\gbf{\beta}_k\rangle & \mbox{for } m+1\leq k\leq n, \end{cases}
\]
define a filtration
\[
\operatorname{im}(\varphi^2)=F^0 \subseteq F^1\subseteq \cdots \subseteq F^{n-1}\subseteq F^n=\ker(\varphi^1).
\]
Moreover, for $1 \leq k \leq n$ and for the transposition $\tau_k=(\mu_k,\nu_k)$, the quotient $F^k/F^{k-1}$ is isomorphic to the cyclic $\operatorname{Cl}(X)$-graded $S$-module $(S/I_k)(L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1}(\gcd(D^{\mu_k},D^{\nu_k})))$, where the monomial ideal $I_k$ depends on $k$ as follows:
\begin{enumerate}
\item[(1)]for $1 \leq k \leq m$, the ideal is
\[
I_k = \bigg\langle \gcd(f^k_{k+1},f^{k+1}_k), \frac{\operatorname{lcm}(f^{1,\dots,m},\gcd(f^{k+1}_{k+2},f^{k+2}_{k+1}),\dots,\gcd(f^{m}_{1},f^{1}_{m}) )}{f^{k,k+1}} \bigg\rangle;
\]
\item[(2)]for $m+1 \leq k \leq 2m-3$, the ideal is
\[
I_k=\bigg\langle \frac{f^{1,k-m+2,k-m+3,\dots, m}}{f^{1,k-m+2}}, \frac{f^{1,k-m+1,k-m+2}}{f^{1,k-m+2}}\bigg\rangle;
\]
\item[(3)] for $2m-2 \leq k\leq n$, the ideal is
\[
I_k = \bigg\langle \frac{f^{\mu,\mu_k,\nu_k}}{f^{\mu_k,\nu_k}} \: \bigg\vert \: \mu\in \{1, \dots, \mu_k-1\}\cup\{\nu_k-1\}\bigg\rangle.
\]
\end{enumerate}
\end{proposition}
\begin{proof}
To prove that the $S$-modules $F^k$ define a filtration, we need only show that $\gbf{\alpha}_{k} \in F^k$ for all $1\leq k \leq m$. For this, relation \eqref{eqn:relations1} gives
\begin{equation}
\label{eqn:relationsgcd}
D^k - \gcd(D^k,D^{k+1}) = D^k_{k+1} - \gcd(D^k_{k+1},D^{k+1}_k),
\end{equation}
and hence
\[
\frac{f^{k,k+1}}{f^{k+1}} = \frac{\operatorname{lcm}(f^k,f^{k+1})}{f^{k+1}}=\frac{f^k}{\gcd(f^k,f^{k+1})}=\frac{f^k_{k+1}}{\gcd(f^k_{k+1},f^{k+1}_k)}.
\]
Similarly, we have $f^{k,k+1}/f^{k}= f^{k+1}_{k}/\gcd(f^k_{k+1},f^{k+1}_k)$. Therefore
\begin{equation}
\label{eqn:alpha-beta}
\gbf{\alpha}_{k}=\gcd(f^k_{k+1},f^{k+1}_k)\left(\frac{f^{k, k+1}}{f^{k+1}}\mathbf{e}_{k+1}-\frac{f^{k,k+1}}{f^{k}}\mathbf{e}_{k}\right) = \gcd(f^k_{k+1},f^{k+1}_k)\gbf{\beta}_{k}
\end{equation}
for $1 \leq k \leq m$ as required. To prove part (1), we first note that
\[
\frac{F^k}{F^{k-1}}\cong\frac{\langle\gbf{\beta}_{k}\rangle/ ( \langle \gbf{\beta}_1, \dots,\gbf{\beta}_{k-1},\gbf{\alpha}_{k+1}, \dots,\gbf{\alpha}_m\rangle \cap \langle\gbf{\beta}_{k}\rangle)}{\langle\gbf{\alpha}_{k}\rangle/ ( \langle \gbf{\beta}_1, \dots,\gbf{\beta}_{k-1},\gbf{\alpha}_{k+1}, \dots,\gbf{\alpha}_m\rangle \cap \langle\gbf{\alpha}_{k}\rangle )}.
\]
In order to compute this quotient, it suffices, in view of \eqref{eqn:alpha-beta} and the remarks at the beginning of the proof of Proposition~\ref{prop:filtration1}, to determine a set of generators for the module of syzygies on $ \gbf{\beta}_1, \dots,\gbf{\beta}_{k},\gbf{\alpha}_{k+1}, \dots,\gbf{\alpha}_m$ for $1\leq k \leq m$. Proceeding exactly as in the proof of Lemma~\ref{lem:cyclic}, we find that this module is cyclic with generator
\begin{equation}
\label{eqn:sigma0alpha}
\gbf{\sigma}_0:=-\frac{\operatorname{lcm}(f^{1,\dots,m},g^{k+1,k+2},\dots,g^{m,1} )}{f^{1,m}}\gbf{\varepsilon}_m+\sum_{j=1}^{m-1} \frac{\operatorname{lcm}(f^{1,\dots,m},g^{k+1,k+2},\dots,g^{m,1} )}{f^{j,j+1}}\gbf{\varepsilon}_j,
\end{equation}
where we have set $g^{i,i+1}:=\gcd(f^i_{i+1},f^{i+1}_i)$ for $k+1\leq i \leq m$. Ignoring for now the $\operatorname{Cl}(X)$-grading, we deduce from this that
\[
\frac{\langle\gbf{\beta}_{k}\rangle}{ \langle \gbf{\beta}_1, \dots,\gbf{\beta}_{k-1},\gbf{\alpha}_{k+1}, \dots,\gbf{\alpha}_m\rangle \cap \langle\gbf{\beta}_{k}\rangle}\cong \frac{S}{\langle \operatorname{lcm}(f^{1,\dots,m},g^{k+1,k+2},\dots,g^{m,1})/f^{k,k+1}\rangle}.
\]
and therefore, by virtue of \eqref{eqn:alpha-beta},
\[
\frac{F^{k}}{F^{k-1}}\cong \frac{S}{\langle \gcd(f^{k}_{k+1},f^{k+1}_k), \operatorname{lcm}(f^{1,\dots,m},g^{k+1,k+2},\dots,g^{m,1})/f^{k,k+1}\rangle}
\]
which gives the ideal $I_k$ in part (1). For parts (2) and (3), Proposition~\ref{prop:filtration1}(iii) and (iv) respectively determine the ideals $I_k$ for which $F^k/F^{k-1}$ is isomorphic to $S/I_k$ as ungraded $S$-modules.
It remains to establish the isomorphism as $\operatorname{Cl}(X)$-graded modules. In light of the above and isomorphism \eqref{eqn:betakquotient}, it suffices to show that the degree of $\gbf{\beta}_k$ is $L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1}(\gcd(D^{\mu_k},D^{\nu_k}))$ for $1\leq k \leq n$. For each $1 \leq k \leq n$, multiplication by the monomials $f^{\mu_k}$ and $f^{\nu_k}$ defines $\operatorname{Cl}(X)$-graded maps $S \to S(L\otimes L_{\mu_k}^{-1})$ and $S \to S(L\otimes L_{\nu_k}^{-1})$ respectively. Tensoring each map with $S(L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1})$ yields $\operatorname{Cl}(X)$-graded maps $S(L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1}) \to S(L_{\nu_k})$ and $S(L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1}) \to S(L_{\mu_k})$ which, in turn, can be combined to form a $\operatorname{Cl}(X)$-graded map
\[
S(L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1}) \longrightarrow \bigoplus_{j=1}^m S(L_j),
\]
whose image in $\bigoplus_{j=1}^m S(L_j)$ is generated by the element $f^{\mu_k} \mathbf{e}_{\nu_k}-f^{\nu_k}\mathbf{e}_{\mu_k}$. Twisting further by $S(\mathscr{O}_X(\gcd(D^{\mu_k},D^{\nu_k})))$ determines a $\operatorname{Cl}(X)$-graded map
\[
S(L_{\mu_k}\otimes L_{\nu_k}\otimes L^{-1}(\gcd(D^{\mu_k},D^{\nu_k}))) \longrightarrow \bigoplus_{j=1}^m S(L_j)
\]
whose image is generated by the element
\begin{equation}
\label{eqn:nearlybeta}
\frac{f^{\mu_k}}{\gcd(f^{\mu_k},f^{\nu_k})} \mathbf{e}_{\nu_k}-\frac{f^{\nu_k}}{\gcd(f^{\mu_k},f^{\nu_k})}\mathbf{e}_{\mu_k}.
\end{equation}
To prove the claim it remains to show that \eqref{eqn:nearlybeta} coincides with $\gbf{\beta}_k$, but this is immediate since $f^{\mu_k}/\gcd(f^{\mu_k},f^{\nu_k})= \operatorname{lcm}(f^{\mu_k},f^{\nu_k})/f^{\nu_k}$ and $f^{\nu_k}/\gcd(f^{\mu_k},f^{\nu_k}) = \operatorname{lcm}(f^{\mu_k},f^{\nu_k})/f^{\mu_k}$.
\end{proof}
For $1 \leq k \leq n$, each of the generators of $I_k$ listed in Proposition~\ref{prop:filtration2} is a monomial in the Cox ring $S$ of $X$, so its divisor of zeros is an effective torus-invariant Weil divisor in $X$. Notice that while $f^j$, $f^j_{j+1}$, $f^{j+1}_j$, $f_{j,j+1}$ define torus-invariant Cartier divisors $D^j$, $D^j_{j+1}$, $D^{j+1}_{j}$, $D_{j,j+1}$ in $X$, the generators of the ideals $I_k$ are Weil divisors in general.
\begin{definition}
\label{def:Zk}
For each $1 \leq k \leq n$, define a subscheme $Z_k \subset X$ to be the scheme-theoretic intersection of a set of effective Weil divisors depending on $k$ as follows:
\begin{enumerate}
\item[\ensuremath{(\mathrm{i})}] for $1\leq k\leq m$, define $Z_k$ to be the scheme-theoretic intersection of $\gcd(D_{k+1}^k,D^{k+1}_k)$ and the divisor $\operatorname{lcm}\big(D^1,\dots,D^m,\gcd(D_{k+2}^{k+1},D^{k+2}_{k+1}),\dots,\gcd(D_{1}^{m},D^{1}_{m})\big)-\operatorname{lcm}(D^k,D^{k+1})$;
\item[\ensuremath{(\mathrm{ii})}] for $m+1 \leq k \leq 2m-3$, define $Z_k$ to be the scheme-theoretic intersection of the divisors $\operatorname{lcm}(D^1,D^{\nu_k},D^{\nu_k+1},\dots,D^m)-\operatorname{lcm}(D^{1},D^{\nu_k})$ and $\operatorname{lcm}(D^1,D^{\nu_k-1},D^{\nu_k})-\operatorname{lcm}(D^{1},D^{\nu_k})$;
\item[\ensuremath{(\mathrm{iii})}] for $2m-2\leq k \leq n$, define $Z_k$ to be the scheme-theoretic intersection of the divisors $\operatorname{lcm}(D^{\mu},D^{\mu_k},D^{\nu_k})-\operatorname{lcm}(D^{\mu_k},D^{\nu_k})$ for $\mu \in \{1, \dots, \mu_k-1\}\cup\{\nu_k-1\}$.
\end{enumerate}
\end{definition}
The subschemes $Z_k\subset X$ are torus-invariant, though some (possibly all) may be empty, see Example~\ref{exa:hex} for an explicit calculation. These subschemes enable us to formulate and prove the main result of this paper (this is Theorem~\ref{thm:mainintro} from the introduction).
\begin{theorem}
\label{thm:main}
Let $X$ be a normal toric variety and let $T^\bullet$ be the complex from \eqref{eqn:Tbullet}, with differentials determined by the Cartier divisors shown in \eqref{eqn:diagram}. Then:
\begin{enumerate}
\item[(1)]$H^0(T^{\bullet}) \cong \mathscr{O}_Z \otimes L$ where $Z$ is the scheme-theoretic intersection of $D^1,\dots,D^m;$
\item[(2)]$H^{-1}(T^{\bullet})$ has an $n$-step filtration
\[
\operatorname{im}(d^2)=F^0 \subseteq F^1\subseteq \cdots \subseteq F^{n-1}\subseteq F^n=\ker(d^1)
\]
where, for $1\leq k \leq n$ and for the transposition $\tau_k=(\mu_k,\nu_k)$, we have
\begin{equation}
\label{eqn:sheafquotient1}
F^k/F^{k-1}\cong \mathscr{O}_{Z_k} \otimes L_{\mu_k}\otimes
L_{\nu_k}\otimes L^{-1}(\gcd(D^{\mu_k},D^{\nu_k}));
\end{equation}
\item[(3)]$H^{-2}(T^{\bullet}) \cong \mathscr{O}_D \otimes L(D)$ where $D=\gcd(D_{1,2},D_{2,3},\dots,D_{m,1});$
\item[(4)]$H^{-3}(T^{\bullet})\cong 0$.
\end{enumerate}
\end{theorem}
\begin{proof}
As described at the beginning of this section, the complex $T^\bullet$ arises from a diagram \eqref{eqn:diagrammaintext} of invertible sheaves on $X$ in which the relations \eqref{eqn:relations1} and \eqref{eqn:relations2} hold, and every such diagram determines a complex of $\operatorname{Cl}(X)$-graded $S$-modules of the form \eqref{eqn:complexSmods}, where one can reproduce the original complex $T^\bullet$ by applying the exact functor \eqref{eq:Cox}. In particular, one can calculate the cohomology sheaves of $T^\bullet$ by computing the cohomology modules of \eqref{eqn:complexSmods} and applying the Cox functor. The statement of part (2) then follows from Proposition~\ref{prop:filtration2} and Definition~\ref{def:Zk}.
For part (1), note that $H^0(T^\bullet)$ is the cokernel of $\bigoplus_i \mathscr{O}
_X(-D^i)\otimes L\hookrightarrow \mathscr{O}
_X\otimes L$, namely the sheaf $\mathscr{O}
_Z\otimes L$ where $Z$ is the scheme-theoretic intersection of $D^1,\dots,D^m$. For part (4), every nonzero map between invertible sheaves is injective, so $H^{-3}(T^\bullet)\cong 0$. It remains to prove part (3). The proof of the analogous statement from \cite[Lemma~3.1]{CL09} does not immediately extend to our setting, as was the case with parts (1) and (4) above, but we can nevertheless adapt the argument as follows. We claim first that if the greatest common divisor $D$ is zero then $H^{-2}(T^\bullet)\cong 0$. We need only show that complex \eqref{eqn:complexSmods} has no cohomology in degree $-2$. Indeed, suppose $\gbf{\eta}= \sum_{j=1}^m u_j \mathbf{e}_{j,j+1}$ lies in the kernel of $\varphi^2$, so
\[
0=\varphi^2(\gbf{\eta})=\sum_{j=1}^m u_j( f^j_{j+1}\mathbf{e}_{j+1}- f^{j+1}_j \mathbf{e}_{j}).
\]
This translates into the following set of equations:
\[
u_{j-1} f^{j-1}_{j}=u_{j}f^{j+1}_{j}\qquad 1 \leq j \leq m.
\]
By relation \eqref{eqn:relations2} we have $f^{j-1}_j f_{j-1,j}=f^{j+1}_j f_{j,j+1}$ for $1 \leq j \leq m$. Consequently, we find that
\begin{equation}
\label{eqn:relationH2}
u_{j-1}f_{j,j+1}=u_{j}f_{j-1,j}, \qquad 1 \leq j \leq m.
\end{equation}
We claim that $f_{j,j+1}$ divides $u_j$ for all $1 \leq j \leq m$. It suffices to prove that $f_{1,2}$ divides $u_1$ by virtue of \eqref{eqn:relationH2}. Let $x_i$ be a prime factor of $f_{1,2}$ with multiplicity $p$. Since by assumption $\gcd(f_{1,2},f_{2,3},\dots,f_{m,1})=1$, it follows that $x_i^p$ does not divide $f_{\nu,\nu+1}$ for some $\nu \neq 1$. Appealing to \eqref{eqn:relationH2} once again, we find that $u_1f_{\nu,\nu+1}=u_{\nu}f_{1,2}$, and thus $x_{i}^p$ divides $u_1 f_{\nu,\nu+1}$. Since $S$ is a unique factorisation domain, this means that $x_i^p$ divides $u_1$, which in turn implies that $f_{1,2}$ divides $u_1$. If we now set $u:=u_1/f_{1,2}$, then equations \eqref{eqn:relationH2} give
\[
u=\frac{u_1}{f_{1,2}}=\frac{u_2}{f_{2,3}}=\cdots=\frac{u_m}{f_{m,1}},
\]
from which it follows that $\gbf{\eta}=u \sum_{j=1}^m f_{j,j+1}\mathbf{e}_{j,j+1}$. Thus, $\gbf{\eta}$ lies in the image of $\varphi^3$, so the complex \eqref{eqn:complexSmods} has no cohomology in degree $-2$ as required.
To complete the proof of part (3), suppose $D\neq 0$. We can factor $d^3\colon T^{-3}\to T^{-2}$ as a map $L \to L(D)$ followed by a map with no common divisors. By the above argument, the image of $L(D)$ under this map equals the kernel of $d^2\colon T^{-2} \to T^{-1}$. Therefore $H^{-2}(T^{\bullet})$ can be identified with the cokernel of $L \to L(D)$, which is $\mathscr{O}
_{D} \otimes L(D)$.
\end{proof}
\begin{remark}
\label{rem:CautisLogvinenko}
For $m=3$, Theorem~\ref{thm:mainintro} agrees with the statement of the main technical result from Cautis--Logvinenko~\cite[Lemma~3.1]{CL09} (recall from the discussion surrounding Example~\ref{ex:counterexample} above that the assumptions from \emph{loc.~cit.}, namely that $X$ is an arbitrary smooth separated scheme, should be replaced by the assumptions of Theorem~\ref{thm:mainintro}). Parts (1), (3), (4) of Theorem~\ref{thm:mainintro} clearly generalise the analogues from \cite[Lemma~3.1]{CL09}. As for $H^{-1}(T^\bullet)$, we have $m=3$ and hence $n=3$, so Theorem~\ref{thm:mainintro}(2) gives a $3$-step filtration
\[
\operatorname{im}(d^2)=F^0\subseteq F^1\subseteq F^2\subseteq F^3=\ker(d^1),
\]
and we claim that the successive quotients agree with those of {\it loc.~cit.}. To justify this we first compute $F^2/F^{1}$. Since $\tau_2 = (2,3)$, Theorem~\ref{thm:mainintro}(2) shows that
\[
F^2/F^1\cong \mathscr{O}
_{Z_2}\otimes L_{2}\otimes L_{3}\otimes L^{-1}\big(\gcd(D^{2},D^{3})\big),
\]
where $Z_2$ is the intersection of $\gcd(D^2_{3},D^{3}_2)$ and $\operatorname{lcm}(D^1, D^2, D^3, \gcd(D^3_1, D^1_3))-\operatorname{lcm}(D^2,D^3)$. A direct computation shows that the relation defined by the generator $\gbf{\sigma}_0$ from \eqref{eqn:sigma0alpha} is
\[
\frac{f^3_1}{\gcd(f^3_1,\widetilde{f}^2_1)}\gbf{\beta}_1 + \frac{\widetilde{f}^1_2f^3_1}{\gcd(f^3_1,\widetilde{f}^2_1)\widetilde{f}^3_2}\gbf{\beta}_2 - \frac{\widetilde{f}^2_1}{\gcd(f^3_1,\widetilde{f}^2_1)}\gbf{\alpha}_3= 0,
\]
where $\widetilde{f}^i_j = f^i_j/\gcd(f^i_j, f^j_i)$. Since $k=2$, the coefficient of $\gbf{\beta}_2$ coincides with the generator $\operatorname{lcm}(f^{1,2,3},\gcd(f^3_1,f^1_3))/f^{2,3}$ of the ideal $I_2$. In particular, the scheme $Z_2$ is the intersection of $\gcd(D^2_{3},D^{3}_2)$ and $\widetilde{D}^1_{2} + D^3_{1}-\widetilde{D}^3_2 - \gcd(D^3_1, \widetilde{D}^2_1)$, where $\widetilde{D}^i_j$ is the divisor of zeros of the function $\widetilde{f}^i_j$. Permutations are listed as $\tau_1=(1,2), \tau_2=(3,1), \tau_3=(2,3)$ in \cite{CL09}, so after applying the permutation $(1,2,3)$ to our indices, we need only invoke the identity
\[
\widetilde{D}^2_{3} + D^1_{2}-\widetilde{D}^1_3 - \gcd(D^1_2, \widetilde{D}^3_2) = D^2+\operatorname{lcm}(D^1_2, \widetilde{D}^3_2)-D^3-\widetilde{D}^1_3
\]
from \cite[p.~206]{CL09} to see that $Z_2$ is the scheme in the second bullet point of \cite[Lemma~3.1(2)]{CL09}. In order to compare the sheaves, equation \eqref{eqn:relationsgcd} gives $\gcd(D^2, D^3) = D^2 + \gcd(D^2_3, D^3_2) - D^2_3$, and $\mathscr{O}_X
(D^2) = L_2^{-1}\otimes L$ and $\mathscr{O}_X
(-D^2_3) \cong L_3^{-1}\otimes L_{2,3}$ hence
\begin{align*}
L_{2}\otimes L_{3}\otimes L^{-1}\big(\gcd(D^{2},D^{3})\big) & \cong L_{2}\otimes L_{3}\otimes L^{-1}\big(\gcd(D^{2}_3,D^{3}_2)\big) \otimes L_2^{-1}\otimes L\otimes L_3^{-1}\otimes L_{2,3}\\
& \cong L_{2,3}\big(\gcd(D^3_{2},D^{2}_{3})\big).
\end{align*}
Again, applying the permutation $(1,2,3)$ to the indices recovers the sheaf from the second bullet point of \cite[Lemma~3.1(2)]{CL09}, so our description of $F^2/F^1$ agrees with that from \emph{loc.~cit.}. A very similar calculation shows that our unified description of the quotients $F^k/F^{k-1}$ for $k=1,3$ agrees with those of $F^3/F^2$ and $F^1/F^0$ from \cite[Lemma~3.1(2)]{CL09}.
\end{remark}
\begin{example}
\label{exa:hex}
Let $X$ be the smooth toric threefold determined by the fan $\Sigma$ in $\ensuremath{\mathbb{R}}^3$ whose one-dimensional cones are generated by the vectors
\[
\text{$v_1=(1,0,1)$, $v_2=(0,1,1)$, $v_3=(-1,1,1)$, $v_4=(-1,0,1)$, $v_5=(0,-1,1)$, $v_6=(1,-1,1)$, $v_7=(0,0,1)$},
\]
where the cones in higher dimension are best illustrated by the height one slice of $\Sigma$ as shown in Figure~\ref{fig:toricfanX}. In particular, the Cox ring of $X$ is $S=\ensuremath{\Bbbk}[x_1,\dots, x_7]$ and the Cox irrelevant ideal is the monomial ideal $B = (x_3x_4x_5x_6, x_2x_3x_4x_7, x_2x_3x_4x_6, x_1x_5x_6x_7, x_1x_3x_5x_6, x_1x_2x_3x_6)$.
\begin{figure}
\caption{Height one slice of the fan $\Sigma$ defining the smooth toric threefold $X$}
\end{figure}
For $1 \leq \rho \leq 7$, let $E_{\rho}$ denote the divisor in $X$ corresponding to the ray of $\Sigma$ generated by $v_{\rho}$; we use the shorthand $E_{16}=E_{1}+E_6$, $E_{126} = E_1 + E_2 + E_6$ and so on. The group $\operatorname{Cl}(X)$ is the abelian group generated by $E_1,\dots, E_7$ subject to the relations $E_{16} \sim E_{34}$, $E_{23}\sim E_{56}$, and $E_{1234567} \sim 0$ (and since $X$ is smooth, we have that $\operatorname{Cl}(X)$ is isomorphic to the Picard group of $X$).
Set $L:=\mathscr{O}_X$, and consider the diagram of invertible sheaves
\begin{equation}
\label{eqn:wheelexample}
\begin{split}
\centering
\psset{unit=0.45cm}
\begin{pspicture}(0,-1)(25,16)
\cnodeput*(0,7.5){A}{$L$}
\cnodeput*(8,15){B}{$L_{1,2}$}
\cnodeput*(8,12){C}{$L_{2,3}$}
\cnodeput*(8,9){D}{$L_{3,4}$}
\cnodeput*(8,6){S}{$L_{4,5}$}
\cnodeput*(8,3){E}{$L_{5,6}$}
\cnodeput*(8,0){U}{$L_{6,1}$}
\cnodeput*(18,15){F}{$L_{1}$}
\cnodeput*(18,12){G}{$L_{2}$}
\cnodeput*(18,9){H}{$L_{3}$}
\cnodeput*(18,6){T}{$L_{4}$}
\cnodeput*(18,3){I}{$L_{5}$}
\cnodeput*(18,0){V}{$L_{6}$}
\cnodeput*(26,7.5){J}{$L$.}
\ncline{->}{A}{B}\lput*{:U}(0.5){$\scriptstyle{E_{345}}$}
\ncline{->}{A}{C}\lput*{:U}(0.5){$\scriptstyle{E_{456}}$}
\ncline{->}{A}{D}\lput*{:U}(0.5){$\scriptstyle{E_{156}}$}
\ncline{->}{A}{S}\lput*{:U}(0.5){$\scriptstyle{E_{126}}$}
\ncline{->}{A}{E}\lput*{:U}(0.5){$\scriptstyle{E_{123}}$}
\ncline{->}{A}{U}\lput*{:U}(0.5){$\scriptstyle{E_{234}}$}
\ncline{->}{B}{F}\lput*{:U}(0.3){$\scriptstyle{E_{27}}$}
\ncline{->}{B}{G}\lput*{:U}(0.3){$\scriptstyle{E_6}$}
\ncline{->}{C}{G}\lput*{:U}(0.3){$\scriptstyle{E_3}$}
\ncline{->}{C}{H}\lput*{:U}(0.3){$\scriptstyle{E_{17}}$}
\ncline{->}{D}{H}\lput*{:U}(0.3){$\scriptstyle{E_{47}}$}
\ncline{->}{D}{T}\lput*{:U}(0.3){$\scriptstyle{E_{27}}$}
\ncline{->}{S}{T}\lput*{:U}(0.6){$\scriptstyle{E_{57}}$}
\ncline{->}{S}{I}\lput*{:U}(0.6){$\scriptstyle{E_3}$}
\ncline{->}{E}{I}\lput*{:U}(0.6){$\scriptstyle{E_6}$}
\ncline{->}{E}{V}\lput*{:U}(0.6){$\scriptstyle{E_{47}}$}
\ncline{->}{U}{V}\lput*{:U}(0.6){$\scriptstyle{E_{17}}$}
\nccurve[angleA=-40,angleB=140]{->}{U}{F}\lput*{:U}(0.744){$\scriptscriptstyle{E_{57}}$}
\ncline{->}{F}{J}\lput*{:U}(0.5){$\scriptstyle{E_{16}}$}
\ncline{->}{G}{J}\lput*{:U}(0.5){$\scriptstyle{E_{127}}$}
\ncline{->}{H}{J}\lput*{:U}(0.5){$\scriptstyle{E_{23}}$}
\ncline{->}{T}{J}\lput*{:U}(0.5){$\scriptstyle{E_{34}}$}
\ncline{->}{I}{J}\lput*{:U}(0.5){$\scriptstyle{E_{457}}$}
\ncline{->}{V}{J}\lput*{:U}(0.5){$\scriptstyle{E_{56}}$}
\end{pspicture}
\end{split}
\end{equation}
where $L_4\cong L_1 = \mathscr{O}
_X(-E_{16})$, $L_5\cong L_2 = \mathscr{O}
_X(-E_{127})$, $L_6\cong L_3 = \mathscr{O}
_X(-E_{23})$, and similarly, where $L_{5,6}\cong L_{3,4}\cong L_{1,2}=\mathscr{O}
_X(E_{345})$, $L_{6,1}\cong L_{4,5}\cong L_{2,3}=\mathscr{O}
_X(E_{456})$. Let $T^{\bullet}$ be the total complex of diagram \eqref{eqn:wheelexample}. With the notation above, the generators $\gbf{\beta}_1,\dots,\gbf{\beta}_{15}$ of $\ker(d^1)$ are
\begin{equation*}
\begin{array}{lll}
\gbf{\beta}_1= -x_2 x_7 \mathbf{e}_1 + x_6 \mathbf{e}_2, & \gbf{\beta}_6= x_5 \mathbf{e}_1 - x_1 \mathbf{e}_6, & \gbf{\beta}_{11}= -x_4 x_5 \mathbf{e}_2 + x_1 x_2 \mathbf{e}_5,\\
\gbf{\beta}_2= -x_3 \mathbf{e}_2 + x_1 x_7 \mathbf{e}_3, & \gbf{\beta}_7= -x_2 x_3 \mathbf{e}_1 + x_1 x_6 \mathbf{e}_3, & \gbf{\beta}_{12}= -x_5 x_6 \mathbf{e}_2 + x_1 x_2 x_7 \mathbf{e}_6, \\
\gbf{\beta}_3= -x_4 \mathbf{e}_3 + x_2 \mathbf{e}_4, & \gbf{\beta}_8= -x_3 x_4 \mathbf{e}_1 + x_1 x_6 \mathbf{e}_4, & \gbf{\beta}_{13}= -x_4 x_5 x_7 \mathbf{e}_3 + x_2 x_3 \mathbf{e}_5, \\
\gbf{\beta}_4= -x_5 x_7 \mathbf{e}_4 + x_3 \mathbf{e}_5, & \gbf{\beta}_9= -x_4 x_5 x_7 \mathbf{e}_1 + x_1 x_6 \mathbf{e}_5, & \gbf{\beta}_{14}= -x_5 x_6 \mathbf{e}_3 + x_2 x_3\mathbf{e}_6,\\
\gbf{\beta}_5= -x_6 \mathbf{e}_5 + x_4 x_7 \mathbf{e}_6, & \gbf{\beta}_{10}= -x_3 x_4 \mathbf{e}_2 + x_1 x_2 x_7 \mathbf{e}_4, & \gbf{\beta}_{15}= -x_5 x_6 \mathbf{e}_4 + x_3 x_4 \mathbf{e}_6.\\
\end{array}
\end{equation*}
It is easy to see that the relations
\[
\gbf{\beta}_9 =-x_4 x_7 \gbf{\beta}_6 - x_1 \gbf{\beta}_5,\;\; \gbf{\beta}_{10}=x_4 \gbf{\beta}_2 + x_1 x_7 \gbf{\beta}_3,\;\;\gbf{\beta}_{12}=-x_5\gbf{\beta}_1 - x_2 x_7 \gbf{\beta}_6, \;\; \gbf{\beta}_{13}=x_5 x_7 \gbf{\beta}_3 + x_2 \gbf{\beta}_4
\]
hold, so the successive quotients $F^k/F^{k-1}$ vanish for $k=9,10,12,13$.
In addition, the generators $\gbf{\alpha}_1,\dots,\gbf{\alpha}_6$ of $\operatorname{im}(d^2)$ satisfy $\gbf{\alpha}_1=\gbf{\beta}_1$, $\gbf{\alpha}_2=\gbf{\beta}_2$, $\gbf{\alpha}_3=x_7 \gbf{\beta}_3$, $\gbf{\alpha}_4=\gbf{\beta}_4$, $\gbf{\alpha}_5=\gbf{\beta}_5$ and $\gbf{\alpha}_6=x_7 \gbf{\beta}_6$, so $F^{k}/F^{k-1}$ also vanishes for $k=1, 2, 4, 5$.
We now analyse three nonvanishing quotients $F^k/F^{k-1}$ to illustrate part (2) of Theorem~\ref{thm:mainintro}. First consider $k=3$. The transposition $\tau_3=(3,4)$ determines $\gcd(D^3,D^4) = E_3$, so
\[
F^3/F^{2} \cong \mathscr{O}
_{Z_3}\otimes L_{3}\otimes L_{4}\otimes L^{-1}(E_3)
\]
where, according to Definition~\ref{def:Zk}\ensuremath{(\mathrm{i})}, $Z_3$ is the scheme-theoretic intersection of the effective torus-invariant divisors $\gcd(D^3_{4},D^{4}_3) = E_7$ and
\[
\operatorname{lcm}(D^1, D^2, D^3, D^4, D^5, D^6,\gcd(D^4_5,D^5_4),\gcd(D^5_6,D^6_5),\gcd(D^6_1,D^1_6))-\operatorname{lcm}(D^3,D^4) = E_{1567}.
\]
In particular, $\operatorname{supp}(\mathscr{O}
_{Z_3}) = E_7$. Now consider the case $k=7$. The corresponding transposition
$\tau_7=(1,3)$ determines $\gcd(D^1,D^3) = 0$, so
\[
F^7/F^{6} \cong \mathscr{O}
_{Z_7}\otimes L_{1}\otimes L_{3}\otimes L^{-1}
\]
where, according to Definition~\ref{def:Zk}\ensuremath{(\mathrm{ii})}, $Z_7$ is the scheme-theoretic intersection of the divisors $\operatorname{lcm}(D^1,D^2,D^3)-\operatorname{lcm}(D^1,D^3) = E_7$ and $\operatorname{lcm}(D^1, D^3, D^4, D^5, D^6) - \operatorname{lcm}(D^1, D^3) = E_{457}$, giving $Z_7=E_7\cap E_{457}$ and $\operatorname{supp}(\mathscr{O}
_{Z_7}) = E_7$. Finally, consider the case $k=15$ for which the corresponding transposition $\tau_{15}=(4,6)$ determines $\gcd(D^4,D^6) = 0$, so
\[
F^{15}/F^{14} \cong \mathscr{O}
_{Z_{15}}\otimes L_{4}\otimes L_{6}\otimes L^{-1}
\]
where, according to Definition~\ref{def:Zk}\ensuremath{(\mathrm{iii})}, $Z_{15}$ is the scheme-theoretic intersection of the divisors $\operatorname{lcm}(D^{\mu},D^4, D^6) - \operatorname{lcm}(D^4, D^6)$ for $\mu=1, 2, 3, 5$, giving $Z_{15} = E_1\cap E_{127}\cap E_2\cap E_7$. In particular, the support of $\mathscr{O}_{Z_{15}}$ is the torus-invariant point $E_1\cap E_2\cap E_7$ in $X$.
As for $H^k(T^\bullet)$ for $k\neq -1$, notice that the scheme theoretic intersection of $D^1,\dots, D^6$ is contained in $D^1\cap D^4 = (E_1+E_6) \cap (E_3+E_4) = \emptyset$, so $H^{0}(T^{\bullet}) \cong 0$ by Theorem~\ref{thm:mainintro}(1). Similarly, $\gcd(D_{1,2},D_{2,3},D_{3,4},D_{4,5},D_{5,6},D_{6,1})=0$ so $H^{-2}(T^{\bullet})\cong 0$ by Theorem~\ref{thm:mainintro}(3). It follows that the complex $T^{\bullet}$ has cohomology concentrated in degree $-1$.
\end{example}
\begin{remark}
\label{rem:hex}
One can carry out much of the above calculation using Macaulay2~\cite{M2} in any given example, though the final description of $F^k/F^{k-1}$ is less user-friendly and geometric than ours. To give the flavour, we reproduce some of the calculations from Example~\ref{exa:hex}, omitting for brevity the information on the degree in the $\operatorname{Cl}(X)$-grading of each $S$-module generator\footnote{Macaulay2 requires the $\operatorname{Cl}(X)$-degree information in order to create the chain complex $\texttt{T}$, so for convenience we include the complete M2 commands at the end of the \LaTeX{} source file.}.
\begin{verbatim}S = QQ[x_1,x_2,x_3,x_4,x_5,x_6,x_7];
d1 = matrix{{x_1*x_6,x_1*x_2*x_7,x_2*x_3,x_3*x_4,x_4*x_5*x_7,x_5*x_6}}
d2 = matrix{{-x_2*x_7,0,0,0,0,-x_5*x_7},{x_6,x_3,0,0,0,0},
{0,-x_1*x_7,x_4*x_7,0,0,0},{0,0,-x_2*x_7,-x_5*x_7,0,0},
{0,0,0,x_3,x_6,0},{0,0,0,0,-x_4*x_7,x_1*x_7}}
d3 = matrix{ {-x_3*x_4*x_5},{x_4*x_5*x_6},{x_1*x_5*x_6},{-x_1*x_2*x_6},
{x_1*x_2*x_3},{x_2*x_3*x_4}}
T = chainComplex(d1,d2,d3)
\end{verbatim}
The minimal generators $\{\gbf{\beta}_j \mid j\in\{1,\dots, 15\}\setminus \{9,10,12,13\}\}$ can be obtained using
\begin{verbatim}
ker d1
\end{verbatim}
though Macaulay2 chooses an order on these generators that differs from ours. To obtain the cohomology sheaf $H^{-k}(T^\bullet)$ we compute the $k$th cohomology of $\texttt{T}$ and saturate by the irrelevant ideal. For example, the commands
\begin{verbatim}
B = ideal(x_3*x_4*x_5*x_6,x_2*x_3*x_4*x_7,x_2*x_3*x_4*x_6,x_1*x_5*x_6*x_7,
x_1*x_3*x_5*x_6,x_1*x_2*x_3*x_6 )
H0 = prune HH_0(T)
prune (H0/ saturate(0_S*H0,B)) \end{verbatim}
show that $H^{0}(T^\bullet)\cong 0$. Similarly $H^{-2}(T^\bullet)=0$. As for the filtration on $H^{-1}(T^\bullet)$, we input the submodules $F^k$ by hand and compute the quotients, for example,
\begin{verbatim}
F2=image matrix{{-x_2*x_7,0,0,0,0,-x_5*x_7}, {x_6,x_3,0,0,0,0},
{0,-x_1*x_7,x_4*x_7,0,0,0},{0,0,-x_2*x_7,-x_5*x_7,0,0},
{0,0,0,x_3,x_6,0},{0,0,0,0,-x_4*x_7,x_1*x_7}}
F3=image matrix{ {-x_2*x_7,0,0,0,0,-x_5*x_7}, {x_6,x_3,0,0,0,0},
{0,-x_1*x_7,x_4,0,0,0}, {0,0,-x_2,-x_5*x_7,0,0},
{0,0,0,x_3,x_6,0},{0,0,0,0,-x_4*x_7,x_1*x_7}}
Q3 = F3/F2
prune Q3
\end{verbatim}
\noindent In this case, the output is
\begin{verbatim}
cokernel | x_7 |
\end{verbatim}
so we reproduce our result that $F^3/F^2$ is supported on the divisor $E_7$. Similarly, input
\begin{verbatim}
F15=image matrix{
{-x_2*x_7,0,0,0,0,x_5,-x_2*x_3,-x_3*x_4,-x_4*x_5*x_7,0,0,0,0,0,0},
{x_6,-x_3,0,0,0,0,0,0,0,-x_3*x_4,-x_4*x_5,-x_5*x_6,0,0,0},
{0,x_1*x_7,-x_4,0,0,0,x_1*x_6,0,0,0,0,0,-x_4*x_5*x_7,-x_5*x_6,0},
{0,0,x_2,-x_5*x_7,0,0,0,x_1*x_6,0,x_1*x_2*x_7,0,0,0,0,-x_5*x_6},
{0,0,0,x_3,-x_6,0,0,0,x_1*x_6,0,x_1*x_2,0,x_2*x_3,0,0},
{0,0,0,0,x_4*x_7,-x_1,0,0,0,0,0,x_1*x_2*x_7,0,x_2*x_3,x_3*x_4}}
\end{verbatim}
and $\texttt{F14}$ (simply delete the final column in the above), then compute
\begin{verbatim}
Q15 = F15/F14
prune Q15
\end{verbatim}
\noindent In this case, the output is
\begin{verbatim}
cokernel | x_7 x_2 x_1 |
\end{verbatim}
This confirms our calculation from Example~\ref{exa:hex} that $F^{15}/F^{14}$ is supported on the torus-invariant point $E_1\cap E_2\cap E_7$.
\end{remark}
\end{document}
|
\begin{document}
\begin{center}
{\bf ON THE NEWMAN CONJECTURE}
\end{center}
\vskip0,5cm
\begin{center}
Alexander BULINSKI\footnote{The work is partially supported by RFBR grant 10-01-00397.}$^,$\footnote{Lomonosov
Moscow State University and University Paris-6 -- Pierre and Marie Curie.}
\end{center}
\vskip1cm
\begin{abstract}
We consider a random field, defined on an
integer-valued $d$-dimensional lattice $\mathbb{Z}^d$, with
covariance function satisfying a condition more general than
summability. Such condition appeared in the well-known Newman's
conjecture concerning the central limit theorem (CLT) for stationary
associated random fields. As was demonstrated by Herrndorf and
Shashkin, the conjecture fails already for $d=1$. In the present
paper, we show the validity of modified conjecture leaving intact
the mentioned condition on covariance function. Thus we establish,
for any integer $d\geq1$, a criterion of the CLT validity for the
wider class of positively associated stationary fields. The uniform
integrability for the squares of normalized partial sums, taken over
growing parallelepipeds or cubes in $\mathbb{Z}^d$, plays the key
role in deriving their asymptotic normality. So our result extends
the Lewis theorem proved for sequences of random variables. A
representation of variances of partial sums of a field using the
slowly varying functions in several arguments is employed in
essential way.
\vskip0,5cm
{\it Keywords and phrases}: stationary random fields, positive association, central limit theorem,
uniform integrability, slowly varying functions, the Newman conjecture.
\vskip0,5cm
{\it $2010$ AMS classification}: 60F05, 60G60.
\end{abstract}
\section{Introduction}
The study of asymptotical behavior of the
(normalized) sums of random variables is the vast research domain of
Probability Theory having various applications. The limit theorems
established for independent summands form here the classical core. In
this regard one can refer to the monographs \cite{GK}, \cite{Z},
\cite{IL}, \cite{P}; see also references therein.
Stochastic models described by means of families of dependent random variables
arose at the beginning of the last century.
Thus the Gaussian and Markov processes, martingales, solutions of the stochastic differential equations, mixing processes appeared
as well as other important classes (see, e.g., \cite{BS}, \cite{K}). Moreover,
much attention has been paid to studying of random fields.
Since the 1960s due to the problems of mathematical statistics, reliability theory, percolation and
statistical physics there arose
the stochastic models based on the families of variables possessing various forms of positive
or negative dependence (see, e.g., \cite{BSha}).
The key role in these models belongs to the notion of
association (in statistical physics the well-known FKG-inequalities imply the association).
We will use the following concept
extending that introduced in \cite{EPW}.
\begin{Df}\label{PA}$(${\rm \cite{N}}$)$
A real-valued random field $X=\{X_t,t\in T\}$ is called positively associated\footnote{or weakly associated} $($one writes $X\in {\sf PA})$
if, for any finite disjoint sets $I=\{s_1,\ldots,s_m\}\subset T$, $J=\{t_1,\ldots,t_n\}\subset T$ and all bounded coordinate-wise nondecreasing Lipschitz functions $f:\mathbb{R}^m\to \mathbb{R}$, $g:\mathbb{R}^n\to \mathbb{R}$, one has
\begin{equation}\label{e1}
{\sf cov}(f(X_{s_1},\ldots,X_{s_m}),g(X_{t_1},\ldots,X_{t_n})) \geq 0.
\end{equation}
\end{Df}
Recall that a random field $X$ is called associated (\cite{EPW}), if the definition above is satisfied without the hypothesis $I\cap J=\varnothing$. Obviously association implies positive association. Note that any family of (real-valued) independent random variables
is automatically associated. Many other important examples can be found in \cite{BSha}.
For a random field $X=\{X_t,t\in T\}$ and a finite set $U\subset T$ introduce
$$
S(U)=\sum_{t\in U} X_t.
$$
Further on we will consider random fields defined on a lattice $T=\mathbb{Z}^d$ and a probability space $(\Omega,\mathcal{F},{\sf P})$.
In the seminal paper by Newman \cite{N} the central limit theorem (CLT) was established for associated (strictly) stationary random
field $X=\{X_t,t\in \mathbb{Z}^d\}$ under {\it finite susceptibility condition} that is when the covariance function is summable:
\begin{equation}\label{newm}
\sigma^2:=\sum_{j\in\mathbb{Z}^d} {\sf cov}(X_0,X_j) < \infty.
\end{equation}
Namely, these simple assumptions imply for a field $X$ the following relation
\begin{equation}\label{clt0}
\frac{S_n - {\sf E}S_n}{\sqrt{\langle n \rangle}} \stackrel{law}\longrightarrow Z \sim \mathcal{N}(0,\sigma^2)\;\;\mbox{as}\;\;n\to \infty,\;n=(n_1,\ldots,n_d)\in\mathbb{N}^d,
\end{equation}
here $S_n = S([0,n]\cap \mathbb{Z}^d)$, $[0,n]=
[0,n_1]\times \ldots \times [0,n_d]$, $\langle n \rangle= n_1\ldots n_d$, $\mathcal{N}(0,\sigma^2)$
is a Gaussian law with parameters $0$ and $\sigma^2$,
$\stackrel{law}\longrightarrow$ stands for weak convergence of distributions.
The goal of this work is to provide the criteria of the CLT validity for positively associated stationary random fields with finite second moment (and in general without condition \eqref{newm}).
\section{Main results}
At first it is reasonable to recall several definitions.
\begin{Df}
A function $L:\mathbb{R}^d_+ \to \mathbb{R}\setminus \{0\}$ is called {\it slowly varying} $($at infinity$)$
if, for any vector $a=(a_1,\ldots,a_d)^{\top}$ with positive coordinates,
\begin{equation}\label{svf}
\frac{L(a_1x_1,\ldots,a_d x_d)}{L(x_1,\ldots,x_d)} \to 1\;\;\mbox{as}\;\;x=(x_{1},\ldots,x_{d})^{\top}\to \infty,
\end{equation}
i.e. $x_1\to \infty,\ldots,x_d\to \infty$. For such functions we write $L\in \mathcal{L}(\mathbb{R}^d_+)$.
\end{Df}
We operate with column vectors and use the symbol $\top$ for transposition. A function $L:\mathbb{N}^d \to \mathbb{R}\setminus \{0\}$
is called {\it slowly varying} (at infinity)
if, for any vector $a=(a_1,\ldots,a_d)^{\top}\in \mathbb{N}^d$, relation \eqref{svf} holds with the additional assumption that
$x\in \mathbb{N}^d$. Then we write $L \in \mathcal{L}(\mathbb{N}^d)$.
For example the function $\prod_{k=1}^d \log(x_k\vee 1)$ where $x\in \mathbb{R}^d_+$ belongs to $ \mathcal{L}(\mathbb{R}^d_+)$.
\begin{Rm}\label{rm1} It is well-known that not every function belonging to $\mathcal{L}(\mathbb{N}^d)$ admits extension
to a function from the class $ \mathcal{L}(\mathbb{R}^d_+)$ even for $d=1$. However, it is not difficult to
verify that
if a coordinate-wise nondecreasing function $L\in \mathcal{L}(\mathbb{N}^d)$, then $H(x):=L([\widetilde{x}])$ belongs to
$\mathcal{L}(\mathbb{R}^d_+)$. Here $\widetilde{x}=(x_1\vee 1,\ldots,x_d\vee 1)^{\top}$ for $x\in\mathbb{R}^d$, and
$[x]=([x_1],\ldots,[x_d])^{\top}$,
i.e. one takes the integer part of each component of $x$.
\end{Rm}
During a long time there was no solution to the Newman conjecture on possible replacement of requirement
\eqref{newm} appearing in the CLT by a milder condition. Namely, he considered the partial sums $S(U)$ taken over ``integer cubes'' $U$ and
believed that instead of \eqref{newm} it suffices to assume that for associated strictly stationary random field
$X=\{X_j,j\in \mathbb{Z}^d\}$ with ${\sf E} X_0^2 < \infty$ the function
\begin{equation}\label{fn}
{\sf K}(r)= \sum_{j\in \mathbb{Z}^d:\|j\| \leq r}{\sf cov}(X_0,X_j),\;\;\;r\in\mathbb{N},
\end{equation}
belongs to $\mathcal{L}(\mathbb{N})$ where $\|\cdot\|$ is the Euclidean norm in $\mathbb{R}^d$.
Unfortunately it turned out that this beautiful hypothesis is not true even for $d=1$.
The first counterexample was constructed by Herrndorf \cite{H}, and then Shashkin \cite{S} showed that
condition \eqref{newm} has in a sense the optimal character.
It is worth mentioning also that the Newman CLT was generalized in \cite{BV} for partial sums
$S(U)$ taken over regularly growing subsets of $\mathbb{Z}^d$. Further extensions are discussed in
Chapter 3 of \cite{BSha}.
\begin{Df}\label{d3} A family $X=\{X_j,j\in\mathbb{N}^d\}$ is called uniformly integrable if
$$
\lim_{c\to \infty} \sup_{j\in \mathbb{N}^d} {\sf E}|X_j|\mathbb{I}\{|X_j|\geq c\} = 0.
$$
\end{Df}
For a (wide sense) stationary random field $X=\{X_j,j\in\mathbb{Z}^d\}$ introduce the function
$$
{\sf K}_X(n)=\sum_{j\in\mathbb{Z}^d: -n\leq j\leq n} {\sf cov}\,(X_0,X_j),\;\;n\in\mathbb{N}^d.
$$
If $a=(a^{(1)},\ldots,a^{(d)})^{\top}$ and $b= (b^{(1)},\ldots,b^{(d)})^{\top}$ are vectors in
$\mathbb{R}^d$, the notation $a\leq b$ means that $a^{(k)} \leq b^{(k)}$ for all $k=1,\ldots,d$.
We write $a< b$ whenever $a^{(k)} < b^{(k)}$ for any $k=1,\ldots,d$.
The following result extends the Lewis theorem proved in \cite{L} for a sequence of random variables.
\begin{Th}\label{th1}
Let a strictly stationary random field $X=\{X_j, j\in\mathbb{Z}^d\} \in {\sf PA}$,
$0<{\sf E} X_0^2 < \infty$ and ${\sf K}_X(\cdot) \in \mathcal{L}(\mathbb{N}^d)$. Then $X$ satisfies CLT,
i.e.
\begin{equation}\label{clt}
\frac{S_n - {\sf E} S_n}{\sqrt{{\sf var}S_n}} \stackrel{law}\longrightarrow Z \sim \mathcal{N}(0,1)\;\;\mbox{as}\;\;n\to \infty,
\end{equation}
if and only if the family $\{(S_n - {\sf E} S_n)^2/(\langle n \rangle {\sf K}_X(n)), n\in \mathbb{N}^d\}$ is uniformly integrable.
\end{Th}
Consider now a sequence of growing ``integer cubes'' $C_r=(0,r]^d\cap \mathbb{Z}^d$,
$r\in\mathbb{N}$.
\begin{Th}\label{th2} Let a strictly stationary random field $X=\{X_j, j\in\mathbb{Z}^d\} \in {\sf PA}$,
$0<{\sf E} X_0^2 < \infty$ and ${\sf K}(\cdot) \in \mathcal{L}(\mathbb{N})$. Then
\begin{equation}\label{cltcubes}
\frac{S(C_r) - {\sf E} S(C_r)}{\sqrt{{\sf var}S(C_r)}} \stackrel{law}\longrightarrow Z
\sim \mathcal{N}(0,1)\;\;\mbox{as}\;\;r\to \infty,
\end{equation}
if and only if the sequence $((S(C_r) - {\sf E} S(C_r))^2/(r^d\, {\sf K}(r)))_{r\in \mathbb{N}}$ is uniformly
integrable.
\end{Th}
Theorem \ref{th2} shows what one has to assume additionally, for a class of positively associated strictly stationary
random fields, besides the condition that the function ${\sf K}(\cdot)$ is slowly varying to guarantee that the
Newman conjecture holds true for any dimension $d\in\mathbb{N}$. In \cite{N1} the author discussed his conjecture and
noted without proof that the ``mild version'' of that hypothesis takes place under the additional condition of
uniform integrability of the sequence appearing in Theorem 2 above. Therefore Theorems \ref{th1} and \ref{th2} show that
in fact we do not change the initial problem but clarify its essential feature. We do not deal here with a
renorm group approach
(do not consider the partition of $\mathbb{R}^d$
by the congruent cubes) but study the partial sums $S_n$ taken over any growing ``integer blocks''.
\section{Proofs of the main results}
We start with simple auxiliary statements.
\begin{Lm}\label{lm1}
Let a function $L$ belonging to $\mathcal{L}(\mathbb{N}^d)$ be coordinate-wise nondecreasing. Then there exist
non-random vectors $q_n=(q_n^{(1)},\ldots,q_n^{(d)})^{\top} \in\mathbb{N}^d$, where $n
=(n_1,\ldots,n_d)^{\top}\in\mathbb{N}^d$, such that
\begin{equation}\label{eq1}
q_n^{(k)} \leq n_k,\;\frac{q_n^{(k)}}{n_k} \to 0\;\;\mbox{for}\;\;k=1,\ldots,d,\;\;q_n\to \infty\;\;\;\mbox{and}\;\;\;
\frac{L(n)}{L(q_n)}\to 1\;\mbox{as}\;n\to \infty.
\end{equation}
\end{Lm}
{\bf Proof.} According to Remark \ref{rm1}
we can assume without loss of generality that $L$ is extended to a function belonging to
the class $ \mathcal{L}(\mathbb{R}^d_+)$. For any $R=(R^{(1)},\ldots,R^{(d)})^{\top} \in \mathbb{N}^d$
we can choose $N_0(R)\in\mathbb{N}^d$ in such a way that
$$
\frac{L(n_1,\ldots,n_d)}{L\Bigl(\frac{n_1}{R^{(1)}},\ldots,\frac{n_d}{R^{(d)}}\Bigr)}-1\leq \frac{1}{\langle R \rangle}
$$
for all $n\geq N_0(R)$. Now we take a sequence $(R(r))_{r\in\mathbb{N}}$ such that $R(r) \in \mathbb{N}^d$ and
$R(r)<R(r+ 1)$ for each $r\in \mathbb{N}$. Introduce
$M_0(1)=N_0(R(1))$ and $M_0(r+1)= (M_0(r)\vee N_0(R(r+1)))+{\bf 1}$
for $r\in \mathbb{N}$ where, as usual, ${\bf 1}=(1,\ldots,1)^{\top} \in \mathbb{R}^d$ and
$$
(a^{(1)},\ldots,a^{(d)})\vee (b^{(1)},\ldots,b^{(d)})= (a^{(1)}\vee b^{(1)},\ldots,a^{(d)}\vee b^{(d)}).
$$
Then $M_0(r)<M_0(r+1)$ for $r\in\mathbb{N}$. For arbitrary $r\in\mathbb{N}$ and $n\geq M_0(r)$
$$
\frac{L(n_1,\ldots,n_d)}{L\Bigl(\frac{n_1}{R^{(1)}(r)},\ldots,\frac{n_d}{R^{(d)}(r)}\Bigr)}-1\leq \frac{1}{\langle R(r) \rangle}.
$$
Let us define non-random sequences $(\varepsilon_j^{(k)})_{j\in \mathbb{N}}$ where $k=1,\ldots,d$, putting
$\varepsilon_{j}^{(k)} = 1/R^{(k)}(r)$ for $M^{(k)}_0(r)\leq j < M^{(k)}_0(r+1)$.
For any $\varepsilon >0$ take $r_0 \in\mathbb{N}$ in such a way that $1/\langle R(r_0)\rangle<\varepsilon$.
Further on, for $n$ such that $M_0(r)\leq n<M_0(r+1)$ where $r\geq r_0$, one has
$$
1\leq \frac{L(n_1,\ldots,n_d)}{L(n_1\varepsilon_{n_1}^{(1)},\ldots,n_d\varepsilon_{n_d}^{(d)})}
=\frac{L(n_1,\ldots,n_d)}{L\Bigl(\frac{n_1}{R^{(1)}(r)},\ldots,\frac{n_d}{R^{(d)}(r)}\Bigr)}
$$
$$
\leq 1+\frac{1}{\langle R(r) \rangle} \leq 1+\frac{1}{\langle R(r_0) \rangle}\leq 1+\varepsilon.
$$
Then we can take
$
q_n=([n_1\varepsilon_{n_1}^{(1)}],\ldots,[n_d\varepsilon_{n_d}^{(d)}])\vee ([\log n_1],\ldots,[\log n_d])
\vee {\bf 1},
$
to ensure
the validity of \eqref{eq1}. $\square$
\begin{Lm}\label{lm2}
Let $X=\{X_j, j\in\mathbb{Z}^d\}$ be a wide sense stationary random field with nonnegative covariance function.
Assume that ${\sf K}_X(\cdot)\in \mathcal{L}(\mathbb{N}^d)$. Then
\begin{equation}\label{vps}
{\sf var} S(U_n) \sim \langle n \rangle\,{\sf K}_X(n) \;\;\;\mbox{as}\;\;n\to \infty
\end{equation}
where $U_n=\{j\in\mathbb{Z}^d: {\bf 1}\leq j\leq n\}$, $n\in\mathbb{N}^d$. Conversely, if
${\sf var} S(U_n) \sim \langle n \rangle\, L(n)$
as $n\to \infty$, where $L\in\mathcal{L}(\mathbb{N}^d)$, then
$L(n)\sim {\sf K}_X(n)$ as $n\to \infty$.
\end{Lm}
{\bf Proof.} Let ${\sf K}_X(\cdot)\in \mathcal{L}(\mathbb{N}^d)$. Due to the (wide-sense) stationarity of $X$ one has
${\sf cov}(X_i,X_j)={\sf R}(i-j)$ for $i,j\in\mathbb{Z}^d$. Thus
$$
{\sf var} S(U_n) = \sum_{i,j\in U_n} {\sf cov}(X_i,X_j) = \sum_{i,j\in U_n}{\sf R}(i-j)
$$
$$
=\sum_{m\in \mathbb{Z}^d: -(n-{\bf 1})\leq m\leq n-{\bf 1}} (n_1 -|m_1|)\ldots (n_d -|m_d|){\sf R}(m)
$$
\begin{equation}\label{aux1a}
\leq \langle n \rangle \, \sum_{m\in \mathbb{Z}^d: -(n-{\bf 1})\leq m\leq n-{\bf 1}} {\sf R}(m)
\leq \langle n \rangle \, {\sf K}_X(n),
\end{equation}
as the function ${\sf R}$ is nonnegative.
Take any $c\in (0,1)$ and $n \geq \frac{1}{1-c}{\bf 1}$
(i.e. $cn\leq n-{\bf 1}$, $n\in \mathbb{N}^d$). Using again nonnegativity of ${\sf R}$ we can write
$$
{\sf var} S(U_n)
=\sum_{m\in \mathbb{Z}^d: -(n-{\bf 1})\leq m\leq n-{\bf 1}} (n_1 -|m_1|)\ldots (n_d -|m_d|){\sf R}(m)
$$
$$
\geq (1-c)^d \langle n \rangle \,\sum_{m\in \mathbb{Z}^d: -cn \leq m \leq cn} {\sf R}(m) = (1-c)^d
\langle n \rangle \,{\sf K}_X([cn]).
$$
In view of Remark \ref{rm1} we come to the relation
$$
(1-c)^d \langle n \rangle \,{\sf K}_X([cn])\sim (1-c)^d \langle n \rangle \,{\sf K}_X(n),
\;\;n\to \infty, \;\;n\in\mathbb{N}^d.
$$
Consequently, ${\sf var}S(U_n) \sim \langle n \rangle \,{\sf K}_X(n)$ as $n\to \infty$, because
$c$ can be taken arbitrarily close to zero.
Now suppose that ${\sf var} S(U_n)\sim \langle n \rangle \, L(n)$ as $n\to \infty$, where $L
\in \mathcal{L}(\mathbb{N}^d)$. Then for any $\varepsilon >0$ and all $n$ sufficiently large (i.e.
each component of $n$ is large enough), application of \eqref{aux1a} leads to the inequality
\begin{equation}\label{aux1}
{\sf K}_X(n) \geq \frac{{\sf var}S(U_n)}{ \langle n \rangle} \geq (1-\varepsilon)L(n).
\end{equation}
For a fixed $q\in\mathbb{N}$, $q>1$, and $n_r\in\mathbb{N}$, $m_r\in\mathbb{Z}$ such that
$|m_r|\leq n_r$ where $r=1,\ldots,d$, one has
$$
\frac{q}{q-1}\left(1-\frac{|m_r|}{n_rq}\right) \geq \frac{q}{q-1}\left(1-\frac{n_r}{n_rq}\right)=1.
$$
Therefore, taking into account condition ${\sf R}\geq 0$ we verify that
$$
{\sf K}_X(n) \leq \left(\frac{q}{q-1}\right)^d \sum_{m\in\mathbb{Z}^d: -n\leq m\leq n} {\sf R}(m) \prod_{r=1}^d \frac{(n_rq-|m_r|)}{n_rq}
$$
$$
\leq\left(\frac{q}{q-1}\right)^d\left(\prod_{r=1}^d n_rq\right)^{-1} \sum_{m\in\mathbb{Z}^d: -nq \leq m\leq nq} {\sf R}(m) \prod_{r=1}^d(n_rq-|m_r|)
$$
\begin{equation}\label{aux2}
=\left(\frac{q}{q-1}\right)^d\frac{{\sf var}S(U_{qn})}{\langle qn \rangle}\sim \left(\frac{q}{q-1}\right)^d L(qn),\;\;n\to \infty.
\end{equation}
As $q$ can be chosen arbitrarily large, using \eqref{aux1} and \eqref{aux2} we conclude that the
desired statement holds.
$\square$
\vskip0,5cm
{\bf Proof of Theorem 1.} {\sf Necessity.} Suppose that \eqref{clt} is satisfied.
Then
$$
\frac{(S_n-{\sf E} S_n)^2}{{\sf var} S_n} \stackrel{law}\longrightarrow Z^2
\;\;\mbox{as}\;\;n\to \infty.
$$
Indeed, if the random variables $Y_n \stackrel{law}\longrightarrow Y$, then for any bounded continuous function
$h:\mathbb{R}\to\mathbb{R}$ one has $h(Y_n)\stackrel{law}\longrightarrow h(Y)$ as $n\to \infty$.
Obviously,
$$
\frac{(S_n-{\sf E} S_n)^2}{{\sf var} S_n}\geq 0\;\;\mbox{and}\;\;\frac{{\sf E}(S_n-{\sf E} S_n)^2}{{\sf var} S_n}=1.
$$
Thus uniform integrability of the family $\{(S_n-{\sf E} S_n)^2/{\sf var} S_n, n\in\mathbb{N}^d\}$
follows from the analogue of Theorem 1.5.4 established in \cite{B} for a sequence of random variables indexed by
points of $\mathbb{N}$. In view of Lemma \ref{lm2} we can claim that \eqref{vps} holds. Consequently, the family
$\{(S_n - {\sf E} S_n)^2/(\langle n \rangle {\sf K}_X(n)), n\in \mathbb{N}^d\}$ is also uniformly integrable.
{\sf Sufficiency.}
If the function ${\sf K}_X$ is bounded we see that \eqref{newm} is valid and Theorem 3.1.12 of \cite{BSha}
implies that \eqref{clt} is satisfied. Thus we will assume further that a function ${\sf K}_X$ is unbounded.
Set
${\sf K}_X(t):={\sf K}_X([t]\vee {\bf 1})$
for $t=(t_1,\ldots,t_d)^{\top}\in \mathbb{R}^d_+$ where $[t]=([t_1],\ldots,[t_d])^{\top}$.
This extension of the initial function ${\sf K}_X$ belongs to $\mathcal{L}(\mathbb{R}^d_+)$ as
${\sf K}_X$ is coordinate-wise nondecreasing on $\mathbb{N}^d$ (a field $X\in {\sf PA}$, therefore its covariance function is nonnegative).
Further on we assume that the function ${\sf K}_X$ is extended on $\mathbb{R}^d_+$ as indicated above.
Let the vectors $q_n$, $n\in\mathbb{N}^d,$ be constructed according to Lemma \ref{lm1}.
It is not difficult to find a non-random family of vectors
$\{p_n, n\in\mathbb{N}^d\}$,
where $p_n$ takes values in $\mathbb{N}^d$, such that
\begin{equation}\label{eq2}
q_n^{(k)} \leq p_n^{(k)}\leq n_k,\;q_n^{(k)}/p_n^{(k)} \to 0\;\;\mbox{and}\;\;p_n^{(k)}/n_k \to 0\;\;
\mbox{for}\;k=1,\ldots,d\;\;\mbox{as}\;n\to \infty.
\end{equation}
Now we apply the Bernstein partitioning method. For $n,j\in\mathbb{N}^d$ and introduced $p_n$ and $q_n$ consider the blocks
$$
U_n^{(j)}=\{u\in \mathbb{N}^d: (j_k-1)(p_n^{(k)}+q_n^{(k)})<u_k \leq j_kp_n^{(k)}+(j_k -1)q_n^{(k)},\;k=1,\ldots,d\},
$$
where $u=(u_1,\ldots,u_d)$. Let $J_n=\{j\in\mathbb{N}^d: U_n^{(j)} \subset U_n\}$ and
$$
W_n =\bigcup_{j\in J_n} U_n^{(j)}, \;\;\;G_n= U_n\setminus W_n,\;\;n\in\mathbb{N}^d.
$$
In other words $W_n$ consists of ``large blocks'' (having the ``size'' $p_n^{(k)}$ along each of the $k$-th axis for $k=1,\ldots,d$),
separated by ``corridors'' belonging to the set $G_n$. Put $v_n = \sqrt{\langle n \rangle {\sf K}_X(n)}$. Then, for
each $t\in\mathbb{R}$ and
$n\in\mathbb{N}^d$ we obtain
$$
\left|{\sf E}\exp\left\{\frac{it}{v_n}S_n\right\} - e^{-\frac{t^2}{2}}\right| \leq
\left|{\sf E}\exp\left\{\frac{it}{v_n}S_n\right\} -
{\sf E}\exp\left\{\frac{it}{v_n}\sum_{j\in J_n}S(U_n^{(j)})\right\}\right|
$$
$$
+\left|{\sf E}{\sf E}xp\left\{\frac{it}{v_n}\sum_{j\in J_n}S(U_n^{(j)})\right\} - {\sf P}rod_{j\in J_n}
{\sf E}{\sf E}xp\left\{\frac{it}{v_n}S(U_n^{(j)})\right\}\right|
$$
$$
+\left|{\sf P}rod_{j\in J_n}{\sf E}
{\sf E}xp\left\{\frac{it}{v_n}S(U_n^{(j)}\right\}
- e^{-\frac{t^2}{2}}\right| =:\sum_{r=1}^3 Q_r,
$$
here $i^2=-1$, $Q_r=Q_r(n,t)$ and $S_n = S(U_n)$ as previously.
Taking into account that $|e^{ix}-e^{iy}|\leq |x-y|$ for all $x,y \in \mathbb{R}$,
and using the Lyapunov inequality we get
$$
Q_1 \leq \frac{|t|}{v_n} {\sf E}|S(G_n)| \leq \frac{|t|}{v_n} ({\sf E}S(G_n)^2)^{1/2}.
$$
A random field $X\in {\sf PA}$, therefore ${\sf cov}\,(X_j,X_u) \geq 0$ for any $j,u\in \mathbb{N}^d$.
Thus in view of wide-sense stationarity of $X$ we come to the relations
$$
{\sf E} S(G_n)^2 \leq \sum_{j\in G_n} \sum_{u:-n\leq u-j \leq n} {\sf cov} (X_j,X_u) \leq
{\sf card}\, G_n \,{\sf K}_X(n)
$$
$$
\leq {\sf K}_X(n) \sum_{k=1}^d (m_n^{(k)}q_n^{(k)} + p_n^{(k)} + q_n^{(k)})
\prod_{1\leq l\leq d, l\neq k} n_l
$$
where ${\sf card}\, G$ stands for the cardinality of a set $G$, $m_n^{(k)} = [n_k/(p_n^{(k)}+ q_n^{(k)})]$, $k=1,\ldots,d$. Due to \eqref{eq1} and \eqref{eq2} we get the inequality
$$
\frac{{\sf E}S(G_n)^2}{\langle n \rangle\, {\sf K}_X(n)}
\leq \sum_{k=1}^d \frac{m_n^{(k)}q_n^{(k)} + p_n^{(k)} + q_n^{(k)}}{n_k}\to 0,\;\;\;n\to \infty.
$$
Consequently, $Q_1(n,t)\to 0$ for each $t\in\mathbb{R}$ as $n\to \infty$.
For any $n\in \mathbb{N}^d$ the family $\{S(U_n^{(j)}), j\in J_n\} \in {\sf PA}$ (see, e.g., Theorem 1.1.8
in \cite{BSha}). Enumerate elements of this family to obtain the collection of random variables
$\{Y_{n,s}, s=1,\ldots,M_n\}$ where $M_n= {\sf card}\,J_n$.
It is easily seen that
$$
\prod_{k=1}^d m_n^{(k)}
\leq M_n \leq \prod_{k=1}^d (m_n^{(k)}+1).
$$
Recall that for complex-valued random variables $Y$ and $V$
(absolute square integrable) the covariance ${\sf cov}(Y,V):= {\sf E}(Y-{\sf E}Y)
\overline{(V-{\sf E}V)}$, where the bar denotes the conjugation.
Due to Theorem 1.5.3 of \cite{BSha} one has
$$
Q_2 \leq \sum_{s=1}^{M_n-1}\left|\,{\sf cov}\!\left(\exp\left\{\frac{it}{v_n}Y_{n,s}\right\},\exp\left\{-\frac{it}{v_n}\sum_{l=s+1}^{M_n} Y_{n,l}\right\}\right)\right|
$$
$$
\leq \frac{4t^2}{v_n^2}\sum_{1\leq s,l \leq M_n,s\neq l}{\sf cov}(Y_{n,s},Y_{n,l})
\leq \frac{4t^2}{\langle n \rangle \,{\sf K}_X(n)}\sum_{j\in U_n}\sum_{u\in U_n,|u-j|>q_n}{\sf cov}(X_j,X_u)
$$
where $|u|=\max_{k=1,\ldots,d}|u_k|$.
Obviously, for $j\in U_n$
$$
\{u\in U_n, |u-j|>q_n\} \subset \{u\in \mathbb{N}^d: j-n \leq u \leq j+n\}\setminus \{u\in \mathbb{N}^d:|u-j|\leq q_n\}.
$$
Therefore, the inequality
$$
\sum_{j\in U_n} \sum_{u\in U_n, |u-j|>q_n} {\sf cov}(X_j,X_u) \leq \langle n \rangle ({\sf K}_X(n) - {\sf K}_X(q_n))
$$
and \eqref{eq2} imply that $Q_2(n,t)\to 0$ for each $t\in \mathbb{R}$ as $n\to \infty$.
For any $n\in\mathbb{N}^d$ introduce a vector $(Z_{n,1},\ldots,Z_{n,M_n})^{\top}$ having independent components and such that
the law of $Z_{n,k}$ coincides with the law of $Y_{n,k}/v_n$, $k=1,\ldots,M_n$. Due to Lemma \ref{lm2} for all
$s=1,\ldots,M_n$
\begin{equation}\label{aux3}
{\sf var} Z_{n,s} = {\sf var} Z_{n,1} \sim \langle p_n \rangle \, {\sf K}_X(p_n)/\langle n \rangle \,{\sf K}_X(n),\;\;\;n\to \infty.
\end{equation}
Thus
\begin{equation}\label{aux4}
\sum_{s=1}^{M_n} {\sf var} Z_{n,s} = M_n {\sf var} Z_{n,1} \to 1, \;\;\;n\to \infty,
\end{equation}
since
$$M_n\langle p_n \rangle \sim \prod_{k=1}^d [n_k/(p_n^{(k)} + q_n^{(k)})]p_n^{(k)} \sim \langle n \rangle $$
and ${\sf K}_X(p_n)/{\sf K}_X(n) \to 1$ as $n\to \infty$.
For arbitrary $\varepsilon >0$, taking into account the stationarity of $X$, we have
$$
\sum_{s=1}^{M_n} {\sf E}Z_{n,s}^2 \mathbb{I}\{|Z_{n,s}|>\varepsilon\} = \frac{M_n}{\langle n \rangle\, {\sf K}_X(n)} {\sf E} Y_{n,1}^2\mathbb{I}\{Y_{n,1}^2>\varepsilon^2 \langle n \rangle\, {\sf K}_X(n)\}
$$
$$
=\frac{M_n\langle p_n \rangle\, {\sf K}_X(p_n)}{\langle n \rangle\, {\sf K}_X(n)} {\sf E}\frac{S(U_n^{(1)})^2}{\langle p_n \rangle\, {\sf K}_X(p_n)}\mathbb{I}
\left\{
\frac{S(U_n^{(1)})^2}{\langle p_n \rangle\, {\sf K}_X(p_n)} > \varepsilon^2
\frac{\langle n \rangle\, {\sf K}_X(n)}
{\langle p_n \rangle\, {\sf K}_X(p_n)}
\right\} \to 0,\;\;n\to\infty,
$$
in view of \eqref{aux3}, \eqref{aux4} and because
$$
\frac{\langle n \rangle {\sf K}_X(n)}{\langle p_n \rangle {\sf K}_X(p_n)}\to \infty\;\;\mbox{as}\;\;
n\to \infty.
$$
We also used uniform integrability of $\{S(U_n^{(1)})^2/(\langle p_n \rangle\, {\sf K}_X(p_n)), n\in \mathbb{N}^d\}$. Indeed,
this is a subfamily of the uniformly integrable family
$\{S_n^2/(\langle n \rangle\, {\sf K}_X(n)), n\in \mathbb{N}^d\}$.
The Lindeberg theorem (see, e.g., \cite{K}, p. 69) implies that
$$
\sum_{s=1}^{M_n} Z_{n,s} \stackrel{law}\to Z\sim \mathcal{N}(0,1),\;\;\;n\to\infty.
$$
Therefore,
$$
\prod_{s=1}^{M_n}{\sf E} \exp\{itZ_{n,s}\} - \exp\left\{-\frac{t^2}{2}\right\} \to 0,\;\;n\to \infty.
$$
It remains to note that
$$
\prod_{j\in J_n} {\sf E}\exp\left\{\frac{it}{v_n}S(U^{(j)}_n)\right\} = \prod_{s=1}^{M_n}{\sf E}\exp\{itZ_{n,s}\}.
$$
Thus $Q_3(n,t)\to 0$ for each $t\in\mathbb{R}$ as $n\to\infty$.
The proof is complete. $\square$
\vskip0.5cm
{\bf Proof of Theorem 2.} For a wide-sense stationary random field $X=\{X_j,j\in\mathbb{Z}^d\}$ introduce the function
$$
{\sf R}_X(r) = \sum_{j\in\mathbb{Z}^d: |j|\leq r}{\sf cov}(X_0,X_j), \;\;r\in \mathbb{N}.
$$
This function ${\sf R}_X(\cdot)$ is close in a sense to ${\sf K}(\cdot)$ defined in
\eqref{fn}. They coincide if $d=1$.
Clearly, for $d\geq 1$
$$
{\sf K}(r)\leq {\sf R}_X(r)\leq {\sf K}(r\sqrt{d}),\;\;\;\;r\in \mathbb{N}.
$$
Consequently, if ${\sf K}\in \mathcal{L}(\mathbb{N})$, then ${\sf R}_X\in \mathcal{L}(\mathbb{N})$,
and vice versa, if
${\sf R}_X\in \mathcal{L}(\mathbb{N})$, then ${\sf K}\in \mathcal{L}(\mathbb{N})$.
Now for a sequence
$(C_r)_{r\in\mathbb{N}}$ it is not difficult to obtain the desired result following the scheme of the proof of
Theorem \ref{th1} and
using ${\sf R}_X$ instead of ${\sf K}_X$. $\square$
\vskip0.2cm
\begin{Rm}\label{rm2} Lemma $\ref{lm2}$ shows that in Theorems $\ref{th1}$ and $\ref{th2}$
instead of normalizations $\sqrt{{\sf var} S_n}$ and
$\sqrt{{\sf var} S(C_r)}$ for partial sums one can use $\sqrt{\langle n \rangle \, {\sf K}_X(n)}$
and $r^{d/2}\sqrt{{\sf K}(r)}$, respectively.
\end{Rm}
\vskip1cm
{\bf Acknowledgments.} The author is grateful to Professors I.~Kourkova and G.~Pages for the invitation
to the LPMA of the University Pierre and Marie Curie; he would also like to thank all the members of the
LPMA for hospitality.
\vskip1cm
\begin{thebibliography}{99}
\bibitem{B} Billingsley P. {\it Convergence of Probability Measures.} Wiley, New York, 1968, 253 pp.
\bibitem{BSha} Bulinski A.V. and Shashkin A.P. {\it Limit Theorems for Associated Random Fields and Related Systems.}
World Scientific, Singapore, 2007, 436 pp.
\bibitem{BS} Bulinski A.V. and Shiryaev A.N. {\it Theory of Stochastic Processes.} FIZMATLIT, Moscow, 2005, 408 pp. (in Russian).
\bibitem{BV} Bulinski A.V. and Vronski M.A. {\it Statistical variant of the CLT for associated random fields.} Fundam.
Prikl. Mat., vol. 2 (1996), no. 4, pp. 999-1018 (in Russian).
\bibitem{EPW} Esary J. D., Proschan F. and Walkup D. W.
{\it Association of random variables, with applications.}
Ann. Math. Statist., vol. 38 (1967), no. 5, pp. 1466-1474.
\bibitem{GK} Gnedenko B.V. and Kolmogorov A.N.
{\it Limit Distributions for Sums of Independent Random Variables.} Addison-Wesley, Reading, 1954, 264 pp.
\bibitem{H} Herrndorf N. {\it An example of the central limit theorem for associated random sequences.}
Ann. Probab., vol. 12 (1984), no. 3, pp. 912-917.
\bibitem{IL} Ibragimov I.A. and Linnik Yu.V. {\it Independent and Stationary Sequences of Random Variables.} Wolters-Noordhoff, Groningen, 1971,
443 pp.
\bibitem{K} Kallenberg O. {\it Foundations of Modern Probability.} Springer, New York, 2002, 523 pp.
\bibitem{L} Lewis T. {\it Limit theorems for partial sums of quasi-associated random variables.}
In: Szyszkowicz B. (ed.). Asymptotic Methods in Probability and Statistics. Elsevier, Amsterdam, 1998, pp. 31-48.
\bibitem{N1} Newman C.M. {\it Normal fluctuations and the FKG inequalities.} Commun. Math. Phys., vol. 74 (1980), no. 2, pp. 119-128.
\bibitem{N} Newman C.M. {\it Asymptotic independence and limit theorems for positively
and negatively dependent random variables.} In: Tong Y.L. (ed.). Inequalities in Statist. and Probab., ISI,
Hayward, 1984, pp. 127-140.
\bibitem{P} Petrov V.V. {\it Limit Theorems of Probability Theory: Sequences of Independent Random Variables.}
Oxford Studies in Probability, vol. 4, Clarendon Press, Oxford, 1995, 292 pp.
\bibitem{S} Shashkin A.P.
{\it On Newman's central limit theorem.} Theory Probab. Appl., vol. 50 (2006), no. 2, pp. 330-337.
\bibitem{Z} Zolotarev V.M. {\it Modern Theory of Summation of Random Variables.} VSP, Utrecht, 1997, 412 pp.
Alexander BULINSKI,
\vskip0.2cm
Faculty of Mathematics and Mechanics, Lomonosov Moscow State University,
Moscow 119991, Russia
{\it and}
LPMA, UPMC University Paris-6,
4 Place Jussieu, 75252 Paris CEDEX 05, France
\vskip0.5cm
{\it E-mail address}: [email protected]
\end{thebibliography}
\end{document}
|
\begin{document}
\title[Chow groups of K3 surfaces and spherical objects]{Chow groups of K3 surfaces and spherical objects}
\author[D.\ Huybrechts]{Daniel Huybrechts}
\address{Mathematisches Institut,
Universit{\"a}t Bonn, Beringstr.\ 1, 53115 Bonn, Germany}
\email{[email protected]}
\begin{abstract} \noindent
We show that for a K3 surface $X$ the finitely generated subring
$R(X)\subset{\rm CH}^*(X)$ introduced by Beauville and Voisin is
preserved under derived equivalences. This is proved by analyzing
Chern characters of spherical bundles (and complexes). As for a K3
surface $X$ defined over a number field all spherical bundles on
the complex K3 surface $X_\mathbb{C}$ are defined over $\bar\mathbb{Q}$, this is
compatible with the Bloch--Beilinson conjecture. Besides the work
of Beauville and Voisin \cite{BV}, Lazarsfeld's result on
Brill--Noether theory for curves in K3 surfaces \cite{Laz} and the
deformation theory developed in \cite{HMS} are central for the
discussion.
\end{abstract}
\maketitle
\let\thefootnote\relax\footnotetext{This work was supported by the SFB/TR 45 `Periods,
Moduli Spaces and Arithmetic of Algebraic Varieties' of the DFG
(German Research Foundation)}
\section{Introduction}
The Chow group ${\rm CH}^i(X)$ of a smooth projective variety $X$ is
the group of all cycles of codimension $i$ modulo rational
equivalence (see \cite{Fulton}). For surfaces these are
${\rm CH}^0(X)=\mathbb{Z}[X]$, ${\rm CH}^1(X)\simeq{\rm Pic}(X)$ (via the first Chern
class), and the more mysterious ${\rm CH}^2(X)$. The latter is roughly
the group of $0$-cycles $Z=\sum n_ix_i$ modulo linear equivalence
on curves containing $Z$. Since Mumford's article \cite{Mum} one
knows that, contrary to ${\rm CH}^0(X)$ and ${\rm CH}^1(X)$, the group
${\rm CH}^2(X)$ can be big. More precisely, the subgroup
$A(X):={\rm Ker}({\rm deg}:{\rm CH}^2(X)\xymatrix@1@=15pt{\ar[r]&}\mathbb{Z})$ of all homologically trivial
$0$-cycles on a complex projective surface $X$ is
infinite-dimensional (and in particular infinitely generated)
whenever $p_g(X)=h^2(X,{\cal O}_X)>0$, e.g.\ for K3 surfaces.
For K3 surfaces, Beauville and Voisin have studied more recently
the subgroup
$$R(X):={\rm CH}^0(X)\oplus{\rm CH}^1(X)\oplus
c_X\mathbb{Z}\subset{\rm CH}^2(X),$$ where $c_X\in{\rm CH}^2(X)$ is the fundamental
class of a closed point $x\in X$ that is contained in a (possibly)
singular rational curve in $X$. As shown in \cite{BV}, the class
$c_X$ is independent of the point $x$. The main results of
\cite{BV} can be stated as follows:
\noindent {\bf Theorem (Beauville--Voisin).} {\it {\rm i)}
$R(X)\subset {\rm CH}^*(X)$ is a subring and {\rm ii)} $24c_X={\rm
c}_2(X)$.}
The first condition is equivalently expressed by saying that for
any line bundle $L\in{\rm Pic}(X)$ the class ${\rm
c}_1(L)^2\in{\rm CH}^2(X)$ is a multiple of $c_X$.
Let us rephrase i) and ii) in terms of Mukai vectors. For any
coherent sheaf (or complex of coherent sheaves) $E$ on $X$ one
defines
$$v^{\rm CH}(E):={\rm ch}(E)\sqrt{{\rm td}(X)}\in{\rm CH}^*(X).$$
In other words, $v^{\rm CH}(E)=({\rm rk}(E),{\rm c}_1(E),{\rm
ch}_2(E)+{\rm rk}(E)c_X)\in{\rm CH}^0(X)\oplus{\rm CH}^1(X)\oplus{\rm CH}^2(X)$. Then
by applying i) to powers of $L$, conditions i) and ii) can be
reformulated as:
\begin{itemize}
\item[iii)] {\it For any line bundle $L\in{\rm Pic}(X)$, one has
$v^{\rm CH}(L)\in R(X)$.}
\end{itemize}
Line bundles on a K3 surface $X$ are the easiest examples of
\emph{spherical objects} on $X$, which by definition are bounded
complexes of coherent sheaves $E\in{\rm D}^{\rm b}(X)$ with
${\rm Ext}^*_X(E,E)\simeq H^*(S^2,\mathbb{C})$. Building upon \cite{BV},
we shall prove the following generalization of iii).
\noindent{\bf Theorem 1.} {\it Let $X$ be a complex projective K3
surface of Picard number $\rho(X)\geq2$ and let $E\in{\rm D}^{\rm b}(X)$ be a
spherical object. Then $v^{\rm CH}(E)\in R(X)$.}
Spherical objects play a distinguished role in the study of the
bounded derived category ${\rm D}^{\rm b}(X)$ (and its homological mirror
given by a certain Fukaya category). They are essential for the
understanding of the rich structure of the group ${\rm
Aut}({\rm D}^{\rm b}(X))$ of all exact $\mathbb{C}$-linear autoequivalences and
Bridgeland's space of stability conditions ${\rm Stab}(X)$ (see
\cite{Br}).
In this context, Theorem 1 is used to deduce information about the
action of derived equi\-valences on the level of Chow groups. More
precisely, we have
\noindent {\bf Theorem 2.} {\it Let
$\Phi_{\cal E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$ be an exact $\mathbb{C}$-linear
equivalence between the bounded derived categories of two smooth
complex projective K3 surfaces of Picard number $\rho(X)\geq2$.
Then the induced action $\Phi_{\cal E}^{\rm CH}:{\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm CH}^*(X')$
preserves the Beauville--Voisin ring, i.e.\
$$\Phi^{\rm CH}_{\cal E}(R(X))=R(X').$$}
The key step towards Theorem 1 is the following result which is
valid without any assumption on the Picard group of the surface
(see Theorem \ref{prop_consequencedefo}).
\noindent {\bf Theorem 3.} {\it Let $X$ and $X'$ be complex
projective K3 surfaces and
$\Phi_{\cal E},\Phi_{\cal F}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$ be two Fourier--Mukai
equivalences. If their induced actions on cohomology coincide,
i.e.\ $\Phi_{\cal E}^H=\Phi_{\cal F}^H:\widetilde H(X,\mathbb{Z})\xymatrix@1@=15pt{\ar[r]^-\sim&} \widetilde
H(X',\mathbb{Z})$, then also $\Phi_{\cal E}^{\rm CH}=\Phi_{\cal F}^{\rm CH}:{\rm
CH}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm CH}(X')$.}
In particular, for $X= X'$ the result shows that $\Phi_{\cal E}^{\rm CH}$
acts as the identity on $A(X)$ whenever it acts trivially on
cohomology (see Corollary \ref{cor:kernelsequal}). As we will
explain in Remark \ref{rem:firstBloch}, this is predicted by a
general conjecture of Bloch which asserts that the action of any
algebraic correspondence on the graded pieces of his conjectural
filtration is determined by its action on cohomology (see
\cite[1.8]{Bl} or \cite[Conj.\ 23.22]{Voisin}).
If instead of projective K3 surfaces over $\mathbb{C}$ we consider K3
surfaces $X$ defined over a number field $K$, then the situation
changes completely. In this case ${\rm CH}^2(X)$ is no longer expected
to be infinitely generated. In fact, the Bloch--Beilinson
conjectures predict that for K3 surfaces over number fields the
degree map yields an isomorphism ${\rm CH}^2(X)\otimes\mathbb{Q}\simeq\mathbb{Q}$ (see
\cite{BlochCrelle,RSS}). This can be rephrased as the following
\noindent {\bf Conjecture (Bloch--Beilinson for K3 surfaces).}
{\it For any K3 surface $X$ defined over a number field $K$ base
change yields
$${\rm CH}^*(X)\otimes\mathbb{Q}~\xymatrix@1@=15pt{\ar@{^(->}[r]&}~{\rm CH}^*(X_L)\otimes\mathbb{Q}\xymatrix@1@=15pt{\ar[r]^-\sim&}
R(X_\mathbb{C})\otimes\mathbb{Q}\simeq\mathbb{Q}^{\rho(X_\mathbb{C})+2},$$ where $L/K$ is a
certain finite field extension with a chosen embedding
$L\subset\mathbb{C}$.}
As usual, $X_L$ denotes $X\times_{{\rm Spec}(K)}{\rm Spec}(L)$ and similarly
for $X_\mathbb{C}$, which then is a complex projective K3 surface.
It is well known that the base change ${\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]&}{\rm CH}^*(X_L)$ for
any extension $L/K$ has torsion kernel. The passage to the finite
extension $L/K$ is not essential and only needed to ensure that
all geometric line bundles are defined. Thus, the central point of
the conjecture is that ${\rm CH}^2(X)\otimes\mathbb{Q}\simeq\mathbb{Q} c_X$. For a
proof it would clearly suffice to prove that any rational point
$x\in X(\bar\mathbb{Q})$ satisfies $[x]=c_X$, but there is no obvious
geometric reason for this.
The skyscraper sheaves $k(x)$ of rational points $x\in X(\bar\mathbb{Q})$ define
semi-rigid objects in ${\rm D}^{\rm b}(X_{\bar\mathbb{Q}})$ (see Section
\ref{sect:K3overnumber} for the definition), for
${\rm Ext}^1(k(x),k(x))$ is two-dimensional. In this sense, they are
reasonably close to our spherical objects $E\in{\rm D}^{\rm b}(X)$ which have
vanishing ${\rm Ext}^1(E,E)$. The following is thus in accordance with
the Bloch--Beilinson conjecture for K3 surfaces.
\noindent {\bf Theorem 4.} {\it Let $X$ be a K3 surface over a
number field $K$. Then
$\bullet$ Any spherical object $E\in{\rm D}^{\rm b}(X_\mathbb{C})$ is defined over
some finite extension $L/K$.
$\bullet$ Any spherical object $F\in{\rm D}^{\rm b}(X_{\bar\mathbb{Q}})$ satisfies
$v^{\rm CH}(F_\mathbb{C})\in R(X_\mathbb{C})$ if in addition
$\rho(X_{\bar\mathbb{Q}})\geq2$.}
Together these two assertions show that on any K3 surface $X$
defined over $\bar\mathbb{Q}$ with Picard number $\rho(X)\geq2$ there
exists a large number of non-trivial classes in ${\rm CH}^2(X)$ for
which the Bloch--Beilinson conjecture can be verified. As with
other approaches to the conjecture, the difficult part, that would
show that this suffices to deduce the result for all classes,
remains open.
Despite the algebraic nature of all assertions, non-algebraic K3
surfaces play a crucial but hidden role in this paper. A technique
that has been developed together with Macr\`i and Stellari in
\cite{HMS} allows one to deform any derived equivalence that acts
as the identity on cohomology to an essentially trivial derived
equivalence on a generic and non-algebraic deformation of $X$.
This is explained in Section \ref{sect:FMonChow}. The results can
also be used to show that the natural representations of ${\rm
Aut}({\rm D}^{\rm b}(X))$ on ${\rm CH}^*(X)$ and on the Mukai lattice $\widetilde
H(X,\mathbb{Z})$ encode the same information (see Corollary
\ref{cor:kernelsequal}).
In Section \ref{sect:Mukaispherobjcts} we reduce Theorem 1 to the
case that $E$ is a spherical vector bundle. This section also
explains how Theorem 2 is deduced from Theorem 1. The case of
spherical vector bundles is dealt with in detail in Section
\ref{sect:MukaisphVB}. The main ingredient here is Lazarsfeld's
result that the generic curve in an indecomposable linear system
on a K3 surface is Brill--Noether general \cite{Laz}. The final
Section \ref{sect:K3overnumber} discusses the relation to the
Bloch--Beilinson conjecture for K3 surfaces over number fields.
We certainly expect all results to hold true without any
assumption on the Picard number of the K3 surface $X$ and, in
fact, Theorem 1 can be proved also for $\rho(X)=1$ under
additional numerical conditions on the spherical object $E$.
\noindent {\bf Notation.} By ${\rm D}^{\rm b}(X)$ we denote the bounded
derived category of the abelian category ${\rm Coh}(X)$ of
coherent sheaves on $X$. It will be considered as a $K$-linear
triangulated category, when $X$ is defined over $K$ (which mostly
is $\mathbb{C}$, $\bar\mathbb{Q}$ or a number field). The Mukai lattice
$\widetilde H(X,\mathbb{Z})$ of a complex K3 surface is by definition the
full singular cohomology $H^*(X,\mathbb{Z})$ endowed with its natural
weight two Hodge structure and the Mukai pairing (see e.g.\
\cite{FM}). All intersection products will be taken with respect
to the Mukai pairing which differs from the usual intersection
pairing by a sign in degree four. We will associate to any
$E\in{\rm D}^{\rm b}(X)$ its Mukai vector $v(E):={\rm ch}(E)\sqrt{{\rm
td}(X)}\in\widetilde H(X,\mathbb{Z})$ and its natural lift to ${\rm CH}^*(X)$,
which is denoted $v^{\rm CH}(E)$. We do not make this distinction for
the characteristic classes.
\noindent {\bf Acknowledgements.} I wish to thank C.\ Voisin for
instructive comments on a first version of the paper and R.\
Kloosterman for his help with an argument in Section
\ref{sect:K3overnumber}. Thanks also to E.\ Mistretta, M.\
Penegini, and D.\ Ploog, who have checked Theorem 2 in explicit
examples at an early stage of this work.
\section{Fourier--Mukai action on the Chow
group}\label{sect:FMonChow}
Let us start by briefly recalling the following examples of
Fourier--Mukai equivalences for K3 surfaces.
{\bf i)} For a line bundle $L\in{\rm Pic}(X)$, the tensor product
$L\otimes(~~)$ defines a Fourier--Mukai equivalence
$$\Phi_{\iota_*L}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X),$$
where $\iota_*L$ is the direct image of $L$ under the diagonal
embedding $X\xymatrix@1@=15pt{\ar[r]^-\sim&}\Delta\subset X\times X$.
{\bf ii)} If $X'$ is a smooth projective two-dimensional fine
moduli space of $\mu$-stable vector bundles on $X$, then the
universal bundle $\mathbb{E}$ on $X\times X'$ induces an equivalence (see
\cite{Muk,FM})
$$\Phi_\mathbb{E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X').$$
Note that any Fourier--Mukai partner of $X$ is isomorphic to such
a moduli space, but of course other Fourier--Mukai equivalences
between $X$ and $X'$ do exist and are given by kernels more
complicated than $\mathbb{E}$.
{\bf iii)} If $E\in{\rm D}^{\rm b}(X)$ is a spherical object, i.e.\
${\rm Ext}^*_X(E,E)\simeq H^*(S^2,\mathbb{C})$, then the spherical twist
$$T_E:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X),$$
studied in detail in \cite{ST}, is a Fourier--Mukai equivalence
with kernel ${\cal P}_E:={\rm Cone}({\rm tr}:E^{\vee}\boxtimes
E\xymatrix@1@=15pt{\ar[r]&}{\cal O}_\Delta)$ (see also \cite[Ch.\ 8]{FM}). Spherical objects,
although rigid, exist in abundance on any projective K3 surface.
E.g.\ any line bundle, even the trivial one, gives rise to a
non-trivial spherical twist. Moreover, Kuleshov shows in
\cite{Kul} that any $(1,1)$-class $v\in\widetilde H(X,\mathbb{Z})$ of
square $-2$ is the Mukai vector of a spherical object, which can
be chosen to be a vector bundle if the rank of $v$ is positive.
Any Fourier--Mukai equivalence $\Phi_{\cal E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$
induces a group isomorphism
$$\Phi^{\rm CH}_{\cal E}:{\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm CH}^*(X')$$
and a Hodge isometry $$\Phi^H_{\cal E}:\widetilde H(X,\mathbb{Z})\xymatrix@1@=15pt{\ar[r]^-\sim&}
\widetilde H(X',\mathbb{Z}).$$ Both are defined as correspondences
associated to $v^{\rm CH}({\cal E})\in {\rm CH}^*(X\times X')\otimes\mathbb{Q}$
respectively $v({\cal E})\in H^*(X\times X',\mathbb{Q})$.
Note that in general one would expect $\Phi^{\rm CH}$ and $\Phi^H$ to
be defined only with rational coefficients, but as Mukai observed
the situation is special for K3 surfaces (see the original
argument in \cite{Muk} or \cite{FM,HL}).
\begin{remark}
The action $\Phi^{\rm CH}_{\cal E}$ is difficult to grasp for example ii),
but easy to describe in the examples i) and iii).
Indeed, in i) the actions $\Phi^{\rm CH}_{\iota_*L}$ and
$\Phi^H_{\iota_*L}$ are both given by multiplication with ${\rm
ch}(L)=\exp({\rm c}_1(L))$, where the Chern character is viewed in
${\rm CH}^*(X)$ resp.\ $H^*(X,\mathbb{Z})$. Thus Theorem 2 is a trivial
consequence of the results in \cite{BV} in this case.
For the spherical twists in iii), $T^{\rm CH}_E$ and $T^H_E$ are
reflections in $v^{\rm CH}(E)^\perp$ resp.\ $v(E)^\perp$, where the
orthogonal complement is taken with respect to the Mukai pairing.
In particular, their squares $(T^2_E)^{\rm CH}$ and $(T^2_E)^H$ act
trivially, i.e.\ as the identity, on both groups ${\rm CH}^*(X)$ resp.\
$\widetilde H(X,\mathbb{Z})$.
\end{remark}
\begin{remark}\label{rem:actioninexamples}
Observe that for an arbitrary spherical object the associated
spherical twist $T_E$ preserves the Beauville--Voisin subring
$R(X)$ if and only if $v^{\rm CH}(E)\in R(X)$. In this case it acts as
the identity on the space of cohomologically trivial cycles
$A(X)$.
\end{remark}
According to Mumford, ${\rm CH}^2(X)$ is big and in fact of infinite
dimension for any complex projective surface with $p_g(X)>0$ and
therefore in particular for K3 surfaces. See \cite[Ch.\
22]{Voisin} for the notion of dimension of ${\rm CH}^2(X)$.
Thus, a priori for an arbitrary Fourier--Mukai equivalence
$\Phi_{\cal E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$ between two K3 surfaces the
induced map $\Phi^{\rm CH}_{\cal E}:{\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm CH}^*(X')$ between the
infinite dimensional Chow groups might capture more information
than $\Phi_{\cal E}^H:\widetilde H(X,\mathbb{Z})\xymatrix@1@=15pt{\ar[r]^-\sim&} \widetilde H(X',\mathbb{Z})$.
That this is (unfortunately?) not the case is the main result of
this section.
\begin{thm}\label{prop_consequencedefo}
Let $X$ and $X'$ be smooth complex projective K3 surfaces and let
$$\Phi_{\cal E},\Phi_{\cal F}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$$ be two Fourier--Mukai
equivalences with $\Phi_{\cal E}^H=\Phi_{\cal F}^H$. Then also
$$\Phi_{\cal E}^{\rm CH}=\Phi^{\rm CH}_{\cal F}:{\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]^-\sim&} {\rm CH}^*(X').$$
\end{thm}
\begin{remark} In general the direct sum decomposition ${\rm CH}^*(X)={\rm CH}^0(X)\oplus{\rm CH}^1(X)\oplus{\rm CH}^2(X)$
is not respected by Fourier--Mukai transforms. However, the
homologically trivial part is. More precisely, if
$\Phi_{\cal E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$ is any Fourier--Mukai equivalence,
then $\Phi_{\cal E}^{\rm CH}(A(X))=A(X')$. Moreover, if $\Phi^H_{\cal E}$
respects the cohomological degree, e.g.\ for cohomologically
trivial autoequivalences, then
$\Phi_{\cal E}^{\rm CH}({\rm CH}^0(X)\oplus{\rm CH}^2(X))={\rm CH}^0(X')\oplus {\rm CH}^2(X')$.
\end{remark}
The essential step in the proof of Theorem
\ref{prop_consequencedefo} consists of the following slightly
weaker result.
\begin{prop}\label{prop:weak}
Let $\Phi_{\cal E},\Phi_{\cal F}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$ be as in Theorem
\ref{prop_consequencedefo}. Then $\Phi^{\rm CH}_{\cal E}=\Phi_{\cal F}^{\rm CH}$ on
${\rm CH}^0(X)\oplus{\rm CH}^2(X)$.
\end{prop}
\begin{proof}
By studying the composition
$\Phi_{\cal F}^{-1}\circ\Phi_{\cal E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X)$, one easily
reduces to the case of autoequivalences acting as the identity on
cohomology. So let $\Phi_{{\cal E}_0}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X)$ with
$\Phi^H_{{\cal E}_0}={\rm id}$. We claim that then also
$\Phi^{\rm CH}_{{\cal E}_0}={\rm id}$ on ${\rm CH}^0(X)\oplus {\rm CH}^2(X)$. In
particular we have to show that $\Phi^{\rm CH}_{{\cal E}_0}={\rm id}$ on the
space of homologically trivial cycles $A(X)$.
Clearly, changing $\Phi_{{\cal E}_0}$ by even powers $T^{2k}$ of the
shift functor or even powers $T_{{\cal O}_X}^{2 k}$ of the spherical
twist associated to the trivial line bundle does not affect the
assertion (see Remark \ref{rem:actioninexamples} and use
$v^{\rm CH}({\cal O}_X)=(1,0,c_X)\in R(X)$). So, in the course of the proof
we will freely modify $\Phi_{{\cal E}_0}$ by autoequivalences of this
type.
In \cite{HMS} we were mainly interested in the case
$\Phi_{{\cal E}_0}^H=(-{\rm id}_{H^2})\oplus {\rm id}_{H^0\oplus H^4}$,
but as mentioned there already the case $\Phi_{{\cal E}_0}^H={\rm id}$
is similar and actually easier. So the results of \cite{HMS} show
that for any autoequivalence $\Phi_{{\cal E}_0}$
with $\Phi^H_{{\cal E}_0}={\rm id}$ one finds:\\
i) Two smooth formal deformations
${\cal X}\xymatrix@1@=15pt{\ar[r]&}{\rm Spf}(R)\xymatrix@1@=15pt{&\ar[l]}{\cal X}'$ with $R=\mathbb{C}[[t]]$
and ${\cal X}_0=X={\cal X}'_0$. Here ${\cal X}$ is the formal neighbourhood of
$X$ inside its twistor space with respect to a very general
K\"ahler class in ${\rm Pic}(X)\otimes\mathbf{R}R$. Note that in this way $X$
is deformed towards a non-projective K3 surface.
ii) A complex ${\cal E}\in{\rm D}^{\rm b}({\cal X}\times_R{\cal X}'):={\rm D}^{\rm b}_{\rm
coh}(\Mod{{\cal X}\times_R{\cal X}'})$ deforming ${\cal E}_0$, i.e.\
$L\iota^*{\cal E}\simeq{\cal E}_0$, where
$\iota:X\times X ~\xymatrix@1@=15pt{\ar@{^(->}[r]&} {\cal X}\times_R^{\phantom{R}}{\cal X}'$ is the obvious closed embedding.\\
By \cite[Prop.\ 2.18, 2.19]{HMS} we may assume, after possibly
composing with powers of $T_{\cal O}^2$ and $T^2$, that the restriction
${\cal E}_K\in{\rm D}^{\rm b}(({\cal X}\times_R{\cal X}')_K)$ of ${\cal E}$ to the general fibre
is a sheaf. Hence \cite[Cor.\ 4.5]{HMS} applies and shows that
there exists an $R$-flat sheaf(!) $\widetilde{\cal E}$ on
${\cal X}\times_R{\cal X}'$ with the same restriction to the general fibre
as ${\cal E}$, i.e.\ $\widetilde{\cal E}_K\simeq{\cal E}_K$ in
${\rm D}^{\rm b}(({\cal X}\times_R{\cal X}')_K)$, where $K=\mathbb{C}((t))$. For the notation
we refer to \cite{HMS}. Using the compatibilities between
$\Phi_{{\cal E}_0}^H$ and the induced action on Hochschild
(co)homology, one can in addition assume that the first order
deformations
${\cal X}_1\xymatrix@1@=15pt{\ar[r]&}{\rm Spec}(\mathbb{C}[t]/t^2)\xymatrix@1@=15pt{&\ar[l]}{\cal X}'_1$ of
$X={\cal X}_0={\cal X}'_0$ coincide.
The specialization morphism $K({\rm D}^{\rm b}(({\cal X}\times_R{\cal X}')_K))\xymatrix@1@=15pt{\ar[r]&}
K({\rm D}^{\rm b}(X\times X))$ is well defined, see \cite[Remark 2.7]{HMS} or
the analogous statement for Chow groups in Remark \ref{rem:spec}.
Hence the coherent sheaf(!) $\widetilde{\cal E}_0$ and the original
complex ${\cal E}_0$ have the same Mukai vectors $v^{\rm CH}\in{\rm CH}^*(X\times
X)\otimes\mathbb{Q}$ and therefore
$\Phi_{\widetilde{\cal E}_0}^{\rm CH}=\Phi_{{\cal E}_0}^{\rm CH}$ and
$\Phi_{\widetilde{\cal E}_0}^H=\Phi_{{\cal E}_0}^H$.
Note that the Fourier--Mukai transform $\Phi_G$ associated to the
sheaf $G:=\widetilde{\cal E}_0$ is not necessarily an equivalence,
which would simplify the following arguments. But in any case,
there is a dense open subset $U\subset X$ over which $G$ is flat
(see e.g.\ \cite[Thm.\ 2.15, Lemma 2.1.6]{HL}). Hence, for any
closed point $x\in U$ the image $\Phi_G(k(x))$ is simply the sheaf
$G|_{\{x\}\times X}$. On the other hand,
$v(\Phi_G(k(x)))=v(\Phi_{{\cal E}_0}(k(x)))=(0,0,1)$ and hence
$G|_{\{x\}\times X}$ must be of the form $k(y)$ for some point
$y\in X$. This gives rise to a morphism $U\xymatrix@1@=15pt{\ar[r]&} X$, which by
interchanging the two factors turns out to define a birational map
$X\dashrightarrow X$. As any birational map between K3 surfaces,
the latter can then be completed to an isomorphism $f:X\xymatrix@1@=15pt{\ar[r]^-\sim&} X$.
Moreover, if $Z:=\operatorname{Supp}(G)\subset X\times X$, then $\Gamma_f\subset
Z$ is one irreducible component and the other components do not
dominate $X$. The latter implies that
$[\Gamma_f]_*|_{H^{2,0}}=\Phi^H_G|_{H^{2,0}}={\rm id}_{H^{2,0}}$,
i.e.\ $f$ is a symplectomorphism.
Obviously $f_*(c_X)=c_X$ and a general conjecture of Bloch (see
Remark \ref{rem:firstBloch}) predicts that for a symplectomorphism
the induced automorphism $f_*:{\rm CH}^*(X)\simeq{\rm CH}^*(X)$ is the
identity on $A(X)$. Since for generic $x\in X$ we have
$\Phi_G^{\rm CH}([x])=[f(x)]$, this would be enough to conclude that
$\Phi_G^{\rm CH}$ acts as the identity on ${\rm CH}^2(X)$.
Without using Bloch's conjecture, the argument is more involved
and goes as follows. Since $G$ is the restriction of a sheaf on
${\cal X}_1\times_{R_1}{\cal X}_1$, the structure sheaf of the graph
${\cal O}_{\Gamma_f}$ deforms sideways to first order, i.e.\ there
exists an $R_1$-flat coherent sheaf on ${\cal X}_1\times_{R_1}{\cal X}'_1$
restricting to ${\cal O}_{\Gamma_f}$ over the closed point. (Do it
first for the graph of $f|_U$ and then pass to the closure.) In
other words, the automorphism $f$ deforms sideways to first order
(actually to any order, but we do not need this) in
${\cal X}\times_{R}{\cal X}'$. But clearly $f$ deforms sideways to first
order if and only if $f^*(w)=w$, where $w\in H^1(X,{\cal T}_X)$
corresponds to the first order deformation
${\cal X}_1\xymatrix@1@=15pt{\ar[r]&}{\rm Spec}(\mathbb{C}[t]/t^2)$.
By construction, the class $w$ maps to the chosen K\"ahler class
in $H^{1,1}(X)$ under the isomorphism $H^{1,1}(X)\simeq
H^1(X,\Omega_X)\simeq H^1(X,{\cal T}_X)$ and, since the K\"ahler class
was chosen generically, this implies $f^*={\rm id}$ on ${\rm Pic}(X)$.
Since the transcendental lattice $T(X)$ is an irreducible Hodge
structure (of weight two), the assumption $f^*={\rm id}$ on
$H^{2,0}(X)$ implies by Schur's lemma that $f^*={\rm id}$ on
$T(X)$. Together with $f^*={\rm id}$ on ${\rm Pic}(X)$ this proves
$f^*={\rm id}$ on the full cohomology $H^*(X,\mathbb{Z})$. By the Global
Torelli theorem, the latter is equivalent to $f={\rm id}$.
Eventually this shows that $\Phi_G^{\rm CH}([x])=f_*[x]=[x]$ for
generic and hence all $x\in X$. Thus $\Phi^{\rm CH}_G={\rm id}$ on
${\rm CH}^2(X)$.
To conclude we observe that $\Phi_{{\cal E}_0}({\cal O}_X)$ deforms sideways
to a spherical object in ${\rm D}^{\rm b}({\cal X}_K)$, for ${\cal O}_X$ and ${\cal E}_0$ do.
On the other hand, up to shift ${\cal O}_{{\cal X}'_K}$ is the only
spherical object in ${\rm D}^{\rm b}({\cal X}'_K)$ (cf.\ \cite[Prop.\ 2.14]{HMS}).
Hence, up to shift ${\cal O}_X$ is the only spherical object on $X$
that deforms sideways in the family ${\cal X}'$ to a spherical object
in ${\rm D}^{\rm b}({\cal X}_K')$. Hence $\Phi_{{\cal E}_0}({\cal O}_X)\simeq{\cal O}_X$ (up to
shift), which in particular shows that
$\Phi^{\rm CH}_{{\cal E}_0}(1,0,c_X)=(1,0,c_X)$.
Thus we have shown that $\Phi_{{\cal E}_0}^{\rm CH}$ acts as identity on
${\rm CH}^0(X)\oplus{\rm CH}^2(X)$. Moreover, it acts as $\left(\begin{array}{cc}{\rm id}&0\\
\ast&{\rm id}\end{array}\right)$ on ${\rm CH}^1(X)\oplus{\rm CH}^2(X)$ which
will later be shown to be diagonal.
\end{proof}
The proposition can also be used to derive information about the
Mukai vectors in ${\rm CH}^*(X)$ of spherical objects having the same
Mukai vector in cohomology. This is the following
\begin{cor}\label{cor:sphsame}
If $E,E'\in{\rm D}^{\rm b}(X)$ are two spherical objects with $v(E)=v(E')\in
\widetilde H(X,\mathbb{Z})$, then $$v^{\rm CH}(E)=v^{\rm CH}(E')\in {\rm CH}^*(X).$$
\end{cor}
\begin{proof}
Write $v(E)=(r,\ell,s)=v(E')$. Let us first reduce to the case
that $r\ne0$. Suppose $r=0$, then $\ell\ne0$. Then let
$\Phi=T_{{\cal O}_X}\circ (L\otimes(~~))$ and use $\Phi\circ T_E\simeq
T_{\Phi(E)}\circ \Phi$ (see e.g.\ \cite[Lemma 8.21]{FM}), which
holds for any Fourier--Mukai equivalence $\Phi$ and any spherical
object $E$. Thus $T^H_{\Phi(E)}=T^H_{\Phi(E')}$ and the assertion
$v^{\rm CH}(E)=v^{\rm CH}(E')$ is clearly equivalent to
$v^{\rm CH}(\Phi(E))=v^{\rm CH}(\Phi(E'))$. If $L$ is chosen such that
$({\rm c}_1(L).\ell)\ll 0$, then the spherical object $\Phi(E)$
has positive rank.
Now apply Proposition \ref{prop:weak} to the class $c_X$ to deduce
$T^{\rm CH}_E(c_X)=T^{\rm CH}_{E'}(c_X)$. Both sides can be explicitly
computed, which yields $c_X-rv^{\rm CH}(E)=c_X-rv^{\rm CH}(E')$ and hence
$v^{\rm CH}(E)=v^{\rm CH}(E')$.
\end{proof}
\noindent {\it Proof of Theorem \ref{prop_consequencedefo}.}
Suppose again that $\Phi_{\cal E}$ is an autoequivalence of ${\rm D}^{\rm b}(X)$
with $\Phi_{\cal E}^H={\rm id}$. By Proposition \ref{prop:weak} we know
already that $\Phi_{\cal E}^{\rm CH}$ is the identity on
${\rm CH}^0(X)\oplus{\rm CH}^2(X)$. Thus it remains to show that
$\Phi^{\rm CH}_{\cal E}({\rm c}_1(L))$ of an arbitrary line bundle $L$ has
no component in $A(X)$. The image $L':=\Phi_{\cal E}(L)$ of a line
bundle $L$ is a spherical object and since $\Phi^H_{\cal E}={\rm id}$,
one has $v(L)=v(L')$. By Corollary \ref{cor:sphsame} this implies
$v^{\rm CH}(L)=v^{\rm CH}(L')$ and hence $\Phi^{\rm CH}_{\cal E}({\rm c}_1(L))\in
{\rm CH}^1(X)$. \hspace*{\fill}$\Box$
As done already in the proof above, Theorem
\ref{prop_consequencedefo} can be reformulated in terms of
autoequivalences. Since the kernel of the cohomology
representation of ${\rm Aut}({\rm D}^{\rm b}(X))$ is essentially the only
remaining mystery in this context, we state this explicitly as
\begin{cor}\label{cor:kernelsequal}
Let $X$ be a smooth complex projective K3 surface and denote by
$$\rho^{\rm CH}:{\rm Aut}({\rm D}^{\rm b}(X))\xymatrix@1@=15pt{\ar[r]&}{\rm Aut}({\rm CH}^*(X))\phantom{~~~~~}{\rm and}~~~~~ \rho^H:{\rm
Aut}({\rm D}^{\rm b}(X))\xymatrix@1@=15pt{\ar[r]&}{\rm Aut}(\widetilde H(X,\mathbb{Z}))$$ the natural
representation $\Phi\xymatrix@1@=15pt{\ar@{|->}[r]&}\Phi^{\rm CH}$ resp.\ $\Phi\xymatrix@1@=15pt{\ar@{|->}[r]&}\Phi^H$.
Then ${\rm Ker}(\rho^{\rm CH})={\rm Ker}(\rho^H)$.
\end{cor}
\begin{proof}
The inclusion ${\rm Ker}(\rho^{\rm CH})\subset{\rm Ker}(\rho^H)$ is obvious and
the other one follows from the proposition.
\end{proof}
\begin{remark} In \cite{Br}
Bridgeland suggests the following explicit description of this
kernel. He conjectures ${\rm Ker}(\rho^H)=\pi_1({\cal P}_0^+(X))$, for a
certain period domain ${\cal P}^+_0(X)$ defined in terms of the
algebraic part of $\widetilde H(X,\mathbb{Z})$. In particular, the
conjecture says that ${\rm Ker}(\rho^H)$ is spanned by the square
$T^2=[2]$ of the shift functor and the squares $T^2_E$ of all
spherical twists $T_E$. (In fact, spherical twists associated to
spherical sheaves should suffice.)
As explained above, the conjectural generators $T^2$ and $T_E^2$
of ${\rm Ker}(\rho^H)$ act trivially on ${\rm CH}^*(X)$. In this sense, the
corollary provides non-trivial evidence for Bridgeland's
conjecture.
\end{remark}
\begin{remark}\label{rem:firstBloch}
The corollary appears interesting also in the light of another
open conjecture due to Bloch (see \cite{Bl}), which for the case
of surfaces reads as follows: Consider a surface $X$ and a cycle
$\Gamma\in{\rm CH}^2(X\times X)$ with its induced natural endomorphisms
$[\Gamma]^{2,0}_*$ of $H^0(X,\Omega_X^2)$ and $[\Gamma]_*$ of
${\rm CH}^2(X)$. In general the latter does not respect the natural
filtration ${\rm Ker}({\rm alb}_X)\subset A(X)\subset{\rm CH}^2(X)$, but
induces an endomorphism ${\rm gr}[\Gamma]_*$ of the graded object
${\rm Ker}({\rm alb}_X)\oplus{\rm Alb}(X)\oplus\mathbb{Z}$. Then Bloch
conjectures that $[\Gamma]_*^{2,0}=0$ if and only if ${\rm
gr}[\Gamma]_*$ is trivial on ${\rm Ker}({\rm alb}_X)$ (see also
\cite[Ch.\ 11]{Voisin}). Note that for a K3 surface $X$ the graded
object is just $A(X)\oplus\mathbb{Z}$.
If $\Phi_{\cal E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X)$ is a Fourier--Mukai
autoequivalence of a K3 surface $X$ such that $\Phi^H_{\cal E}={\rm
id}$, then $\Gamma:=v^{\rm CH}({\cal E})-[\Delta]$ acts trivially on
cohomology and, in particular, on $H^{0}(X,\Omega_X^2)$. Bloch's
conjecture would thus say that ${\rm gr}[\Gamma]_*$ is trivial on
$A(X)={\rm Ker}({\rm alb}_X)$. And indeed, by Corollary
\ref{cor:kernelsequal} this holds true, as we in fact have
$\Gamma=0$.
Note that Bloch's conjecture would actually say that
$\Phi_{\cal E}^H={\rm id}$ on $H^0(X,\Omega_X^2)$ is sufficient to
conclude $\Phi_{\cal E}^{\rm CH}={\rm id}$ on $A(X)$, but our techniques
fail to prove this. In fact, it seems even unknown whether any
symplectomorphism $f\in {\rm Aut}(X)$ induces the identity on $A(X)$,
i.e.\ whether for a symplectomorphism $f$ any point $x\in X$ is
rationally equivalent to its image $f(x)$ (cf.\ comments in the
proof of Proposition \ref{prop:weak}).
\end{remark}
\section{Mukai vectors of spherical
objects}\label{sect:Mukaispherobjcts}
The goal of this section is to prove that for any spherical object
$E\in{\rm D}^{\rm b}(X)$ on a smooth complex projective K3 surface $X$ with
Picard number $\rho(X)\geq2$ the Mukai vector
$v^{\rm CH}(E)\in{\rm CH}^*(X)$ is contained in the Beauville--Voisin
subring $R(X)\subset{\rm CH}^*(X)$. The main results (Corollaries
\ref{cor:sphobR} and \ref{cor:2}) should also hold for K3 surfaces
with $\rho(X)=1$, but we can only prove it under additional
conditions on the numerical invariants of $E$.
\begin{remark}\label{rem:equivcondiandii}
As noted earlier, the original result in \cite{BV} can be seen as
the special case that the spherical object $E$ is a line bundle
$L\in{\rm Pic}(X)$. Indeed, if $v^{\rm CH}(L^k)=(1,k{\rm c}_1(L),k^2{\rm
c}_1(L)^2/2+{\rm c}_2(X)/24)\in R(X)$, then necessarily ${\rm
c}_1(L)^2\in R(X)$ and ${\rm c}_2(X)\in R(X)$. However, it should
be emphasized that our methods do not provide an alternate proof
of the results in \cite{BV}.
\end{remark}
In this section we shall explain how to reduce the proof of
Theorem 1 to a generalization of the Beauville--Voisin result from
line bundles to higher rank spherical vector bundles. The proof of
the following crucial result is postponed to the next section.
\begin{prop}\label{prop:sphvb}
Let $X$ be a smooth complex projective K3 surface and let $E$ be a
spherical vector bundle on $X$. Suppose that one of the following
conditions holds \begin{itemize}\item[i)] $\rho(X)\geq2$ or
\item[ii)] ${\rm Pic}(X)=\mathbb{Z} H$ and $v(E)=(r,kH,s)$ with $k\equiv\pm1
(r)$.
\end{itemize}
Then $v^{\rm CH}(E)\in R(X)$.
\end{prop}
This proposition is expected to hold without any restriction on
$X$ or the spherical vector bundle $E$. However, it does not
generalize to $\mu$-stable vector bundles, i.e.\ the Mukai vector
of a general non-rigid $\mu$-stable vector bundle $E$ is certainly
not contained in $R(X)$.
The following consequence of Proposition \ref{prop:sphvb} proves
Theorem 1.
\begin{cor}\label{cor:sphobR}
Let $E\in{\rm D}^{\rm b}(X)$ be a spherical object on a smooth projective K3
surface $X$. Suppose that either
\begin{itemize}
\item[i)] $\rho(X)\geq2$ or \item[ii)] ${\rm Pic}(X)=\mathbb{Z} H$ with
$v(E)=(r,kH,s)$ and $k\equiv\pm1(r)$.
\end{itemize}
Then $v^{\rm CH}(E)\in R(X)$.
\end{cor}
\begin{proof}
Let $E\in{\rm D}^{\rm b}(X)$ be spherical. Write $v(E)=(r,\ell,s)$. Clearly,
$v^{\rm CH}(E)\in R(X)$ is equivalent to $v^{\rm CH}(E[1])=-v^{\rm CH}(E)\in
R(X)$. Hence we may assume $r\geq0$.
If $r>0$, then as proved in \cite{Kul} there exists a spherical
locally free sheaf $E'$ on $X$ with $v(E')=v(E)$. (In fact any
torsion free spherical sheaf is automatically locally free as was
already observed by Mukai in \cite{Muk}.) Now apply Proposition
\ref{prop:sphvb} to $E'$ which yields $v^{\rm CH}(E')\in R(X)$. But by
Corollary \ref{cor:sphsame}, we know that $v^{\rm CH}(E)=v^{\rm CH}(E')$ for
any two numerically equivalent spherical objects $E,E'\in{\rm D}^{\rm b}(X)$.
The case $r=0$ is straightforward. First, by applying $T_{{\cal O}_X}$
we reduce to the case that also $s=0$ and hence $v(E)=(0,\ell,0)$
with $\ell$ a $(-2)$-class, which we may assume to be effective.
Thus $T_E^H=s_\ell$, the reflection in $\ell^\perp$. The Weyl
group $W_X$ generated by reflections $s_\delta$ for all effective
$(-2)$-classes $\delta$ is known to be generated by reflections
associated to nodal classes, i.e.\ when $\delta$ is represented by
a smooth rational curve. Thus $s_\ell$ can be written as a
composition of
finitely many reflections $s_{[C_i]}$, where
the curves $C_i\subset X$ are smooth and rational. Then use that
$v^{\rm CH}(E)\in R(X)$ is equivalent to $T^{\rm CH}_E(R(X))=R(X)$, because
$T^{\rm CH}_E$ is the reflection in the hyperplane $v^{\rm CH}(E)^\perp$,
and that $T^H_E=s_\ell=\prod s_{[C_i]}$ implies $T^{\rm CH}_E=\prod
T_{{\cal O}_{C_i}(-1)}^{\rm CH}$ (Theorem \ref{prop_consequencedefo}).
Clearly, $v^{\rm CH}({\cal O}_{C_i}(-1))\in R(X)$ and hence
$T^{\rm CH}_{{\cal O}_{C_i}(-1)}(R(X))=R(X)$.
\end{proof}
Let us now show that Theorem 1 implies Theorem 2, which we state
again as
\begin{cor}\label{cor:2}
Suppose $X$ and $X'$ are smooth complex projective K3 surfaces
with Picard number $\rho(X)\geq2$. If
$\Phi_{\cal E}:{\rm D}^{\rm b}(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X')$ is a Fourier--Mukai equivalence,
then the induced map $\Phi_{\cal E}^{\rm CH}:{\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm CH}^*(X')$
respects the Beauville--Voisin subring, i.e.\
$$\Phi_{\cal E}^{\rm CH}(R(X))=R(X').$$
\end{cor}
\begin{proof}
The Mukai vectors $v^{\rm CH}(L)\in{\rm CH}^*(X)$ of all line bundles
$L\in{\rm Pic}(X)$ span $R(X)$. The images $E:=\Phi_{\cal E}(L)\in{\rm D}^{\rm b}(X')$
are not necessarily (shifted) line bundles again, but they are
spherical objects in ${\rm D}^{\rm b}(X')$. Since $\rho(X)\geq2$ and hence
$\rho(X')\geq2$, Corollary \ref{cor:sphobR}, i) applies. Thus
$\Phi_{\cal E}^{\rm CH}(v^{\rm CH}(L))=v^{\rm CH}(E)\in R(X')$.
\end{proof}
\begin{remark}
The most interesting special case is the one when $X'$ is a fine
moduli space of $\mu$-stable vector bundles and ${\cal E}$ is the
universal bundle $\mathbb{E}$. In examples where both the moduli space $X'$
and the universal bundle $\mathbb{E}$ are constructed explicitly, one
sometimes can prove Corollary \ref{cor:2} directly (see e.g.\
\cite{HL} for explicit examples). If there was an argument proving
the result for arbitrary universal bundles without first proving
Corollary \ref{cor:sphobR}, then the techniques of Section
\ref{sect:FMonChow} would prove Corollary \ref{cor:2} more
directly (and also in the case $\rho(X)=1$).
To be more precise, let $E$ be any spherical object with
$v(E)=(r,\ell,s)$ and $r>0$. Then
$T^H_E(0,0,1)=-(r^2,r\ell,rs-1)$. Thus, if $T_E$ is composed with
$\Phi_{\mathbb{E}[1]}$ where $\mathbb{E}\in{\rm D}^{\rm b}(X\times X')$ is the universal
family of stable vector bundles with Mukai vector
$(r^2,r\ell,rs-1)$, then $(\Phi_{\mathbb{E}[1]}\circ
T_E)^H(0,0,1)=(0,0,1)$. By composing with a certain equivalence
$\Psi$ that is a combination of spherical twists $T_{{\cal O}_C}$ (with
$\mathbb{P}^1\simeq C\subset X$) and tensor products with line bundles,
the Hodge isometry $(\Psi\circ\Phi_{\mathbb{E}[1]}\circ T_E)^H$ becomes
graded. Moreover, it will respect the K\"ahler cone up to sign
(see e.g.\ \cite[Ch.\ 9]{FM} for details). Then by the Global
Torelli theorem $(\Psi\circ\Phi_{\mathbb{E}[1]}\circ T_E)^H=\pm f_*$ for
some isomorphism $f$. Now use that $f^{\rm CH}_*$ and $\Psi^{\rm CH}$
preserve the Beauville--Voisin ring. Hence $T_E^{\rm CH}(R(X))=R(X)$
if and only if $\Phi^{\rm CH}_{\mathbb{E}}(R(X))=R(X')$, where $\mathbb{E}$
is a universal family of stable bundles of rank $r^2$.
\end{remark}
\section{Spherical vector bundles: Proof of Proposition
\ref{prop:sphvb}}\label{sect:MukaisphVB}
Let $C$ be a smooth irreducible complex projective curve of genus
$g$. Recall that the Brill--Noether locus
$W^{r_0}_d(C)\subset{\rm Pic}^d(C)$ is the determinantal subvariety of
all line bundles $A$ of degree $d$ with $h^0(C,A)\geq r_0+1$. The
Brill--Noether number for these numerical invariants is by
definition
$$\rho(r_0,d,g):=g-(r_0+1)(g-d+r_0).$$
Classically (see \cite{ACGH}) one knows that $W^{r_0}_d(C)$ is
non-empty whenever $\rho(r_0,d,g)\geq0$. (Due to a result of
Fulton and Lazarsfeld, it is also connected when
$\rho(r_0,d,g)>0$, but this will not be used.) Moreover, for a
generic curve $C$ the Brill--Noether number $\rho(r_0,d,g)$ is in
fact the dimension of $W^{r_0}_d(C)$ when $\rho(r_0,d,g)\geq0$ and
$W^{r_0}_d(C)=\varnothing$ otherwise.
Central for our discussion is a result of Lazarsfeld \cite{Laz}
that shows that a generic smooth curve $C$ in an indecomposable
linear system on a K3 surface is Brill--Noether general, i.e.\ the
$W^{r_0}_d(C)$ have the expected dimension. Let us make precise
which parts of \cite{Laz} are really used.
Suppose $A\in W^{r_0}_d(C)$ satisfies
\begin{equation}\label{eqn:Lazcond1}
{\rm i)}~~h^0(C,A)=r_0+1 ~~{\rm ~and~ii)~the~line~bundles}~~
A~~{\rm and}~~ A^*\otimes\omega_C~~{\rm are~ globally~ generated}.
\end{equation}
If $C$ is embedded into a K3 surface $X$, one associates to $A$
the Lazarsfeld bundle $F_{C,A}$, which by definition is the kernel
of the evaluation map $H^0(C,A)\otimes {\cal O}_X\xymatrix@1@=15pt{\ar[r]&} A$. Here $A$ is
viewed as a sheaf on $X$ supported on $C$. Thus, there is a short
exact sequence
$$\xymatrix{0\ar[r]&F^{\phantom{C^0}}_{C,A}\ar[r]&H^0(C,A)\otimes{\cal O}_X\ar[r]&A\ar[r]&0}$$
and it is not difficult to see that $F_{C,A}$ really is locally
free. Dualizing yields an exact sequence
\begin{equation}\label{eqn:sesLaz}\xymatrix{0\ar[r]&H^0(C,A)^*\otimes
{\cal O}_X\ar[r]&F_{C,A}^{*\phantom{C^0}}\ar[r]&A^*\otimes\omega_C\ar[r]&0.}\end{equation}
The crucial result for our discussion is the following
observation.
\begin{lem}\label{lem:Laz} \cite[Lemma 1.3]{Laz}
If $|C|$ is indecomposable, i.e.\ $|C|$ does not contain any
reducible curves, then the bundle $F_{C,A}$ is simple.\hspace*{\fill}$\Box$
\end{lem}
Clearly, the assumption on $|C|$ is satisfied if ${\rm Pic}(X)$ is
generated by ${\cal O}(C)$ and we shall restrict to this case. So let
from now on $X$ be a complex projective K3 surface with
$\rho(X)=1$, let $H\in{\rm Pic}(X)$ be the ample generator and write
$(H.H)=2g-2$. Then the generic curve $C\in|H|$ is smooth of genus
$g$. (Indeed, by Bertini it suffices to show that $|H|$ has no
base points and according to \cite[Cor.\ 3.2]{SDonat} there are no
base points outside the fixed components which do not exist,
because $|H|$ is indecomposable.)
Now choose $r_0$ and $d$ such that
\begin{equation}\label{eqn:Lazcond} d<g+r_0\phantom{PP} {\rm
and}\phantom{PP}\rho(r_0,d,g)=0.
\end{equation} We will only need the following immediate consequence
of \cite{Laz}:
\begin{prop}\label{prop:BNgeneral}
For generic $C\in|H|$ there exists a line bundle $A\in
W^{r_0}_d(C)$ satisfying (\ref{eqn:Lazcond1}).
\end{prop}
\begin{proof} In fact we will show that for a generic curve $C\in|H|$
any $A\in W^{r_0}_d(C)$ satisfies (\ref{eqn:Lazcond1}). Since
$\rho(r_0,d,g)=0$ and hence $W^{r_0}_d(C)\ne\varnothing$ (for any
smooth $C$), this proves the assertion.
Let $C\in |H|$ be generic and let $A\in W_d^{r_0}(C)$. We first
check $h^0(C,A)=r_0+1$. If not, then
$W_d^{r_0+1}(C)\ne\varnothing$. On the other hand, by our
assumption (\ref{eqn:Lazcond}) we have
$\rho(r_0+1,d,g)=\rho(r_0,d,g)-(g-d+r_0+1)-(r_0+1)
=0+d-g-2r_0-2<0$ and thus $W_d^{r_0+1}(C)=\varnothing$, as the
generic smooth curve in $|H|$ is Brill--Noether general according
to \cite{Laz}.
Next, for generic $C\in|H|$ any $A\in W^{r_0}_d(C)$ is globally
generated. Otherwise $W^{r_0}_{d-1}(C)\ne\varnothing$. This would
again contradict that $C$ is Brill--Noether general, for
$\rho(r_0,d-1,g)=\rho(r_0,d,g)-(r_0+1)<0$.
The calculation for $A^*\otimes\omega_C$ is similar. First observe
$A^*\otimes\omega_C\in W^{r_0-d-1+g}_{2g-2-d}(C)$ and
$h^0(C,A^*\otimes\omega_C)=r_0-d+g$ by Serre duality and
Riemann--Roch. Then, using the assumption $d<g+r_0$ in
(\ref{eqn:Lazcond}) (so far $d<g+2r_0$ was enough), one checks
$\rho(r_0-d-1+g,2g-2-d-1,g)<0$ and hence
$W^{r_0-d-1+g}_{2g-2-d-1}(C)=\varnothing$. The latter shows in
particular that $A^*\otimes\omega_C$ is globally generated.
\end{proof}
We continue to assume ${\rm Pic}(X)=\mathbb{Z} H$. Consider a spherical
bundle $E$ on $X$ and let $v(E)=(r,kH,s)$ with $k\equiv\pm1(r)$.
By tensoring with powers of $H$ and dualizing we can modify $E$
such that $k=1$. As these operations do not affect whether
$v^{\rm CH}(E)\in R(X)$, we will assume henceforth that $k=1$. Since
$E$ is spherical, one has $(H.H)-2rs=-2$ or, in other words,
$rs=g$.
Next we would like to relate $E$ to a particular Lazarsfeld
bundle, but a priori it is not clear that $E$ fits in a short
exact sequence of the form $0\xymatrix@1@=15pt{\ar[r]&}{\cal O}_X^r\xymatrix@1@=15pt{\ar[r]&} E\xymatrix@1@=15pt{\ar[r]&} M\xymatrix@1@=15pt{\ar[r]&}0$ with $M$ a
line bundle on a generic $C\in|H|$ (cf.\ (\ref{eqn:sesLaz})).
However, we will see that this is possible for the right choice of
$r_0$ and $d$. To be more precise, let
$$d:=g-1-s+r\phantom{PP}{\rm and}\phantom{PP}r_0:=r-1.$$
(If the wished-for exact sequence $0\xymatrix@1@=15pt{\ar[r]&} {\cal O}_X^r\xymatrix@1@=15pt{\ar[r]&} E\xymatrix@1@=15pt{\ar[r]&} M\xymatrix@1@=15pt{\ar[r]&}0$ is
of the form (\ref{eqn:sesLaz}), then the Riemann--Roch formula
$\chi(M)=-d+g-1$ together with $\chi(M)=\chi(E)-2r=s-r$ dictates
this choice.)
A straightforward computation reveals that with this choice
$\rho(r_0,d,g)=g-rs=0$ and $d<g+r_0$. The latter is equivalent to
$s>0$ which follows from $g=rs$ and $r>0$. Thus Proposition
\ref{prop:BNgeneral} applies and we find for generic $C\in|H|$ a
line bundle $A\in W^{r_0}_d(C)$ satisfying (\ref{eqn:Lazcond1}).
This then yields a short exact sequence of the form
(\ref{eqn:sesLaz}). Moreover, $F_{C,A}^*$ is simple by Lemma
\ref{lem:Laz} and $v(F_{C,A}^*)=(r,H,s)=v(E)$.
By \cite[Prop.\ 3.14]{Muk}, any spherical bundle on a K3 surface
with Picard number one is $\mu$-stable. Mukai also proves that
rigid $\mu$-stable vector bundles with given Mukai vector are
unique (see also \cite[Thm.\ 6.16]{HL}). Hence $E\simeq F_{C,A}^*$.
Thus as a consequence of \cite{Laz} we proved
\begin{cor}\label{cor:genericcurveses}
Let $E$ be a spherical bundle on a K3 surface $X$ with
${\rm Pic}(X)=\mathbb{Z} H$ and such that $v(E)=(r,H,s)$. Then for any generic
smooth curve $C\in|H|$ there exists a line bundle $M$ on $C$ and
a short exact sequence
$$0\xymatrix@1@=15pt{\ar[r]&}{\cal O}_X^r\xymatrix@1@=15pt{\ar[r]&} E\xymatrix@1@=15pt{\ar[r]&} M\xymatrix@1@=15pt{\ar[r]&}0.$$\hspace*{\fill}$\Box$
\end{cor}
\begin{remark} Corollary \ref{cor:sphsame}, which also works for
$\rho(X)>1$, shows that in any case $v^{\rm CH}(E)=v^{\rm CH}(F_{C,A}^*)$.
So we do not actually need $E\simeq F_{C,A}^*$, but only that the
simple $F_{C,A}^*$ exists.
\end{remark}
The rough idea of the next step is to degenerate the smooth
generic curve $C$ to a rational curve $C_0\in|H|$, which always
exists due to Mumford (cf.\ \cite{MM} or \cite{BHPV}). At the same
time, $M$ will deform to a sheaf $M_0$ supported on $C_0$. Since
the right hand side in $v^{\rm CH}(M)=v^{\rm CH}(E)-v^{\rm CH}({\cal O}_X^r)$ stays
constant in the process, one also has
$v^{\rm CH}(M_0)=v^{\rm CH}(E)-v^{\rm CH}({\cal O}_X^r)$. But now $M_0$ is supported
on the rational curve $C_0\subset X$ and by \cite{BV} this implies
$v^{\rm CH}(M_0)\in R(X)$. Hence $v^{\rm CH}(E)\in R(X)$.
This can be made rigorous as follows: Consider $Z:={\rm
Gr}(r,H^0(X,E))$ and the non-empty Zariski open subset $U\subset
Z$ of all subspaces $V\subset H^0(X,E)$ such that
$V\otimes{\cal O}_X\xymatrix@1@=15pt{\ar[r]&} E$ is injective with cokernel $M$ being a line
bundle on a smooth curve $C$. Then $C\in|H|$, for ${\rm
c}_1(E)=H$. By Corollary \ref{cor:genericcurveses} the set $U$ is
not empty and in fact the composition
$$U\xymatrix@1@=15pt{\ar[r]&}{\rm Pic}^d({\cal C}/|H|)\xymatrix@1@=15pt{\ar[r]&}|H|$$ is dominant.
Here ${\cal C}\xymatrix@1@=15pt{\ar[r]&}|H|$ is the linear system together with its universal
curve and ${\rm Pic}^d({\cal C}/|H|)\xymatrix@1@=15pt{\ar[r]&}|H|$ denotes the compactified
relative Jacobian variety (or Simpson's moduli space of stable
pure sheaves).
The morphism $U\xymatrix@1@=15pt{\ar[r]&}{\rm Pic}^d({\cal C}/|H|)$ can be compactified to a
morphism $\varphi:Z'\xymatrix@1@=15pt{\ar[r]&}{\rm Pic}^d({\cal C}/|H|)$ where $Z'$ is some
projective variety containing $U$ as a dense open subset. For the
following, we can assume that the universal sheaf ${\cal M}$ on
${\rm Pic}^d({\cal C}/|H|)\times X$ exists, otherwise pass to some
projective variety dominating ${\rm Pic}^d({\cal C}/|H|)$. Then the
pull-back ${\cal N}:=(\varphi\times{\rm id})^*{\cal M}$ on $Z'\times X$ has
the property that $v^{\rm CH}({\cal N}_t)=v^{\rm CH}(E)-v^{\rm CH}({\cal O}_X^r)$ for any
closed point $t\in U$. Hence also
$v^{\rm CH}({\cal N}_{t_0})=v^{\rm CH}(E)-v^{\rm CH}({\cal O}_X^r)$ for any closed point
$t_0\in Z'$ in the boundary.
\begin{remark}\label{rem:spec}
This last argument makes use of the specialization map for Chow
groups. Consider first a family ${\cal X}\xymatrix@1@=15pt{\ar[r]&} S$ over a smooth
irreducible curve. Let $t\in S$ be a closed point and $\eta\in S$
be the generic point. Denote by ${\cal X}_t$ and ${\cal X}_\eta$ the
corresponding fibres, which we regard as varieties over $k(t)$
resp.\ $k(\eta)=K(S)$. The closure of any cycle on ${\cal X}_\eta$
yields a cycle on ${\cal X}$ which can then be restricted to the closed
fibre ${\cal X}_t$. Rational equivalence is preserved in the process,
so that we get the specialization map
\begin{equation}\label{eqn:spec}
{\rm CH}^*({\cal X}_\eta)\xymatrix@1@=15pt{\ar[r]&}{\rm CH}^*({\cal X}_t). \end{equation} See \cite[Ch.\
20]{Fulton} for details when $S$ is the spectrum of a discrete
valuation ring with the two points $t$ and $\eta$. For an
arbitrary (smooth and irreducible) base $S$ one constructs by
recurring blow-ups (see e.g.\ \cite[II, Exer.\ 4.12]{Ha}) a
morphism ${\rm Spec}(R)\xymatrix@1@=15pt{\ar[r]&} S$, with $R$ a discrete valuation ring,
mapping the closed (resp.\ generic) point to $t$ (resp.\ $\eta$).
Then pull-back the family ${\cal X}$ to ${\rm Spec}(R)$ and apply the
construction for discrete valuation rings. Note that by
construction for any cycle $\alpha\in{\rm CH}^*({\cal X})$ the restriction
$\alpha_t\in{\rm CH}^*({\cal X}_t)$ equals the image under the
specialization map of the restriction $\alpha_\eta$, cf.\
\cite[20.3.1]{Fulton}.
This specialization technique applies to our case, as the short
exact sequences associated to any $V\subset H^0(X,E)$ in $U$ glue
to a short exact sequence over $U$ and hence over the generic
point $\eta\in U\subset Z'$. Thus one has
$v^{\rm CH}({\cal N}_\eta)=v^{\rm CH}(E_\eta)-v^{\rm CH}({\cal O}_{X_\eta}^r)$, where
$E_\eta$ is obtained by base change $X_\eta:=X\times_\mathbb{C}\eta\xymatrix@1@=15pt{\ar[r]&}
X$.
\end{remark}
Since $U\xymatrix@1@=15pt{\ar[r]&}|H|$ is dominant, there exists a closed point $t_0\in
Z'$ which under $$Z'\xymatrix@1@=15pt{\ar[r]&}{\rm Pic}^d({\cal C}/|H|)\xymatrix@1@=15pt{\ar[r]&}|H|$$ maps to a closed
point corresponding to an irreducible rational curve $C_0\in|H|$.
Thus ${\cal N}_{t_0}$ is supported on an irreducible rational curve.
But then $v^{\rm CH}({\cal N}_{t_0})\in R(X)$ and hence $v^{\rm CH}(E)\in R(X)$.
Thus we have proved
\begin{prop}\label{prop:mainPicone}
Let $X$ be a K3 surface with ${\rm Pic}(X)=\mathbb{Z} H$. If $E$ is a
spherical bundle with $v(E)=(r,kH,s)$ and $k\equiv\pm1(r)$, then
$v^{\rm CH}(E)\in R(X)$.\hspace*{\fill}$\Box$
\end{prop}
\begin{remark}
Note that the above arguments also work for $\rho(X)\geq2$
whenever $U\xymatrix@1@=15pt{\ar[r]&} |H|$ is dominant, but without using Lazarsfeld's
result this seems difficult.
\end{remark}
\begin{remark}\label{rem:Mumford}
There is an alternate argument going back to Mumford that would
replace the degeneration argument. One can show that the set of
effective cycles $Z\in{\rm S}^n(X)$ rationally equivalent to a
given one $Z_0\in{\rm S}^n(X)$ forms a countable union of
irreducible Zariski closed subsets. The countability stems from
the fact that the number of irreducible components of the Hilbert
scheme of all subvarieties is countable. See \cite[Ch.\
22]{Voisin} for an account.
By taking sections of the line bundles ${\cal N}_t$ one obtains
effective cycles on $X$, which for all $t$ in the open subset $U$
are rationally equivalent to each other (and to ${\rm c}_2(E)$).
But then this holds for any cycle in the closure of the image of
$U\xymatrix@1@=15pt{\ar[r]&} {\rm S}^n(X)$, which necessarily contains a cycle that is
contained in a rational curve.
\end{remark}
In order to fully prove Proposition \ref{prop:sphvb}, it remains
to treat the case $\rho(X)\geq2$. There are essentially two
arguments involved:
i) Show that on a K3 surface $X$ with $\rho(X)\geq2$ tensoring
with line bundles and dualizing brings the Mukai vector of any
spherical bundle $E$ into the form $v(E)=(r,H,s)$, where $H$ is a
primitive ample line bundle.
ii) View any polarized K3 surface $(X,H)$ with $\rho(X)\geq2$ as a
degeneration of a polarized K3 surface of Picard number one. Then
use Proposition \ref{prop:mainPicone} and a degeneration argument.
The first question is purely numerical: Suppose $E$ is a spherical
vector bundle with $v(E)=(r,{\rm c}_1(L),s)$. Write ${\rm
c}_1(L)=k\ell$ for some primitive $\ell\in{\rm NS}(X)$ and
$k\in\mathbb{Z}$. As $E$ is spherical, one has $k^2(\ell.\ell)-2rs=-2$.
Thus $k$ and $r$ are coprime, for $(\ell.\ell)$ is even. Assuming
$\rho(X)\geq2$, there exists a line bundle $M\in{\rm Pic}(X)$ such that
${\rm c}_1(E\otimes M)=k\ell+r{\rm c}_1(M)$ is primitive and
ample. (Indeed, complete $e_1:=\ell$ to a basis $e_1,e_2,\ldots,
e_\rho$ of ${\rm NS}(X)$ and choose $M$ such that ${\rm
c}_1(M)=\sum a_ie_i$ with $a_2=\pm r^n$. Then $k+ra_1$ and
$ra_2=\pm r^{n+1}$ are coprime and for $n\gg0$ the coefficients
$a_1,a_3,\ldots, a_\rho$ can be chosen such that $ke_1+r{\rm
c}_1(M)$ is contained in the ample cone, which is open.)
Since $v^{\rm CH}(E)\in R(X)$ is equivalent to $v^{\rm CH}(E\otimes M)\in
R(X)$, it suffices to consider the following situation: $(X,H)$ is
a polarized K3 surface with $H$ primitive and $E$ is a spherical
vector bundle on $X$ with ${\rm det}(E)=H$.
In step ii) we choose a smooth projective family of polarized K3
surfaces $\pi:({\cal X},{\cal H})\xymatrix@1@=15pt{\ar[r]&} D$ over a curve $D$, such that a
distinguished fibre, say over the closed point $0\in D$, is
$(X,H)$, i.e.\ ${\cal X}_0\simeq X$ and ${\cal H}_0:={\cal H}|_{{\cal X}_0}\simeq H$,
and such that the general fibre has Picard number one. More
precisely, for all except countably many closed points $t\in D$
one has ${\rm Pic}({\cal X}_t)=\mathbb{Z}{\cal H}_t$.
The obstructions to deform the spherical bundle $E$ on the central
fibre $X$ to a bundle on the nearby fibres in ${\cal X}\xymatrix@1@=15pt{\ar[r]&} D$ are
contained in ${\rm Ext}_X^2(E,E)$ and their traces are the obstructions
to deform ${\rm det}(E)=H$ sideways. Since ${\cal H}$ exists, the latter
must be trivial. As $E$ is spherical, the trace free part of
${\rm Ext}^2_X(E,E)$ is trivial and hence $E$ deforms to a vector
bundle ${\cal E}$ on ${\cal X}$, possibly after shrinking $D$. So
${\cal E}_0:={\cal E}|_{{\cal X}_0}\simeq E$ and, by semi-continuity (shrink $D$
again if necessary), the restriction ${\cal E}_t$ to any other fibre
${\cal X}_t$ is as well spherical. A degeneration argument then yields
the following result, which completes the proof of Proposition
\ref{prop:sphvb}.
\begin{prop} Suppose $E$ is a spherical vector bundle on a K3
surface with $\rho(X)\geq2$. Then $v^{\rm CH}(E)\in R(X)$.
\end{prop}
\begin{proof}
Again, there are two ways of proving this (cf.\ Remark
\ref{rem:spec} and the discussion following it). One can argue as
Mumford and say that either $v^{\rm CH}({\cal E}_t)\in R({\cal X}_t)$ for all
closed points $t\in D$ or for only a countable number of them
(cf.\ Remark \ref{rem:Mumford} which one easily adapts to the
relative setting). Since over $\mathbb{C}$ the number of closed points
$t\in D$ with $\rho({\cal X}_t)=1$ is uncountable and for them
$v^{\rm CH}({\cal E}_t)\in R({\cal X}_t)$ by Proposition \ref{prop:mainPicone},
we must have $v^{\rm CH}({\cal E}_t)\in R({\cal X}_t)$ for all closed points
$t\in D$ and in particular for $t=0$. Hence, $v^{\rm CH}(E)\in R(X)$.
An alternate argument would be the following. Consider the
relative Grassmannian ${\rm Gr}(r,\pi_*{\cal E})$ with fibres ${\rm
Gr}(r, H^0({\cal X}_t,{\cal E}_t))$ (at least over a non-empty open subset
of $D$ to which we tacitly restrict). Then let ${\cal U}\subset {\rm
Gr}(r,\pi_*{\cal E})$ be the open subset of subspaces $V\subset
H^0({\cal X}_t,{\cal E}_t)$ inducing short exact sequences of the form $0\xymatrix@1@=15pt{\ar[r]&}
V\otimes{\cal O}_{{\cal X}_t}\xymatrix@1@=15pt{\ar[r]&}{\cal E}_t\xymatrix@1@=15pt{\ar[r]&} M\xymatrix@1@=15pt{\ar[r]&}0$ with $M$ a line bundle on
some smooth curve on ${\cal X}_t$ in the ample linear system $|{\cal H}_t|$.
As explained earlier, if $\rho({\cal X}_t)=1$, the natural morphism
$\varphi_t:{\cal U}_t\xymatrix@1@=15pt{\ar[r]&}|{\cal H}_t|$ is dominant. But this is an open
condition. Hence $\varphi_t$ is actually surjective on a Zariski
open subset of closed points $t\in D$ and thus over the generic
point $\eta\in D$. Then imitate the degeneration argument for the
linear system on ${\cal X}_\eta$, that shows that $v^{\rm CH}({\cal E}_\eta)\in
R({\cal X}_\eta)$. Now use the specialization map
${\rm CH}({\cal X}_\eta)\xymatrix@1@=15pt{\ar[r]&}{\rm CH}({\cal X}_0)$.
\end{proof}
\section{K3 surfaces over number fields}\label{sect:K3overnumber}
The situation changes dramatically if instead of K3 surfaces over
$\mathbb{C}$ one considers smooth projective K3 surfaces defined over a
number field or over $\bar\mathbb{Q}$. In fact, a general conjecture of
Beilinson and Bloch (see \cite{BlochCrelle,RSS}) applied to this
case can be stated as follows:
\begin{conj} \label{conj:BB}If $X$ is a smooth projective K3 surface
over a number field $K$ or $\bar\mathbb{Q}$, then $${\rm
deg}:{\rm CH}^2(X)\otimes\mathbb{Q}\xymatrix@1@=15pt{\ar[r]^-\sim&}\mathbb{Q}.$$
\end{conj}
How does this compare to \cite{BV} and to the results of the
previous sections? Choose an embedding $K\subset \mathbb{C}$ and let
$X_\mathbb{C}:=X\times_K\mathbb{C}$ be the induced complex K3 surface. A
folklore argument shows that for arbitrary $X$ the kernel of the
natural map
$${\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]&}{\rm CH}^*(X_\mathbb{C})$$ is torsion.
Thus,
Conjecture \ref{conj:BB} can be rephrased as
\begin{conj}{\rm\bf (Bloch--Beilinson for K3 surfaces)}\label{conj:BB2} If $X$ is a smooth projective K3
surface over a number field $K\subset\mathbb{C}$ (or $\bar\mathbb{Q}$), then the
pull-back yields an injection
$${\rm CH}^*(X)\otimes\mathbb{Q}~\xymatrix@1@=15pt{\ar@{^(->}[r]&}~ R(X_\mathbb{C})\otimes\mathbb{Q}\subset{\rm CH}^*(X_\mathbb{C})_\mathbb{Q}.$$
\end{conj}
To prove the conjecture, it suffices to show that any $K$-rational
point $x\in X(K)$ satisfies $[x]=c_X\in{\rm CH}^2(X_\mathbb{C})$. This would
follow from the a priori stronger statement that any $K$-rational
point $x\in X(K)$ lies on a rational curve which is called a
`logical possibility' by Bogomolov, cf.\ \cite{BT}. Note that
there are other classes of points on K3 surfaces which are known
to have fundamental class in $R(X_\mathbb{C})$, e.g.\ in
\cite{ML} this is shown for points that can be written as sums of
a torsion point on an elliptic curve and a point in the
intersection of the elliptic curve with a rational curve.
From the derived point of view, any $K$-rational point $x$ defines
a \emph{semi-rigid object} $$k(x)\in{\rm D}^{\rm b}(X),$$ where ${\rm D}^{\rm b}(X)$ is
viewed as a $K$-linear triangulated category. By definition, an
object $E\in{\rm D}^{\rm b}(X)$ is called semi-rigid if ${\rm Ext}^*_X(E,E)\simeq
H^*({\rm S}^1\times {\rm S}^1,K)$. For comparison, recall that $E$
was called spherical if ${\rm Ext}^*_X(E,E)\simeq H^*({\rm S}^2,K)$.
The techniques of this article do not allow to treat semi-rigid
objects, but they do show that their simpler spherical cousins
behave as expected.
\begin{prop}
Let $E\in{\rm D}^{\rm b}(X)$ be a spherical object on a smooth projective K3
surface $X$ over a number field $K\subset\mathbb{C}$ (or $\bar\mathbb{Q}$) such
that $\rho(X_\mathbb{C})\geq2$. Then under ${\rm CH}^*(X)\xymatrix@1@=15pt{\ar[r]&}{\rm CH}^*(X_\mathbb{C})$ its
Mukai vector $v^{\rm CH}(E)\in{\rm CH}^*(X)$ is mapped to $R(X_\mathbb{C})$.
\end{prop}
\begin{proof}
This is an immediate consequence of Corollary \ref{cor:sphobR}.
Indeed, flat base change turns $E$ into a spherical object
$E_\mathbb{C}\in{\rm D}^{\rm b}(X_\mathbb{C})$ whose Mukai vector is contained in
$R(X_\mathbb{C})$.
\end{proof}
By means of the proposition one can now produce non-trivial
classes on K3 surfaces over number fields that are contained in
$R(X_\mathbb{C})$. In other words, these classes behave as predicted by
Conjecture \ref{conj:BB2}, but for a less geometric reason than
e.g.\ rational points contained in rational curves.
As it turns out, in fact all spherical objects on the
complex K3 surface $X_\mathbb{C}$, which although rigid exist in
abundance, are defined over $\bar\mathbb{Q}$. This is
\begin{prop}
Let $X$ be a smooth projective K3 surface over a number
field $K\subset \mathbb{C}$. Then any spherical object $F\in{\rm D}^{\rm b}(X_\mathbb{C})$
is defined over some finite extension $L/K$, i.e.\ there exists a
spherical object $E\in{\rm D}^{\rm b}(X_L)$ such that $E_\mathbb{C}\simeq F$.
\end{prop}
\begin{proof}
This uses a standard argument that roughly says that all points of
a zero-dimensional moduli space representing a moduli functor
defined over an algebraically closed field are defined over the
same field. E.g.\ any line bundle on $X_\mathbb{C}$ is defined over
$\bar\mathbb{Q}$ (and hence over some finite extension of $K$), because
the Picard variety for $X_{\bar\mathbb{Q}}$ lives over $\bar\mathbb{Q}$.
In our case we use Inaba's moduli space of simple complexes (cf.\
\cite{Lieb} for a more general setting). Consider the functor
${\rm\bf Splcx}_{X_{\bar\mathbb{Q}}}$ on the category of locally
noetherian schemes over $\bar\mathbb{Q}$, which in particular sends the
spectrum of any finitely generated field extension $L/\bar \mathbb{Q}$ to
the set of isomorphism classes of all bounded complexes
$E\in{\rm D}^{\rm b}(X_L)$ with ${\rm Ext}^0_{X_L}(E,E)=L$ and
${\rm Ext}^i_{X_L}(E,E)=0$ for $i<0$. Then it is shown in \cite{Inaba1}
that the \'etale sheafification ${\rm \bf
Splcx}^{et}_{X_{\bar\mathbb{Q}}}$ is represented by an algebraic space
over $\bar\mathbb{Q}$, which we denote ${\rm\bf Spl}$.
Any bounded complex $F\in{\rm D}^{\rm b}(X_\mathbb{C})$ is defined over some finitely
generated field extension $L/\bar\mathbb{Q}$. Thus, a spherical
$F\in{\rm D}^{\rm b}(X_\mathbb{C})$ can be seen as an $L$-rational point of ${\rm\bf
Spl}$. The vanishing of ${\rm Ext}^1(F,F)$ shows that the Zariski
tangent space at the corresponding point in ${\rm\bf Spl}_L$ is
trivial. In particular, locally around the point corresponding to
$F$ the algebraic space ${\rm \bf Spl}$ is zero-dimensional and
we may therefore assume it is a scheme over $\bar\mathbb{Q}$.
To conclude use the following straightforward argument from
commutative algebra. Let $A$ be a finitely generated $k$-algebra
over an algebraically closed field $k$, let $L/k$ be any
extension, and let $B:=A\otimes_kL$. Let ${\bf n}\subset B$ be a
maximal ideal and suppose that ${\bf m}:=A\cap{\bf n}$ is maximal
in $A$. If now ${\bf n}/{\bf n}^2=0$, then $k=A$. Indeed,
Nakayama's lemma immediately shows that $B$ must be a field and
hence ${\bf m}=A\cap{\bf n}=0$, i.e.\ $A$ is a field.
Since
$A$ is a finitely generated algebra over the algebraically closed
field $k$, this yields $A=k$. In order to reduce to the case that
${\bf m}$ is maximal, i.e.\ that the $L$-rational point of
${\rm\bf Spl}$ is a closed point, take a generic closed point $P$
in the Zariski closure of the image of ${\rm Spec}(L)\xymatrix@1@=15pt{\ar[r]&}{\rm\bf Spl}$.
By semi-continuity it will correspond to a spherical object on
$X_{k(P)}$. Then the above argument applies and shows that $P$ is
isolated and therefore equals the original $L$-rational point.
This shows that any spherical object $F\in{\rm D}^{\rm b}(X_\mathbb{C})$ is defined
eventually over $\bar\mathbb{Q}$ and hence over some finite extension of
$K$.
\end{proof}
The deformation techniques used to prove Proposition
\ref{prop:weak} would allow one to avoid moduli spaces of simple
complexes and to work solely with moduli spaces of bundles, but
the above proof seems more conceptual.
\begin{remark}
As the reader will have noticed, the proof also shows that the
Fourier--Mukai kernel ${\cal F}$ of any autoequivalence
$\Phi_{\cal F}:{\rm D}^{\rm b}(X_\mathbb{C})\xymatrix@1@=15pt{\ar[r]^-\sim&}{\rm D}^{\rm b}(X_\mathbb{C})$ is defined over $\bar\mathbb{Q}$.
In other words, ${\rm Aut}({\rm D}^{\rm b}(X_{\bar\mathbb{Q}}))\simeq{\rm
Aut}({\rm D}^{\rm b}(X_\mathbb{C}))$. Of course, the same holds for the set of
equivalences between two different K3 surfaces both defined over
$\bar\mathbb{Q}$.
\end{remark}
\end{document}
|
\begin{document}
\onehalfspace
\title{Relating dissociation, independence, and matchings}
\author{Felix Bock\and Johannes Pardey\and Lucia D. Penso\and Dieter Rautenbach}
\date{}
\maketitle
\begin{center}
{\small
Institute of Optimization and Operations Research, Ulm University, Ulm, Germany\\
\texttt{$\{$felix.bock,johannes.pardey,lucia.penso,dieter.rautenbach$\}[email protected]}
}
\end{center}
\begin{abstract}
A dissociation set in a graph is a set of vertices
inducing a subgraph of maximum degree at most $1$.
Computing the dissociation number ${\rm diss}(G)$ of a given graph $G$,
defined as the order of a maximum dissociation set in $G$,
is algorithmically hard even when $G$ is restricted to be bipartite.
Recently, Hosseinian and Butenko
proposed a simple $\frac{4}{3}$-approximation algorithm
for the dissociation number problem in bipartite graphs.
Their result relies on the inequality
${\rm diss}(G)\leq\frac{4}{3}\alpha(G-M)$
implicit in their work,
where $G$ is a bipartite graph,
$M$ is a maximum matching in $G$,
and $\alpha(G-M)$ denotes the independence number of $G-M$.
We show that the pairs $(G,M)$
for which this inequality holds with equality
can be recognized efficiently,
and that a maximum dissociation set can be determined for them efficiently.
The dissociation number of a graph $G$ satisfies
$\max\{ \alpha(G),2\nu_s(G)\}
\leq {\rm diss}(G)\leq \alpha(G)+\nu_s(G)\leq 2\alpha(G)$,
where $\nu_s(G)$ denotes the induced matching number of $G$.
We show that deciding whether ${\rm diss}(G)$
equals any of the four terms
lower and upper bounding ${\rm diss}(G)$ is NP-hard.\\[3mm]
{\bf Keywords:} Dissociation set; independent set; matching; induced matching
\end{abstract}
\pagebreak
\section{Introduction}
We consider finite, simple, and undirected graphs, and use standard terminology.
A set $I$ of vertices of a graph $G$
is a {\it dissociation set} in $G$ if the subgraph $G[I]$ of $G$ induced by $I$
has maximum degree at most $1$,
and the {\it dissociation number ${\rm diss}(G)$} of $G$
is the order of a maximum dissociation set in $G$.
Dissociation sets and the dissociation number were introduced as a special vertex-deletion problem by Yannakakis \cite{ya}
who showed that the dissociation number problem,
that is, the problem of deciding whether ${\rm diss}(G)\geq k$
for a given pair $(G,k)$, where $G$ is a graph and $k$ is a positive integer,
is NP-complete even restricted to instances where $G$ is a bipartite graph.
This initial hardness result was strengthened in different ways \cite{bocalo,ordofigowe};
in particular,
the problem remains NP-complete for bipartite graphs of maximum degree at most $3$.
Recently, Hosseinian and Butenko \cite{hobu}
proposed a simple $\frac{4}{3}$-approximation algorithm
for the maximum dissociation set problem restricted to bipartite graphs.
Their result can be derived from the following two simple inequalities:
Let $G$ be a graph and let $M$ be a maximum matching in $G$.
Since every independent set in $G-M$ is a dissociation set in $G$, we have
\begin{eqnarray}
{\rm diss}(G) & \geq & \alpha(G-M)\label{e1},
\end{eqnarray}
where $\alpha(H)$ denotes the {\it independence number} of a graph $H$,
which is the order of a maximum independent set in $H$.
Now, if $G$ is bipartite, then one can show
\begin{eqnarray}
{\rm diss}(G) & \leq & \frac{4}{3}\alpha(G-M)\label{e2}.
\end{eqnarray}
Since a maximum matching $M$ in a given bipartite graph $G$
as well as a maximum independent set $I$ in the bipartite graph $G-M$
can be determined efficiently, the combination of (\ref{e1}) and (\ref{e2})
implies that returning $I$ yields a $\frac{4}{3}$-approximation
for the maximum dissociation set problem in $G$.
We will give the simple proof of (\ref{e2}) that is implicit in \cite{hobu} below.
As our first contribution we show that the extremal graphs for (\ref{e2})
have a very restricted structure, which yields the following.
\begin{theorem}\label{theorem1}
For a given pair $(G,M)$,
where $G$ is a bipartite graph and $M$ is a maximum matching in $G$,
one can decide in polynomial time whether (\ref{e2}) is satisfied with equality. Furthermore, in case of equality,
one can determine in polynomial time a maximum dissociation set in $G$.
\end{theorem}
Next to (\ref{e1}) and (\ref{e2}),
there are the following relations between
the dissociation number ${\rm diss}(G)$,
the independence number $\alpha(G)$,
and the induced matching number $\nu_s(G)$ of a graph $G$:
\begin{eqnarray}
{\rm diss}(G) & \leq &2\alpha(G),\label{e3}\\
{\rm diss}(G) & \geq &2\nu_s(G),\label{e3b}\\
{\rm diss}(G) & \geq &\alpha(G),\mbox{ and}\label{e3c}\\
{\rm diss}(G) & \leq &\alpha(G)+\nu_s(G).\label{e3d}
\end{eqnarray}
While these inequalities are all straightforward,
the extremal graphs are not easy to describe,
and we show the following.
\begin{theorem}\label{theorem2}
For each of the inequalities (\ref{e3}), (\ref{e3b}), (\ref{e3c}), and (\ref{e3d}),
it is NP-hard to decide whether a given graph satisfies it with equality.
\end{theorem}
In view of the special role of bipartite graphs in this context,
it makes sense to consider the bipartite extremal graphs
for (\ref{e3}) to (\ref{e3d}).
It is easy to see that a bipartite graph $G$ satisfies
${\rm diss}(G)=2\alpha(G)$
if and only if $G$ is $1$-regular.
For a bipartite graph $G$,
the equality ${\rm diss}(G)=\alpha(G)$ holds if and only if
$G$ has no induced matching $M$ intersecting
every maximum matching in $G$.
Inspecting the proofs in \cite{zeripiwecobe} reveals
that Zenklusen et al.~showed that it is NP-complete to decide, for a given pair $(G,k)$,
where $G$ is a bipartite graph and $k$ is a positive integer,
whether there is an induced matching $M$ in $G$ of size $|M|$ at most $k$
intersecting every maximum matching in $G$.
Unfortunately, the size bound is crucial for their reduction.
The complexity of the induced matching number
is closely tied to the complexity of the dissociation number
\cite{ordofigowe}.
In particular, it is hard for bipartite graphs,
and the complexity of recognizing the bipartite extremal graphs for (\ref{e3b}) and (\ref{e3d}) is open.
The close relation between dissociation sets, independent sets,
and (induced) matchings also reflects in the obvious relation
$${\rm diss}(G)=\max\{ \alpha(G-M):M\mbox{ is an induced matching in }G\}.$$
Before we proceed to the proofs of our results,
we briefly mention related research.
In fact, bounds on the dissociation number \cite{brkakase,brjakaseta},
fast exact algorithms \cite{kakasc,xiko},
randomized approximation algorithms \cite{kakasc}, and
fixed parameter tractability \cite{ts}
were studied.
As observed in several references,
dissociation sets are the dual of so-called
{\it $3$-path (vertex) covers}, cf.~also \cite{beocra}.
\section{Proofs}
The following two subsections contain the proofs of Theorem \ref{theorem1} and Theorem \ref{theorem2}.
\subsection{Structure and recognition of the extremal graphs for (\ref{e2})}
We first give a proof of (\ref{e2}), which is implicit in \cite{hobu}.
After that we consider the extremal graphs.
Throughout this subsection, let $G$ be a bipartite graph of order $n$
with partite sets $A$ and $B$,
and let $M$ be a maximum matching in $G$.
Let $I$ be a maximum dissociation set in $G$.
Let $E$ be the induced matching spanned by $I$, that is, $E=E(G[I])$.
Note that $|E|\leq \frac{|I|}{2}$.
Let $A=A_1\cup A_2\cup A_3\cup A_4$
and $B=B_1\cup B_2\cup B_3\cup B_4$ be partitions of $A$ and $B$ such that
\begin{itemize}
\item $E\cap M$ is a perfect matching between $A_1$ and $B_1$,
\item $E\setminus M$ is a perfect matching between $A_2$ and $B_2$,
\item $A_3\cup B_3$ is the set of isolated vertices in $G[I]$,
\item $A_4=A\setminus I$, and $B_4=B\setminus I$.
\end{itemize}
Let $\ell=|A_2|$.
Since $I\setminus A_2$ is an independent set in $G-M$, we have
\begin{eqnarray}\label{e5}
\alpha(G-M) &\geq & |I\setminus A_2|=|I|-|A_2|={\rm diss}(G)-\ell.
\end{eqnarray}
Gallai's theorem implies that
$\alpha(G)$ and the vertex cover number $\tau(G)$ of $G$ add up to the order $n$ of $G$, and
K\H{o}nig's theorem implies that
$\tau(G)$ equals the matching number $|M|$ of $G$,
which together implies that $\alpha(G)=n-|M|$.
Since $M$ contains $|E\cap M|=|E|-\ell\leq \frac{|I|}{2}-\ell$
edges spanned by $I$,
and at most $n-|I|$ further edges,
one incident with each vertex of $A_4\cup B_4=V(G)\setminus I$, we have
\begin{eqnarray}\label{e6}
|M| &\leq & |E\cap M|+(n-|I|)
\leq \left(\frac{|I|}{2}-\ell\right)+\left(n-|I|\right)
=n-\ell-\frac{{\rm diss}(G)}{2},
\end{eqnarray}
and, hence,
\begin{eqnarray}\label{e4}
\alpha(G-M)
\geq \alpha(G)
=n-|M|
\stackrel{(\ref{e6})}{\geq} n-\left(n-\ell-\frac{{\rm diss}(G)}{2}\right)
=\ell+\frac{{\rm diss}(G)}{2}.
\end{eqnarray}
Adding (\ref{e5}) and (\ref{e4}) yields (\ref{e2}).
We now consider the extremal graphs for (\ref{e2}),
which leads to a proof of Theorem \ref{theorem1}.
Therefore, we suppose that (\ref{e2}) is satisfied with equality.
This implies that equality holds throughout the inequality chains
(\ref{e5}), (\ref{e6}), and (\ref{e4}).
Equality throughout (\ref{e5}) and (\ref{e4}) implies
$\ell=\frac{{\rm diss}(G)}{4}$ and $\alpha(G-M)=\alpha(G)$.
Equality in (\ref{e6}) implies
$|E|
=|E\cap M|+|E\setminus M|
=|E\cap M|+\ell
=\frac{|I|}{2}$,
which implies that $G[I]$ is $1$-regular, or, equivalently, $A_3=B_3=\emptyset$.
In view of the above value of $\ell$,
exactly half the edges of $G[I]$ belong to $M$, or, equivalently,
$\ell=|A_1|=|A_2|=|B_1|=|B_2|$.
Furthermore, equality in (\ref{e6}) also implies that the matching $M$
contains exactly $n-|I|=|A_4|+|B_4|$ further edges,
one incident with each vertex of $A_4\cup B_4$;
these edges match all of $A_4$ into $B_2$ as well as all of $B_4$ into $A_2$,
in particular, $\ell\geq |A_4|,|B_4|$.
The matching $M$ leaves exactly
$(|A_2|-|B_4|)+(|B_2|-|A_4|)=2\ell-(|A_4|+|B_4|)$
vertices unmatched that all lie in $A_2\cup B_2$.
See Figure \ref{fig2} for an illustration.
\begin{figure}
\caption{The continuous lines illustrate the edges in $M$
while the dashed lines illustrate those in $E\setminus M$.
All remaining edges of $G$ are not illustrated
and intersect $A_4\cup B_4$.}
\label{fig2}
\end{figure}
Now, let $M'$ be any maximum matching in $G-M$.
Since $\alpha(G-M)=\alpha(G)$, the results of Gallai and K\H{o}nig
imply that $G-M$ and $G$ have the same matching number,
which implies $|M|=|M'|$.
Since in $G-M$ the vertices in $A_1$ have all their neighbors in $B_4$,
and the vertices in $B_1$ have all their neighbors in $A_4$,
the matching $M'$ leaves at least
$(|A_1|-|B_4|)+(|B_1|-|A_4|)=2\ell-(|A_4|+|B_4|)$ vertices
in $A_1\cup B_1$ unmatched.
Since $|M|=|M'|$,
this actually implies that $M'$ consists of
$|B_4|$ edges matching all of $B_4$ into $A_1$,
$|A_4|$ edges matching all of $A_4$ into $B_1$, and
the $\ell$ edges from $E\setminus M$ that form a perfect matching between $A_2$ and $B_2$.
Let $H$ be the graph with vertex set $V(G)$ and edge set $M\cup M'$.
The components of $H$ are $M$-$M'$-alternating paths and cycles
that traverse the sets $A_i$ and $B_i$
respecting the cyclic order illustrated in Figure \ref{fig1}.
\begin{figure}
\caption{The cyclic order respected by the components of $H$,
that is, a cycle in $H$ traverses the sets in the cyclic order
$A_1,B_1,A_4,B_2,A_2,B_4,A_1,B_1,A_4,B_2,A_2,B_4,\ldots$.}
\label{fig1}
\end{figure}
For components $C$ of $H$ that are cycles,
this implies that the length of $C$ is a multiple of $6$.
Furthermore,
there are exactly $2\ell-(|A_4|+|B_4|)$ components of $H$ that are paths;
they all have length $4$ modulo $6$, and,
starting with an edge from $M$,
they go from $A_1\cup B_1$ to $A_2\cup B_2$.
For a component $P$ of $H$ that is a path, these structural properties
allow to decide the location of the individual vertices.
If, for example, the path $P:u_1u_2\ldots u_{12}\ldots$ starts in a vertex $u_1$ from $A$ not covered by $M'$,
then
$u_1,u_7,\ldots \in A_1$,
$u_2,u_8,\ldots \in B_1$,
$u_3,u_9,\ldots \in A_4$,
$u_4,u_{10},\ldots \in B_2$,
$u_5,u_{11},\ldots \in A_2$, and
$u_6,u_{12},\ldots \in B_4$.
We now formulate a satisfiable $2$-{\sc Sat} formula $f$
such that a satisfying truth assignment allows to derive a (suitable)
location of all vertices of $G$ on cycle components of $H$.
Therefore, let
$C_1,\ldots,C_k$ be the components of $H$ that are cycles.
For $i$ in $[k]$,
let $C_i:a_i^1b_i^1a_i^2b_i^2\ldots b_i^{n_i}a_i^1$,
where $a_i^1\in A$ and $a_i^1b_i^1\in M$.
Note that exactly one of the three vertices $a_i^1$, $a_i^2$, and $a_i^3$ belongs to $A_4$, which also determines the location of every other vertex on $C_i$.
For every $i$ in $[k]$,
we introduce three Boolean variables $x_i^1$, $x_i^2$, and $x_i^3$, and
add to $f$ the three clauses
$\bar{x}_i^1\vee \bar{x}_i^2$,
$\bar{x}_i^2\vee \bar{x}_i^3$, and
$\bar{x}_i^1\vee \bar{x}_i^3$;
where $x_i^j$ being true corresponds to $a_i^j\in A_4$.
Now, we consider an edge $ab$ of $G$ that does not belong to $M\cup M'$
such that at least one endpoint of $ab$ lies on a cycle component of $H$.
The structural properties imply that $a\in A_4$ or $b\in B_4$ (or both).
If $a$ lies on a cycle component $C$ of $H$,
$b$ lies on a path component of $H$, and
$b\not\in B_4$,
then we add to $f$ the clause $x_i^j$,
where $i$ and $j$ are such that $C$ is $C_i$, and
the distance of $a_i^j$ and $a$ on $C$ is $0$ modulo $6$.
Note that, if $b\in B_4$, then no clause is added to $f$.
Similarly,
if $a$ lies on a path component of $H$,
$b$ lies on a cycle component of $H$,
and $a\not\in A_4$, then we proceed analogously
by adding to $f$ the clause $x_i^j$
corresponding to the condition $a_i^j\in A_4$
that is equivalent to the condition $b\in B_4$.
Finally, if $a$ and $b$ both lie on cycle components of $H$,
say $a=a_i^j$ and $b=b_{i'}^{j'}$,
then we add to $f$ the clause $x_i^j\vee x_{i'}^{j''}$,
where $j''$ is the uniquely determined index such that
$a_{i'}^{j''}$ is the unique vertex in $\{ a_{i'}^1,a_{i'}^2,a_{i'}^3\}$
whose distance to $b_{i'}^{j'}\in B_4$ within $C_{i'}$
is equivalent to $3$ modulo $6$.
This completes the construction of $f$.
Clearly, setting $x_i^j$ to true for every $i$ and $j$ with $a_i^j\in A_4$
and false otherwise, yields a satisfying truth assignment for $f$,
that is, if (\ref{e2}) is satisfied with equality, then $f$ is satisfiable.
Given a bipartite graph $G$ and a maximum matching $M$ in $G$,
one can in polynomial time
\begin{itemize}
\item determine a bipartition $A$ and $B$ of $G$,
\item determine a maximum matching $M'$ of $G-M$,
\item check the necessary condition $|M|=|M'|$,
\item construct the auxiliary graph $H$,
\item check the necessary condition that all cycle components of $H$
have length $0$ modulo $6$,
and all path components of $H$ have length $4$ modulo $6$,
\item suitably assign the vertices in path components of $H$
to the sets $A_i$ and $B_i$ with $i\in \{ 1,2,4\}$ as described above,
\item check the necessary condition that all edges of $G$
that do not belong to $M\cup M'$
and connect vertices in path components of $H$
intersect $A_4\cup B_4$, and
\item set up the $2$-{\sc Sat} formula $f$ as described above,
check its satisfiability, and, in case of satisfiability,
determine a satisfying truth assignment.
\end{itemize}
Note that the construction of $f$ requires only available knowledge.
As we have seen above,
if (\ref{e2}) is satisfied with equality,
then the necessary conditions mentioned above hold,
and $f$ is satisfiable.
Conversely,
if the necessary conditions mentioned above hold, and $f$ is satisfiable,
then a satisfying truth assignment allows to suitably assign
the vertices in cycle components of $H$
to the sets $A_i$ and $B_i$ with $i\in \{ 1,2,4\}$
in such a way that
all edges of $G$ that do not belong to $M\cup M'$ intersect $A_4\cup B_4$,
$A_1\cup A_2\cup B_1$ is a maximum independent set in $G-M$, and
$A_1\cup A_2\cup B_1\cup B_2$ is a dissociation set in $G$,
that is, (\ref{e2}) is satisfied with equality.
Note that $H$ may contain cycle components $C_i$
for which all three variables $x_i^1$, $x_i^2$, and $x_i^3$ are false,
even if $G$ contains edges connecting $C_i$
to other components of $H$.
In such a case, any of the three vertices $a_i^1$, $a_i^2$, and $a_i^3$
may be located within $A_4$, which yields three different valid possibilities.
Note furthermore that, in case of equality in (\ref{e2}),
the set $A_1\cup A_2\cup B_1\cup B_2$,
which is efficiently constructible as explained above,
is a maximum dissociation set in $G$.
This completes the proof of Theorem \ref{theorem1}.
\subsection{Hardness of deciding equality in (\ref{e3}) to (\ref{e3d})}
In this subsection,
we show Theorem \ref{theorem2}.
For the hardness of deciding equality in (\ref{e3}), (\ref{e3b}), or (\ref{e3c}),
we suitably adapt Karp's proof \cite{ka}
of the NP-completeness of the {\sc Clique} problem,
reducing $3$-{\sc Sat} to the respective problems.
Therefore,
let $f$ be an instance of $3$-{\sc Sat}
consisting of the clauses $C_1,\ldots,C_m$
over the Boolean variables $x_1,\ldots,x_n$.
\noindent For the hardness of deciding equality in (\ref{e3}) or (\ref{e3b}),
we describe the efficient construction of a graph $G$ such that
\begin{eqnarray}\label{ee1}
\mbox{$f$ is satisfiable}
\,\,\,\,\,\,\,\,\Leftrightarrow \,\,\,\,\,\,\,\,
{\rm diss}(G)=2\alpha(G)
\,\,\,\,\,\,\,\,\Leftrightarrow \,\,\,\,\,\,\,\,
{\rm diss}(G)=2\nu_s(G).
\end{eqnarray}
For every clause $C_i=x\vee y\vee z$ in $f$,
where $x$, $y$, and $z$ are the three literals in $C_i$,
we introduce the four vertices $x^i$, $y^i$, $z^i$, and $c^i$ in $G$
that induce a clique $G_i$,
where $x^i$, $y^i$, and $z^i$
are associated with the three literals $x$, $y$, and $z$ in $C_i$.
Note that $G$ has order $4m$.
For every two vertices $u$ and $v$ belonging to different cliques $G_i$
such that the literal associated with $u$
is the negation of the literal associated with $v$,
we add to $G$ the edge $uv$.
This completes the construction of $G$;
see Figure \ref{fig3} for an illustration.
\begin{figure}
\caption{The graph $G$ for the formula $f=C_1\wedge C_2\wedge C_3$ with
$C_1=x_1\vee x_2\vee x_3$.
% NOTE(review): the definitions of $C_2$ and $C_3$ (the source breaks off
% after ``$C_2=\bar{x}$'') were truncated during extraction; restore them
% from the original manuscript.
}
\label{fig3}
\end{figure}
Clearly, the set $I=\{ c^1,\ldots,c^m\}$ is a maximum independent set of $G$,
in particular, we have $\alpha(G)=m$.
The structure of $G$ easily implies that $G$
has a maximum induced matching $M$
that only contains edges from $G_1\cup \ldots \cup G_m$,
in fact, any edge in $M$ between a vertex $x$ in $G_i$ and some $G_j$ for $i\not=j$
can be replaced by the edge $xc^i$.
Similarly, the graph $G$ has a maximum dissociation set $D$
such that all edges induced by $D$ belong to $G_1\cup \ldots \cup G_m$.
These observations easily imply that $G$ satisfies (\ref{e3d}) with equality,
that is, we have ${\rm diss}(G)=\alpha(G)+\nu_s(G)$.
As observed by Karp, the formula $f$ is satisfiable if and only if
$G-I$ has an independent set $I'$ of order $m$.
If $f$ is satisfiable, and $I'$ is as above, then
$I\cup I'$ is a maximum dissociation set in $G$,
and the edges spanned by $I\cup I'$ form a maximum induced matching in $G$,
that is, we have
${\rm diss}(G)=2\nu_s(G)=2m=2\alpha(G)$.
Conversely, if ${\rm diss}(G)=2\alpha(G)$,
then $G$ has a maximum dissociation set $D$ containing $I$,
and $D\setminus I$
is an independent set in $G-I$ of order $m$,
that is, it follows that $f$ is satisfiable.
Similarly, if ${\rm diss}(G)=2\nu_s(G)$,
then (\ref{e3d}) implies $\nu_s(G)=m$, and
$G$ has a maximum induced matching $M$ covering $I$,
and the vertices covered by $M$ not in $I$
form an independent set in $G-I$ of order $m$,
that is, again it follows that $f$ is satisfiable.
This completes the proof of (\ref{ee1}),
which shows the NP-hardness of deciding equality in (\ref{e3}) or (\ref{e3b}).
\noindent For the hardness of deciding equality in (\ref{e3c}),
we describe the efficient construction of a graph $H$
such that $f$ is satisfiable if and only if ${\rm diss}(H)=\alpha(H)$.
For every clause $C_i=x\vee y\vee z$ in $f$,
where $x$, $y$, and $z$ are the three literals in $C_i$,
we introduce the six vertices
$x^{(i,1)}$, $y^{(i,1)}$, $z^{(i,1)}$, $x^{(i,2)}$, $y^{(i,2)}$, and $z^{(i,2)}$ in $H$
that induce a subgraph $H_i$ that is a clique
minus the three edges $x^{(i,1)}x^{(i,2)}$, $y^{(i,1)}y^{(i,2)}$, and $z^{(i,1)}z^{(i,2)}$.
Similarly as above, the vertices $x^{(i,1)}$, $y^{(i,1)}$, and $z^{(i,1)}$ in $H_i$
are associated with the three literals $x$, $y$, and $z$ in $C_i$.
Note that $H$ has order $6m$.
For every two vertices $u$ and $v$ belonging to different subgraphs $H_i$
that are associated with literals
such that the literal associated with $u$
is the negation of the literal associated with $v$,
we add to $H$ the edge $uv$.
This completes the construction of $H$;
see Figure \ref{fig4} for an illustration.
\begin{figure}
\caption{The graph $H$ for the formula $f=C_1\wedge C_2\wedge C_3$ with
$C_1=x_1\vee x_2\vee x_3$.
% NOTE(review): the definitions of $C_2$ and $C_3$ (the source breaks off
% after ``$C_2=\bar{x}$'') were truncated during extraction; restore them
% from the original manuscript.
}
\label{fig4}
\end{figure}
Since every dissociation set in $H$ intersects each $H_i$ in at most two vertices,
and selecting two vertices with exponent $(i,2)$ in $H_i$ for each $i$ in $[m]$
yields a dissociation set in $H$, we have ${\rm diss}(H)=2m$.
By the structure of $H$, we have ${\rm diss}(H)=\alpha(H)$
if and only if $H$ has an independent set that contains, for every $i$ in $[m]$,
exactly one of the vertices with exponent $(i,1)$.
As noted above, this is equivalent to the satisfiability of $f$,
which shows the NP-hardness of deciding equality in (\ref{e3c}).
\noindent For the hardness of deciding equality in (\ref{e3d}),
we describe an efficient reduction from the NP-complete {\sc Independent Set} problem.
Therefore, let $(G,k)$ be an instance of this problem,
that is, the problem of deciding whether $\alpha(G)\geq k$.
Possibly by adding isolated vertices to $G$
and increasing $k$ for each added vertex by one,
we may assume that $2(k-1)>n\geq 2$,
where $n$ is the order of $G$.
We describe the efficient construction of a graph $H$
such that $\alpha(G)\geq k$
if and only if (\ref{e3d}) does {\it not} hold with equality.
The graph $H$ arises from $G$
\begin{itemize}
\item by adding, for every vertex $u$ of $G$, a new vertex $u'$ as well as the edge $uu'$, and
\item by adding a disjoint copy of $(k-1)K_2$, that is, $k-1$ further independent edges,
as well as all possible edges between the original vertices of $G$
and the vertices of the copy of $(k-1)K_2$.
\end{itemize}
If $V$ denotes the vertex set of $G$, then the vertex set of $H$
is $V\cup V'\cup W$, where
$V'=\{ u':u\in V\}$,
$W$ is the set of the $2(k-1)$ vertices of the copy of $(k-1)K_2$,
there are all possible edges between $W$ and $V$,
and no edges between $V'$ and $W$.
The order of $H$ is $2n+2(k-1)$.
It is easy to see that $\alpha(H)=n+k-1$,
in fact, the set $V'$ together with one vertex on each of the $k-1$ edges within $W$
yields a maximum independent set in $H$.
Our next goal is to show ${\rm diss}(H)=n+2(k-1)$.
Since $V'\cup W$ is a dissociation set in $H$, we have ${\rm diss}(H)\geq n+2(k-1)$.
Now, let $D$ be a maximum dissociation set in $H$.
If $D$ intersects both $V$ and $W$, then $D$ contains exactly
one vertex from $V$,
one vertex from $W$, and
all but one vertex from $V'$,
that is, $|D|\leq 1+1+n-1=n+1<n+2(k-1)$, which is a contradiction.
If $D$ does not intersect $W$, then $|D|\leq |V\cup V'|=2n<n+2(k-1)$,
which is a contradiction.
Thus, the set $D$ does not intersect $V$, which,
by the choice of $D$, implies $D=V'\cup W$,
and, hence, we obtain ${\rm diss}(H)=|D|=|V'\cup W|=n+2(k-1)$ as desired.
In view of the $k-1$ independent edges in $W$, we have $\nu_s(H)\geq k-1$.
Since ${\rm diss}(H)=\alpha(H)+k-1$,
in order to complete the proof, it suffices to show that
$\alpha(G)\geq k$ if and only if $\nu_s(H)\geq k$.
If $I$ is an independent set in $G$ of order at least $k$,
then $\{ uu':u\in I\}$ is an induced matching in $H$,
hence $\alpha(G)\geq k$ implies $\nu_s(H)\geq k$.
Now, suppose that $\nu_s(H)\geq k$,
and let $M$ be a maximum induced matching in $H$
containing as few edges with both endpoints in $V$ as possible.
If $M$ contains an edge with both endpoints in $W$,
then all edges in $M$ have both endpoints in $W$,
which implies the contradiction $|M|\leq k-1$.
If $M$ contains an edge between $W$ and $V$,
then we obtain the contradiction $|M|=1$.
Hence, no edge in $M$ covers any vertex of $W$.
If $uv\in M$ for $u,v\in V$,
then $M\setminus \{ uv\}\cup \{ uu'\}$
is a maximum induced matching in $H$
containing fewer edges with both endpoints in $V$ than $M$.
Hence, the choice of $M$ implies that the set of $|M|\geq k$ vertices from $V$ covered by an edge from $M$ is an independent set in $G$,
that is, $\alpha(G)\geq k$.
This completes the proof of Theorem \ref{theorem2}.
\section{Conclusion}
Our initial motivation to consider the extremal graphs for (\ref{e2})
was the attempt to improve the approximation algorithm of Hosseinian and Butenko \cite{hobu}.
This remains to be done.
As explained in the introduction,
the complexity of recognizing the bipartite extremal graphs
for (\ref{e3b}), (\ref{e3c}), and (\ref{e3d}) remains open.
We believe that all three problems are hard.
The estimates (\ref{e1}), (\ref{e2}), and (\ref{e3}) imply
$\alpha(G-M)\leq {\rm diss}(G)\leq 2\alpha(G-M)$
for a given graph $G$ and a given maximum matching $M$ in $G$.
Theorem \ref{theorem2} easily implies that the extremal graphs
for these two inequalities are also hard to recognize.
In fact, if $G$ is a graph with $\alpha(G)\geq 3$,
the graph $H$ arises from the disjoint union of $G$ and $K_{n(G)}$
by adding all possible edges between $V(G)$ and $V(K_{n(G)})$,
and $M$ is a perfect matching of $H$ using only edges between $V(G)$ and $V(K_{n(G)})$,
then
$\alpha(H)=\alpha(H-M)=\alpha(G)$ and
${\rm diss}(H)={\rm diss}(H-M)={\rm diss}(G)$.
This implies that, for every $c\in \{ 1,2\}$,
the graph $G$ satisfies ${\rm diss}(G)=c\cdot \alpha(G)$
if and only if
the graph $H$ satisfies
${\rm diss}(H)=c\cdot \alpha(H-M)$.
\end{document}
|
\begin{document}
\title{
Counterexample to ``Sufficient conditions for uniqueness of the Weak
Value'' by J.\ Dressel and A.\ N.\ Jordan, arXiv:1106.1871v1.
}
\author{Stephen Parrott\thanks{For contact information,
go to http://www.math.umb.edu/$\sim$sp}}
\date{June 24, 2011}
\maketitle
\begin{abstract}The abstract of ``Contextual Values of Observables
in Quantum Measurements'' by J.\ Dressel, S.\ Agarwal, and A. N. Jordan
[Phys. Rev. Lett. {\bf104} 240401 (2010)] (called DAJ
below), states:
\begin{quote}
``We introduce contextual values as a generalization of the
eigenvalues of an observable that takes into account both
the system observable and a general measurement procedure. This
technique leads to a natural definition of a general conditioned
average that converges uniquely to the quantum weak value in
the minimal disturbance limit.''
\end{quote}
A counterexample to the claim of the last sentence was presented in
\cite{parrott3}, a 32-page paper discussing various topics related
to DAJ, and a simpler counterexample in Version 1 of the present work.
Subsequently Dressel and Jordan placed in the arXiv the paper of the
title (called DJ below) which attempts to prove the claim of DAJ quoted
above under stronger hypotheses than given in DAJ, hypotheses which the
counterexample does not satisfy. The present work (Version 5) presents a new
counterexample to this claim of DJ.
\\[1ex]
A brief introduction to ``contextual values'' is included. Also included
is a critical analysis of DJ.
\end{abstract}
\section{Introduction}
A counterexample to a major claim of
\begin{quote}
J. Dressel, S Agarwal, and A.\ N.\ Jordan, ``Contextual values
of observables in quantum measurements'', Phys.\ Rev.\ Lett.\ {\bf 104}
240401 (2010)
\end{quote}
(henceforth called DAJ)
was given in \cite{parrott3}, a 32-page paper discussing DAJ in detail.
The claim in question is stated as follows in DAJ's abstract:
\begin{quote}
``We introduce contextual values as a generalization of the
eigenvalues of an observable that takes into account both
the system observable and a general measurement procedure. This
technique leads to a natural definition of a general conditioned
average that converges uniquely to the quantum weak value in
the minimal disturbance limit.''
\end{quote}
This wording (particularly, ``minimal disturbance limit'') is potentially
misleading, as will be explained briefly below, and is discussed
more fully in \cite{parrott3}.
Version 1 presented a simple counterexample to the claim of the above quote
based on my interpretation of the vague presentation of DAJ.
A later paper by Dressel and Jordan,
``Sufficient conditions for uniqueness of the Weak Value''
\cite{DJ} (henceforth called DJ) adjoined new (and very strong) hypotheses
to DAJ which the counterexample did not satisfy and claimed to prove
that the above quote was correct under these new hypotheses.
The present work presents a new counterexample to that claim.
It also includes the introduction to the
main ideas of DAJ of Version 1 and a critical analysis of DJ.
\section{Notation and brief reprise of DAJ}
To establish notation, we briefly summarize the main ideas of DAJ.
The notation generally follows DAJ except that DAJ denotes operators
by both boldface and circumflex, e.g., $\mbox{\boldmath $\hat{M}$}$, but we omit the boldface
and ``hat'' decorations. Also, we use $P_f$ to denote the operator of
projection onto the subspace spanned by a vector $f$. (DAJ uses
{\boldmath $\hat{E}^{(2)}_f$}.)
When we quote directly an equation of DAJ, we use DAJ's equation number,
which ranges from (1) to (10),
and also DAJ's original notation.
Other equations will bear numbers beginning
with (100).
Suppose we are given a set $\{M_j\}$ of measurement operators,
where $j$ is an index ranging over a finite set.
We assume that the reader is familiar with the theory of measurement operators,
as given, for example, in the book \cite{N/C} of Nielsen and Chuang.
By definition, measurement operators satisfy
\setcounter{equation}{99}
\begin{equation}
\label{eq100}
\sum_j M^\dag_j M_j = I \quad,
\end{equation}
where $I$ denotes the identity operator. With such measurement operators
is associated the {\em positive operator valued measure} (POVM)
$\{E_j\}$ with $E_j := M^\dag_j M_j$. When the system is in a
(generally mixed) normalized state $\rho$
(represented as a positive operator of
trace 1), the probability of a measurement yielding result $j$ is
$\mbox{Tr } [ M^\dag_j M_j \rho ] = \mbox{Tr } [ E_j \rho ]$. Moreover, after the
measurement, the system will be in (unnormalized) state
$M_j \rho M^\dag_j$, which when normalized is:
\begin{equation}
\label{eq105}
\mbox{normalized post-measurement state} =
\frac{M_j \rho M^\dag_j}{\mbox{Tr } [M_j \rho M^\dag_j ]}
\quad.
\end{equation}
For notational simplicity, we normalize states only in calculations
where the normalization factor is material.
We also assume given an operator $A$, representing what DAJ calls
``the system observable'' in the above quote. We ask if it is possible
to choose real numbers $\alpha_j$, which DAJ calls {\em contextual values},
such that
\begin{equation}
\label{eq110}
A = \sum_j \alpha_j E_j \quad.
\end{equation}
This will not always be possible, but we consider only cases for which
it is. When it is possible, it follows that the expectation $\mbox{Tr }[A\rho]$
of $A$ in the state $\rho$ equals the expectation calculated from the
probabilities $\mbox{Tr } [E_j \rho ]$ obtained from the POVM $\{E_j\}$,
with the numerical value $\alpha_j$ associated with outcome
$j$:
\begin{equation}
\label{eq120}
\mbox{Tr } [A\rho] = \sum_j \alpha_j \mbox{Tr } [E_j \rho]
\quad.
\end{equation}
The book \cite{wiseman} of Wiseman and Milburn defines a measurement to
be ``minimally disturbing'' if the measurement operators $M_j$ are all
positive (which implies that they are Hermitian).
\footnote{This is a technical
definition which can be misleading if one does not realize that normal
associations of the English phrase ``minimally disturbing'' are
not implied. Further discussion can be found in \cite{wiseman} and
\cite{parrott3}.}
DAJ uses a slightly more general definition to define their
``minimal disturbance limit'' of the above quote. We shall use
the definition of Wiseman and Milburn \cite{wiseman} because it is simpler
and sufficient for our counterexample. A counterexample under the
definition of Wiseman and Milburn will also be a counterexample under any more
inclusive definition, such as that of DAJ.
A particularly simple kind of measurement is one in which there are
only two measurement operators, $P_f$ and $I-P_f$. Intuitively, this
``measurement'' asks whether the (unnormalized)
post-measurement state is $P_f$ or not.
Here we are using the notation of mixed states. Phrased in terms
of pure states, and assuming that the pre-measurement state $\rho$ is pure,
the measurement determines if the post-measurement state is the pure
state $f$ or a pure state orthogonal to $f$.
Suppose that we make a measurement with the original measurement operators
$M_j$ and then make a second measurement with measurement operators
$P_f, I-P_f$. In this situation, the second measurement is called a
``postselection'', and when it yields state $P_f$, one says that the
postselection has been ``successful''.
Such a compound measurement may be
equivalently considered as a single measurement with measurement
operators $\{P_fM_j, (I-P_f)M_j\}$.
``Successful'' postselection leaves the system in normalized state
\begin{equation}
\label{eq130}
\frac{(P_f M_j)\rho (P_f M_j)^\dag}
{\mbox{Tr } [ (P_f M_j)\rho (P_f M_j)^\dag]}
\quad,
\end{equation}
which is pure state $f$ ($P_f$ in mixed state notation). This result will
occur with probability $p(j,f) = \mbox{Tr } [ (P_f M_j)^\dag P_f M_j\rho ] =
\mbox{Tr }[M^\dag_j P_f M_j \rho ] .$
The probability $p(j|f)$ of first measurement result $j$ given that
the postselection was successful is:
\begin{equation}
\label{eq140}
p(j|f) = \frac{p(j,f)}{\sum_i p(i,f)} =
\frac{\mbox{Tr } [ M^\dag_j P_f M_j \rho ]}
{\sum_i \mbox{Tr } [ M^\dag_i P_f M_i \rho]}
\quad.
\end{equation}
Hence, if we assign numerical value $\alpha_j$ to result $j$ as above,
the conditional expectation of the measurement {\em given} successful
postselection is:
\begin{equation}
\label{eq150}
_f\langle A \rangle :=
\frac{ \sum_j \alpha_j \mbox{Tr } [ M^\dag_j P_f M_j \rho ]}
{\sum_i \mbox{Tr } [ M^\dag_i P_f M_i \rho]}
\quad.
\end{equation}
This is DAJ's ``general conditioned average''.
Written in DAJ's original notation, this reads
$$
\label{eq6}
_f \langle {\cal A} \rangle = \sum_j \alpha^{(1)}_j P_{j|f}
=
\frac{\sum_j \alpha^{(1)}_j \mbox{Tr } [
\hat{\bf E}^{(1,2)}_{jf} \mbox{\boldmath $\hat{\rho}$}]
}
{\sum_j \mbox{Tr } [
\hat{\bf E}^{(1,2)}_{jf} \mbox{\boldmath $\hat{\rho}$}]
}.
\eqno (6)
$$
DAJ's theory of contextual values was motivated by a
theory of ``weak measurements'' initiated by Aharonov, Albert, and Vaidman
\cite{AAV} in 1988.
Intuitively, a ``weak'' measurement is one which negligibly
disturbs the state of the system. This can be formalized by introducing
a ``weak measurement'' parameter $g$ on which the measurement operators
$M_j = M_j(g)$ depend, and requiring that
\begin{equation}
\label{eq160}
\lim_{g \rightarrow 0}
\frac{M_j (g) \rho M^\dag_j (g) }
{\mbox{Tr } [M_j (g) \rho M^\dag_j (g) ]}
= \rho
\quad \mbox{ for all $\rho$ and $j$}
\quad.
This says that for small $g$, the post-measurement state is almost the
same as the pre-measurement state $\rho$ (cf. equation \re{eq130}).
We shall refer to this as
``weak measurement'' or a ``weak limit''.
The ``minimal disturbance limit'' mentioned in the above quote from DAJ's
abstract presumably refers to \re{eq160} combined with their generalization
of Wiseman and Milburn's ``minimally disturbing'' condition that
the measurement operators be positive, and this is the definition that
we shall use.
\footnote{DAJ only partially and unclearly defines
its ``minimally disturbing'' condition,
but in a message to Physical Review Letters (PRL) in response to a
``Comment'' paper that I submitted, the authors of DAJ confirmed that Wiseman
and Milburn's definition implies theirs. DAJ uses but does not define
the phrase ``weak limit'', but in the same message to PRL, the authors
state that \re{eq160} corresponds to ``ideally weak measurement''.
Since ``ideally weak measurement'' must be (assuming normal usage of syntax)
a special case of mere ``weak measurement'', our counterexample which
assumes \re{eq160} will also be a counterexample to the statement of DAJ
quoted in the introduction.
\\[1ex]
I have made several direct inquiries to the authors of DAJ requesting
a precise definition
of their ``minimal disturbance limit'', but all have been ignored.
}
DAJ claims that in their ``minimal disturbance limit''
(which is implied by a weak limit with positive
measurement operators), their
``general conditioned average'' $_f\langle A \rangle$ (6), our \re{eq150},
is always given by:
\begin{equation}
\label{eq170}
_f \langle A \rangle =
\frac{(1/2) \mbox{Tr } [ P_f \{A, \rho\}]}
{\mbox{Tr } [P_f \rho ]}
\quad.
\end{equation}
Our equation \re{eq170} is equation (7) of DAJ:
$$
\label{eq7}
A_w = \frac{\mbox{Tr } [\mbox{\boldmath $\hat{E}$}^{(2)}_f \{ \mbox{\boldmath $\hat{A}$} , \mbox{\boldmath $\hat{\rho}$} \} ]}
{2 \mbox{Tr } [\mbox{\boldmath $\hat{E}$}^{(2)}_f \mbox{\boldmath $\hat{\rho}$} ]} \quad,
\eqno (7)
$$
Here $A_w$ is their notation for ``weak value'' of $A$.
\footnote
{In the traditional theory of ``weak measurement''
initiated by \cite{AAV}, the weak limit (i.e., $\lim_{g \rightarrow 0}$) of
\re{eq150} (equivalently, (6)) would be called a ``weak value'' of $A$,
though the traditional
``weak measurement'' literature calculates it via different procedures.
When $\rho$ is a pure state, most modern authors calculate this weak value
as \re{eq170} (equivalently (7)),
though the seminal paper \cite{AAV} arrived (via questionable
mathematics) at a complex
weak value of which \re{eq170} is the real part.
(Only recently was it recognized that ``weak values'' are not unique
\cite{jozsa}\cite{parrott1}\cite{parrott2}.)
}
The statement of DAJ quoted in the Introduction, that their
\begin{quote}
``$\ldots$ general conditioned
average $\ldots$ converges uniquely to the quantum weak value in
the minimal disturbance limit'',
\end{quote}
implies that for a weak limit of positive measurement operators, their
(6) always evaluates to (7), or in our notation, our \re{eq150} always
evaluates to \re{eq170}. We shall give an example for which
\re{eq150} does {\em not} evaluate to \re{eq170}.
\section{The counterexample}
\subsection{General discussion}
We are assuming the ``minimal disturbance'' condition that
the measurement operators be positive, so in the definition \re{eq150}
of DAJ's ``general conditioned average'', we replace $\stM^\dag_j$ with
$\stM_j$. First we examine its denominator.
Let
\begin{equation}
\label{eq180}
\eta_j(g) := \mbox{Tr } [ \stM_j(g) \strho\stM_j(g) ]
\quad,
\end{equation}
which are inverse normalization factors for the unnormalized post-measurement
states $ \stM_i(g) \strho\stM_i(g) $ (cf.\ \re{eq105}).
We shall assume that all $\eta_j (g)$ are bounded for small $g$,
which is expected (because we expect $M_j(g)$ to approach a multiple
of the identity for small $g$ in order to make the measurement
``weak'') and will be the case for our counterexample.
We have
\begin{eqnarray}
\label{eq190}
\lefteqn{
\lim_{g \rightarrow 0}
\sum_j \mbox{Tr } [\stP_f \stM_j (g) \strho \stM_j (g)]
=
}
\nonumber\\
&&
\lim_{g \rightarrow 0} \sum_j \mbox{Tr } [ P_f
\left(
\frac{
\stM_j (g) \strho \stM_j (g)
}
{\eta_j(g) }
- \strho
\right)
]
\,
\eta_j(g)
\nonumber
\\
&&
\quad\quad\quad\quad\quad + \lim_{g \rightarrow 0}
\sum_j \mbox{Tr }[\stP_f\strho ] \, \eta_j(g) \nonumber\\
&=&
\lim_{g \rightarrow 0}
\sum_j \mbox{Tr }[\stP_f\strho ]\, \mbox{Tr } [\stM_j (g) \strho \stM_j (g) ]
\nonumber \\
&=&
\mbox{Tr } [\stP_f\strho] \lim_{g \rightarrow 0} \mbox{Tr } [ \sum_j
\stM_j (g) \stM_j (g) \strho ]
\nonumber \\
&=& \mbox{Tr } [\stP_f \strho] \quad,
\end{eqnarray}
because $\sum M^2_j = \sum M^\dag_j M_j = I$ and $\mbox{Tr } [ \strho ] = 1$.
This is the denominator of DAJ's claimed result \re{eq170}
(half the denominator of their (7) because both numerator and denominator
of our \re{eq170} differ from (7) by a factor of 1/2).
Next we examine the numerator of the ``general conditioned average''
\re{eq150}. We shall write it as a sum of two terms, the first term leading
to DAJ's \re{eq170}, and the second
a term which does not obviously vanish in the limit
$g \rightarrow 0$.
The counterexample will be obtained by finding a case for which the
limit of the
second term actually does not vanish.
Note the trivial identity for operators $\stM, \strho$:
$$
\stM\rho\stM = \stM [ \strho, \stM] + \stM^2 \strho
$$
and the similar
$$
\stM\rho\stM = -[ \strho, \stM]\stM + \strho\stM^2
\quad.
$$
Combining these gives
\begin{equation}
\label{eq210}
\stM \strho \stM =
\frac{1}{2} \{\stM^2 , \strho \}
+ \frac{1}{2} [ \stM, [\strho, \stM ]]
\quad.
\end{equation}
Using \re{eq210} and the contextual value equation
\re{eq110}, $A = \sum_j \alpha_j \stE_j = \sum_j \alpha_j M^2_j$,
we can rewrite the numerator of \re{eq150} as
\begin{eqnarray}
\label{eq220}
\mbox{numerator of \re{eq150}} &=&
\sum_j \alpha_j \mbox{Tr } [ \stM_j \stP_f \stM_j \rho ] \nonumber\\
&=&
\sum_j \alpha_j \mbox{Tr } [ \stP_f \stM_j \rho \stM_j ] \\
&=&
\frac{1}{2} \mbox{Tr } [ \stP_f \{ \stA, \strho \} ]
+ \sum_j \frac{1}{2}\alpha_j \mbox{Tr } [ \stP_f [\stM_j , [\rho, \stM_j]\, ]\, ]
\nonumber
\quad.
\end{eqnarray}
After division by the denominator of $\re{eq150}$, the first term
gives DAJ's claimed (7) in the limit $g \rightarrow 0$, our \re{eq170},
and the second term gives
\begin{eqnarray}
\label{eq230}
\mbox{difference between weak limit of (6) and (7) =}
&& \nonumber \\
&& \nonumber \\
\lim_{g \rightarrow 0}
\frac{\sum_j \frac{1}{2}\alpha_j(g) \mbox{Tr }
[ \stP_f [\stM_j (g) ,\, [\rho, \stM_j (g)]\, ]
\,]
}
{\mbox{Tr } [ \stP_f \strho ]}
.
We shall call \re{eq230} the ``anomalous term''.
Since there is no obvious control over the size of the $\alpha_j (g)$,
a counterexample is expected, but was surprisingly hard to find.
The Version 1 counterexample for the quoted claim of DAJ and the newer
counterexample for the new claim of DJ are identical up to this point.
The difference is that the Version 1 counterexample used three $2 \times 2$
diagonal matrices as measurement operators, resulting in a contextual value equation
\re{eq110} with multiple solutions, whereas the newer counterexample
uses three $3 \times 3$ diagonal matrices for which there is a unique
solution to \re{eq110}. The newer counterexample could supersede the
Version 1 example, but
we retain the original counterexample because of its simple and
intuitive nature (e.g., all steps can be mentally verified).
\subsection{The Version 1 counterexample}
The ``system observable'' $A$ for the counterexample will correspond
to a $2 \times 2$ matrix
\begin{equation}
\label{eq240}
A := \left[
\begin{array}{cc}
a & 0 \\
0 & b \\
\end{array}
\right]
\end{equation}
There will be three measurement operators:
\begin{eqnarray}
\label{eq250}
M_1 (g) &:=&
\left[
\begin{array}{cc}
1/2 + g & 0 \\
0 & 1/2 - g
\end{array}
\right]
, \ \
M_2 (g) :=
\left[
\begin{array}{cc}
1/2 - g & 0 \\
0 & 1/2 + g
\end{array}
\right] ,
\\
M_3(g) &:=&
\left[
\begin{array}{cc}
\sqrt{1/2 - 2 g^2} & 0 \\
0 & \sqrt{1/2 - 2 g^2}
\end{array}
\right] .
\nonumber
\end{eqnarray}
Note that $M_3 (g)$ is uniquely defined by the
measurement operator equation $\sum_{j=1}^3 M^2_j (g) = 1$ and that
all three measurement operators approach multiples of the identity
as $g \rightarrow 0$, which assures weakness of the measurement.
Note also that $M_3 (g)$ is actually a multiple of the identity for
all $g$, so the
commutators in the expression \re{eq230} for the anomalous term which
involve $M_3$ vanish. That is, $M_3$, and hence $\alpha_3$,
make no contribution to the anomalous term.
Writing out the contextual value equation \re{eq110} in components
gives two scalar equations in three unknowns:
\begin{eqnarray}
\label{eq260}
(1/2 + g)^2 \alpha_1 (g) + (1/2 - g)^2 \alpha_2 (g) +
(1/2 - 2g^2) \alpha_3 (g) &=& a \\
(1/2 - g)^2 \alpha_1 (g) + (1/2 + g)^2 \alpha_2 (g)
+ (1/2 - 2g^2) \alpha_3 (g) &=& b \nonumber \quad.
\end{eqnarray}
The solution can be messy because of the algebraic
coefficients. However, for the case $a = 1 = b$, a solution can be
obtained without calculation.
This choice
of $a$ and $b$ corresponds to the system observable being the identity
operator, so the measurement is not physically interesting,
but it gives a mathematically valid example with minimal calculation.
Later we shall indicate how counterexamples can be obtained
for other choices of $a$ and $b$ from appropriate solutions of \re{eq260}.
Assuming $a = 1 = b$, the system \re{eq260} can be rewritten
\begin{eqnarray}
\label{eq270}
(1/2 + g)^2 \alpha_1 (g) + (1/2 - g)^2 \alpha_2 (g)
&=& 1 - (1/2 - 2g^2) \alpha_3 (g) \\
(1/2 - g)^2 \alpha_1 (g) + (1/2 + g)^2 \alpha_2 (g)
&=& 1 - (1/2 - 2g^2) \alpha_3 (g) \nonumber
\quad.
\end{eqnarray}
We will think of $\alpha_3 (g)$ as a free parameter to be arbitrarily chosen,
and as noted previously, the choice will not affect
the anomalous term \re{eq230}.
Viewed in this way, \re{eq270}
becomes a system of two linear equations in two unknowns
which become the same equation if
$\alpha_2 = \alpha_1$, with solution
\begin{equation}
\label{eq280}
\alpha_2(g) =\alpha_1 (g) =
\frac{1 - (1/2 -2g^2)\alpha_3(g)}
{ (1/2 +g)^2 + (1/2 - g)^2} =
\frac{1 - (1/2 -2g^2)\alpha_3(g)}{1/2 + 2g^2}
.
\end{equation}
Since $\alpha_3$ can be chosen arbitrarily, also $\alpha_2 = \alpha_1$ can
be arbitrary; we shall choose $\alpha_3(g)$ so that
\begin{equation}
\label{eq290}
\alpha_2(g) = \alpha_1 (g) = \frac{1}{g^2}
\quad.
\end{equation}
To see that this solution
will produce a counterexample,
note that for
$$
\rho =
\left[
\begin{array}{cc}
\rho_{11} & \rho_{12}\\
\rho_{21} & \rho_{22}
\end{array}
\right]
$$
and for any diagonal matrix
$$
D =
\left[
\begin{array}{cc}
d_{1} & 0\\
0 & d_2
\end{array}
\right]
,
\quad [D, \rho] =
\left[
\begin{array}{cc}
0 & (d_1 - d_2) \rho_{12}\\
(d_2 - d_1)\rho_{21} & 0
\end{array}
\right],
\ \mbox{and}
$$
$$
[\, D, [D, \rho]\,] =
\left[
\begin{array}{cc}
0 & (d_1 - d_2)^2\rho_{12}\\
(d_2 - d_1)^2 \rho_{21} & 0
\end{array}
\right]
.
$$
In particular for $j = 1,2$,
$$
[\ M_j(g),\, [M_j(g), \rho]\,] =
\left[
\begin{array}{cc}
0 & 4g^2 \rho_{12} \\
4g^2 \rho_{21} & 0
\end{array}
\right] \quad,
$$
and since $M_3 (g)$ is a multiple of the identity, $[M_3(g), \rho] = 0.$
Hence \re{eq230} becomes:
\begin{equation}
\label{eq295}
\frac{-(1/2)\mbox{Tr } [P_f \sum_j \alpha_j
[M_j(g),\, [M_j(g), \rho]\ ]\ ]}
{\mbox{Tr } [ \stP_f \strho ]}
=
\frac{
- \mbox{Tr } [P_f
\left[
\begin{array}{cc}
0 & 4\rho_{12}\\
4\rho_{21} & 0\\
\end{array}
\right] ]
}
{\mbox{Tr } [ \stP_f \strho] }
\quad.
\end{equation}
This is easily seen to be nonzero for $\rho_{12} \neq 0$ and appropriate
$P_f$. For a norm 1 vector $f :=
(f_1, f_2) $
\begin{equation}
\label{eq300}
\mbox{weak limit of (6)} =
\frac{\mbox{Tr } [ P_f \{A, \rho \} ]}
{2 \mbox{Tr } [P_f \rho ]}
+ \frac{- 8 \Re ({f}^*_2 f_1 \rho_{21}) }
{|f_1|^2 \rho_{11} + 2 \Re ({f}^*_2 f_1 \rho_{21}) + |f_2|^2 \rho_{22} }.
\end{equation}
The counterexample just given assumed that the system observable
$A := \mbox{diag$\{ a,b\} $ } $
was the identity to make the calculations easy, but counterexamples
can be obtained for any system observable. For example, if $A$ is the
one-dimensional projector
$A := \mbox{diag$\{1,0\} $ } $, and if system \re{eq270} is solved with
$\alpha_1(g) := 1/g^2$, then $\alpha_2(g) = 1/g^2 - 1/(2g)$, and
the weak limit of the anomalous term is the same as just calculated for
$A = I$. \cite{parrott3}
DJ \cite{DJ} adds additional (very strong) hypotheses to those of DAJ
which the counterexample just given does not satisfy.
\footnote{
However, the fact that these additional conditions cannot reasonably be
inferred from DAJ is not made clear by DJ, and a casual reader might well
obtain the opposite impression.
}
Assuming these
additional conditions, DJ attempts to prove that (6) evaluates to (7)
in their ``minimal disturbance limit''.
The next sections will present a more powerful counterexample which
disproves this new claim of DJ.
Originally a new paper with the more powerful counterexample was
submitted to the arXiv, but a moderator rejected it. He thought that
instead, Version 1 should be modified.
Rather than waste time on a
lengthy and unpleasant appeal, I decided that it would be easier to do that.
The paper to this point is Version 1.
The sections following comprise essentially
the rejected arXiv paper, which presents the more powerful counterexample
and critically analyzes DJ.
The new counterexample is fairly simple,
utilizing three $3 \times 3$ matrices,
but not as intuitive as one would like. It was found by analyzing the
properties that measurement operators might have in order that (7) could
be shown false, and then playing with parametrized $3\times 3$
measurement operators, trying to adjust the parameters so that (7) would
not hold. The Version 4 counterexample is simpler and more powerful than
the Version 2 counterexample.
No doubt even simpler counterexamples could be found.
Besides the new counterexample, we attempt to clarify some statements in DJ
which we think might be misleading.
\section{The new additional hypotheses for the claim that (6) implies (7)
in the
``minimal disturbance limit''}
Section 5 of DJ lists several additional assumptions, the most important
of which are:
\begin{enumerate}
\item
The $M_j$ commute with each other and $A$ (so they can all be represented
by diagonal matrices).
\\[1ex]
This is a strong assumption.
It is hard to imagine how it could reasonably be inferred
or even guessed from DAJ. The closest reference in DAJ to something similar
is the following.
\begin{quote}
``To {\em illustrate} [emphasis mine] the construction of
the least redundant set of [contextual values],
we consider the case when $\{\mbox{\boldmath $\hat{M}$}_j \}$ and $\mbox{\boldmath $\hat{A}$}$ all commute.''
\end{quote}
Nothing is said about this being a general assumption for the rest of the
paper. Indeed, such an assumption would seriously restrict the applicability
of the following definition (6) of ``general conditioned average''
$_f \langle \mbox{\boldmath $\hat{A}$} \rangle $, which requires no such assumption.
I studied DAJ for months without ever being led to even consider
the possibility that this might be an
{\em assumption} for the general claims of its abstract.
\item
The contextual values $\vec{\alpha} = (\alpha_1, \ldots, \alpha_N)$ are
obtained from the eigenvalues $\vec{a} = (a_1, \ldots, a_m)$ of
$A = \mbox{diag} ( a_1, \ldots , a_m)$ as
$$
\vec{\alpha} = F^{(+)} \vec{a}
$$
where $F$ is an $N \times m$ matrix satisfying $F \vec{\alpha} =
\vec{a}$ and $F^{(+)}$ its Moore-Penrose pseudo-inverse.
The Version 1 counterexample does not satisfy this condition.
\\[1ex]
Relying only on what is written in DAJ,
it would be very hard for a reader to guess that this is supposed to be
a {\em hypothesis} for (6), or for a claim that (6) implies (7), or both.
(I did consider these possibilities, but rejected them as too unlikely,
as will be explained later in more detail.)
The only
passage of DAJ which seems possibly relevant is:
\begin{quote}
``$\ldots$ we propose that the physically sensible choice of
[contextual values] is the least redundant set uniquely related to the
eigenvalues through the Moore-Penrose pseudoinverse.''
\end{quote}
DAJ gives no reason why this should be the ``physically
sensible choice''. (DJ does attempt to address this issue, but
unconvincingly and badly incorrectly, as will be discussed later.)
Again, to assume this
would seem to artificially limit the applicability of (6),
since (6) is correct independently of
this assumption.
\end{enumerate}
We do not list the other hypotheses for DJ's attempted proof that (6)
implies (7) because they are more technical and less surprising
than the two just discussed.
Our counterexample will satisfy all of the hypotheses listed in DJ.
The counterexample for Version 2 has been replaced by a
simpler example in Version 4.
\section{A counterexample to the claim of DJ}
Section V of DJ entitled ``General Proof'' attempts to show that
(6) implies (7) under their listed hypotheses. The present section
presents a counterexample which satisfies all of their listed
hypotheses, yet the weak limit of their ``general conditioned average'' (6)
is {\em not} the ``quantum weak value''(7).
We follow identically the analysis of Section 3
through equation \re{eq230}.
This time, we use a system observable
\setcounter{equation}{200}
\begin{equation}
\label{eq500}
A =
\left[
\begin{array}{ccc}
1 & 0 & 0 \\
0 & 0 & 0 \\
0 & 0 & 0
\end{array}
\right]
\quad.
\end{equation}
and three measurement operators
which are $3 \times 3$ diagonal matrices:
\begin{eqnarray}
\label{eq510}
M_1 (g) &:=&
\left[
\begin{array}{ccc}
\sqrt{1/2 + g} & 0 & 0 \\
0 & \sqrt{1/2} & 0 \\
0 & 0 & \sqrt{1/2 + g}
\end{array}
\right]\quad, \nonumber\\
M_2(g) &:=&
\left[
\begin{array}{ccc}
\sqrt{1/3 + g^2} & 0 & 0 \\
0 & \sqrt{1/3 + g} & 0 \\
0 & 0 & \sqrt{1/3}
\end{array}
\right]\quad, \nonumber\\
\quad M_3(g) &:=&
\left[
\begin{array}{ccc}
\sqrt{1/6 - g - g^2} & 0 & 0 \\
0 & \sqrt{1/6 - g } & 0 \\
0 & 0 & \sqrt{1/6 - g}
\end{array}
\right].
\end{eqnarray}
The contextual values $\vec{\alpha} = (\alpha_1, \alpha_2,
\alpha_3)$ satisfy $ F \vec{\alpha} = \vec{a} := (1,0,0)^T$ with
\begin{equation}
\label{eq520}
F =
\left[
\begin{array}{ccc}
1/2 + g & 1/3 + g^2 & 1/6 - g - g^2\\
1/2 & 1/3 + g & 1/6 - g \\
1/2 + g & 1/3 & 1/6 - g
\end{array}
\right]
\end{equation}
The matrix $F$ is invertible with inverse (which is also equal to the
Moore-Penrose pseudoinverse $F^{(+)}$)
\begin{equation}
\label{eq525}
F^{(+)} = F^{-1} =
\mbox{\Large
$
\left[
\begin{array}{ccc}
\frac{1-6g}{6g^2} & \frac{1-2g}{2g} & \frac{-1+9g}{6g^2} \\
\frac{1-6g}{6g^2} & \frac{1 + 2g}{2g} & \frac{-1+3g}{6g^2} \\
\frac{-5 - 6g}{6g^2} & \frac{1+2g}{2g} & \frac{3g+5}{6g^2}
\end{array}
\right].
$
}
\end{equation}
The important thing to note is that the first column, which is also
$(\alpha_1, \alpha_2, \alpha_3 )^T$, is of leading order $1/g^2$
as $ g \rightarrow 0$, which is all that the subsequent proof will use:
\begin{equation}
\label{eq527}
\alpha_1 (g) = \alpha_2 (g) = \frac{1-6g}{6g^2} ,\quad
\alpha_3 (g) = \frac{-5 - 6g}{6g^2}.
\end{equation}
The full inverse \re{eq525} was obtained from a computer algebra program,
and the first column (which is all that the counterexample will use)
was also checked by hand using Cramer's rule.
Equations \re{eq190} through \re{eq230} write
the ``general conditioned average'' $_f\langle A \rangle$ of (6)
as a sum of two terms, one of which
evaluates to (7) in the weak limit $g \rightarrow 0$. The other term, called
the ``anomalous term'', is given by \re{eq230} as:
\begin{eqnarray}
\label{eq530}
\mbox{difference between weak limit of (6) and (7) =}
&& \nonumber \\
&& \nonumber \\
\lim_{g \rightarrow 0}
\frac{\sum_k \frac{1}{2}\alpha_k(g) \mbox{Tr }
[ \stP_f [\stM_k (g) ,\, [\rho, \stM_k (g)]\, ]
\,]
}
{\mbox{Tr } [ \stP_f \strho ]}
.
\end{eqnarray}
To disprove the claim of DJ, we need to show that there exists
a state $\rho$ and vector $f$ such that the anomalous term
does not vanish.
It is well-known that the only matrix $S$ such that for all projection
matrices $P_f$, $\mbox{Tr } [ P_f S] = 0,$ is the zero matrix $S=0$.
\footnote{A computational proof is routine, but to see this without
calculation, recall that $\langle S, T \rangle := \mbox{Tr } S^\dag T $ defines a
complex Hilbert space structure
(i.e., positive definite complex inner product)
on the set of $n \times n$ matrices.
If $\langle S, T \rangle$
vanishes for all projectors $T = P_f$, then (by the spectral theorem),
it vanishes for all Hermitian $T$, and hence for all $T$, in which case
$S$ is orthogonal to all elements of this Hilbert space and hence is the
zero element.
}
Hence it will be enough to show that
\begin{equation}
\label{eq540}
\lim_{g \rightarrow 0}
\sum_k -\frac{1}{2}\alpha_k(g)
[\stM_k (g) ,\, [ \stM_k (g), \rho ] \, ]
\neq 0.
\end{equation}
for some mixed state $\rho$ such that for all nonzero vectors $f$,
$\mbox{Tr } [P_f \rho] \neq 0$.
First note that for any diagonal matrix $D = \mbox{diag} (d_1, d_2, d_3)$
and any matrix $\rho = (\rho)_{ij}$,
\begin{equation}
\label{eq550}
[D,[D,\rho]]_{ij} = (d_i - d_j)^2 \rho_{ij} \quad.
\end{equation}
In the cases of interest to us, $D$ will be one of the measurement operators
$M_k (g)$ , $ (d_i(g) - d_j (g))^2 = O(g^2)$ for all $i,j$, and for some
$i,j,$ the leading order of $(d_i (g) - d_j (g))^2$ is actually $g^2.$
The $\alpha_k (g)$
all diverge like
$1/g^2$ as $g \rightarrow 0$. Thus we can see without calculation that
we will obtain a counterexample unless some unrecognized relation forces
the terms of \re{eq540} to exactly cancel.
\footnote{
One useful observation that we can make
from what we have done so
far without detailed calculation
is that the attempted proof of DJ is likely wrong or at least
seriously incomplete, since that attempted proof concludes the vanishing
of \re{eq540}
on the basis of order of magnitude arguments only. Though framed in
different language, it essentially says that \re{eq540} must vanish because
they think that
$\alpha_k (g) = (F^{(+)} (g) (1,0,0)^T)_k = O(1/g)$
(contradicting \re{eq527}),
while $[M_j(g), [M_j(g), \rho]] = O(g^2).$
}
That cancellation does not occur in this case can be seen with
minimal calculation
as follows. In \re{eq550}, take $(i,j) := (1,2)$, and note that
$(d_1 - d_2)^2$ is always non-negative. When $D = M_3 (g)$, from the
power series
$$
\sqrt{c + x} = \sqrt{c} + \frac{x}{2\sqrt{c}} + O(x^2)\quad,
$$
one sees that $(d_1 - d_2)^2 = O(g^4)$, and since $\alpha_3(g)$ is
only $O(g^{-2})$, the $k=3$ term in \re{eq540} vanishes in the limit
$g \rightarrow 0$.
We also have
$$
\alpha_1 (g) = \alpha_2 (g) = \frac{1}{6g^2} - \frac{1}{g}
\quad,
$$
and for either $D = M_1(g)$ or $D = M_2(g)$,
$$
(d_1 - d_2)^2 = (g/\sqrt{2} + O(g^2))^2 = g^2/2 + O(g^3) \quad
$$
So, in the limit $g \rightarrow 0$, \re{eq540}
evaluates to
\begin{equation}
\langlel{eq552}
-\frac{1}{2} \frac{1}{6} \frac{1}{2} \rho_{12} = -\frac{\rho_{12}}{24} \quad.
\end{equation}
Note that all we care about is that \re{eq552} does not always vanish,
and this can be seen solely from the fact that
$\alpha_1$ and $\alpha_2$ have the same sign, so that the $k=1,2$
terms in \re{eq540} are negative multiples of $\rho_{12}$
which do not vanish identically in the limit $g \rightarrow 0$.
To finish the proof, let $\rho$ be a positive definite state
(i.e., all eigenvalues
strictly positive) such that $\rho_{12} \neq 0$. Such a state can
be constructed by starting with a positive definite diagonal state and adding
a small perturbation to assure $\rho_{12} \neq 0$ (which will result in
a positive definite state if the perturbation is small enough).
Since $\rho$ is positive definite, $\mbox{Tr }[\rho P_f] \neq 0$ for all nonzero
vectors $f$, and we are done.
\section{Discussion of DJ}
\subsection{Possible error in DJ's proof}
The counterexample given above unfortunately relies on some detailed calculation.
A conceptual counterexample would certainly be preferable. A reader
interested in discovering the truth of the matter will be faced with the
unpleasant choice of wading through DJ's dense proof or checking the boring
details of the counterexample. For such readers, it may be helpful if
we point out what seems a potentially erroneous step in DJ's proof.
A step which caused me to question their proof occurs at the very end of
their Section V:
\begin{quote}
``$\ldots$ to have a pole of order higher than $g^n$ [$n=1$ in the
counterexample] then there must be at least one relevant singular value
with an order greater than $g^n$.
[The counterexample has a singular value of order $g^2$.]
However, if that were the case
then the expansion of $F$ to order $g^n$ would have a relevant singular
value of zero and therefore could not satisfy (25) $\ldots$''
\end{quote}
I have not been able to guess a meaning for
``the expansion of $F$ to order $g^n$ would have a relevant singular
value of zero'' under which the last sentence would be true.
\subsection{Significance of the Moore-Penrose pseudo-inverse}
The original paper DAJ \cite{DAJ} introduced the Moore-Penrose pseudo-inverse
as follows:
\begin{quote}
``$\ldots$ we propose that the physically sensible choice of [contextual
values $\vec{\alpha}$] is the least redundant set
uniquely related to the eigenvalues [$\vec{a} = (a_1, \ldots, a_m)$ with
$ A = \mbox{diag} (a_1, \ldots, a_m$)]
through the Moore-Penrose pseudoinverse.''
\end{quote}
I puzzled for a long time over this statement. Besides the fact that
the meaning of ``least redundant set'' was obscure to me, they give no
reason why this choice (which presumably means
$\vec{\alpha} = F^{(+)} \vec{a}$,
with $F^{(+)}$ the Moore-Penrose pseudo-inverse) should be considered
the unique ``physically sensible'' choice, or even {\em a} physically
sensible choice. The arXiv paper DJ which we are discussing attempts
to fill this gap, but the attempt relies on erroneous mathematics and
is unconvincing.
Before starting the discussion of this attempt, let me remark that
although the attempt seems partly aimed at
invalidating the counterexample of \cite{parrott4},
it is basically irrelevant to that aim. That
counterexample is a valid mathematical counterexample to
a mathematical claim of DAJ as I imagine the vague exposition of
DAJ would probably be interpreted by most readers.
Though the counterexample
uses a particular solution of the contextual value equation
$F \vec{\alpha} = \vec{a}$, it was never claimed that this solution
has any physically desirable properties.
Though DJ does {\em not} show that the counterexample is unphysical
as DJ claims, even if it were shown unphysical, it would still disprove
the claim that (6) necessarily evaluates to (7)
in the ``minimal disturbance limit''. A reader of DAJ cannot reasonably
be expected to guess that the definition of ``minimal disturbance limit''
is supposed to include the pseudo-inverse prescription.
Therefore, the discussion will be directed toward analyzing the claim
of DJ that the pseudo-inverse solution should be preferred because
DJ thinks (incorrectly) that
\begin{quote}
``$\ldots$ the pseudo-inverse solution will choose the solution that
generally provides the most rapid statistical convergence for observable
measurements on the system.''
\end{quote}
A careful analysis of DJ's
argument for this claim will reveal flaws which invalidate it.
DJ writes:
\begin{quote}
``With the pseudo-inverse in hand, we then find a uniquely specified
solution $\vec{\alpha}_0 = F^{(+)} \vec{a}$ that is directly related
to the eigenvalues of the operator. Other solutions
$\alpha = \vec{\alpha}_0 + x $ of (3) will contain additional components
in the null space of $F$, and will thus deviate from this least
redundant solution. [True if sympathetically interpreted, but tautological.]
Consequently, the solution $\alpha_0$ has the least norm of all solutions
$\ldots$''
\end{quote}
The Euclidean norm $||\vec{\alpha}||^2 := \sum_j \alpha^2_j$ in the real Hilbert space
$R^n$ has no physical significance in quantum theory. Why is it relevant
that $\vec{\alpha}_0$ has least norm? The discussion immediately following may
possibly be intended to answer this, but when analyzed it only tautologically
repeats what has already been said. However, an inattentive reader could
easily get the impression that something had been proved.
DJ thinks that this immediately following discussion (at the bottom of
the first column of p.4) gives ``mathematical reasons for using the
pseudoinverse'', but in fact no convincing reason has been given.
The next paragraph continues:
\begin{quote}
``In addition to the mathematical reasons for using the pseudoinverse in this
context [referring to the discussion just analyzed, which doesn't give any
convincing mathematical reason], there is an important physical one that
we will now describe. As mentioned, a fully compatible detector
can be used together with the contextual values to reconstruct
any moment of a compatible observable.
However, since the detector outcomes are imperfectly correlated with
the observable, the contextual values typically lie outside of the
eigenvalue range and many repetitions of the measurement must be practically
performed to obtain adequate precision for the moments.
{\em Importantly,
the uncertainty in the moments is controlled by the the variance,
not of the observable operator, but of the contextual values themselves.}
[emphasis mine]''
\end{quote}
Consider a probability space with outcomes
$\{1, 2, \ldots , n\}$ with probability $p_j $ for outcome $j$.
A {\em random variable} $v$ is an assignment $j \mapsto v_j$ of a
real number $v_j$ to each outcome $j$. The {\em mean} $\bar{v}$
of $v$ is defined as usual by
$$
\bar{v} := \sum_j v_j p_j \quad,
$$
and the {\em variance} $\tau^2$ of $v$ is defined by
$$
\tau^2 := \sum_j (v_j - \bar{v})^2 p_j = \sum_j v_j^2 p_j - \bar{v}^2 \quad.
$$
Here we use the symbol $\tau^2$ instead of the customary $\sigma^2$ to denote
the variance to avoid confusion with the different $\sigma^2$ defined in DJ
(as the second moment).
One can speak of the
``variance'' of a random variable on a classical probability space, or
of the ``variance'' of quantum observable measured in a given state.
But what can it mean to speak of the ``variance'' of contextual values
$\alpha_j$? Contextual values
are {\em predefined} to
satisfy
$$
A = \sum_j \alpha_j M_j^\dag M_j
\quad, \eqno (\ref{eq110})
$$
where $A$ is the ``system observable'' and $\{M_j\}$ a collection of
measurement operators. What is {\em measured} are the outcomes $j$.
However, even though we know the contextual values beforehand from
\re{eq110}, one might speak of ``measuring'' them
in the following sense. To every outcome $j$ corresponds a contextual
value $\alpha_j$. A given state of the system $\rho$
makes the set of all outcomes $j$ into a probability space by
assigning a probability $p_j$ to each outcome $j$:
$p_j = \mbox{Tr } [M^\dag_j M_j\rho] $.
The assignment $j \mapsto \alpha_j$
is a random variable on this probability space, and it is meaningful
to speak of its ``variance''.
The subsequent analysis assumes
that this is the meaning that DJ intended. This discussion may seem
inappropriately elementary, but I was initially puzzled about this point, and
it cannot hurt to make it explicit.
Note that the mean $\bar{\alpha} = \mbox{Tr } [A\rho]$ of this random variable
is the same no
matter how the contextual values $\alpha_j$ are chosen so long as they
satisfy the contextual value equation \re{eq110}.
That implies that choosing the contextual values so as to minimize the
true variance $\tau^2$ in a given state is equivalent to minimizing
the second moment $\sigma^2$.
Note also that the mean and variance implicitly depend on the state $\rho$,
and that there is no reason to think that one might be able to choose the
contextual values so as to minimize the variance in {\em all} states.
DJ continues:
\begin{quote}
``Consequently, it is in the experimenters best interests to minimize the
second moment of the contextual values,
\begin{equation}
\label{eq610}
\sigma^2 = \sum_j \alpha^2_j p_j,
\end{equation}
where $p_j$ is the probability of outcome $j$.''
\end{quote}
DJ correctly identifies $\sigma^2$ as the second moment, but
unless read very carefully,
the subsequent discussion
could encourage confusion of $\sigma^2$ with the true
variance $\tau^2$.
Next DJ notes that $|| \vec{\alpha} ||^2$ is a (very crude) upper bound
for $\sigma^2$:
$$
\sigma^2 := \sum_j \alpha^2_j p_j \leq \sum_j \alpha^2_j =
||\vec{\alpha}||^2 \quad. \eqno (*)
$$
\begin{quote}
``In the absence of prior knowledge about the system one is dealing with,
this is the most general bound one can make.
Therefore, the pseudo-inverse solution will choose the solution that
generally provides the most rapid statistical convergence for observable
measurements on the system.''
\end{quote}
This is highly questionable.
Although it may not be clear at this point, subsequent
paragraphs make clear that DJ is claiming that
it is legitimate to use $||\vec{\alpha}||^2$ as a sort of estimate
for $\sigma^2$, the strange and invalid justification for the claim
being the sentences
of the quote following equation (*).
DJ's next paragraph computes $|| \vec{\alpha} (g) ||^2$ for both the $\vec{\alpha}(g)$
used in the counterexample of \cite{parrott4} and for the
pseudo-inverse solution $\vec{\alpha}_0 (g) = F^{(+)}(g) \vec{a}$,
using $|| \vec{\alpha} (g) ||^2$ as a kind of crude estimate
for $\sigma^2 = \sigma^2 (g) = \sigma^2 (g, \rho)$.
\begin{quote}
``For the case of the counterexample, the Parrott solution (13)
[(13) should be (11)] has to leading order the bound on the variance
$$
||\vec{\alpha}||^2 = \frac{3}{g^4} - \frac{3(a-b)}{2g^3} +
O(\frac{1}{g^2}), \eqno (15)
$$
while the pseudoinverse solution (11) [(11) should be (13)] has to leading
order the bound
$$
||\vec{\alpha}||^2 = \frac{(a-b)^2}{8g^2} + \frac{2}{3}(a+b)^2
+ O(g^2). \eqno (16)
$$
For any observable $\vec{a}$, {\em the Parrott solution has detector
variance of order $O(1/g^4)$}[emphasis mine], which would swamp
any attempt to measure an observable near the weak limit. $\ldots$
However, the pseudoinverse solution has a detector variance of order
$O(1/g^2)$ in the worst case; $\ldots$ ''
\end{quote}
What invalidates the argument is the use
of the crude upper bound (*) as an estimate for the second moment
$\sigma^2$ and the subsequent claim that ``the Parrott
solution has detector variance of order $O(1/g^4)$
$\ldots$ ''.
\footnote{DJ incorrectly identifies $\sigma^2$ as the ``variance'' instead
of the second moment, but this is a mere slip.
Ignoring this slip, technically
one could argue that this statement is correct
because to say that a quantity is $O(1/g^4)$ only means that it increases
{\em no faster} than $1/g^4$ as $g \rightarrow 0$.
For example, $g^8 = O(1/g^4)$. However,
in the context and taking into account the typically sloppy use
of the ``big-oh'' notation in the physics literature, most readers would
probably interpret this passage as claiming that the ``Parrott solution''
has variance of {\em leading order} $1/g^4$, which would be an invalid
conclusion from the argument.
}
Solely from {\em upper bounds} for two quantities, one cannot
draw any reliable conclusions about the relative size of the
quantities themselves.
To see this clearly in a simpler context which uses essentially
the same reasoning,
consider the upper bounds
$$ x < x^4 \quad \mbox{and} \quad x^2 < x^3 $$
for real numbers $x > 1$.
From the fact that the first upper bound $x^4$ (for $x$) is larger than
the second upper bound $x^3$ (for $x^2$), we cannot conclude that
$x$ is larger than $x^2$ for $x > 1$. Yet DJ's argument relies on
this type of incorrect reasoning.
In the interests of following closely the exposition of DJ, we passed
rapidly over (*). Let us return to analyze it more closely:
$$
\sigma^2 := \sum_j \alpha^2_j p_j \leq \sum_j \alpha^2_j =
||\vec{\alpha}||^2 \quad. \eqno (*)
$$
\begin{quote}
``In the absence of prior knowledge about the system one is dealing with,
this is the most general bound one can make.
Therefore, the pseudo-inverse solution will choose the solution that
generally provides the most rapid statistical convergence for observable
measurements on the system.''
\end{quote}
Note once again that $\sigma^2 = \sigma^2 (\rho)$ depends implicitly on
the state $\rho$ because the probabilities $p_j = \mbox{Tr } [\rho M^\dag_j M_j]$
of outcome $j$ depend on $\rho$. Keeping this in mind, one sees how
crude the upper bound (*) really is.
For a nonzero system observable $A$, equality holds in (*)
(i.e., $\sigma^2 (\rho) = ||\vec{\alpha}||^2)$ only in the trivial case
in which one particular $p_J = 1$ and the others vanish, and in addition,
$\alpha_j (g) =0$ for $j \neq J$. That corresponds to the trivial case in
which there is effectively only one measurement operator $M_J (g)$
satisfying $\alpha_J(g) M^\dag_J (g) M_J(g) = A$.
(The other measurement operators play the role
of assuring that $\sum_j M^\dag_j M_j = I$, but do not contribute to the
estimation of the expectation of $A$ in the state $\rho$, $\mbox{Tr } [A\rho]$.)
Since one of DJ's hypotheses (which we did not discuss above) is that
$\lim_{g \rightarrow 0} M_j (g) $ is a multiple of the identity for
all $j$, also the system observable $A$ is a multiple of the identity.
The statement following (*), that ``this is the most general bound one can
make'', seems a very strange form of reasoning. Doubtless, (*) {\em was} the
most general bound that the authors knew how to make,
but it seems unscientific
to base an important argument on an unsupported personal belief
that no one else can do better.
In fact,
a better bound is possible. By the Cauchy-Schwartz inequality,
$$
\sigma^2 = \sum_j \alpha^2_j p_j \leq [\sum_j (\alpha^2_j)^2]^{1/2}
[ \sum_j p^2_j]^{1/2}
\leq
\left [\sum_j \alpha^4_j \right]^{1/2},
\eqno (**)
$$
since $ 0 \leq p_j \leq 1$, so $p^2_j \leq p_j$ and
$\sum_j p^2_j \leq \sum_j p_j = 1$. That (**) is a better bound than (*)
when at least two $\alpha_j$ are nonzero
follows from
$$
\left[ \left[ \sum_j \alpha^4_j \right ]^{1/2} \right]^2 =
\sum_j (\alpha^2_j)^2 < \left[ \sum_j \alpha^2_j \right]^2
= \left[ ||\vec{\alpha}||^2 \right]^2,
$$
because for any collection of at least two positive numbers
$\{q_j\}$, $\sum_j q^2_j < [\sum_j q_j]^2 $.
If the authors were to reformulate their proposal for the appropriate choice
of the contextual values in terms of this better bound,
it seems unlikely that DAJ's proposed Moore-Penrose pseudo-inverse
solution would minimize (**), or possible bounds even better than (**).
And as pointed out earlier, the physical meaning or appropriateness of
minimizing a particular {\em upper bound} for the detector's second
moment remains obscure.
\subsection{What is the ``physically sensible'' choice of contextual values?}
In many experiments (indeed, in all experiments known to me),
the system always starts in a known state $\rho$. For such an experiment,
it seems to me that the ``physically sensible choice'' of contextual values
would be the choice that minimizes the detector variance
$\tau^2 = \tau^2 (\rho)$ {\em in that initial state $\rho$}.
It is a simple exercise to work out a necessary condition for this
minimization, and the pseudo-inverse prescription does not necessarily
satisfy it. For the reader's convenience, we sketch the details.
The contextual values equation \re{eq110} for contextual values
$\vec{\alpha} = (\alpha_1, \ldots, \alpha_N)$ can always be written as
a linear system given by a vector equation
\begin{equation}
\label{eq800}
F(\vec{\alpha}) = \vec{a} \quad,
\end{equation}
where $\vec{a}$ is a vector associated with the system observable $A$ and
$F$ a matrix whose size will depend on the dimension of $\vec{a}$.
\footnote{If
$A$ or some of the measurement operators are not diagonal,
then $\vec{a}$ will not be the vector of eigenvalues
of $A$ as in DJ. For example, if $A$ is a general $2 \times 2$
Hermitian matrix, then $\vec{a} = (a_1, a_2, a_3)$
may be taken to be the three-dimensional vector $(A_{11}, A_{12}, A_{22})$,
and in general, $\vec{a}$ may be formed from the components of $A$ on or
above its main diagonal.}
Given an initial state $\rho$, measurement operators $M_j$, and associated
probabilities $p_j = \mbox{Tr } [ \rho M^\dag_j M_j]$, we want to minimize
the detector variance
\begin{equation}
\label{eq810}
\tau^2 (\rho) := \sum_i p_i \alpha^2_i - \left( \sum_i p_i \alpha_i \right)^2
\quad.
\end{equation}
As noted in the preceding subsection, for a particular state $\rho$ and
taking into account the contextual value equation \re{eq110},
this is the same as minimizing
the second moment
\begin{equation}
\label{eq820}
\sigma^2 (\rho) := \sum_i p_i \alpha^2_i \quad.
\end{equation}
(To avoid confusion, we continue
using DJ's nonstandard notation $\sigma^2$ for the second
moment instead of the variance.)
Let $\vec{\alpha}^P$ denote a particular solution of
$F(\vec{\alpha}) = \vec{a}$.
Then the general solution of
$F(\vec{\alpha}) = \vec{a}$ is $\vec{\alpha} = \vec{\alpha}^P + \vec{\eta}$
with
$\vec{\eta}$ in the nullspace Null($F$) of $F$, and
\begin{equation}
\label{eq830}
\sigma^2 := \sum_i p_i \alpha^2_i = \sum_i p_i (\alpha^P_i)^2
+ 2 \sum_i p_i \alpha^P_i \eta_i + \sum_i p_i \eta^2_i .
\end{equation}
For small $\vec{\eta}$, a nonvanishing linear second term will dominate the
quadratic third term,
\footnote{More precisely, if for some $\eta$ the linear term does not
vanish, then replacing $\eta$ by $x \eta$, with $x$ real, gives a quadratic
function in $x$ with nonvanishing linear term, which cannot have a minimum
at $x = 0$.}
and we see that if $\vec{\alpha}^P$
is to minimize $\sigma^2$,
then the vector
$(p_1 \alpha^P_1, \ldots , p_N \alpha^P_N)$
must be orthogonal
to the nullspace of $F$. This is the necessary condition mentioned earlier.
Thus it seems to me that a ``physically sensible'' choice of contextual
values in this situation should satisfy this necessary condition.
However, the pseudo-inverse solution is abstractly defined by the different
condition
that $\vec{\alpha}^P = (\alpha^P_1 , \ldots , \alpha^P_N)$
be orthogonal to Null($F$).
\footnote{This is discussed but not proved in the Appendix to
\cite{parrott3}. A formal statement and proof can be found in
\cite{campbell}, p.\ 9, Theorem 1.1.1.
}
Even if the state $\rho$ is not known from the start, to estimate
the expectation of $A$ as $\sum_j \alpha_j \mbox{Tr } [ M^\dag_j M_j \rho ] =
\sum_j \alpha_j p_j$, one needs to estimate
the $p_j$ as frequencies of occurrence of outcome $j$, so the $p_j$ can be
regarded as experimentally determined to any desired accuracy. Given these
$p_j$, one can then choose the solution $\vec{\alpha}$ to the contextual
value equation $F(\vec{\alpha}) = \vec{a}$ to minimize \re{eq830} and
the detector variance.
This procedure for minimizing the
detector variance will rarely result in the pseudo-inverse solution.
\subsection{Does DAJ assume that contextual values $\vec{\alpha}$
come from the Moore-Penrose pseudo-inverse,
$\vec{\alpha} = F^{(+)}\vec{a}$?}
We have seen that none of the reasons that DJ gives for determining contextual
values by the pseudo-inverse construction,
\begin{equation}
\langlel{eq615}
\vec{\alpha} = F^{(+)}\vec{a}\quad,
\end{equation}
hold up under scrutiny.
DAJ doesn't give any valid reasons, either.
Its ``general conditioned average'' (6) does not require this hypothesis, nor
the hypothesis that the system observable $A$ and measurement operators
$M_j$ mutually commute. Why assume something that is not needed?
DJ gives the false impression that DAJ unequivocally assumes \re{eq615}
as a hypothesis. For example,
\begin{quote}
``The problem with Parrott's counterexample is that he ignores this
discussion [of defining the contextual values by the pseudo-inverse
prescription $\vec{\alpha} := F^{(+)} \vec{a}$] $\ldots$''.
\end{quote}
The totality of this ``discussion'' is the single sentence:
\begin{quote}
``$\ldots$ we propose that the physically sensible choice of
CV is the least redundant set uniquely related to the eigenvalues through
the Moore-Penrose pseudoinverse.''
\end{quote}
DAJ does devote
a long paragraph to a complicated method of defining and
calculating the Moore-Penrose pseudo-inverse, but that has nothing to do with
the reasons for using the pseudo-inverse in the first place.
A reference to a mathematical text would have sufficed and saved sufficient
space to have clearly stated their hypotheses for (6) and for the
claimed implication that (6) implies (7) in their ``minimal disturbance
limit''. If the authors don't tell us,
how can we poor readers possibly guess that the pseudo-inverse prescription
\re{eq615} is assumed as a hypothesis
for (6) (if in fact it is, which to this day I don't know), or if not,
as a hypothesis for a section which follows (6), such as the ``Weak values''
section?
When I wrote \cite{parrott4} giving the counterexample, I did consider
the possibility that DAJ might possibly be assuming the
pseudo-inverse solution, but rejected it as implausible. This was
partly because they had previously sent me an attempted proof that their (6)
implies (7) in their ``minimal disturbance limit'' which if correct
(it wasn't) would have applied to {\em any} solution $\alpha$, not just
the pseudo-inverse solution. (It also would have applied even if the
measurement operators and system observable did not mutually commute.)
So, I knew to a certainty that when DAJ was submitted, there was no
reason for the authors to have assumed the pseudo-inverse prescription.
Also, in the sweeping claim of DAJ's abstract that their ``general conditioned
average'' (6)
\begin{quote}
``$\ldots$ converges uniquely to the quantum weak value in the minimal
disturbance limit'',
\end{quote}
by no stretch of the imagination could the reader guess that the
technical pseudo-inverse
prescription would be part of the definition of ``minimal disturbance limit''.
And if the prescription is not part of the definition of ``minimal disturbance
limit'', then to justify the claim, the prescription would have to be taken
as part of the definition of their ``general conditioned average'' (6).
But the latter alternative would artificially limit the applicability of (6),
since (6) is correct no matter how the contextual values are chosen (subject
to the contextual value equation \re{eq110}).
\subsection{Section VI of DJ:}
The last four paragraphs of Section VI of DJ (entitled ``Discussion'') are
misleading and in some ways incorrect. The reasons are given
in Section 11.1 of \cite{parrott3} and will not be repeated here.
\\[1ex]
\section{Acknowledgments}
I was surprised to see in DJ the acknowledgment: ``We acknowledge
correspondence with S.\ Parrott''. That made me wonder if protocol required
that I provide a similar acknowledgment. And if so, what should it say?
Would it be proper to acknowledge negative contributions as well as
positive ones, and if so should I? If I didn't, how would I explain
why I didn't simply ask the authors about some of the questionable points
in DAJ?
The (nearly unique) positive contribution of the authors of DAJ to
\cite{parrott3}, \cite{parrott4}, and the present work
was to furnish their original argument
that (6) implies (7) in their ``minimal disturbance limit''.
That argument
brought to my attention the decomposition
of equation \re{eq210}, which was part of their attempted proofs.
That argument was definitely incorrect because I found a counterexample
to one of its steps. I sent the counterexample to the authors in mid-February,
but they never acknowledged it. I made several subsequent inquiries about
other points in DAJ, but all were ignored. I have not heard from them
since February 19. (It is now June 23).
(What little correspondence we did exchange was uniformly
courteous.) That is why I was unable to clarify other vague points
in DAJ such as for which results (if any)
the pseudo-inverse solution was assumed
as a hypothesis.
I intend to
eventually post on my website, www.math.umb.edu/$\sim$sp , a complete account
of the strange aspects of this affair, which has been unique in my
professional experience. It will raise questions about the editorial
practices of influential journals of the American Physical Society, among
other issues.
DJ acknowledges that their work was supported by two grants, at least one
of which was taxpayer-supported via the National Science Foundation. The
present work was not supported by any grants, unless donation of the author's
time might be considered a kind of ``grant''.
If so, it is a ``grant'' to society in general.
I have spent months trying to unravel DAJ, mostly without any help.
I submit this to the arXiv to save others similar time.
It is painful to realize that I have largely wasted my time for a contribution
so small, but it is satisfying to hope that the time saved by others
may result in larger contributions than I could have made.
\\[2ex]
{\bf Added in version 8:} Version 2 of \cite{DJ}, arXiv:1106.1871v2
replies to the present work. It was
published in J.\ Phys.\ A: Math.\ Theor.\ {\bf 45} 015304. The
published version will be called DJpub below.
\\[1ex]
I thank the authors for noting a typo in the definition of the $(3,3)$
entry of the $3 \times 3$
matrix $M_2 (g)$ in the Section 5 counterexample on p.11. The original
entry $1/3$ should have been $\sqrt{1/3}$, and this correction has
been made in this Version 7. The original
analysis assumed the correct value, so apart from this substitution,
no changes were necessary.
\\[1ex]
DJpub reinterprets (unjustifiably, in my view) one of the hypotheses of
\cite{DJ} and notes that the counterexample given above does not satisfy
the reinterpreted hypothesis.
An analysis of DJpub has been posted in arXiv:1202.5604, and
an abbreviated version has been under consideration by J.\ Phys.\ A for
over 10 months (as of this writing, October 14, 2012).
\end{document}
|
\begin{document}
\title{On the flat geometry of the cuspidal edge}
\author{Ra\'{u}l Oset Sinha\footnote{Supported by DGCYT and FEDER grant no. MTM2012-33073.}\, and\, Farid Tari
\footnote{Partially supported by the grants FAPESP 2014/00304-2, CNPq 301589/2012-7, 472796/2013-5.}}
\maketitle
\begin{abstract}
We study the geometry of the cuspidal edge $M$ in $\mathbb R^3$ derived from its contact with planes and lines (referred to as
flat geometry). The contact of $M$ with planes is measured by the singularities of the height functions on $M$.
We classify submersions on a model of $M$ by diffeomorphisms
and recover the contact of $M$ with planes from that classification.
The contact of $M$ with lines is measured by the
singularities of orthogonal projections of $M$. We list the generic singularities of the projections and
obtain the generic deformations of the apparent contour (profile) when the direction of projection varies locally in $S^2$.
We also relate the singularities of the height functions and of the projections
to some geometric invariants of the cuspidal edge.
\end{abstract}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\footnote[0]{2010 Mathematics Subject classification 57R45, 53A05.}
\footnote[0]{Key Words and Phrases. Apparent contours, Bifurcations, Cuspidal edge, Height functions, Orthogonal projections, Singularities.}
\section{Introduction}\label{sec:intro}
Let $\phi:U\subset \mathbb R^2\to \mathbb R^3$ be a parametrisation of a surface $M$, where $U$ is an open set
and $\phi$ is an infinitely differentiable map.
The surface $M$ is called a {\it cuspidal edge} if it admits a parametrisation $\phi$ which is $\mathcal A$-equivalent to
$f(x,y)=(x,y^2,y^3)$, that is, there exist diffeomorphisms $h$ and $k$ such that $\phi=k\circ f\circ h^{-1}$.
Our study is local in nature so we consider germs of parametrisations of a cuspidal edge. Observe that the
cuspidal edge is singular along a curve and its trace on a plane transverse to this curve is a curve with a cusp singularity, see
Figure \ref{fig:Discvuk} (middle figure).
Cuspidal edges occur naturally in differential geometry. For instance, given a regular surface $M$ in $\mathbb R^3$,
one can consider its parallel $M_d$, which is the surface obtained by moving the points on $M$ along a chosen
unit normal vector to $M$ by a fixed distance $d$. The parallel $M_d$ can become singular and is, in general,
a cuspidal edge with its singularities corresponding to
points on the surface where $d=1/\kappa_i$, $i=1,2$, $\kappa_i$'s being the principal curvatures. (The singularities
of $M_d$ can become more degenerate than a cuspidal edge on some special curves on the surface $M$.)
Another example is the focal set (caustic) of a surface in $\mathbb R^3$. If we take a parametrisation where the lines of curvature
are the coordinate curves $x_i=constant$, $i=1,2$, then the focal set is a cuspidal edge at generic points on the curves $\partial \kappa_i/\partial x_i=0$, $i=1,2$, where $\kappa_i$ are the principal curvatures.
Because cuspidal edges occur naturally and in a stable way in some cases, it is of interest to study their
differential geometry. There is already work in this direction,
see for example \cite{bruce-wilkinson, martinsnuno, martinssaji, kentaro, teramoto, wilkinson}.
In this paper, we study the geometry of the cuspidal edge $M$ derived from its contact with planes and lines
(which is referred to as the flat geometry of $M$ as planes and lines are flat objects, i.e., have zero curvature).
Consider parallel planes orthogonal to ${\bf v}\in S^2$, where $S^2$ denotes the unit sphere in $\mathbb R^3$.
These planes are the fibres of the function $h_{{\bf v}}(p)=p\cdot{} {\bf v}$, where $``\cdot{}"$ is the scalar product in $\mathbb R^3$.
The contact of $M$ with the above planes at $p_0=\phi(0,0)$ is measured by the singularities
of $h_{{\bf v}}\circ \phi$ at the origin.
By varying ${\bf v}$, we get the family of
height functions
$H:U\times S^2\to \mathbb R$ on $M$ given by
$$H((x,y),{\bf v})=\phi(x,y)\cdot{}{\bf v}.$$
In the above setting the model (flat) surfaces in $\mathbb R^3$ are planes and the parametrisation $\phi$ is taken in general form.
In this paper, we follow the approach in \cite{brucewest} and invert the situation: we fix the $\mathcal A$-model $X$ of the cuspidal edge
as the image of the map-germ $f(x,y)=(x,y^2,y^3)$ and consider its contact with fibres of submersions.
We classify in \S \ref{sec:submersions} submersion $\mathbb R^3,0\to \mathbb R$ up to changes of coordinates
in the source that preserve the model cuspidal edge $X$. Such changes of coordinates form a geometric subgroup $\mathcal R(X)$ of the Mather group $\mathcal R$
(see \cite{damon}). In \S \ref{sec:Geomsubmersions}, we deduce from that classification the generic geometry of the contact of a cuspidal edge $M$ with planes. We study the duals of these generic cases. Other results on duals of cuspidal edges can be found in \cite{teramoto}.
The contact of $M$ with lines is measured by the $\mathcal A$-singularities of orthogonal projections of $M$ to planes. Here too we fix the model cuspidal edge $X$ and
classify in \S \ref{sec:projections} the singularities of germs of submersions $\mathbb R^3,0\to \mathbb R^2,0$ under the action of the subgroup $_X\mathcal A=\mathcal R(X)\times \mathcal L$
of the Mather group $\mathcal A$.
This approach has an important advantage to considering the $\mathcal A$-singularities of the orthogonal
projections on $M$ (or the $\mathcal R$-singularities of the height functions on $M$). Using a transversality result from \cite{brucewest} adapted to our situation,
we can state that only the singularities of $_X\mathcal A_e$-codimension $\le 2$ can occur for a generic cuspidal edge $M$. Furthermore, we associate a
natural 2-parameter family of submersions $\mathbb R^3,0\to \mathbb R^2,0$ on the model $X$ obtained from the family of orthogonal projections of $M$ to planes.
This family is an $_X\mathcal A_e$-versal unfolding of the generic singularities of the submersions on $X$.
This allows us to obtain in \S \ref{sec:Geometryprojections} the generic deformations of the apparent contour (profile) of $M$
when the direction of projection varies locally in $S^2$.
\section{Preliminaries} \label{sec:prel}
We review in this section some aspects of the geometry of the cuspidal edge (\S\ref{ssec:geomcuspidalede}) and establish
some notation (\S\ref{ssec:notaSing}) for the classification of germs of functions and mappings on the cuspidal edge.
\subsection{Geometric cuspidal edge}\label{ssec:geomcuspidalede}
Let $M$ be a general cuspidal edge in $\mathbb R^3$ which we shall refer to (following the notation in \cite{brucewest}) as
a geometric cuspidal edge. In \cite{martinssaji} a local parametrisation (at the origin) of the cuspidal edge is given by
allowing any changes of coordinates in the source and changes of coordinates in the target given by isometries.
The parametrisation, which we shall adopt in the rest of the paper,
is the following
\begin{equation}\label{eq:prenormalform}
\phi(x,y)=(x,a(x)+\frac{1}{2}y^2,b_1(x)+y^2b_2(x)+y^3b_3(x,y)),
\end{equation}
with $(x,y)$ in a neighbourhood of the origin and
$a(0)=a'(0)=0$, $b_1(0)=b_1'(0)=0$, $b_2(0)=0$, $b_3(0)\ne 0$.
Following the notation in \cite{martinssaji}, we write
$$
\begin{array}{rcl}
a(x)&=&\frac{1}{2}a_{20}x^2+\frac{1}{6}a_{30}x^3+\frac{1}{24}a_{40}x^4+O(5),\\
b_1(x)&=&\frac{1}{2}b_{20}x^2+\frac{1}{6}b_{30}x^3+\frac{1}{24}b_{40}x^4+O(5),\\
b_2(x)&=&\frac{1}{2}b_{12}x+\frac{1}{6}b_{22}x^2+O(3),\\
b_3(x,y)&=&\frac{1}{6}b_{03}+ \frac{1}{6}b_{13}x+O(2).
\end{array}
$$
The tangential direction of $M$ at the origin is along $(1,0,0)$ and
its tangent cone is the plane $w=0$, where $(u,v,w)$ are the coordinates of $\mathbb R^3$.
The singular set $\Sigma$ of $M$ is the image of the line $y=0$ and
is parametrised by
$
\alpha(x)=\phi(x,0)=(x,a(x),b_1(x)).
$
If we denote by $\kappa_{\Sigma}$ and $\tau_{\Sigma}$ the curvature
and the torsion of $\Sigma$ as a space curve, then
$$\begin{array}{l}
\kappa_{\Sigma}(0)=\sqrt{a_{20}^2+b_{20}^2}, \\
\tau_{\Sigma}(0)=\frac{a_{20}b_{30}-b_{20}a_{30}}{a_{20}^2+b_{20}^2}, \\
\tau_{\Sigma}'(0)=\frac{1}{a_{20}^2+b_{20}^2}(
a_{20}b_{40}-b_{20}a_{40}-\frac{2}{a_{20}^2+b_{20}^2}(a_{20}b_{30}-b_{20}a_{30})(a_{20}a_{30}+b_{20}b_{30})
).
\end{array}
$$
The osculating plane of $\Sigma $ at the origin is orthogonal to the vector $(0,-b_{20},a_{20})$.
It coincides with the tangent cone to $M$ at the origin if and only if $b_{20}=0$.
It is worth observing that this happens if and only if the closure of the parabolic curve of the regular part of $M$ intersects
the singular set $\Sigma$.
Recall that a smooth curve has contact of type $A_{\ge 1}$ at a point $p$ with any of its tangent planes at $p$, of type $A_{\ge 2}$ if the
plane is the osculating plane at $p$ and of type $A_{ 3}$ if furthermore the torsion of the curve vanishes at $p$ but the derivative of the torsion is not zero at that point
(see for example \cite{brucegiblinBook}). In \cite{kentaro} other invariants of the cuspidal edge are defined. These are:
The singular curvature $\kappa_s$ ($\kappa_s(0)=a_{20}$);
The limiting normal curvature $\kappa_n$ ($\kappa_n(0)=b_{20}$);
The cuspidal curvature $\kappa_c$ ($\kappa_c(0)=b_{03}$);
The cusp-directional torsion $\kappa_t$ ($\kappa_t(0)=b_{12}$);
The edge inflectional curvature $\kappa_i$ ($\kappa_i(0)=b_{30}$).
The contact of $M$ with lines and planes is affine
invariant (\cite{bgt95}), so we can allow affine changes of coordinates in the target without changing the type of contact.
Given a parametrisation $\psi:\mathbb R^2,0\to \mathbb R^3,0$, we can make a rotation in the target
and changes of coordinates in the source and write
$j^2\psi(x,y)=(x,Q_1(x,y),Q_2(x,y))$, with $Q_1,Q_2$ homogeneous polynomials of degree 2.
We can consider the
${\cal G}=GL(2,{\mathbb R})\times GL(2,{\mathbb R})$-action on the set of pairs of quadratic forms
$(Q_1,Q_2)$.
Following \cite{martinssaji}, we can set $(Q_1,Q_2)=(\frac{a_{20}}{2}x^2+\frac{1}{2}y^2,\frac{b_{20}}{2}x^2)$
by isometric changes of coordinates in the target and any smooth changes of coordinates in the source, and this is ${\cal G}$-equivalent to
\begin{center}
\begin{tabular}{cl}
$(y^2,x^2)$ & if and only if $b_{20}\ne 0$ (hyperbolic)\cr
$(\pm x^2+y^2,0)$& if and only if $b_{20}=0$, $a_{20}\ne 0$ (inflection)\cr
$(y^2,0)$& if and only if $b_{20}=a_{20}= 0$ (degenerate inflection)
\end{tabular}
\end{center}
The above $\cal G$-classes are the only ones that can occur for the pair
$(Q_1,Q_2)$ associated to a parametrisation of a cuspidal edge.
In particular, most points on $\Sigma$ are hyperbolic points for the pair $(Q_1,Q_2)$ and
we have an inflection point if and only if the osculating plane of $\Sigma $ coincides with the tangent cone to $M$.
Following the above discussion, we can take $(Q_1,Q_2)$ associated to $\phi$ in \eqref{eq:prenormalform}
in one of the $\cal G$ normal forms above. However we shall work with the parametrisation \eqref{eq:prenormalform}
to make the interpretation of the conditions we get match some of the invariants in \cite{martinssaji}.
\subsection{Classification tools}\label{ssec:notaSing}
Let ${\cal E}_n$ be the local ring of germs of functions
$\mathbb{R}^n,0 \to \mathbb{R}$ and ${\mathcal M}_n$ its maximal
ideal.
Denote by ${\cal E}(n,p)$ the $p$-tuples of elements in ${\cal
E}_n$. Let ${\cal A}={\cal R}\times{\cal
L}$
denote the group
of pairs of germs of diffeomorphisms of the source and target, which acts smoothly on ${\mathcal
M}_n.{\cal E}(n,p)$ by $(k_1,k_2).G=k_2\circ G\circ k_1^{-1}$.
The tangent space to the $\mathcal{A}$-orbit of $F$ at the germ $F$
is given by
$$
L{\cal A}{\cdot}{F}={\mathcal M}_n.\{F_{x_1},\ldots,F_{x_n}\}+F^*({\mathcal M}_p).\{e_1,\ldots,e_p\},
$$
where $F_{x_i}$ are the partial derivatives of $F$ with respect to $x_i$
($i=1,\ldots,n$), $e_1,\ldots,e_p$ denote the standard basis vectors
of ${\mathbb R}^p$ considered as elements of ${\cal E}(n,p)$, and
$F^*({\mathcal M}_p)$ is the pull-back of the maximal ideal in
${\cal E}_p$. The extended tangent space to the $\mathcal{A}$-orbit
of $F$ at the germ $F$ is given by
$$
L_e{\cal A}{\cdot}{F}={\cal E}_n.\{F_{x_1},\ldots,F_{x_n}\}+F^*({\cal E}_p).\{e_1,\ldots,e_p\},
$$
and the codimension of the extended orbit is $
d_e(F,{\mathcal{A}})=\dim_{\mathbb{R}}({\cal
E}(n,p)/L_e{\mathcal{A}}(F))\,. $
Let $k \geq 1$ be an integer. We denote by $J^k(n,p)$ the space of
$k$th order Taylor expansions without constant terms of elements of
${\cal E}(n,p)$
and write $j^kF$ for the $k$-jet of $F$. A germ $F$ is said to be {\it
$k-\mathcal{A}$-determined} if any $G$ with $j^kG=j^kF$ is
$\mathcal{A}$-equivalent to $F$ (notation: $G\sim_{\mathcal A} F$). The $k$-jet
of $F$ is then called a sufficient jet.
Our goal in \S \ref{sec:submersions} and \S \ref{sec:projections}
is to classify germs of functions and mappings on $X \subset \mathbb{R}^3$, where $X$ is the germ of
the smooth model of a cuspidal edge.
This means that we require that the
diffeomorphisms in $\mathbb R^3$ preserve $X$. We
follow the method in \cite{brucewest} and recall some results from
there. Let $X,0\subset \mathbb R^n,0$ be the germ of a reduced
analytic sub-variety of $\mathbb R^n$ at $0$ defined by a polynomial
$h$ in $\mathbb R[x_1,\ldots,x_n]$. Following \mbox{Definition 3.1} in
\cite{brucewest}, a diffeomorphism $k:\mathbb R^n,0\rightarrow
\mathbb R^n,0$ is said to preserve $X$ if $k(X),0=X,0$ (i.e.,
$k(X)$ and $X$ are equal as germs at $0$). The group of such
diffeomorphisms is a subgroup of the group $\mathcal{R}$ and is
denoted by $\mathcal{R}(X)$ ($_{ X}\mathcal R$ in some texts).
We denote by $_X\mathcal A=\mathcal R(X)\times\mathcal L$ the subgroup $\mathcal A$ where the diffeomorphisms in the source
preserve $X$.
\section{Functions on a cuspidal edge}\label{sec:submersions}
Given the $\mathcal A$-normal form
$f(x,y)=(x,y^2,y^3)$ of a cuspidal edge, we classify germs of
submersions $g:\mathbb R^3,0\to \mathbb R,0$ up to $\mathcal
R(X)$-equivalence, with $X=f(\mathbb R^2,0)$.
The defining equation of $X$ is given by $h(u,v,w)=v^3-w^2$.
Let $\Theta(X)$ be the $\mathcal{E}_3$-module of vector fields in
$\mathbb R^3$ tangent to $X$.
We have $\xi\in\Theta(X)$ if and only if $\xi h=\lambda h$
for some function $\lambda$ (\cite{brucewest}).
\begin{prop}\label{prop:genO(X)}
The $\mathcal{E}_3$-module $\Theta(X) $ of vector fields in
$\mathbb R^3$ tangent to $X$ is generated by the vector fields
$
\xi_1=\frac{\partial}{\partial u},$
$\xi_2=2v\frac{\partial}{\partial v}+3w\frac{\partial}{\partial w},$
$\xi_3=2w\frac{\partial}{\partial v}+3v^2\frac{\partial}{\partial w}.
$
\end{prop}
\begin{proof}
The defining equation $h(u,v,w)=v^3-w^2$ of $X$ is weighted
homogenous in $v$ and $w$ with weights 2 and 3 respectively. The result follows by applying
\mbox{Proposition 7.2} in \cite{bruceroberts} for isolated singularities to the cusp $v^3-w^2=0$ in the $(v,w)$-plane and adding the trivial vector field $\xi_1$ in $\mathbb R^3$.
\end{proof}
Let $\Theta_1(X)=\{\delta\in\Theta(X):j^1\delta=0\}$. It follows from Proposition \ref{prop:genO(X)} that
$$
\Theta_1(X)=\mathcal M_3^2.\{ \xi_1\}+\mathcal M_3.\{ \xi_2, \xi_3\}.
$$
For $f\in \mathcal E_3$, we define $\Theta(X){\cdot} f=\{\eta(f)\,|\,
\eta\in \Theta(X)\}$. We define similarly $\Theta_1(X){\cdot} f$ and
the following tangent spaces to the $\mathcal R(X)$-orbit of $f$ at
the germ $f$:
$$
L\mathcal R_1(X){\cdot}f=\Theta_1(X){\cdot} f,\quad
L\mathcal R(X){\cdot}f=L_e{\cal R}(X){\cdot}f=\Theta(X){\cdot} f.
$$
The ${\mathcal{R}(X)}$-codimension of $f$ is given by
$d(f,{\mathcal{R}(X)})=\dim_{\mathbb{R}}({\cal M}_3/L{\mathcal{R}(X)}(f))\,.$
The classification (i.e., the listing of representatives of the
orbits) of $\mathcal R(X)$-finitely determined germs is carried out
inductively on the jet level. The method used here is that of the
complete transversal \cite{bkd} adapted for the $\mathcal
R(X)$-action.
We have the following result which is a version of
Theorem 3.11 in \cite{brucewest} for the group $\mathcal R(X)$.
\begin{prop}\label{prop:completeTrans}
Let $f:\mathbb R^3,0\to \mathbb R,0$ be a smooth germ and
$h_1,\ldots, h_r$ be homogeneous polynomials of degree $k+1$ with
the property that
$$
\mathcal{M}_3^{k+1} \subset L\mathcal R_1(X){\cdot}f + sp\{
h_1,\ldots, h_r\} +\mathcal{M}_3^{k+2}.
$$
Then any germ $g$ with $j^kg(0)=j^kf(0)$ is $\mathcal
R_1(X)$-equivalent to a germ of the form $f(x)+\sum_{i=1}^l
u_ih_i(x)+\phi(x) $, where $\phi(x)\in \mathcal M_n^{k+2}$. The
vector subspace $sp\{ h_1,\ldots, h_r\}$ is called a complete
$(k+1)$-$\mathcal R(X)$-transversal of $f$.
\end{prop}
\begin{cor}\label{cor:detrminacy} If $ \mathcal{M}_3^{k+1}\subset L\mathcal R_1(X){\cdot}f
+\mathcal{M}_3^{k+2}$ then $f$ is $k-\mathcal{R}(X)$-determined.
\end{cor}
We also need the following result about trivial families.
\begin{prop}{\rm (\cite{brucewest})} \label{prop:trivialfam}
Let $F:\mathbb R^3\times \mathbb R,(0,0)\to \mathbb R,0$ be a smooth
family of functions with $F(0,t)=0$ for $t$ small. Let
$\xi_1,\ldots,\xi_p$ be vector fields in $\Theta(X)$ vanishing at
$0\in \mathbb R^n$. Then the family $F$ is $k-\mathcal R(X)$-trivial if
$\frac{\partial F}{\partial t}\in \left\langle \xi_1(F),\ldots,\xi_p(F) \right\rangle +\mathcal M^{k+1}_n.
$
\end{prop}
Two families of germs of functions $F$ and $G:(\mathbb R^3 \times \mathbb R^l,(0,0))\to (\mathbb{R},0)$ are
\mbox{$P$-$\mathcal{R}^+(X)$}-equivalent if
there exist a germ of a diffeomorphism
$\Phi:(\mathbb R^3 \times \mathbb R^l,(0,0)) \to (\mathbb R^3 \times \mathbb R^l,(0,0))$
preserving $(X\times \mathbb R^l,(0,0))$ and of the form
$\Phi(x,u)=(\alpha(x,u),\psi(u))$ and a germ of a function $c:(\mathbb R^l,0) \to\mathbb R$
such that
$
G(x,u) = F(\Phi(x,u)) + c(u).
$
A family $F$ is said to be an $\mathcal{R}^+(X)$-versal deformation of $F_0(x)=F(x,0)$ if any other deformation $G$ of $F_0$
can be written in the form $G(x,u) = F(\Phi(x,u)) + c(u)$ for some germs of
smooth mappings $\Phi$ and $c$ as above with $\Phi$
not necessarily a germ of diffeomorphism.
Given a family of germs of functions $F$, we write
$\dot{F}_i(x)=\frac{\partial F}{\partial u_i}(x,0).$
\begin{prop}\label{theo:InfcondUnfFunc}
A deformation $F:(\mathbb R^3 \times \mathbb R^l,(0,0))\to (\mathbb{R},0)$ of a germ of a function $f$
on $X$ is
$\mathcal{R}^+(X)$-versal if and only if
$$
L\mathcal R_e(X)\cdot{}f + \mathbb R.\left\{1, \dot{F}_1,\ldots,\dot{F}_l\right\}= \mathcal{E}_n.
$$
\end{prop}
We can now state the result about the $\mathcal R(X)$-classification of germs of submersions.
\begin{theo} \label{theo:Classification}
Let $X$ be the germ of the $\mathcal A$-model of the cuspidal edge parametrised by
$f(x,y)=(x,y^2,y^3)$. Denote by $(u,v,w)$ the coordinates in the target.
Then any $\mathcal R(X)$-finitely determined germ of a submersion in $\mathcal M_3$
with $\mathcal R(X)$-codimension $\le 2$
(of the stratum in the presence of moduli) is
$\mathcal R(X)$-equivalent to one of the germs in {\rm Table \ref{tab:germsubm}}.
\begin{table}[ht]
\caption{Germs of submersions in $\mathcal M_3$ of $\mathcal R(X)$-codimension $\le 2$.}
\begin{center}
{\begin{tabular}{lcl}
\hline
Normal form & $d(f,\mathcal{R}(X))$ &$\mathcal{R}^+(X)$-versal deformation\\
\hline
$u$ & $0$ &$u$\\
$\pm v\pm u^2$ & $0$&$\pm v\pm u^2$\\
$\pm v+ u^3$& $1$&$\pm v+u^3+a_1u$\\
$\pm v\pm u^4$& $2$&$\pm v\pm u^4+a_{2}u^{2}+a_1u$\\
$w+u^2$ &$1$&$w+u^2+a_1v$\\
$w+ uv+au^3$, $a\ne 0,-\frac{4}{27}$ &$2^{(*)}$&
$w+uv+au^3+a_2u^2+a_1u$\\
\hline
\end{tabular}
}
\\
{\footnotesize
$(*)$: $a$ is a modulus and the codimension is that of the stratum.}
\end{center}
\label{tab:germsubm}
\end{table}
\end{theo}
\begin{proof} To simplify notation, we write complete $k$-transversal for
complete $k-\mathcal R(X)$-transversal, equivalence for $\mathcal
R(X)$-equivalence and codimension for $\mathcal R(X)$-codimension. In all the proof, $\xi_1, \xi_2,\xi_3$ are as in Proposition \ref{prop:genO(X)}.
The linear changes of coordinates in $\mathcal R(X)$ obtained by
integrating the 1-jets of vector fields in $\Theta(X)$ are
$$
\begin{array}{rcl}
\eta_1(u,v,w)&=&(\alpha u+\beta v+\gamma w,v,w), \alpha \ne 0\\
\eta_2(u,v,w)&=&(u,e^{2\alpha}v,e^{3\alpha}w), \alpha\in \mathbb R\\
\eta_3(u,v,w)&=&(u,v+\alpha w,w)
\end{array}
$$
Consider a non-zero 1-jet $g=au+bv+cw$. If $a\ne 0$, then $g$ is equivalent to $u$ and
its codimension is $0$.
Suppose that $a=0$. If $b\neq 0$, we use $\eta_3$ to set $c=0$ and $\eta_2$ to set $b=\pm 1$. If $a=b=0$ but $c\ne 0$ we can set
$c=\pm 1$. Observe that $(u,v,w)\mapsto (u,v,-w)$ preserves $X$, so we can set $c=1$. Thus, the orbits of submersions in
the 1-jet space are
$
u,\pm v,w.
$
$\bullet$ Consider the 1-jet $g=v$ (the results follow similarly for $g=-v$). Then $\xi_1(g)=0$, $\xi_2(g)=2v$
and $\xi_3(g)=2w$, so for any integer $k\ge 2$, $\mathcal M_{3}^k\subset L\mathcal R_1(X)\cdot{}g + sp\{u^k\} + \mathcal M_{3}^{k+1}$, that is, a
complete $k$-transversal is given by $g=v+\lambda u^k$. Using
$\eta_2$ we can set $\lambda=\pm 1$ if $\lambda\ne 0$ (if $k$ is odd we can set $\lambda =1$). For the germ $g=v\pm u^k$,
we have
$
\xi_1(g)=\pm ku^{k-1},
\xi_2(g)=2v,
\xi_3(g)=2w.
$ Now $\mathcal M_{3}^{k+1}\subset L\mathcal R_1(X)\cdot{}g + \mathcal
M_{3}^{k+2}$, so $v\pm u^k$ is $k$-determined. Its codimension is
$k-2$, and clearly, $ v\pm u^k+a_{k-2}u^{k-2}+\ldots+a_1u$ is an $\mathcal R^+(X)$-versal
deformation.
$\bullet$ We consider now the 1-jet $g=w$. We have $\xi_1(g)=0$,
$\xi_2(g)=3w$ and $\xi_3(g)=3v^2$.
A complete $2$-transversal is given by $g=w+\lambda_1u^2+\lambda_2uv+\lambda_3v^2$.
We can consider $g$ as a 1-parameter family of germs of functions
parametrised by $\lambda_3$. Then
$\partial g/\partial {\lambda_3}=v^2\in \langle \mathcal M_3
\xi_1(g),\xi_2(g),\xi_3(g) \rangle + \mathcal M_3^{3}$, so by
Proposition \ref{prop:trivialfam}, $g$ is equivalent to
$w+\lambda_1u^2+\lambda_2uv$.
We proceed similarly to show that $g$ is
trivial along $\lambda_2$ if $\lambda_1\ne 0$.
Thus, if $\lambda_1\ne 0$ we can take
$g=w+\lambda_1 u^2$. We can now set $\lambda_1=\pm
1$ by rescaling. For the germ $g=w\pm u^2$, we have
$
\xi_1(g)=\pm 2u,
\xi_2(g)=3w,
\xi_3(g)=3v^2,
$
so $\mathcal M_{3}^{3}\subset L\mathcal R_1(X)\cdot{}g + \mathcal
M_{3}^{4}$, that is, $g$ is $2$-determined.
An $\mathcal R^+(X)$-versal deformation is given by $g=w\pm u^2+a_1v$ and the germ has codimension 1.
If $\lambda_1=0$ but $\lambda_2\ne 0$ we can set $\lambda_2=1$ and consider $g=w+uv$. Then
$\xi_1(g)=v$, $\xi_2(g)=2uv+3w$ and $\xi_3(g)=2wu+3v^2$, so $\mathcal M_{3}^3\subset L\mathcal
R_1(X)\cdot{}g + sp\{u^3\} + \mathcal M_{3}^{4}$, that is, a
complete $3$-transversal is given by $g=w+uv+a u^3$. Here $a$ is a parameter modulus.
For $g=w+uv+au^3$ we have
$\xi_1(g)=v+3au^{2},$ $\xi_2(g)=2uv+3w,$
$\xi_3(g)=2uw+3v^2.$
Using the vectors
$
u^2\xi_2(g)=2u^3v+3u^2w,
u\xi_3(g)=2u^2w+3uv^2,
uv\xi_1(g)=uv^2+3avu^{3}
$
which are in $\mathcal R_1(X)\cdot{}g$, we get
$u^2w$, $uv^2$ and $u^3v$ if $a\ne -\frac{4}{27}$. Then, using
$u^3\xi_1(g)$ we get $u^5$ if $a\ne 0$. Now
using $\xi_1$ and $\xi_2$ we get all monomials divisible by $v$ and $w$ of degree $5$ in $L\mathcal R_1(X)\cdot{}g + \mathcal M_{3}^{6}$. Therefore $g$ is $5$-determined
if $a\ne 0, -\frac{4}{27}$.
A complete $4$-transversal of $g$ is
$g+\lambda u^{4}$. Using Mather's Lemma (the vectors
$\xi_3(g)$, $v\xi_1(g)$, $u\xi_2(g)$ give $uw,$ $v^2,$ $u^2v$ if
$a\ne -\frac{4}{27}$, then $u^2\xi_1(g)$ gives $u^4$ if $a\ne 0$)
we show that $g+\lambda u^{4}$ is $\mathcal R(X)$-equivalent to $g$. Therefore $g$ is $4$-determined.
An $\mathcal R^+(X)$-versal deformation is given by $g=w+uv+a u^3+a_{2}u^{2}
+a_1u$ and has codimension $3$ (the codimension of the stratum is $2$).
\end{proof}
\subsection{The geometry of functions on a cuspidal edge}\label{sec:Geomsubmersions}
We consider the $\mathcal A$-model cuspidal edge $X$ parametrised by $f(x,y)=(x,y^2,y^3)$ and with equation $v^3-w^2=0$.
The tangential line at a singular point is parallel to $(1,0,0)$ and the tangent cone to $X$ at a singular point is the plane $w=0$.
Given a deformation $F:\mathbb R^3\times \mathbb R^2,0\to \mathbb R$ of a germ $g$ on $X$, we consider the family
$G(x,y,a)=F(f(x,y),a)$ and the following sets:
$$
\mathscr D_1(F)=\{(a,G(x,y,a))\in \mathbb R^2\times \mathbb R:
\frac{\partial G}{\partial x}=\frac{\partial G}{\partial y}=0 \mbox{ at } (x,y,a)
\}
$$
and
$$
\mathscr D_2(F)=\{(a,G(x,0,a))\in \mathbb R^2\times \mathbb R: \frac{\partial G}{\partial x}=0 \mbox{ at } (x,0,a)
\}.
$$
It is not difficult to show that for two $P$-$\mathcal R^+(X)$-equivalent deformations $F_1$ and $F_2$
the sets $\mathscr D_1(F_1)$ and $\mathscr D_1(F_2)$ are diffeomorphic and so are $\mathscr D_2(F_1)$ and $\mathscr D_2(F_2)$.
Therefore, it is enough to compute the sets $\mathscr D_1(F)$ and $\mathscr D_2(F)$
for the deformations in Table \ref{tab:germsubm}.
$\bullet$ {\it The germ $g=u$.}\\
The fibre $g=0$ is a plane transverse to both the tangential line and to the tangent cone to $X$.
Here an $\mathcal R^+(X)$-versal deformation is $F(u,v,w,a_1,a_2)=u$ and
both $\mathscr D_1(F)$ and $\mathscr D_2(F)$ are the empty set.
$\bullet$ {\it The germs $g=\pm v\pm u^k$, $k=2,3,4$.}
The fibre $g=0$ is tangent to the tangential line of $X$ at the origin but is transverse
to the tangent cone to $X$.
The contact of the tangential line with the fibre $g=0$ is measured by the singularities
of $g(x,0,0)=\pm x^k$, so it is of type $A_{k-1}$.
(i) $k=2$. Here an $\mathcal R^+(X)$-versal deformation is $F(u,v,w,a_1,a_2)=\pm v\pm u^2$.
Then $G(x,y,a_1,a_2)=\pm y^2\pm x^2$, and both $\mathscr D_1(F)$ and $\mathscr D_2(F)$ are planes (Figure \ref{fig:Discvuk}, left).
(ii) $k=3$. We have $F(u,v,w,a_1,a_2)=\pm v+u^3+a_1u$ and $G(x,y,a_1,a_2)=\pm y^2+ x^3+a_1x$. Thus
$\frac{\partial G}{\partial x}=\frac{\partial G}{\partial y}=0$ when $y=0$ and $a_1=-3x^2$. The set $\mathscr D_1(F)$
is a surface parametrised by $(x,a_2)\mapsto (-3x^2,a_2,-2x^3)$, i.e., is a cuspidal edge.
The set $\mathscr D_2(F)$ is also a cuspidal edge and coincides with $\mathscr D_1(F)$ (Figure \ref{fig:Discvuk}, middle).
(iii) $k=4$. Here $F(u,v,w,a_1,a_2)=\pm v\pm u^4+a_2u^2+a_1u$ and $G(x,y,a_1,a_2)=\pm y^2\pm x^4+a_2x^2+a_1x$, so
$\frac{\partial G}{\partial x}=\frac{\partial G}{\partial y}=0$ when $y=0$ and $a_1=\mp 4x^3-2a_2x$.
The set $\mathscr D_1(F)$
is a surface parametrised by $(x,a_2)\mapsto (\mp 4x^3-2a_2x,a_2,\mp 3x^4-a_2x^2)$, which is
a swallowtail surface.
The set $\mathscr D_2(F)$ is also a swallowtail surface and coincides with $\mathscr D_1(F)$ (Figure \ref{fig:Discvuk}, right).
\begin{figure}
\begin{center}
\includegraphics[width=10cm, height=2.5cm]{Fig1.pdf}
\caption{Discriminant $\mathscr D_1(F)$ of versal deformations of $\pm v\pm u^k$, from left to right: $k=2,3,4$.
Here $\mathscr D_2(F)$ coincides with $\mathscr D_1(F)$.}
\label{fig:Discvuk}
\end{center}
\end{figure}
$\bullet$ {\it The germ $g=w\pm u^2$.}
The tangent plane to the fibre $g=0$ coincides with the tangent cone to $X$ at the origin (and contains the tangential direction to $X$ at that point). The contact of the fibre $g=0$ with the tangential line is an ordinary one (of type $A_1$) as
$g(x,0,0)=\pm x^2$.
We have $F(u,v,w,a_1,a_2)=w+u^2+a_1v$ and $G(x,y,a_1,a_2)= x^2+y^3+a_1y^2$, so
$\frac{\partial G}{\partial x}=\frac{\partial G}{\partial y}=0$ when $x=0$ and $y(3y+a_1)=0$.
When $y=0$ we get the plane $(a_1,a_2,0)$ and for $3y+a_1=0$ we get a surface
parametrised by $(y,a_2)\mapsto (-3y,a_2,-2y^3)$. The set $\mathscr D_1(F)$ is the union of these two surfaces which have an $A_2$-contact along the $a_2$-axis, see Figure \ref{fig:Discwu2} (left).
The set $\mathscr D_2(F)$ is the plane $(a_1,a_2,0)$.
\begin{figure}
\begin{center}
\includegraphics[width=5cm, height=3.5cm]{Fig2a.pdf}
\includegraphics[width=5cm, height=3.5cm]{Fig2b.pdf}
\caption{Left: discriminant $\mathscr D_1(F)$ of a versal deformation of $w\pm u^2$,
which is the union of two smooth surfaces having an $A_2$-contact along a curve.
Right: Discriminant $\mathscr D_1(F)$ of a versal deformation of $w+uv+au^3$, which is the union of two cuspidal edges. In both figures
the discriminant $\mathscr D_2(F)$, which is a subset of $\mathscr D_1(F)$, is the surface in gray.}
\label{fig:Discwu2}
\end{center}
\end{figure}
$\bullet$ {\it The germ $g=w+uv+au^3$, $a\ne 0$.}
Here too, as in the previous case, the tangent plane to the fibre $g=0$ coincides with the tangent cone to $X$ at the origin.
However, the contact of the fibre $g=0$ with the tangential line is of order $3$
as $g(x,0,0)=ax^3$.
We have $F(u,v,w,a_1,a_2)=w+uv+au^3+a_2u^2+a_1u$ and $G(x,y,a_1,a_2)= xy^2+y^3+ax^3+a_2x^2+a_1x$.
Differentiating we get
$$\begin{array}{l}
\frac{\partial G}{\partial x}=y^2+3ax^2+2a_2x+a_1\\
\frac{\partial G}{\partial y}=2xy+3y^2.
\end{array}
$$
We have $\frac{\partial G}{\partial y}=0$ when $y=0$ or $y=-(2/3)x.$ Substituting in
$\frac{\partial G}{\partial x}=0$ gives $\mathscr D_1(F)$
as the union of two surfaces parametrised by
$$(a_2,x)\mapsto (-3ax^2-2a_2x,a_2,-2ax^3-a_2x^2)$$
and
$$(a_2,x)\mapsto (-(\frac{4}{9}+3a)x^2-2a_2x,a_2,-(\frac{8}{27}+2a)x^3-a_2x^2).$$
Both these surfaces are cuspidal edges
and are as in Figure \ref{fig:Discwu2} (right).
The set $\mathscr D_2(F)$ coincides with the first cuspidal edge.
\subsection{Contact of a geometric cuspidal edge with planes}\label{sec:Contactplanes}
The family of height functions $H:M\times S^2\to \mathbb R$ on $M$
is given by $H((x,y),{\bf v})=H_{{\bf v}}(x,y)=\phi(x,y)\cdot {\bf v}.$
The height function $H_{{\bf v}}$ on $M$ along a fixed direction ${{\bf v}}$ measures the contact of $M$ at $p$
with the plane $\pi_{{\bf v}}$ through $p$ and orthogonal to ${{\bf v}}$.
The contact of $M$ with $\pi_{{\bf v}}$ is described by that of the fibre
$g=0$ with the model cuspidal edge $X$, with $g$ as in Theorem \ref{theo:Classification}. Following the transversality theorem in the Appendix of \cite{brucewest}, for a generic cuspidal-edge, the height functions $H_{{\bf v}}$, for any ${{\bf v}}\in S^2$,
can only have singularities of $\mathcal R(X)$-codimension $\le 2$ (of the stratum) at any point on the cuspidal edge.
We shall take $M$ parametrised as in \eqref{eq:prenormalform}
and write ${\bf v}=(v_1,v_2,v_3)$.
Then,
$$
H_{{\bf v}}(x,y)=H((x,y),{\bf v})=xv_1+(a(x)+\frac{1}{2}y^2)v_2+(b_1(x)+y^2b_2(x)+y^3b_3(x,y))v_3.
$$
The function $H_{{\bf v}}$ is singular at the origin
if and only if $v_1=0$, that is, if and only if the plane $\pi_{{\bf v}}$ contains the tangential direction to $M$ at the origin.
When $\pi_{{\bf v}}$ is transverse to the tangential direction to $M$ at the origin,
the contact of $M$ with $\pi_{{\bf v}}$ at the origin is the same as that of the zero fibre of $g=u$ with
the model cuspidal edge $X$.
Suppose that the plane $\pi_{{\bf v}}$ is a member of the pencil of planes
that contains the tangential direction to $M$ at the origin (in particular, $\pi_{{\bf v}}$ is a tangent plane of the curve $\Sigma$). If
$\pi_{{\bf v}}$ is not the tangent cone to $M$ at the origin, then the contact
of a generic $M$ with $\pi_{{\bf v}}$ is the same as that of the zero fibre of $g=\pm v\pm u^k$, $k=2,3,4$ with the model cuspidal edge $X$.
The integer $k$ is determined by the contact of $\pi_{{\bf v}}$ with the singular set $\Sigma$ (see \S \ref{sec:Geomsubmersions}).
The restriction of $H_{{\bf v}}$ to $\Sigma$ is given by
$$H_{{\bf v}}(x,0)=
\frac{1}{2}(a_{20}v_2+b_{20}v_3)x^2+\frac{1}{6}(a_{30}v_2+b_{30}v_3)x^3+
\frac{1}{24}(a_{40}v_2+b_{40}v_3)x^4+O(5).
$$
Therefore, the plane $\pi_{{\bf v}}$ has an $A_k$, $k=1,2,3,$ contact with $\Sigma$ if and only if
$$
\begin{array}{rl}
A_1:&v_2a_{20}+v_3b_{20}\ne 0;\\
A_2:&v_2a_{20}+v_3b_{20}=0, \, a_{20}b_{30}-a_{30}b_{20}\ne 0;\\
A_3:&v_2a_{20}+v_3b_{20}=0, \,a_{20}b_{30}-a_{30}b_{20}= 0, \, a_{40}b_{20}-a_{20}b_{40}\ne 0.
\end{array}
$$
Geometrically, this means that the plane $\pi_{{\bf v}}$ has an $A_k$, $k=1,2,3,$ contact with $\Sigma$ if and only if
$$
\begin{array}{rl}
A_1:&\pi_{{\bf v}} \mbox{ \rm is not the osculating plane of }\Sigma;\\
A_2:&\pi_{{\bf v}} \mbox{ \rm is the osculating plane of }\Sigma,\, \tau_{\Sigma}(0)\ne 0;\\
A_3:&\pi_{{\bf v}} \mbox{ \rm is the osculating plane of }\Sigma,\, \tau_{\Sigma}(0)=0,\, \tau_{\Sigma}'(0)\ne 0.
\end{array}
$$
If the plane $\pi_{{\bf v}}$ coincides with the tangent cone to $M$
at the origin (i.e., ${\bf v}=(0,0,1)$)
but is not the osculating plane of $\Sigma$ (i.e., $\kappa_n(0)=b_{20}\ne 0$),
then the contact
of $M$ with $\pi_{{\bf v}}$ is the same as that of the zero fibre of $g=w\pm u^2$ with the model cuspidal edge $X$, that is, the height function has an $A_3$-singularity.
When $\pi_{{\bf v}}$ is the tangent cone to $M$ and coincides with the osculating
plane of $\Sigma$ ($\kappa_n(0)=b_{20}=0$) but
$\tau_{\Sigma}(0)\ne 0$ (i.e., $\kappa_i(0)=b_{30}\ne 0$) its contact with $M$ is described by the germ
$g=w+uv+au^3$ with the model cuspidal edge $X$. Here, the corresponding height function has a $D_4$-singularity.
(Compare with Theorem 2.11 in \cite{martinsnuno}
and Lemma 4.2 in \cite{teramoto}.) We observe that the case when the tangent cone to $M$
and the osculating plane of $\Sigma$ coincide at a point where $\tau_{\Sigma}(0)=0$ is not generic.
We have the sets
$$
\mathscr D_1(H)=\{({{\bf v}},H_{{\bf v}}(x,y))\in S^2 \times \mathbb R:
\frac{\partial H_{{\bf v}}}{\partial x}=\frac{\partial H_{{\bf v}}}{\partial y}=0 \mbox{ at } (x,y,{{\bf v}})
\}
$$
and
$$
\mathscr D_2(H)=\{({{\bf v}},H_{{\bf v}}(x,0))\in S^2\times \mathbb R: \frac{\partial H_{{\bf v}}}{\partial x}=0 \mbox{ at } (x,0,{{\bf v}})
\}.
$$
If $\pi_{\bf v}$ is a member of the pencil containing the tangential direction of $M$ but is not the tangent cone to $M$, then
the set $\mathscr D_1(H)$ coincides with $\mathscr D_2(H)$ and describes locally the dual of the curve $\Sigma$.
When $\pi_{\bf v}$ is the tangent cone to $M$, then
the set $\mathscr D_1(H)$ consists of two components. One of them is $\mathscr D_2(H)$ (the dual of $\Sigma$) and the other is the {\it proper dual} of $M$ which
is the surface consisting of the tangent planes to $M$ away from points on $\Sigma$ together with their limits at points on $\Sigma$, i.e.,
the tangent cones at points on $\Sigma$.
If the contact of $M$ with $\pi_{{\bf v}}$ is described by that of the fibre
$g=0$ with the model cuspidal edge $X$, with $g$ as in Theorem \ref{theo:Classification}, then $\mathscr D_1(H)$ (resp. $\mathscr D_2(H)$) is
diffeomorphic to $\mathscr D_1(F)$ (resp. $\mathscr D_2(F)$), where $F$ is an $\mathcal R^+(X)$-versal deformation of $g$ with two parameters.
In particular, the calculations and figures in \S\ref{sec:Geomsubmersions} give the models, up to diffeomorphisms, of $\mathscr D_1(H)$ and $\mathscr D_2(H)$.
We have thus the following result.
\begin{prop} Let $M$ be a generic cuspidal edge in $\mathbb R^3$. Then any height function on $M$ has locally
one of the singularities modeled by the submersions in {\rm Table \ref{tab:germsubm}}.
The proper dual of $M$ together with the dual of its singular curve $\Sigma$ are as in
{\rm Figure \ref{fig:Discwu2} (left)} when the tangent cone to $M$ is distinct from the osculating plane to $\Sigma$ and as
{\rm Figure \ref{fig:Discwu2} (right)} otherwise.
\end{prop}
\section{Orthogonal projections of a cuspidal edge}\label{sec:projections}
The family of orthogonal projections in $\mathbb R^3$ is given by
$$
\begin{array}{cccc}
\Pi:&\mathbb R^3\times S^2&\to&TS^2\\
&(p,{{\bf v}}) &\mapsto& ({\bf v}, \Pi_{{\bf v}}(p))
\end{array}
$$
where $\Pi_{{\bf v}}(p)=p-(p \cdot {{\bf v}}){{\bf v}}$.
Given a surface $M$, we denote by $P$ the restriction
of $\Pi$ to $M$. Thus, for $M$ parametrised by $\phi$,
the family of orthogonal projections $ P:U\times S^2\to TS^2$ is given by
$$
P((x,y),{{\bf v}})=({\bf v}, P_{{\bf v}}(x,y)),
$$
with $P_{{\bf v}}(x,y)=\Pi_{{\bf v}}(\phi(x,y))=
\phi(x,y)-(\phi(x,y) \cdot {{\bf v}}){{\bf v}}$. The map $P_{{\bf v}}$ is
locally a map-germ from the plane to the plane and measures the contact of $M$ with lines parallel to ${\bf v}$.
We take $M$ parametrised as in \eqref{eq:prenormalform}. Consider,
for example, the projection along the tangential direction ${\bf v}=(0,1,0)$.
We have $P_{{\bf v}}(x,y)=(x,b_1(x)+y^2b_2(x)+y^3b_3(x,y))$. If $\kappa_t=b_{12}=0$
and $\kappa_t'=b_{22}\neq 0$ the singularity of $P_{{\bf v}}$
at the origin is $\mathcal A$-equivalent to $(x,y^3-x^4y)$ and has
$\mathcal A_e$-codimension $3$ (see \S \ref{sec:Geometryprojections}
for even higher $\mathcal A_e$-codimension cases.) Thus, it cannot
be $\mathcal A_e$-versally unfolded by the family $P$. For this
reason, the group $\mathcal A$ is not very useful for
describing the singularities of the projections of $M$ and the way
they bifurcate as the direction of projection changes in $S^2$. We
follow here the same approach as that for the contact of the
cuspidal edge with planes. The projections are germs of submersions,
so we fix the $\mathcal A$-model $X$ of the cuspidal edge and
consider the action of the group $_{ X}\mathcal A=\mathcal
R(X)\times \mathcal L$ on $\mathcal M_3.\mathcal E(3,2)$. We classify germs of
submersions $g$ in $\mathcal M_3.\mathcal E(3,2)$ of $_{X}\mathcal A_e$-codimension $d_e(f, {_{X}\mathcal A})=\dim_{\mathbb R}(\mathcal E(3,2)/L_X{\mathcal A}_e{\cdot}f) \le 2$. We need the following results from \cite{bkd} and \cite{bdw} adapted to our group, where
$$
\begin{array}{rcl}
L_X{\cal A}_1{\cdot}{g}&=& L\mathcal R_1(X)\cdot{} g+g^*({\mathcal M}_3).\{e_1,e_2\},\\
L_X{\cal K}{\cdot}{g}&=& L\mathcal R(X)\cdot{} g+g^*({\mathcal M}_3).\{e_1,e_2\}.
\end{array}
$$
\begin{theo}\label{prop:CompTrans_XA}{\rm (\cite{bkd})}
Let $g:\mathbb R^3,0\to \mathbb R^2,0$ be a smooth germ and
$h_1,\ldots, h_r$ be homogeneous maps of degree $k+1$ with
the property that
$$
\mathcal{M}_3^{k+1}.\mathcal E(3,2) \subset L_X\mathcal A_1{\cdot}g+ sp\{
h_1,\ldots, h_r\} +\mathcal{M}_3^{k+2}.\mathcal E(3,2) .
$$
Then any germ $h$ with $j^kh(0)=j^kg(0)$ is $_X\mathcal A_1$-equivalent to a germ of the form
$g(x)+\sum_{i=1}^ru_ih_i(x)+\phi(x) $, where $\phi(x)\in \mathcal M_3^{k+2}.\mathcal E(3,2)$. The
vector subspace $sp\{ h_1,\ldots, h_r\}$ is called a complete
$(k+1)$-$_X\mathcal A$-transversal of $g$.
\end{theo}
\begin{theo}\label{theo:Detgroup_XA}
{\rm (\cite{bdw})}
If $g$ satisfies
$$\begin{array}{rcl}
\mathcal{M}_3^{l}.\mathcal{E}(3,2)&\subset&L_X\mathcal{K}\cdot{}g\\
\mathcal{M}_3^{k+1}.\mathcal{E}(3,2)&\subset&L_X\mathcal{A}_1\cdot{}g+\mathcal{M}_3^{l+k+1}.\mathcal{E
}(3,2)
\end{array}
$$
then $g$ is $k$-$_X\mathcal{A}$-determined.
\end{theo}
We also use the following lemma.
\begin{lem}{\rm \bf (Mather's Lemma)}
Let $\alpha:G\times M\to M$ be a smooth action of a Lie group $G$ on a manifold $M$, and let $V$ be a connected
submanifold of $M$. Then $V$ is contained in a single orbit if and only if the following hold:
{\rm (a)} $T_vV\subseteq T_v(G.v), \forall v\in V$,
{\rm (b)} $\dim T_v(G.v)$ is independent of $v\in V$.
\end{lem}
We can now state the classification results for submersions in $\mathcal M_3.\mathcal E(3,2)$ of $_{ X}\mathcal A_e$-codimension $\le 2$ (of the stratum).
\begin{theo} \label{theo:ClassProj}
Let $X$ be the germ of the $\mathcal A$-model of the cuspidal edge parametrised by
$f(x,y,z)=(x,y^2,y^3)$. Denote by $(u,v,w)$ the coordinates in the target.
Then any germ of a submersion in $\mathcal{M}_3.\mathcal E(3,2)$ of $_{ X}\mathcal A_e$-codimension $\le 2$
is
$_{ X}\mathcal A$-equivalent to one of the germs in {\rm Table \ref{tab:germProj}}.
\begin{table}[ht]
\caption{$_X\mathcal A$-finitely determined germs of $_X\mathcal A_e$-codimension $\le 2$.}
\begin{center}
{
\footnotesize
\begin{tabular}{llcl} \hline Name & Normal form & $d_e(f,_{ X}\mathcal A)$ &$_{ X}\mathcal A_e$-versal deformation
\\
\hline
Type 1 & $(u,v)$ & $0$ &$(u,v)$\\
Type 2 &$(u,w+uv)$ & $0$ &$(u,w+uv)$\\
Type 3 &$(u,w+u^2v)$ & $1$ &$(u,w+u^2v+a_1v)$\\
Type 4 &$(u,w+u^3v)$ & $2$ &$(u,w+u^3v+a_2uv+a_1v)$\\
Type 5 &$(v+u^3,w+u^2)$ & $1$ &$(v+u^3+a_1u,w+u^2)$\\
Type 6 &$(v+u^5,w+u^2)$ & $2$ &$(v+u^5+a_2u^3+a_1u,w+u^2)$\\
Type 7 &$g_7=(v+au^2\pm u^4,w+uv+bu^3+P),$ & $2^{(*)}$ & $g_7+(0,a_1u+a_2u^2)$\\
& $P=cu^4+du^5+eu^6$&&\\
\hline
\end{tabular}
}
\end{center}
$(*)$: the codimension is of the stratum; $a,b,c,d,e$ are
moduli and are in the complement of some algebraic subsets of $\mathbb R^5$.
\label{tab:germProj}
\end{table}
\end{theo}
\begin{proof}
We follow the complete transversal technique and classify germs of
submersions inductively on the jet level. Consider the 1-jet
$g=(a_1u+b_1v+c_1w,a_2u+b_2v+c_2w)$. If $a_1\ne 0$ or $a_2\ne 0$
then $g$ is equivalent to $(u,av+bw)$, with $a\ne 0$ or $b\ne 0$
($g$ is a germ of a submersion). For $g=(u,av+bw)$,
$\xi_1(g)=(1,0)$, $\xi_2(g)=(0,2av+3bw)$, $j^1\xi_3(g)=(0,2aw)$, so
if $a\ne 0$, by applying Mather's Lemma, we get
$g$ equivalent to $(u,v)$, otherwise it is equivalent to $(u,w)$.
If $a_1=a_2=0$, then changes of coordinates in the target give $g$ equivalent to $(v,w)$. Thus, we have three orbits of submersions
in the 1-jet space represented by
$(u,v),\, (u,w),\, (v,w).$ The germ $(u,v)$ is 1-determined and is stable.
$\bullet$ For $g=(u,w)$, we have $\xi_1(g)=(1,0)$, $\xi_2(g)=(0,w)$, $\xi_3(g)=(0,3v^2)$. Using these vectors and the left group, we can show that
a complete $(k+1)$-transversal is $(u,w+\lambda u^kv)$, $k\ge 1$.
We have two orbits in the $(k+1)$-jet space, namely $(u,w+u^kv)$ and $(u,w)$. For $g=(u,w+u^kv)$,
we have
$
\xi_1(g)=(1,ku^{k-1}v),\, \xi_2(g)=(0,3w+2u^kv),\, \xi_3(g)=(0,3v^2+2u^kw).
$
Using the above vectors and the left group, we can show that
$ \mathcal{M}_3^{2}.\mathcal{E}(3,2)\subset L_X\mathcal{K}\cdot{}g$. To prove that the germ is $(k+1)$-determined we need to prove that
\begin{equation}\label{eq:determinacy(u,w)}
\mathcal{M}_3^{k+2}.\mathcal{E}(3,2)\subset L_X\mathcal{A}_1\cdot{}g+\mathcal{M}_3^{k+4}.\mathcal{E}(3,2).
\end{equation}
For this, we first show that all monomials of degree $k+3$ are in the right hand side of \eqref{eq:determinacy(u,w)}.
Using $\xi_i$, $i=1,2,3$, we show that all the monomials of degree $k+3$ of the form $(P(u,v,w),0)$, $(0,wP(u,v,w))$ and $(0,v^2P(u,v,w))$ are in there. We use the left group to show that $(0,u^{k+3})$ is also in there.
If we write $g=(g_1,g_2)$, then $(0,g_1^ig_2)=(0,u^iw+u^{k+i}v)$ and $u^i\xi_2(g)=(0,3u^iw+2u^{k+i}v)$.
For $i\ge 1$ these vectors are in $L_X\mathcal{A}_1\cdot{}g$, so $(0,u^{k+2}v)$ is in the right hand side of \eqref{eq:determinacy(u,w)}. We proceed
similarly for the monomials of degree $k+2$ working now with vectors in $L_X\mathcal{A}_1\cdot{}g$ modulo elements in $\mathcal{M}_3^{k+3}.\mathcal{E}(3,2)$ to get all monomials of degree $k+2$ in the right hand side of \eqref{eq:determinacy(u,w)}. Therefore, \eqref{eq:determinacy(u,w)} holds.
The germ $g$ has $_X\mathcal A_e$-codimension $k-1$ and an $_X\mathcal A_e$-versal unfolding is
$(u,w+u^kv+a_{k-1}u^{k-1}v+\ldots+a_{1}uv)$.
$\bullet$ For $g=(v,w)$, we have $\xi_1(g)=(0,0)$, $\xi_2(g)=(2v,3w)$, $\xi_3(g)=(2w,3v^2)$ and a complete 2-transversal is given by
$g=(v+a_1uv+a_2u^2,w+b_1uv+b_2u^2)$. Then $\xi_1(g)=(a_1v+2a_2u,b_1v+2b_2u)$,
$\xi_2(g)=(2v+2a_1uv,3w+2b_1uv)$, $\xi_3(g)=(2w+2a_1uw,3v^2+2b_1uw)$.
Now $j^2w\xi_1(g)=(a_1vw+2a_2uw,b_1vw+2b_2uw)$ and $j^2u\xi_3(g)=(2uw,0)$ and we have $(vw,0)$ and $(0,vw)$ from the left group in the 2-jet of the $_X\mathcal A$ tangent space to the orbit of $g$, so if $b_2\ne 0$, we obtain $(0,uw)$.
From this and $j^2u\xi_2(g)=(2uv,3uw)$, we also get $(uv,0)$. Then $j^2v\xi_1(g)=(a_1v^2+2a_2uv,b_1v^2+2b_2uv)$ together with $(v^2,0)$ and $(0,v^2)$ from the left group also gives $(0,uv)$ if $b_2\ne 0$. Therefore, by Mather's Lemma
$g$ is equivalent to $(v+au^2,w+bu^2)$, with $b\ne 0$.
Using the vectors $(w+bu^2,0)$, $\xi_3(g)=(2w,3v^2)$ and $(0,v^2)$ from the left group shows that $g$ is equivalent to $(v,w+bu^2)$ if $b\ne 0$. We can then set $b=1$
by changes of scales.
If $b_2=0$ and $b_1\ne 0$, then $g$ is equivalent to $(v+au^2,w+uv)$, with $a$ a parameter modulus.
If $b_1=b_2=0$, the orbits are $(v\pm u^2,w)$, $(v+uv,w)$ and $(v,w)$ and all yield germs
of submersions of codimension (of the stratum) greater than 2. Thus, the 2-jets to consider are
$
(v,w+u^2)\,\mbox { \rm and }\, (v+au^2,w+uv).
$
Consider the germ $g=(v,w+u^2)$. Then $\xi_1(g)=(0,2u)$, $\xi_2(g)=(2v,3w)$ and $\xi_3(g)=(2w,3v^2)$. Using these vectors and those from
the left group we can show that a complete $3$-transversal is $g=(v+\lambda u^3,w+u^2)$ and the orbits in the $3$-jet space are
$(v+u^3,w+u^2)$ and $(v,w+u^2)$.
The germ $(v+u^3,w+u^2)$ is $3$-determined and has codimension $1$. An $_X\mathcal A_e$-versal unfolding is given by
$(v+u^3+a_1u,w+u^2)$.
The complete 4-transversal for $(v,w+u^2)$ is empty and the orbits in the 5-jet are $(v+u^5,w+u^2)$ and $(v,w+u^2)$. The germ
$(v+u^5,w+u^2)$ is 5-determined and has codimension 2. An $_X\mathcal A_e$-versal unfolding is given by
$(v+u^5+a_2u^3+a_1u,w+u^2)$.
For the 2-jet $(v+au^2,w+uv)$ a complete 3-transversal is given by
$g=(v+au^2+\lambda_1u^3,w+uv+\lambda_2u^3)$. Using Mather's Lemma,
it can be shown that $g$ is equivalent to $g=(v+au^2,w+uv+bu^3)$,
where $b$ is also a modulus. A complete 4-transversal is
$g=(v+au^2\pm u^4,w+uv+bu^3+cu^4)$.
The computations here get too complicated to do by hand so we make use of the
computer package ``Transversal" (\cite{transkirk}). It gives that
the complete 5-transversal is not empty and the orbits in the 6-jet space can be parametrised by
$g_7=(v+au^2\pm u^4,w+uv+bu^3+cu^4+du^5+eu^6)$. The same computer package shows
that the germ $g_7$ is 6-determined provided the moduli are not in the zero set of some polynomial.
The codimension of the stratum of $g_7$ is $2$ and an $_X\mathcal A_e$-versal unfolding is given by
$g_7+(0,a_1u+a_2u^2)$.
\end{proof}
\subsection{Apparent contour of a cuspidal edge}\label{sec:Geometryprojections}
The singular set of an orthogonal projection of a smooth surface in $\mathbb R^3$ along a direction $\bf v$
is the set of points where $\bf v$ is tangent to the surface and is called the {\it contour generator}.
The image of the contour generator by the projection
in the direction $\bf v$ is called the {\it apparent contour} ({\it profile}) of the surface along the direction $\bf v$.
(See for example
\cite{gaffney, rieger} and also \cite{ShCarCidFaBook,koenderink} for more on apparent contours of smooth surfaces, \cite{brucegiblinProjBound} for surfaces with boundary, \cite{taricreases} for those with creases and corners and
\cite{martinyutaro, west} for those of a surface with a cross-cap singularity.)
For a geometric cuspidal edge $M$, the projection is always singular along the singular curve $\Sigma$, so $\Sigma$ is always part of the contour generator and its
image is part of the apparent contour of $M$ along ${\bf v}$. We call the {\it proper apparent contour $($profile$)$} of $M$
the projection of the set of points where ${\bf v}$ is tangent to $M$ at its regular points. We seek to describe the
apparent contour of $M$ and how it changes as the direction of projection changes locally in $S^2$.
\begin{theo} The bifurcations on the proper apparent contour of $M$ together with those of the
projection of the singular set of $M$ are, up to diffeomorphisms, those in the following figures:
{\rm
\begin{tabular}{ll} \\
Type 2: &Figure \ref{fig:ProjType2}\\
Type 3: &Figure \ref{fig:ProjType3}\\
Type 4: &Figure \ref{fig:ProjType4}\\
Type 5: &Figure \ref{fig:ProjType5}\\
Type 6: &Figure \ref{fig:ProjType6}\\
Type 7: &Figures \ref{fig:ProjType7Lips(a)}, \ref{fig:ProjType7Lips(b)}, \ref{fig:ProjType7Lips(c)}, \ref{fig:ProjType7BeaksR7}, \ref{fig:ProjType7BeaksR6(2)} for some cases.\\
\end{tabular}
}
\label{theo:BiffAppCont}
For {\rm Type 1} singularities, the proper apparent contour is empty and the projection of the singular set is a regular curve.
\end{theo}
\begin{proof}
The apparent contour is the discriminant of the projection (that is, the image of its singular set). For a generic surface,
the family of projections is an $_{ X}\mathcal A_e$-versal family of the singularities of its members.
Therefore, the diffeomorphism type of the bifurcations of the apparent contour can be obtained by considering the bifurcations
of the discriminants in the $_{ X}\mathcal A_e$-versal families restricted to $X$ in Theorem \ref{theo:Detgroup_XA}. We treat each case in Table \ref{tab:germProj} separately.
$\bullet$ Type 1: The germ $g=(u,v)$. We denote by $h$ the composite of $g$ with the parametrisation $f(x,y)=(x,y^2,y^3)$ of $X$. Then
$h(x,y)=(x,y^2)$ which has a singularity of $\mathcal A$-type fold.
The critical set of $h$ is the $x$-axis, i.e., is the singular set of $X$, and the discriminant is a regular curve.
$\bullet$ The germ $g=(u,w+u^kv)$, $k=1,2,3$. Here we have $h(x,y)=(x,y^3+x^ky^2)$ and its singular set is given by
$y(3y+2x^k)=0$. It has two components, one of which ($y=0$) is the singular set of $X$.
The other component (the proper contour generator) is a smooth curve and has $k$-point contact
with the singular set of $X$.
{Type 2: $k=1$.} The germ $h(x,y)=(x,y^3+xy^2)$ has a singularity of $\mathcal A$-type beaks
(which is of $\mathcal A_e$-codimension 1 but $g$ is $_X\mathcal A_e$-stable).
The discriminant is the union of the two curves $(x,0)$ and $(x,(4/27)x^3)$ which have 3-point contact at the origin \mbox{(Figure \ref{fig:ProjType2})}.
\begin{figure}[h]
\begin{center}
\includegraphics[width=3cm, height=3cm]{Fig3.pdf}
\caption{Proper apparent contour (thin) and the projection of the singular set (thick) of $M$ at an $_X\mathcal A$-singularity of Type 2.}
\label{fig:ProjType2}
\end{center}
\end{figure}
{Type 3: $k=2$.} We
consider the versal deformation $g=(u,w+u^2v+a_1uv)$ with parameter $a_1$, so $h(x,y)=(x,y^3+x^2y^2+a_1y^2)$. (When $a_1=0$,
$h$ is $\mathcal A$-equivalent to $(x,y^3-x^4y)$ which is a singularity of
$\mathcal A$-type $4_4$, see \cite{rieger}.)
For $a_1$ fixed, the critical set is given by $y(3y +2x^2+2a_1)=0$ and consists of two curves $y=0$ and $y=-2/3(x^2+a_1)$.
The discriminant is the union of the two curves $(x,0)$ and $(x,4/27(x^2+a_1)^3)$.
See Figure \ref{fig:ProjType3} for the bifurcations in these curves as $a_1$ varies near zero.
\begin{figure}[h]
\begin{center}
\includegraphics[width=9cm, height=3cm]{Fig4.pdf}
\caption{Bifurcations in generic 1-parameter families of the proper apparent contour (thin) and of the projection of the singular set (thick) of $M$ at an $_X\mathcal A$-singularity of Type 3.}
\label{fig:ProjType3}
\end{center}
\end{figure}
{Type 4: $k=3$:}
We consider the versal deformation $g=(u,w+u^3v+a_2uv+a_1v)$ with parameters $a_1, a_2$,
so $h(x,y)=(x,y^3+x^3y^2+a_2xy^2+a_1y^2)$. (At $a_1=a_2=0$, $h$ is $\mathcal A$-equivalent to $(x,y^3-x^6y)$ which is a singularity of
$\mathcal A$-type $4_6$.)
The critical set is given by $y(3y +2(x^3+a_2x+a_1))=0$. The family $x^3+a_2x+a_1$ is a $\mathcal K$-versal deformation of the $A_2$-singularity, so
the bifurcations in the critical set are as in \mbox{Figure \ref{fig:ProjType4} (left)} and those in the discriminant are as in Figure \ref{fig:ProjType4} (right). Singularities of Type 3 occur when $(a_1,a_2)$ are on the cusp curve $27a_1^2+4a_2^3=0$
(middle figures in Figure \ref{fig:ProjType4}).
\begin{figure}[h]
\begin{center}
\includegraphics[width=6cm, height=6cm]{Fig5a.pdf}\qquad
\includegraphics[width=6cm, height=6cm]{Fig5b.pdf}
\caption{Bifurcations in generic 2-parameter families of
the critical set (left), and of the proper apparent contour (thin) and of the projection of the singular set (thick) of $M$ (right)
at an $_X\mathcal A$-singularity of Type 4.}
\label{fig:ProjType4}
\end{center}
\end{figure}
$\bullet$ Type 5: Here we have a versal family $g=(v+u^3+a_1u,w+u^2)$, so $h(x,y)=(y^2+x^3+a_1x,y^3+x^2)$. (When $a_1=0$,
$h$ has an $\mathcal A$-singularity of type $\mbox{\rm I}_{2,2}^{1,1}$, see \cite{riegerruas}.) The critical set is given by
$y(4x-3y(3x^2+a_1))=0$ and is the union of two transverse curves.
The image of the curve $y=0$ is the $\mathcal A_e$-versal family
$(x^3+a_1x,x^2)$ of a cusp curve (Figure \ref{fig:ProjType5}, thick curve).
The image of the other branch is a cusp when $a_1=0$, and when $a_1\ne 0$, we write $y=4x/(3(3x^2+a_1))$ so its image can be parametrised by
$$
(\frac{16}{9}\frac{x^2}{(3x^2+a_1)^2}+x^3+a_1x, \frac{64}{27}\frac{x^3}{(3x^2+a_1)^3}+x^2).$$
A short calculation shows that it has always a cusp singularity near the origin for all values of $a_1\ne 0$ near zero.
The origin is a Type 2 singularity for any $a_1\ne 0$; see
Figure \ref{fig:ProjType5}.
\begin{figure}[h]
\begin{center}
\includegraphics[width=9cm, height=2cm]{Fig6.pdf}
\caption{Bifurcations in generic 1-parameter families of the proper apparent contour (thin) and
of the projection of the singular set (thick) of $M$ at an $_X\mathcal A$-singularity of Type 5.}
\label{fig:ProjType5}
\end{center}
\end{figure}
$\bullet$ Type 6: The versal family $g=(v+u^5+a_2u^3+a_1u,w+u^2)$ gives
$h(x,y)=(y^2+x^5+a_2x^3+a_1x,y^3+x^2)$ (which has a singularity of $\mathcal A$-type
$\mbox{\rm I}_{2,2}^{1,2}$ at $a_1=a_2=0$).
The critical set is given by $y(4x-3y(a_1+3a_2x^2+5x^4))=0$ and consists of two transverse curves
for any $(a_1,a_2)$ near the origin.
The image of $y=0$ is the $\mathcal A_e$-versal family $(x^5+a_2x^3+a_1x,x^2)$
of the ramphoid cusp curve. The $\mathcal A_e$-deformations in this family are obtained in
\cite{GibsonHobbs, wallgenericgeometry} and are as in Figure \ref{fig:ProjType6}, thick curves. One can show that there are no other
local or multi-local singularities appearing in the deformation.
\begin{figure}[h]
\begin{center}
\includegraphics[width=8cm, height=8cm]{Fig7.pdf}
\caption{Stratification of the parameter space (central figure) and bifurcations in generic 2-parameter families of the proper apparent contour (thin) and of the projection of the singular set (thick) of $M$ at an $_X\mathcal A$-singularity of Type 6.}
\label{fig:ProjType6}
\end{center}
\end{figure}
$\bullet$ Type 7:
The versal family $(v+au^2\pm u^4,w+uv+bu^3+cu^4+du^5+eu^6+a_2u^2+a_1u)$ gives
$h(x,y)=(y^2+ax^2\pm x^4,y^3+xy^2+bx^3+cx^4+dx^5+ex^6+a_2x^2+a_1x)$ (which has
a non-simple corank 2 singularity when $a_1=a_2=0$; these are yet to be classified).
We shall stratify the parameter space $(a_1,a_2)$ near the origin by the loci of codimension 2,1,0 local and multi-local
singularities of $h$. The stratification depends on the
five moduli of $g_7$ in Table \ref{tab:germProj}. However, as shown by the calculations below,
the configuration of the strata (up to homeomorphism $\mathbb R^2,0\to \mathbb R^2,0$)
depends only on the two moduli $a$ and $b$. We also obtain a (partial) stratification of the $(a,b)$ plane
into strata where the configuration of the bifurcation set of $h$ is constant.
The singular set of $h$ is given by
$$
y\left(a_1+2a_2x-(2a-3b)x^2-3axy+y^2+4cx^3+(5d \mp4)dx^4 \mp 6yx^3+6ex^5\right)=0.
$$
It consists of the singular set of the cuspidal edge $y=0$ and another component which we denote by $S_{(a_1,a_2)}$.
The image of $y=0$ gives an $\mathcal A_e$-versal family
$$
(ax^2\pm x^4,bx^3+cx^4+dx^5+ex^6+a_2x^2+a_1x)
$$
of a cusp curve. (We require $a\ne 0$ and $b\ne 0$ which are also conditions for finite determinacy of the germ $g_7$.
We observe that we get a self-intersection in the image of $y=0$ if and only if $a_1b<0$.)
\begin{figure}[h]
\begin{center}
\includegraphics[width=7cm, height=6cm]{Fig8.pdf}
\caption{A (partial) stratification of the $(a,b)$-plane.}
\label{fig:Type7Strat(ab)}
\end{center}
\end{figure}
\noindent
{\it The lips/beaks stratum}\\
The singular set $S_{(0,0)}$ has a Morse singularity at the origin if and only if
$9 a^2+8 a-12 b\ne 0.$ We obtain a stratum in the $(a,b)$-plane given by the curve
\begin{equation}
\label{eq:MorseCond}
9 a^2+8 a-12 b=0,
\end{equation}
where the singularity of $S_{(0,0)}$ is more degenerate.
The parabola \eqref{eq:MorseCond} splits the $(a,b)$-plane into two regions. For $(a,b)$ inside (resp. outside) the parabola,
we have a birth of a lips (resp. beaks) singularity on one of the regular sheets of the cuspidal edge (see Figure \ref{fig:Type7Strat(ab)}).
We call the singularities of Type 7 lips (resp. beaks) type if lips (resp. beaks) singularities appear in its bifurcations.
The lips/beaks stratum in the $(a_1,a_2)$-parameter space is given by
the set of parameters $(a_1,a_2)$ for which there exists $(x,y)$ near the origin such that
$S_{(a_1,a_2)}(x,y)=0$ and the function $S_{(a_1,a_2)}$ has a Morse singularity at $(x,y)$. Eliminating variables, we find that
the lips/beaks stratum is a regular curve parametrised by
\begin{equation}
\label{eq:LipsBeaksStrat}
a_1=-\frac{4}{9 a^2+8 a-12 b}\, a_2^2+O_3,
\end{equation}
where $O_3$ (here and in the rest of the paper) is a family of smooth functions in $a_2$ depending smoothly on $a,b,c,d,e$ and has a zero 2-jet as a function in $a_2$.
\noindent
{\it Swallowtail stratum} \\
Using the recognition criteria in \cite{kentaro},
we find the stratum where swallowtail singularities occur is given by
\begin{equation}
\label{eq:SwallotStrat}
a_1=\frac{4(3a^2+(3b-4a)\alpha -\alpha^3)\alpha}{9(a^2+2(b-a) \alpha -\alpha^2 a)^2}\, a_2^2+O_3,
\end{equation}
where $\alpha$ is a solution of the cubic equation
$$
P(\alpha)=2 (a-b)\alpha^3-3 a^2 \alpha^2-a^3=0.
$$
The discriminant of $P$ is $\delta_P=a^3+(a-b)^2$. It is a cusp curve tangent to the lips/beaks parabola
at $(-\frac{4}{9},-\frac{4}{27})$ where both curves have a horizontal tangent (dotted curve in Figure \ref{fig:Type7Strat(ab)}). There are thus three swallowtail
curves in the bifurcation set of $h$ for $(a,b)$ inside the cusp curve $\delta_P=0$ and one swallowtail
curve for $(a,b)$ outside this curve (see Figure \ref{fig:Type7Strat(ab)}). In particular, when lips singularities occur on the profile,
only one swallowtail curve is present in the bifurcation set of $h$. From \eqref{eq:SwallotStrat} we have two additional
strata in the $(a,b)$ plane given as follows:
(i) the swallowtail curve is inflectional, so $\alpha$ is also a root of $3a^2+(3b-4a)\alpha -\alpha^3$.
Calculating the resultant of this polynomial and $P$ we get $243a^4+4(4a-3b)^3=0$ (gray curve in Figure \ref{fig:Type7Strat(ab)}).
(ii) the swallowtail curve is singular, so $\alpha$ is also a root of $a^2+2(b-a) \alpha -\alpha^2 a$.
Calculating the resultant of this polynomial and $P$ we get $a^3(a^3+(a-b)^2)^2=0$, which gives curves that are
already present in the stratification.
\noindent
{\it Type 3 singularities stratum}\\
These occur when $S_{(a_1,a_2)}=0$ is tangent to $y=0$. This occurs
when
$$
a_1=\frac{1}{3b-2a}\, a_2^2+O_3.
$$
Here we require $3b-2a\ne 0$, so we have another stratum in the $(a,b)$-plane given by $3b-2a=0$.
This is precisely the tangent line to the lips/beaks stratum at the origin.
\noindent
{\it Type 5 singularities stratum}\\
These occur when the image of the singular set $h(0,y)$
is singular, and this happens when $a_1=0$ (for any value of the moduli).
The above strata exhaust all the possible codimension 1 local singularities that can occur in $h$. We turn now to the multi-local singularities.
\noindent
{\it Double point $+$ fold stratum}\\
Here a proper profile passes through the
point of self-intersection (the double point) of the image of the singular set of the cuspidal edge.
Thus we have $h(x_1,0)=h(x_2,0)$ for some $x_1\ne x_2$, $S_{(a_1,a_2)}(x,y)=0$ and $h(x_1,0)=h(x,y)$.
From $h(x_1,0)=h(x_2,0)$ we get $x_2=-x_1$ and $a_1+bx_1^2+dx_1^4=0$, so $a_1=-bx_1^2-dx_1^4$. We have now a system of
five equations $S_{(-bx_1^2-dx_1^4,a_2)}(x,y)=0$ and $h(x_1,0)=h(x,y)$ in $x_1,x,y,a_2$. A necessary condition for the existence of a
solution is $a^3+(a-b)^2>0$ and $a\ne b$. Thus, there are no ``double point $+$ fold'' singularities for $(a,b)$
inside the cusp $\delta_P=a^3+(a-b)^2=0$ where 3 swallowtail curves appear. When $a^3-(a-b)^2<0$,
calculations show that the ``double point $+$ fold'' stratum is a regular curve parametrised
by
$$
a_1=-\frac{b}{a^3+(a-b)^2}\, a_2^2+O_3.
$$
\noindent
{\it Double point $+$ Type 2 stratum}\\
We have a double point on the image of the singular set of the cuspidal edge
and a fold tangent to one of its branches at the double point. Thus
$h(x_1,0)=h(x_2,0)$ for some $x_2=-x_1$ (from above stratum) and $S_{(a_1,a_2)}(x_1,0)=0$ (or $S_{(a_1,a_2)}(-x_1,0)=0$).
The stratum is again a regular curve parametrised by
$$
a_1=-\frac{b}{(a-b)^2}\, a_2^2+O_3.
$$
\noindent
{\it Type 2 $+$ fold stratum}\\
Here we have a Type 2 singularity together with another piece of the proper profile intersecting the two
tangential curves transversally. Thus, we have a Type 2 singularity at $(x,0)$ and there exists $(x_1,y_1)$, with $y_1\ne 0$ such that
$S_{(a_1,a_2)}(x,0)=S_{(a_1,a_2)}(x_1,y_1)=0$ and $h(x,0)=h(x_1,y_1)$.
This stratum consists of 1 or 3 regular curves depending on whether the polynomial
$$
Q(\lambda)=(a^3+(a-b)^2)\lambda^3-3(a^3+(a-b)^2)\lambda^2+3(a-b)^2\lambda+4a^3-(a-b)^2
$$
has 1 or 3 roots. The discriminant of $Q$ is $-108a^6(a-b)^2(a^3+(a-b)^2)$. In particular, we have 1 (resp. 3) regular curves
if and only if the swallowtail stratum consists of 1 (resp. 3) regular curve(s).
The initial part of the parametrisation of the curve(s) is
$$
a_1=\frac{4(\lambda-1)((3b-3a)\lambda^2-(3a\mu +3b -2a)\lambda+a)}{((3b-3a)\lambda^2-3a\mu\lambda-3b+3a)^2}\, a_2^2+O_3,
$$
with $\mu=-\sqrt{a(1-\lambda^2)}$ and $\lambda$ a root of the polynomial $Q$.
\noindent
{\it Triple point stratum} \\
We have three pieces of the proper apparent contour intersecting transversally at a given point,
so we need to solve $h(x_1,y_1)=h(x_2,y_2)=h(x_3,y_3)$ in $(a_1,a_2)$, with $(x_i,y_i)\ne (x_j,y_j)$ for $i\ne j$.
The stratum is empty in the examples in Figures \ref{fig:ProjType7Lips(a)}, \ref{fig:ProjType7Lips(b)}, \ref{fig:ProjType7Lips(c)}, \ref{fig:ProjType7BeaksR7} and consists of a regular curve in the case of Figure \ref{fig:ProjType7BeaksR6(2)}. For the general case, the equations are too lengthy
to reproduce here and eliminating variables leads to equations of very high order.
\noindent
{\it Tacnode stratum}\\
This consists of a multi-local singularity where the proper apparent contour has an ordinary tangency with
the image of the cuspidal curve.
The stratum consists of one regular curve if $b>-\frac{4}{27}$ and 3 regular curves if $b<-\frac{4}{27}$ (and $ab\ne 0$) parametrised by
$$
a_1=\frac{(-a\lambda^4+2b\lambda^3+(a\mu+2a-3b)\lambda^2-a\mu-a+b)(\lambda-1)^2}
{((b-a)\lambda^3-a\mu\lambda^2+(3a\mu+3a-3b)\lambda-2a\mu+2b-2a)^2}\, a_2^2+O_3,
$$
with $\lambda$ a solution of
$$(a^3-(a+b)^2)\lambda^3+(3a^3+4b^2-(a-b)^2)\lambda^2+(3a^3-3b^2+(a+b)^2)\lambda+a^3+(a-b)^2=0$$
and $\mu=-\sqrt{-a(1-\lambda^2)}$.
\noindent
{\it Cusp $+$ fold}\\
We have a cusp on the proper apparent contour on the image of the singular set of the cuspidal edge. Here we get two
regular curves given by
$$
a_1=\frac{4}{9}\frac{(3a^2+(3b-4a)\beta-\beta^3)\beta}{(a^2+2(b-a)\beta-a\beta^2)^2}\,a_2^2+O_3,
$$
with $\beta$ one of the two real roots of
$$
3a^5-4a^3(a-b)\beta+6\beta^2a^4-12a^2(a-b)\beta^3+(4(a-b)^2-a^3)\beta^4.
$$
We draw in Figures \ref{fig:ProjType7Lips(a)}, \ref{fig:ProjType7Lips(b)}, \ref{fig:ProjType7Lips(c)}
three possibilities for the bifurcations of the proper apparent contour and of the projections of the singular set
for the lips Type 7 singularity. Figures \ref{fig:ProjType7BeaksR7}, \ref{fig:ProjType7BeaksR6(2)}
are for the bifurcations of a beaks Type 7 singularity with one figure having one swallowtail stratum and the other 3 swallowtail strata.
As Figure \ref{fig:Type7Strat(ab)} shows, there are various open strata in the $(a,b)$-plane and in each stratum we have distinct
bifurcations of the apparent contour. We observe that the stratification in \mbox{Figure \ref{fig:Type7Strat(ab)}} is a partial one as we have not included, for instance,
where the various curves in the stratification of the $(a_1,a_2)$-plane are inflectional nor where their relative position changes.
Figures \ref{fig:ProjType7Lips(a)}-\ref{fig:ProjType7BeaksR6(2)} and \mbox{Figure \ref{fig:Type7Strat(ab)}} show the richness of the extrinsic differential geometry of the cuspidal edge.
\end{proof}
\begin{rem}{\rm
In the calculations in the proof of Theorem \ref{theo:BiffAppCont} for the Type 7 singularity, we eliminate variables using resultant (using Maple)
until we
get an equation $k=0$ involving only two variables. The function germ $k$ is finitely $\mathcal R$-determined and has a well understood singularity (in the cases involved), so we can deduce the smooth structure of $k=0$ and, in particular, the number of its branches and the initial parametrisation of each branch.
We reverse the process until we get the initial part of the local parametrisation of the desired stratum.
}
\end{rem}
\begin{figure}[h]
\begin{center}
\includegraphics[width=15cm, height=20cm]{Fig9.pdf}
\caption{Lips type bifurcations in the proper apparent contour and of the projections of the singular set at a Type 7 singularity with
$(a,b)$ in Region \textcircled{$\scriptstyle1$}, in Figure \ref{fig:Type7Strat(ab)}.}
\label{fig:ProjType7Lips(a)}
\end{center}
\end{figure}
\begin{figure}[h]
\begin{center}
\includegraphics[width=15cm, height=20cm]{Fig10.pdf}
\caption{Lips type bifurcations in the proper contour and of the projections of the singular set at a Type 7 singularity with
$(a,b)$ in Region \textcircled{$\scriptstyle2$}, in Figure \ref{fig:Type7Strat(ab)}.}
\label{fig:ProjType7Lips(b)}
\end{center}
\end{figure}
\begin{figure}[h]
\begin{center}
\includegraphics[width=15cm, height=20cm]{Fig11.pdf}
\caption{Lips type bifurcations of the proper contour and of the projections of the singular set at a Type 7 singularity with
$(a,b)$ in Region \textcircled{$\scriptstyle3$}, in Figure \ref{fig:Type7Strat(ab)}.}
\label{fig:ProjType7Lips(c)}
\end{center}
\end{figure}
\begin{figure}[h]
\begin{center}
\includegraphics[width=15cm, height=20cm]{Fig12.pdf}
\caption{Beaks type bifurcations of the proper contour and of the projections of the singular set at a Type 7 singularity with
$(a,b)$ in Region \textcircled{$\scriptstyle4$}, in Figure \ref{fig:Type7Strat(ab)}.}
\label{fig:ProjType7BeaksR7}
\end{center}
\end{figure}
\begin{figure}[h]
\begin{center}
\includegraphics[width=15cm, height=19cm]{Fig13.pdf}
\caption{Beaks type bifurcations of the proper contour and of the projections of the singular set at a Type 7 singularity with
$(a,b)$ in Region \textcircled{$\scriptstyle5$}, in Figure \ref{fig:Type7Strat(ab)}. The figure for $(a_1,a_2)$ with $a_2<0$ is the reflection with respect to the $y$-axis of the figure for $(a_1,-a_2)$ and is omitted for lack of space.}
\label{fig:ProjType7BeaksR6(2)}
\end{center}
\end{figure}
\bigskip
Finally, we consider geometric criteria for recognition of the generic singularities of the orthogonal projections of
the cuspidal edge $M$.
We denote by $TC_pM$ the tangent cone to $M$ at a point $p$ on the singular set $\Sigma$
and by ${\bf v}_{tg}$ the tangential direction at $p$.
\begin{prop}\label{prop:geomcondProj}
For a generic cuspidal edge, the projection $P_{\bf v}$ can have one of the local singularities in
{\rm Table \ref{tab:germProj}} and these occur
at a point $p\in \Sigma$ when the following geometric conditions are satisfied.
{\rm (i)} If ${\bf v}$ is transverse to the tangent cone $TC_pM$, then $P_{\bf v}$ has a singularity of \mbox{\rm Type 1}.
{\rm (ii)} If $\kappa_n(p)\ne 0$, for all directions in $TC_pM\setminus \{{\bf v}_{tg}\} $ except for one ${\bf v}_{0}$, the
singularity of $P_{\bf v}$ is of \mbox{\rm Type 2}. The singularity of $P_{{\bf v}_{0}}$ is of \mbox{\rm Type 3} at generic points on $\Sigma$ and
becomes of \mbox{\rm Type 4} at isolated points on $\Sigma$. The two types \mbox{\rm Type 3} and \mbox{\rm Type 4} are distinguished by the contact
of the proper contour generator with $\Sigma$, two for \mbox{\rm Type 3} and three for \mbox{\rm Type 4}.
If $\kappa_n(p)=0$ but $\kappa_t(p)\ne 0$, the singularity of $P_{\bf v}$ is of \mbox{\rm Type 2} for
all ${\bf v}\in TC_pM\setminus \{{\bf v}_{tg}\} $. If $\kappa_n(p)=\kappa_t(p)=0$,
the singularity of $P_{\bf v}$ is of \mbox{\rm Type 3} for
all ${\bf v}\in TC_pM\setminus \{{\bf v}_{tg}\} $ except for one direction where it becomes of \mbox{\rm Type 4}.
{\rm (iii)} The singularity of $P_{{\bf v}_{tg}}$ is of \mbox{\rm Type 5} if $\tau_\Sigma(p) \kappa_n(p)\ne 0$,
generically of \mbox{\rm Type 6} if $\tau_\Sigma(p)=0$ and $\kappa_n(p)\ne 0$, and generically of
\mbox{\rm Type 7} if $\kappa_n(p)=0$.
\end{prop}
\begin{proof}
Following the transversality theorem in the Appendix of \cite{brucewest}, for a generic cuspidal edge only singularities of
$_X\mathcal A_e$-codimension $\le 2$ (of the stratum) occur in the projection $P_{\bf v}$ (i.e., those listed in Table \ref{tab:germProj}).
The kernel direction of $(dP_{\bf v})_p$ is
parallel to the direction of projection $\bf v$.
The relative position of the kernel direction with respect to the tangent cone and the tangential direction
is invariant under diffeomorphism, so can be considered on a submersion $g$ on the model $X$.
It follows from the classification in the proof of \mbox{Theorem \ref{theo:ClassProj}}, that $g$
has a singularity of Type 1 at $p$ when the kernel direction of $dg_p$ is transverse to the tangent cone $TC_pX$,
a singularity of Type 2,3,4 if the kernel direction is in $TC_pX$ but is not the tangential direction at $p$,
and of Type 5,6,7 if the kernel direction is parallel to the tangential direction.
We take the cuspidal edge parametrised as in \eqref{eq:prenormalform}. Suppose that
${\bf v}=(\alpha,\beta,0)\in TC_pM$ with $\alpha^2+\beta^2=1$ and take $p$ to be the origin.
Consider the projection $\Pi_{\bf v}(u,v,w)=((1-\alpha^2)u-\alpha\beta v, -\alpha\beta u +(1-\beta^2) v,w)$. By rotating the
plane of projection $T_{\bf v}S^2$ to the plane $u=0$ and rescaling we get
$\Pi_{\bf v}(u,v,w)\sim_{_X\mathcal A}(\beta u-\alpha v,w)$. We shall modify the family of projections and take
$\Pi_{\bf v}(u,v,w)=(\beta u-\alpha v,w)$.
Suppose that ${\bf v}\ne {\bf v}_{tg}$ (i.e., $\beta \ne 0$; we can then set $\beta=1$). Following the proof of Theorem \ref{theo:BiffAppCont}, the singularities of Type 2, 3 and 4
are distinguished by the contact of the proper contour generator with the singular set $\Sigma$.
We have
$$
P_{\bf v}(x,y)=(\alpha x- (a(x)+\frac{1}{2}y^2), b_1(x)+y^2b_2(x)+y^3b_3(x,y)).
$$
The singular set of $P_{\bf v}$ is given by $yS_{\alpha}(x,y)=0$ with
$$
\begin{array}{rcl}
S_{\alpha}(x,y)&=&
(\alpha b_{20}+b_{12})x+\frac{1}{2}b_{03}y+\\
&&+(\frac{1}{2}\alpha b_{30}+b_{22}-\alpha a_{20} b_{12})x^2+\frac{1}{2}(b_{13}-\alpha a_{20}b_{03})xy+\frac{1}{2}\alpha b_{12}y^2+\\
&&+\alpha(\frac{2}{3} b_{40}-\frac{1}{2}a_{30}b_{12}-a_{20}b_{22})x^3-\frac{\alpha}{4}( a_{30}b_{03}+2 a_{20}b_{13})x^2y+\alpha b_{22}xy^2+\\
&&+\frac{1}{6}\alpha b_{13}y^3+O(4).
\end{array}
$$
The singular set $S_{\alpha}=0$ is transverse to $\Sigma$ unless $\alpha b_{20}+b_{12}=0$. If
$b_{20}=\kappa_n(p)\ne 0$, then there is a unique direction ${\bf v}_{0}$ parallel to $(-b_{12},b_{20},0)$ where transversality fails (so $\alpha=\alpha_0=-b_{12}/b_{20}$). The singular set $S_{\alpha_0}=0$
has contact of order 2 with $\Sigma$ at the origin if $2b_{12}^2a_{20}-b_{12}b_{30}+2b_{22}b_{20}\ne 0$. The contact is of order 3 if
$2b_{12}^2a_{20}-b_{12}b_{30}+2b_{22}b_{20}=0$ and
$\alpha_0(\frac{2}{3} b_{40}-\frac{1}{2}a_{30}b_{12}-a_{20}b_{22})\ne 0$.
Suppose now that $b_{20}=\kappa_n(p)=0$. Then transversality of $S_{\alpha}$ with $\Sigma$ fails if and only if
$b_{12}=\kappa_t(p)=0$. In this case the singularity of $P_{\bf v}$ is of Type 3 unless
$\alpha=\alpha_0=-2b_{22}/b_{30}$, where it becomes of Type 4.
When ${\bf v}= {\bf v}_{tg}=(1,0,0)$, we have
$P_{{\bf v}_{tg}}(x,y)=( a(x)+\frac{1}{2}y^2, b_1(x)+y^2b_2(x)+y^3b_3(x,y))$ and its critical set is
given by
$yS_{{\bf v}_{tg}}(x,y)=0$ with
$$
S_{{\bf v}_{tg}}(x,y)=-b_{20}x+(b_{12}a_{20}-\frac{1}{2}b_{30})x^2+\frac{1}{2}a_{20}b_{03}xy-\frac{1}{2}b_{12}y^2
+O(3).
$$
From the proof of Theorem \ref{theo:BiffAppCont}, the singularity of $P_{{\bf v}_{tg}}$ is of Type 5 (resp. Type 6)
if $S_{{\bf v}_{tg}}=0$ is a regular curve and the image of $\Sigma$ by $P_{{\bf v}_{tg}}$ is an ordinary
(resp. ramphoid) cusp. Now $S_{{\bf v}_{tg}}=0$ is a regular curve if and only if
$b_{20}=\kappa_n(p)\ne 0$. The image of $\Sigma$ is
$P_{{\bf v}_{tg}}(x,0)=( a(x), b_1(x))$. It has an ordinary cusp if and only if
$b_{30}a_{20}-a_{30}b_{20}\ne 0 $, that is $\tau_\Sigma(p)\ne 0$. When $\tau_\Sigma(p)=0$, the singularity
is generically a ramphoid cusp. When $b_{20}=0$, $S_{{\bf v}_{tg}}$ is singular so we have generically a singularity of Type 7.
\end{proof}
\begin{thebibliography}{99}
\bibitem{martinyutaro}
M. Barajas Sichaca and Y. Kabata, Projections of the cross-cap. Preprint, 2015.
\bibitem{brucegiblinBook} J. W. Bruce and P. J. Giblin, Curves and singularities. Cambridge University Press, 1992.
\bibitem{brucegiblinProjBound}
J. W. Bruce and P. J. Giblin, Projections of surfaces with boundary.
\emph{Proc. London Math. Soc.} 60 (1990), 392--416.
\bibitem{bgt95}J. W. Bruce, P. J. Giblin and F. Tari, Families of surfaces: height functions, Gauss maps and duals.
{\it Pitman Res. Notes Math. Ser.} 333 (1995), 148--178.
\bibitem{bkd} J. W. Bruce, N. P. Kirk and A. A. du Plessis, Complete transversals
and the classification of singularities. {\it Nonlinearity} 10 (1997), 253--275.
\bibitem{bdw}
J. W. Bruce, A. A. du Plessis and C. T. C. Wall,
Determinacy and unipotency.
\emph{Invent. Math.} 88 (1987), 521--554.
\bibitem{bruceroberts}J. W. Bruce and R. M. Roberts, Critical points of functions on analytic varieties.
{\it Topology} 27 (1988), no. 1, 57--90.
\bibitem{brucewest}J. W. Bruce and J. M. West, Functions on a crosscap.
{\it Math. Proc. Cambridge Philos. Soc.} 123 (1998), 19--39.
\bibitem{bruce-wilkinson} J. W. Bruce and T. C. Wilkinson, Folding maps and focal sets. Lecture Notes in Math., 1462, 63--72, Springer, Berlin, 1991.
\bibitem{damon}
J. N. Damon, Topological triviality and versality for subgroups of
\mbox{${\mathcal A}$} and \mbox{${\mathcal K}$}. \emph{Mem. Amer. Math. Soc.}
{75} (1988), no. 389.
\bibitem{gaffney}
T. Gaffney, The structure of the $T\mathcal A(f)$, classification and application to differential geometry.
{Singularities, Part 1 (Arcata, Calif., 1981)}. {\it Proc. Sympos. Pure Math.} 40 (1983), 409--427.
Amer. Math. Soc., Providence, RI.
\bibitem{GibsonHobbs} C. G. Gibson and C. A. Hobbs, Singularities of general one-dimensional motions of the plane and space.
{\it Proc. Roy. Soc. Edinburgh Sect. A} 125 (1995), 639--656.
\bibitem{ShCarCidFaBook}
S. Izumiya, M. C. Romero Fuster, M. A. S. Ruas and F. Tari, \emph{Differential geometry from singularity theory viewpoint}.
World Scientific, 2015.
\bibitem{transkirk}
N. P. Kirk, {Computational aspects of classifying
singularities}. {\it LMS J. Comput. Math.} 3 (2000), 207--228.
\bibitem{koenderink}
J. J. Koenderink, \emph{Solid Shape}. MIT Press,
Cambridge, MA, 1990.
\bibitem{martinsnuno} L. Martins and J. J. Nu\~no Ballesteros,
Contact properties of surfaces in $\mathbb R^3$ with corank 1 singularities. {\it Tohoku Math. J.}
67 (2015), 105--124.
\bibitem{martinssaji} L. Martins and K. Saji, Geometric invariants
of cuspidal edges. To appear in {\it Canad. J. Math.}
\bibitem{rieger} J. H. Rieger, Families of maps from the plane to the plane. {\it J. London Math. Soc.} 36 (1987), 351--369.
\bibitem{riegerruas} J. H. Rieger and M. A. S. Ruas,
Classification of $\mathcal A$-simple germs from $\mathbb K^n$
to $\mathbb K^2$. {\it Compositio Math.} 79 (1991), 99--108.
\bibitem{kentaro} K. Saji, M. Umehara and K. Yamada, The geometry of fronts. {\it Ann. of Math.} (2) 169 (2009), 491--529.
\bibitem{taricreases}
F. Tari, Projections of piecewise-smooth surfaces.
\emph{J. London Math. Soc.} 44 (1991), 155--172.
\bibitem{teramoto} K. Teramoto, Parallel and dual surfaces of cuspidal edges. Preprint.
\bibitem{wallgenericgeometry} C. T. C. Wall,
Geometric properties of generic differentiable manifolds.
{\it Lecture Notes in Math.}, 597 (1977), 707--774.
\bibitem{west} J. M. West, The differential geometry of the crosscap. Ph.D. thesis, The University of Liverpool, 1995.
\bibitem{wilkinson} T. C. Wilkinson, The geometry of folding maps. PhD Thesis, University of Newcastle-upon-Tyne, 1991.
\end{thebibliography}
\noindent
ROS: Departament de Geometria i Topologia, Universitat de València, c/ Dr Moliner nº 50, 46100, Burjassot, València, Spain.\\
Email: [email protected]\\
\noindent FT: Instituto de Ci\^encias Matem\'aticas e de
Computa\c{c}\~ao - USP, Avenida Trabalhador s\~ao-carlense, 400 -
Centro,
CEP: 13566-590 - S\~ao Carlos - SP, Brazil.\\
Email: [email protected]
\end{document}
|
\begin{document}
\title[Commutants of Toeplitz operators]{The commutants of certain Toeplitz operators on weighted Bergman spaces}
\author{Trieu Le}
\address{Trieu Le, Department of Mathematics, University of Toronto, Toronto, Ontario, Canada M5S 2E4}
\email{[email protected]}
\subjclass[2000]{Primary 47B35}
\keywords{Commutant, Toeplitz operator, weighted Bergman space.}
\begin{abstract}
For $\alpha>-1$, let $A^2_{\alpha}$ be the corresponding weighted Bergman space of the unit ball in $\mathbb{C}^n$. For a bounded measurable function $f$, let $T_f$ be the Toeplitz operator with symbol $f$ on $A^2_{\alpha}$. This paper describes all the functions $f$ for which $T_f$ commutes with a given $T_g$, where $g(z)=z_{1}^{L_1}\cdots z_{n}^{L_n}$ for strictly positive integers $L_1,\ldots, L_n$, or $g(z)=|z_1|^{s_1}\cdots |z_n|^{s_n}h(|z|)$ for non-negative real numbers $s_1,\ldots, s_n$ and a bounded measurable function $h$ on $[0,1)$.
\end{abstract}
\thanks{}
\date{}
\maketitle
\section{Introduction}
As usual, for any $z=(z_1,\ldots,z_n)\in\mathbb{C}^n$ we denote its Euclidean norm by $|z|$, which is $\sqrt{|z_1|^2+\cdots+|z_n|^2}$. Let $\mathbb{B}_n$ denote the open unit ball consisting of all $z\in\mathbb{C}^n$ with $|z|<1$. Let $\nu$ denote the Lebesgue measure on $\mathbb{B}_n$ normalized so that $\nu(\mathbb{B}_n)=1$. Fix a real number $\alpha>-1$. The weighted Lebesgue measure $\nu_{\alpha}$ on $\mathbb{B}_n$ is defined by $\mathrm{d}\nu_{\alpha}(z)=c_{\alpha}(1-|z|^2)^{\alpha}\mathrm{d}\nu(z)$, where $c_{\alpha}$ is a normalizing constant so that $\nu_{\alpha}(\mathbb{B}_n)=1$. A direct computation shows that $c_{\alpha}=\dfrac{\Gamma(n+\alpha+1)}{\Gamma(n+1)\Gamma(\alpha+1)}$. For $1\leq p\leq\infty$, let $L^{p}_{\alpha}$ denote the space $L^{p}(\mathbb{B}_n,\mathrm{d}\nu_{\alpha})$. Note that $L^{\infty}_{\alpha}$ is the same as $L^{\infty}=L^{\infty}(\mathbb{B}_n,\mathrm{d}\nu)$.
The weighted Bergman space $A^{2}_{\alpha}$ consists of all functions in $L^{2}_{\alpha}$ which are analytic on $\mathbb{B}_n$. It is well-known that $A^{2}_{\alpha}$ is a closed subspace of $L^{2}_{\alpha}$. We denote the inner product in $L^2_{\alpha}$ by $\langle \cdot, \cdot\rangle_{\alpha}$ and the corresponding norm by $\|\cdot\|_{2,\alpha}$.
For any multi-index $m=(m_1,\ldots, m_n)\in\mathbb{N}^n$ (here $\mathbb{N}$ denotes the set of all \emph{non-negative} integers), we write $|m|=m_1+\cdots+m_n$ and $m!=m_1!\cdots m_n!$. For any $z=(z_1,\ldots, z_n)\in\mathbb{C}^{n}$, we write $z^{m}=z_1^{m_1}\cdots z_n^{m_n}$ and $\bar{z}^{m}=\bar{z}_1^{m_1}\cdots \bar{z}_n^{m_n}$. The standard orthonormal basis for $A^{2}_{\alpha}$ is $\{e_{m}: m\in\mathbb{N}^n\}$, where
\begin{equation*}
e_{m}(z) = \Big[\dfrac{\Gamma(n+|m|+\alpha+1)}{m!\ \Gamma(n+\alpha+1)}\Big]^{1/2}z^{m},\ m\in\mathbb{N}^n, z\in\mathbb{B}_n.
\end{equation*}
For a more detailed discussion of $A^{2}_{\alpha}$, see Chapter 2 in \cite{Zhu2005}.
Since $A^2_{\alpha}$ is a closed subspace of the Hilbert space $L^2_{\alpha}$, there is an orthogonal projection $P_{\alpha}$ from $L^{2}_{\alpha}$ onto $A^{2}_{\alpha}$. For any function $f\in L^2_{\alpha}$ the Toeplitz operator with symbol $f$ is denoted by $T_f$, which is densely defined on $A^2_{\alpha}$ by $T_f\varphi = P_{\alpha}(f\varphi)$ for bounded analytic functions $\varphi$ on $\mathbb{B}_n$. If $f$ is a bounded function then $T_{f}$ is a bounded operator on $A^{2}_{\alpha}$ with $\|T_f\|\leq\|f\|_{\infty}$ and $(T_{f})^{*}=T_{\bar{f}}$. However there are unbounded functions $f$ that give rise to bounded operators $T_f$. If $f$ is an analytic function then $T_f$ is the multiplication operator on $A^2_{\alpha}$ with symbol $f$. The Toeplitz operator $T_f$ in this case is called \emph{analytic}. It is clear that if both $f$ and $g$ are bounded and analytic or conjugate analytic (that is, $\bar{f}$ and $\bar{g}$ are analytic) then $T_fT_g=T_gT_f$. Also if there are constants $a,b$ not both zero such that $af+bg$ is a constant function then it is clear that $T_f$ and $T_g$ commute. In the context of Toeplitz operators on the Hardy space of the unit circle, A. Brown and P. Halmos \cite{Brown1963} showed that these are the only cases where the operators $T_f$ and $T_g$ commute. For Toeplitz operators on the Bergman space of the unit disk, the situation becomes more complicated. The above Brown-Halmos result failed. In fact, if $f,g$ are radial functions, that is, $f(z)=f(|z|)$ and $g(z)=g(|z|)$ for almost all $z$, then both $T_f$ and $T_g$ are diagonal operators with respect to the standard orthonormal basis, hence, they commute. The problem that we are interested in is: if $T_f$ and $T_g$ commute on $A^2_{\alpha}$, what is the relation between the functions $f$ and $g$? Despite the difficulty of the general problem, several results have been known for Toeplitz operators on the Bergman space of the \emph{unit disk}:
\begin{enumerate}
\item If $g=z^N$ for some $N\geq 1$ then $f$ is analytic ({\v{Z}}. {\v{C}}u{\v{c}}kovi{\'c} \cite{Cuckovic1994}). This result was later extended to the case where $g$ is an arbitrary non-constant bounded analytic function by S. Axler, {\v{Z}}. {\v{C}}u{\v{c}}kovi{\'c} and N. Rao in \cite{Axler2000}.
\item If $f$ and $g$ are bounded harmonic functions, then either both functions are analytic or both are conjugate analytic or $af+bg$ is a constant for some constants $a$ and $b$ not both zero (Axler and {\v{C}}u{\v{c}}kovi{\'c} \cite{Axler1991}).
\item If $g$ is radial then $f$ is also radial ({\v{C}}u{\v{c}}kovi{\'c} and Rao \cite{Cuckovic1998}). In the same paper, they also characterized all bounded functions $f$ such that $T_f$ commutes with $T_g$ where $g(z)=z^{m_1}\bar{z}^{m_2}$ for integers $m_1,m_2\geq 0$.
\end{enumerate}
In this paper we generalize the results in (1) and (3) to Toeplitz operators on weighted Bergman spaces of the unit ball in higher dimensions. Let $\mathfrak{T}$ denote the $C^{*}-$algebra generated by $\{T_g: g\in L^{\infty}\}$. In addition to the result in (1), {\v{C}}u{\v{c}}kovi{\'c} \cite{Cuckovic1994} showed that if $S$ is a bounded operator on the Bergman space of the unit disk such that $S$ belongs to $\mathfrak{T}$ and it commutes with $T_{z^N}$ for some integer $N\geq 1$, then $S=T_f$ for some bounded analytic function $f$. The following theorem generalizes this result.
\begin{theorem}\label{theorem-1} If $S$ is an operator in $\mathfrak{T}$ that commutes with $T_{z^{L_1}_{1}\cdots z^{L_n}_{n}}$ for some integers $L_1,\ldots, L_n\geq 1$, then there is a bounded analytic function $f$ on $\mathbb{B}_n$ so that $S=T_{f}$.
\end{theorem}
Due to the complicated setting of several variables, {\v{C}}u{\v{c}}kovi{\'c} and Rao's result in (3) no longer holds when $n\geq 2$. We will see later that if $f(z)=z_1\bar{z}_2$ then $T_f$ commutes with all $T_g$ whenever $g$ is a bounded radial function (a function $h$ on $\mathbb{B}_n$ is called a radial function if there is a function $\tilde{h}:[0,1)\rightarrow\mathbb{C}$ such that $h(z)=\tilde{h}(|z|)$ for almost all $z\in\mathbb{B}_n$). In fact, we will show that if $g$ is a non-constant bounded radial function then $T_f$ commutes with $T_g$ if and only if $f$ satisfies $f(\mathrm{e}^{\mathrm{i}\theta}z)=f(z)$ for almost all $z\in\mathbb{B}_n$ and almost all $\theta\in\mathbb{R}$.
Throughout the paper, an operator on $A^2_{\alpha}$ is said to be diagonal if it is diagonal with respect to the standard orthonormal basis of $A^2_{\alpha}$. In one dimension, all diagonal Toeplitz operators arise from radial functions. In higher dimensions, in order to get all diagonal operators we have to replace radial functions by functions that are invariant under the action of the $n$-torus on $\mathbb{B}_n$. More precisely, it was shown in \cite{Le-5} that for $f\in L^{\infty}$, the operator $T_f$ is diagonal if and only if $f(z_1,\ldots,z_n)=f(|z_1|,\ldots,|z_n|)$ for almost all $z\in\mathbb{B}_n$. Even though we are unable to describe all the functions $f\in L^2_{\alpha}$ such that $T_f$ commutes with a given non-trivial diagonal Toeplitz operator $T_g$, we have been successful in doing so when the function $g$ is of the form $g(z)=|z_1|^{2s_1}\cdots |z_n|^{2s_n}h(|z|)$, where $s_1,\ldots,s_n\geq 0$ and $h$ is a bounded function on $[0,1)$. The technique we use involves results about the zero sets of bounded analytic functions on the open unit disk. See Section \ref{section-3} for more detail.
Let $\mathcal{P}$ denote the space of all analytic polynomials in the variable $z=(z_1,\ldots,z_n)$. Then $\mathcal{P}$ is dense in $A^2_{\alpha}$. The following theorem is our second result in the paper.
\begin{theorem}\label{theorem-2} Let $g$ be a non-constant function in $\mathbb{B}_n$ such that for almost all $z\in\mathbb{B}_n$, $g(z)=|z_1|^{2s_1}\cdots |z_n|^{2s_n}h(|z|)$ with $h$ a bounded measurable function on $[0,1)$ and $s_1,\ldots, s_n\geq 0$. Then for $f\in L^2_{\alpha}$, $T_fT_g=T_gT_f$ on $\mathcal{P}$ if and only if $f(\mathrm{e}^{\mathrm{i}\theta}z)=f(z)$ for almost all $\theta\in\mathbb{R}$, almost all $z\in\mathbb{B}_n$, and for $1\leq j\leq n$ with $s_j\neq 0$, $f(z_1,\ldots,z_{j-1},|z_j|,z_{j+1},\ldots,z_n)=f(z)$ for almost all $z\in\mathbb{B}_n$.
\end{theorem}
\section{Commuting with analytic Toeplitz operators}\label{section-2}
The following result is well-known and its proof for the one dimensional case is in Proposition 7.2 in \cite{Zhu2007}. The proof for higher dimensional cases is similar. For the reader's convenience, we provide here the proof.
\begin{lemma}\label{lemma-2} Suppose $f=f_1+\bar{f}_2$, where $f_1,f_2\in A^2_{\alpha}$, such that $\|fp\|_{2,\alpha}\leq M\|p\|_{2,\alpha}$ for all analytic polynomials $p$, where $M$ is a fixed positive constant. Then $\|f\|_{\infty}\leq M$.
\end{lemma}
\begin{proof} For any $z\in\mathbb{B}_n$, let $k_{z}^{\alpha}(w)=(1-|z|^2)^{(n+\alpha+1)/2}(1-\langle w,z\rangle)^{-(n+\alpha+1)}$ for $w\in\mathbb{B}_n$. Then $k_{z}^{\alpha}$ is a normalized reproducing kernel for $A^2_{\alpha}$. This means $\|k_{z}^{\alpha}\|_{2,\alpha}=1$ and $\langle g,k_{z}^{\alpha}\rangle_{\alpha}=(1-|z|^2)^{(n+\alpha+1)/2}g(z)$ for all $g\in A^2_{\alpha}$. See Chapter 2 of \cite{Zhu2005} for more detail. Since $k_{z}^{\alpha}$ is analytic in a neighborhood of the closed unit ball, there is a sequence $\{p_{s}\}_{s=1}^{\infty}$ of analytic polynomials converging uniformly to $k^{\alpha}_{z}$ on $\bar{\mathbb{B}}_n$. It then follows that $\lim\limits_{s\rightarrow\infty}\|f p_s - f k_{z}^{\alpha}\|_{2,\alpha}= 0$. Hence
$
\langle fk_{z}^{\alpha}, k_{z}^{\alpha}\rangle_{\alpha} = \lim\limits_{s\rightarrow\infty}\langle fp_s, k_{z}^{\alpha}\rangle_{\alpha}.
$ Now for any integer $s\geq 1$, we have $|\langle fp_s, k_{z}^{\alpha}\rangle_{\alpha}|\leq \|fp_s\|_{2,\alpha}\leq M\|p_s\|_{2,\alpha}$. So we conclude that $
|\langle fk_{z}^{\alpha}, k_{z}^{\alpha}\rangle_{\alpha}|\leq \lim\limits_{s\rightarrow\infty} M\|p_s\|_{2,\alpha} = M\|k_{z}^{\alpha}\|_{2,\alpha}=M$. On the other hand,
\begin{align*}
\langle fk_{z}^{\alpha}, k_{z}^{\alpha}\rangle_{\alpha} & = \langle f_1 k_{z}^{\alpha}, k_{z}^{\alpha}\rangle_{\alpha} + \langle\bar{f}_2 k_{z}^{\alpha}, k_{z}^{\alpha}\rangle_{\alpha}\\
& = (1-|z|^2)^{(n+1+\alpha)/2}f_1(z)k_{z}^{\alpha}(z) + (1-|z|^2)^{(n+1+\alpha)/2}\bar{f}_2(z)k_{z}^{\alpha}(z)\\
& = f_1(z)+\bar{f}_2(z) = f(z).
\end{align*}
So from the above inequality, $|f(z)|\leq M$ for any $z\in\mathbb{B}_n$. This shows that $\|f\|_{\infty}\leq M$.
\end{proof}
For any multi-indexes $m,k\in\mathbb{N}^n$, there is a positive real number $d(m,k)$ such that $e_m e_k = d(m,k)e_{m+k}$. Strictly speaking, $d(m,k)$ must be written as $d_{n,\alpha}(m,k)$ because it depends also on $n$ and $\alpha$. But to simplify the notation and since $n$ and $\alpha$ are fixed throughout the paper, we drop the sub-indexes.
It is immediate that $d(m,k)=d(k,m)$ and $d(0,k)=d(m,0)=1$ for $m,k\in\mathbb{N}^n$. Also for any $m,k,l\in\mathbb{N}^n$, since $(e_{m}e_{k})e_{l}=e_{m}(e_{k}e_{l})$, we have
\begin{equation}\label{eqn-403}
d(m,k)d(m+k,l) = d(m,k+l)d(k,l).
\end{equation}
Using the explicit formulas for $e_{m}$ and $e_{k}$, we obtain
\begin{align}\label{eqn-408}
(d(m,k))^2 & = \dfrac{\Gamma(n+|m|+\alpha+1)\ \Gamma(n+|k|+\alpha+1)}{\Gamma(n+|m|+|k|+\alpha+1)\ \Gamma(n+\alpha+1)}\dfrac{(m+k)!}{m!\ k!}.
\end{align}
The following lemma characterizes analytic Toeplitz operators on $A^2_{\alpha}$ in terms of its matrix with respect to the standard orthonormal basis. Even though the matrix of an analytic Toeplitz operator is not the usual \emph{analytic Toeplitz matrix}, it becomes one after scaling each matrix entry by a factor depending on the position of the entry.
For an $n-$tuple of integers $r=(r_1,\ldots,r_n)\in\mathbb{Z}^n$ we write $r\succeq 0$ if $r_1,\ldots,r_n\geq 0$ and write $r\nsucceq 0$ if otherwise. For $m,k\in\mathbb{N}^n$ we write $m\succeq k$ (respectively, $m\nsucceq k$) if $m-k\succeq 0$ (respectively, $m-k\nsucceq 0$).
\begin{lemma}\label{lemma-3} Suppose $S$ is a linear operator (not necessarily bounded) on $A^2_{\alpha}$ whose domain contains the space $\mathcal{P}$ of all analytic polynomials. Then there is a function $f\in A^2_{\alpha}$ such that $T_f = S$ on $\mathcal{P}$ if and only if $\langle S e_{m}, e_{k}\rangle_{\alpha}=0$ whenever $k\nsucceq m$ and for any $l\in\mathbb{N}^n$, $\dfrac{1}{d(l,m)}\langle S e_m, e_{m+l}\rangle_{\alpha}$ is independent of $m\in\mathbb{N}^n$.
\end{lemma}
\begin{proof}
Suppose $f\in A^2_{\alpha}$ and it has the expansion $f=\sum\limits_{l\in\mathbb{N}^n}a_{l}e_{l}$. Then for any $m$ in $\mathbb{N}^n$, we have
\begin{equation*}
T_f e_{m} = fe_{m} = \sum\limits_{l\in\mathbb{N}^n}a_{l}e_{l}e_{m} = \sum\limits_{l\in\mathbb{N}^n} a_{l}d(l,m)e_{m+l}.
\end{equation*}
Therefore, $
\langle T_f e_{m}, e_{k}\rangle_{\alpha} = \begin{cases}
0 & \text{ if } k\nsucceq m\\
a_{l} d(l,m) & \text{ if } k=m+l.
\end{cases}$
This shows that if $S = T_f$ on $\mathcal{P}$ then $\langle S e_{m}, e_{k}\rangle_{\alpha}=0$ whenever $k\nsucceq m$ and for any $l\in\mathbb{N}^n$, $\dfrac{1}{d(l,m)}\langle Se_m, e_{m+l}\rangle_{\alpha}=a_{l}$, which is independent of $m$.
Now suppose $S$ has the above property. Let $a_{l}=\langle Se_{0}, e_{l}\rangle_{\alpha}$ for each $l$ in $\mathbb{N}^n$. Then by assumption, $a_{l}=\dfrac{1}{d(l,m)}\langle S e_m, e_{m+l}\rangle_{\alpha}$ for all $m$ in $\mathbb{N}^n$. We have
\begin{equation*}
\sum\limits_{l\in\mathbb{N}^n}|a_{l}|^2 = \sum\limits_{l\in\mathbb{N}^n}|\langle S e_{0}, e_{l}\rangle_{\alpha}|^2 = \|S e_{0}\|^2_{2,\alpha}<\infty.
\end{equation*}
So the function $f=\sum\limits_{l\in\mathbb{N}^n}a_{l}e_{l}$ is an element of $A^2_{\alpha}$ and we have $\langle T_f e_{m}, e_{k}\rangle_{\alpha}=\langle S e_{m}, e_{k}\rangle_{\alpha}$ for all $m,k$ in $\mathbb{N}^n$. Hence $T_f=S$ on $\mathcal{P}$.
\end{proof}
Suppose $l\in\mathbb{N}^n$ is a multi-index. For any $m,k\in\mathbb{N}^n$ we have
\begin{align*}
\langle T_{e_{l}}e_{m}, e_{k}\rangle_{\alpha} & = d(l,m)\langle e_{l+m}, e_{k}\rangle_{\alpha}
= \begin{cases}
0 & \text{ if } k\neq l+m,\\
d(l,m) & \text{ if } k=l+m.
\end{cases}
\end{align*}
This implies $T_{e_{l}}e_{m}=d(l,m)e_{l+m}$ and
$
T_{\bar{e}_{l}}e_{k} = \begin{cases}
0 & \text{if } k\nsucceq l,\\
d(l,k-l)e_{k-l} & \text{if } k\succeq l.
\end{cases}
$
For any linear operator $S$ on $A^2_{\alpha}$ whose domain contains the space of all analytic polynomials, we have
\begin{align*}
\langle [S,T_{e_{l}}]e_{m},e_{k}\rangle_{\alpha} & = \langle ST_{e_{l}}e_{m},e_{k}\rangle_{\alpha}-\langle Se_{m},T_{\bar{e}_{l}}e_{k}\rangle_{\alpha}\notag\\
& = d(l,m)\langle S e_{m+l},e_{k}\rangle_{\alpha} - \begin{cases}
0 & \text{if } k\nsucceq l,\\
d(l,k-l)\langle S e_{m}, e_{k-l}\rangle_{\alpha} & \text{if } k\succeq l.
\end{cases}
\end{align*}
This shows that for $m,k\in\mathbb{N}^n$,
\begin{align}
\langle [S,T_{e_{l}}]e_{m},e_{k}\rangle_{\alpha} & = d(l,m)\langle S e_{m+l}, e_{k}\rangle_{\alpha} \text{ if } k\nsucceq l,\label{eqn-409}\\
\text{and }\langle [S,T_{e_{l}}]e_{m},e_{k+l}\rangle_{\alpha} & = d(l,m)\langle S e_{m+l}, e_{k+l}\rangle_{\alpha}-d(l,k)\langle S e_{m}, e_{k}\rangle_{\alpha}.\label{eqn-410}
\end{align}
\begin{lemma}\label{lemma-4} Suppose that $S$ is an operator (not necessarily bounded) on $A^2_{\alpha}$ whose domain contains $\mathcal{P}$ and that $S$ commutes with $T_{e_{L}}$ where $L=(L_1,\ldots,L_n)\in\mathbb{N}^n$ with $L_1,\ldots,L_n\geq 1$. Suppose $l\in\mathbb{N}^n$ such that the operator $K=[S,T_{e_{l}}]$ is a compact operator on $A^2_{\alpha}$. Then for any $m,k\in\mathbb{N}^n$ we have $
\dfrac{d(l,m)}{d(l,k)}\langle S e_{m+l}, e_{k+l}\rangle_{\alpha} = \langle S e_{m}, e_{k}\rangle_{\alpha}.$
\end{lemma}
\begin{proof}
Since $S$ commutes with $T_{e_{L}}$, it commutes with $T_{e_{sL}}$ for any positive integer $s$ because $T_{e_{sL}}$ is a multiple of $(T_{e_{L}})^{s}$. Then \eqref{eqn-410} implies that for any $m,k\in\mathbb{N}^n$ and $s\in\mathbb{N}$,
\begin{equation*}
\dfrac{d(sL,m)}{d(sL,k)}\langle S e_{m+sL}, e_{k+sL}\rangle_{\alpha} = \langle S e_{m}, e_{k}\rangle_{\alpha}.
\end{equation*}
Now for any $m,k\in\mathbb{N}^n$, and $s\in\mathbb{N}$,
\begin{align*}
& \dfrac{d(l,m)}{d(l,k)}\langle S e_{m+l},e_{k+l}\rangle_{\alpha} - \langle S e_{m},e_{k}\rangle_{\alpha}\\
& = \dfrac{d(l,m)}{d(l,k)}\dfrac{d(sL,m+l)}{d(sL,k+l)}\langle S e_{m+l+sL}, e_{k+l+sL}\rangle_{\alpha}-\dfrac{d(sL,m)}{d(sL,k)}\langle S e_{m+sL},e_{k+sL}\rangle_{\alpha}\\
& = \dfrac{d(sL,m)d(sL+m,l)}{d(sL,k)d(sL+k,l)}\langle S e_{m+l+sL}, e_{k+l+sL}\rangle_{\alpha} -\dfrac{d(sL,m)}{d(sL,k)}\langle S e_{m+sL},e_{k+sL}\rangle_{\alpha}\\
& = \dfrac{d(sL,m)}{d(sL,k)d(sL+k,l)}\Big\{d(sL+m,l)\langle S e_{m+l+sL}, e_{k+l+sL}\rangle_{\alpha}\\
&\phantom{\dfrac{d(sL,m)}{d(sL,k)d(sL+k,l)}}\quad\quad - d(sL+k,l)\langle S e_{m+sL},e_{k+sL}\rangle_{\alpha}\Big\}\\
& = \dfrac{d(sL,m)}{d(sL,k)d(sL+k,l)}\langle [S,T_{e_{l}}]e_{sL+m}, e_{sL+k+l}\rangle_{\alpha}\quad\quad\text{(by \eqref{eqn-410})}\\
& = \dfrac{d(sL,m)}{d(sL,k)d(sL+k,l)}\langle K e_{sL+m}, e_{sL+k+l}\rangle_{\alpha}.
\end{align*}
Now using \eqref{eqn-408} we have
\begin{align*}
&\Big[\dfrac{d(sL,m)}{d(sL,k)d(sL+k,l)}\Big]^2\\
& = \dfrac{\Gamma(n+s|L|+\alpha+1)\ \Gamma(n+|m|+\alpha+1)}{\Gamma(n+s|L|+|m|+\alpha+1)\ \Gamma(n+\alpha+1)}\dfrac{(sL+m)!}{(sL)!\ m!}\\
&\quad\times \dfrac{\Gamma(n+s|L|+|k|+\alpha+1)\ \Gamma(n+\alpha+1)}{\Gamma(n+s|L|+\alpha+1)\ \Gamma(n+|k|+\alpha+1)}\dfrac{(sL)!\ k!}{(sL+k)!}\\
&\quad\times \dfrac{\Gamma(n+s|L|+|k|+|l|+\alpha+1)\ \Gamma(n+\alpha+1)}{\Gamma(n+s|L|+|k|+\alpha+1)\ \Gamma(n+|l|+\alpha+1)}\dfrac{(sL+k)!\ l!}{(sL+k+l)!}\\
& = C(n,\alpha,m,k,l,L)\dfrac{\Gamma(n+s|L|+|l|+|k|+\alpha+1)}{\Gamma(n+s|L|+|m|+\alpha+1)}\dfrac{(sL+m)!}{(sL+k+l)!}\\
& \approx C(n,\alpha,m,k,l,L)(s|L|)^{|l|+|k|-|m|}\prod_{j=1}^{n}(sL_{j})^{m_j-k_j-l_j}\\
&\quad\text{(by Stirling's formula for the Gamma function)}\\
& = \tilde{C}(n,\alpha,m,k,l,L).
\end{align*}
This shows that $\dfrac{d(sL,m)}{d(sL,k)d(sL+k,l)}$ is bounded when $s\rightarrow\infty$. On the other hand, $\lim\limits_{s\rightarrow\infty}\langle K e_{sL+m}, e_{sL+k+l}\rangle_{\alpha}=0$ because $K$ is compact. So we conclude that $
\dfrac{d(l,m)}{d(l,k)}\langle S e_{m+l},e_{k+l}\rangle_{\alpha} = \langle S e_{m},e_{k}\rangle_{\alpha}$ for all $m,k\in\mathbb{N}^n$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{theorem-1}] Put $L=(L_1,\ldots,L_n)$. Then by assumption, $S$ commutes with $T_{e_{L}}$. Since $S$ belongs to $\mathfrak{T}$, it is well-known that $[S,T_{e_{l}}]$ is compact for all $l\in\mathbb{N}^n$ (see \cite{Coburn1973} for more detail). Now Lemma \ref{lemma-4} shows that
\begin{equation*}
\dfrac{d(l,m)}{d(l,k)}\langle S e_{m+l}, e_{k+l}\rangle_{\alpha} = \langle S e_{m}, e_{k}\rangle_{\alpha}\quad\text{ for any } m,k,l\in\mathbb{N}^n.
\end{equation*}
Putting $m=0$, we see that for each $k\in\mathbb{N}^n$, $\dfrac{1}{d(l,k)}\langle S e_{l}, e_{k+l}\rangle_{\alpha} = \langle S e_{0}, e_{k}\rangle_{\alpha}$, which is independent of $l\in\mathbb{N}^n$.
Now suppose $m,k\in\mathbb{N}^n$ such that $k\nsucceq m$. Then there is an integer $1\leq j\leq n$ so that $k_j<m_j$. Consider first the case $k_j=0$. Put $l=(L_1,\ldots, L_{j-1}, L_j-1, L_{j+1},\ldots, L_{n})$. Then we have $k+l\nsucceq L$ but $m+l\succeq L$. Hence,
\begin{align*}
\langle S e_{m}, e_{k}\rangle_{\alpha} & = \dfrac{d(l,m)}{d(l,k)}\langle S e_{m+l}, e_{k+l}\rangle_{\alpha}\\
& =\dfrac{d(l,m)}{d(l,k)d(L,m+l-L)}\langle S T_{e_{L}}e_{m+l-L}, e_{k+l}\rangle_{\alpha}\\
& =\dfrac{d(l,m)}{d(l,k)d(L,m+l-L)}\langle T_{e_{L}}Se_{m+l-L}, e_{k+l}\rangle_{\alpha}\\
& = \dfrac{d(l,m)}{d(l,k)d(L,m+l-L)}\langle Se_{m+l-L}, T_{\bar{e}_{L}}e_{k+l}\rangle_{\alpha}=0
\end{align*}
since $T_{\bar{e}_{L}}e_{k+l}=0$.
Now consider the case $k_j>0$. Let $\tilde{k}=k-k_j\delta_j$ and $\tilde{m}=m-k_j\delta_j$, where $\delta_{j}=(\delta_{1j},\ldots,\delta_{nj})$. Then $\tilde{k},\tilde{m}$ are in $\mathbb{N}^n$ and $0=\tilde{k}_j<\tilde{m}_j$. We have
\begin{align*}
\langle S e_{m}, e_{k}\rangle_{\alpha} & = \langle S e_{\tilde{m}+k_j\delta_j}, e_{\tilde{k}+k_j\delta_j}\rangle_{\alpha}\\
& = \dfrac{d(k_j\delta_j,\tilde{k})}{d(k_j\delta_{j},\tilde{m})}\langle S e_{\tilde{m}}, e_{\tilde{k}}\rangle_{\alpha}\\
& = 0\text{ (by the case considered above).}
\end{align*}
By Lemma \ref{lemma-3} there is a function $f\in A^2_{\alpha}$ so that $S=T_f$ on the space of analytic polynomials. Since $S$ is a bounded operator, Lemma \ref{lemma-2} implies that $f$ is bounded and $\|f\|_{\infty}\leq\|S\|$. Consequently, $S=T_f$ on $A^2_{\alpha}$ because they are bounded operators that agree on a dense subset of $A^2_{\alpha}$.
\end{proof}
We now discuss the necessity of the condition $L_1,\ldots, L_n\geq 1$ in Theorem \ref{theorem-1}. When $n=1$ this condition is necessary because $\mathfrak{T}$ obviously contains non-analytic Toeplitz operators that commute with $T_{e_{0}}\equiv I$. For the case $n\geq 2$, we will show that there is an operator $S$ in $\mathfrak{T}$ such that $S$ commutes with $T_{z_1},\ldots, T_{z_{n-1}}$ but it does not commute with $T_{z_n}$. Recall that for $1\leq j\leq n$, $\delta_{j}$ denotes $(\delta_{1j},\ldots,\delta_{nj})$.
\begin{proposition}\label{prop-4} Suppose $n\geq 2$. For any $\varphi\in A^{2}_{\alpha}$, define
\begin{equation}\label{eqn-411}
S\varphi = S\Big(\sum\limits_{m\in\mathbb{N}^n}\langle\varphi, e_{m}\rangle_{\alpha}e_{m}\Big) = \sum_{\substack{m\in\mathbb{N}^n\\
m_{n}=0
}}d(m,\delta_n)\langle\varphi, e_{m}\rangle_{\alpha} e_{m+\delta_n}.
\end{equation}
Then the following statements hold true:
\begin{enumerate}
\item $S$ is a compact operator on $A^2_{\alpha}$ and hence it belongs to $\mathfrak{T}$,
\item $S$ commutes with $T_{z_{1}},\ldots, T_{z_{n-1}}$,
\item $S$ does not commute with $T_{z_{n}}$ and hence $S$ is not an analytic Toeplitz operator.
\end{enumerate}
\end{proposition}
\begin{proof}
From the definition \eqref{eqn-411} of $S$, we see that $Se_{m}=d(m,\delta_n)e_{m+\delta_n}$ if $m_n=0$ and $Se_{m}=0$ if $m_n>0$. For any $m\in\mathbb{N}^n$ with $m_n=0$, formula \eqref{eqn-408} gives
\begin{align*}
(d(m,\delta_n))^2 & = \dfrac{\Gamma(n+|m|+\alpha+1)\ \Gamma(n+|\delta_n|+\alpha+1)}{\Gamma(n+|m|+|\delta_n|+\alpha+1)\ \Gamma(n+\alpha+1)}\dfrac{(m+\delta_n)!}{m!\delta_n!}\\
& = \dfrac{\Gamma(n+|m|+\alpha+1)\ \Gamma(n+\alpha+2)}{\Gamma(n+|m|+\alpha+2)\ \Gamma(n+\alpha+1)}\dfrac{(m+\delta_n)!}{m!}\\
& = \dfrac{(n+\alpha+1)(m_n+1)}{n+|m|+\alpha+1}\\
& = \dfrac{n+\alpha+1}{n+|m|+\alpha+1}\text{ (since $m_n=0$)}.
\end{align*}
Thus, $\lim\limits_{\substack{|m|\rightarrow\infty\\ m_n=0}}d(m,\delta_n)=0$. This shows that the operator $S$ is not only bounded but also compact on $A^2_{\alpha}$. On the other hand, it is well-known that $\mathfrak{T}$ contains the ideal of compact operators on $A^2_{\alpha}$ (see \cite{Coburn1973}). Hence $S$ belongs to $\mathfrak{T}$.
Now let $j$ be an integer in $\{1,\ldots,n-1\}$. For $m\in\mathbb{N}^n$ we have
\begin{align*}
ST_{e_{\delta_j}} e_{m} & = S\big(d(m,\delta_j)e_{m+\delta_j}\big)\\
& = \begin{cases}
0 & \text{if } m_n>0\\
d(m,\delta_j)d(m+\delta_j,\delta_n)e_{m+\delta_j+\delta_n} & \text{if } m_n=0,
\end{cases}\\
T_{e_{\delta_j}}S e_{m} & = \begin{cases}
0 & \text{if } m_n>0\\
T_{e_{\delta_j}}\big(d(m,\delta_n)e_{m+\delta_n}\big) & \text{if } m_n=0
\end{cases}\\
& = \begin{cases}
0 & \text{if } m_n>0\\
d(m,\delta_n)d(\delta_j,m+\delta_n)e_{m+\delta_n+\delta_j} & \text{if } m_n=0
\end{cases}\\
& = \begin{cases}
0 & \text{if } m_n>0\\
d(m,\delta_j)d(m+\delta_j,\delta_n)e_{m+\delta_n+\delta_j} & \text{if } m_n=0.
\end{cases}
\end{align*}
Thus $ST_{e_{\delta_j}}e_{m}=T_{e_{\delta_j}}Se_{m}$ for all $m\in\mathbb{N}^n$. This shows that $S$ commutes with $T_{e_{\delta_j}}$ (hence $T_{z_j}$) for $1\leq j\leq n-1$. Now for $m\in\mathbb{N}^n$ with $m_n=0$, we have
\begin{align*}
ST_{e_{\delta_n}} e_{m} & = S\big(d(\delta_n,m)e_{m+\delta_n}\big) = 0,\\
T_{e_{\delta_n}}S e_{m} & = T_{e_{\delta_{n}}}\big(d(m,\delta_n)e_{m+\delta_n}\big) = d(\delta_n,m+\delta_n)d(m,\delta_n)e_{m+2\delta_{n}}\neq 0.
\end{align*}
This shows that $ST_{e_{\delta_n}}\neq T_{e_{\delta_n}}S$, so $S$ does not commute with $T_{e_{\delta_n}}$. Since $T_{z_n}$ is a nonzero multiple of $T_{e_{\delta_n}}$, $S$ does not commute with $T_{z_n}$ either.
\end{proof}
\section{Commuting with diagonal Toeplitz operators}\label{section-3}
In the first part of this section we use results from complex analysis of one variable, more precisely, results about zeros of bounded analytic functions on the open unit disk, to obtain some function-theoretic results which are crucial for the proof of Theorem \ref{theorem-2}. The proof of Theorem \ref{theorem-2} itself will be presented at the end of the section.
For the rest of the paper, $\mathbb{N}_{*}$ denotes the set of all positive integers. For any $1\leq j\leq n$, let $\sigma_{j}:\mathbb{N}_{*}\times\mathbb{N}_{*}^{n-1}\longrightarrow\mathbb{N}_{*}^{n}$ be the map defined by the formula $\sigma_{j}(s,(r_1,\ldots,r_{n-1}))=(r_1,\ldots,r_{j-1},s,r_{j},\ldots,r_{n-1})$ for all $s\in\mathbb{N}_{*}$ and $(r_1,\ldots,r_{n-1})\in\mathbb{N}_{*}^{n-1}$. If $M$ is a subset of $\mathbb{N}_{*}^n$ and $1\leq j\leq n$, we define
\begin{equation*}\label{eqn-2}
\widetilde{M}_{j} = \Big\{\tilde{r}=(r_1,\ldots,r_{n-1})\in\mathbb{N}_{*}^{n-1}: \sum\limits_{\substack{s\in\mathbb{N}_{*}\\
\sigma_{j}(s,\tilde{r})\in M}}
\dfrac{1}{s+1}=\infty\Big\}.
\end{equation*}
\begin{definition}
We say that $M$ has property (P) if one of the following statements holds.
\begin{enumerate}
\item $M=\emptyset$, or
\item $M\neq\emptyset$, $n=1$ and $\sum\limits_{s\in M}\dfrac{1}{s+1}<\infty$, or
\item $M\neq\emptyset$, $n\geq 2$ and for any $1\leq j\leq n$, the set $\widetilde{M}_{j}$ has property (P) as a subset of $\mathbb{N}_{*}^{n-1}$.
\end{enumerate}
\end{definition}
The following observations are then immediate.
\begin{enumerate}
\item If $M\subset\mathbb{N}_{*}$ and $M$ does not have property (P) then $\sum_{s\in M}\frac{1}{s+1}=\infty$. If $M\subset\mathbb{N}_{*}^{n}$ with $n\geq 2$ and $M$ does not have property (P) then $\widetilde{M}_{j}$ does not have property (P) as a subset of $\mathbb{N}_{*}^{n-1}$ for some $1\leq j\leq n$.
\item If $M_1$ and $M_2$ are subsets of $\mathbb{N}_{*}^{n}$ that both have property (P) then $M_1\cup M_2$ also has property (P).
\item If $M\subset\mathbb{N}_{*}^n$ has property (P) and $l\in\mathbb{Z}^{n}$ then $(M+l)\cap\mathbb{N}_{*}^n$ also has property (P). Here $M+l=\{m+l: m\in M\}$.
\item If $M\subset\mathbb{N}_{*}^n$ has property (P) then $\mathbb{N}_{*}\times M$ also has property (P) as a subset of $\mathbb{N}_{*}^{n+1}$. This can be shown by induction on $n$.
\item The set $\mathbb{N}_{*}^n$ does not have property (P) for all $n\geq 1$. This together with (2) shows that if $M\subset\mathbb{N}_{*}^n$ has property (P) then $\mathbb{N}_{*}^n\backslash M$ does not have property (P).
\end{enumerate}
\begin{proposition}\label{prop-1} Let $\mathbb{K}$ denote the right half of the complex plane. Let $F:\mathbb{K}^{n}\rightarrow\mathbb{C}$ be an analytic function. Suppose there exists a polynomial $p$ such that $|F(z)|\leq p(|z|)$ for all $z\in\mathbb{K}^n$. Put $Z(F)=\{r\in\mathbb{N}_{*}^{n}: F(r)=0\}$. If $Z(F)$ does not have property (P), then $F$ is identically zero in $\mathbb{K}^n$.
\end{proposition}
\begin{proof}
Suppose $F$ is a function that satisfies the hypothesis of the proposition and that $Z(F)$ does not have property (P). We will show that $F(z)=0$ for all $z\in\mathbb{K}^n$ by induction on $n$.
Consider the case $n=1$. Write $p(|z|)=a_{0}+\cdots + a_{d}|z|^d$ for some positive integer $d$. For $z\in\mathbb{K}$, since $\max\{|z|,1\}\leq |z+1|$, we have $p(|z|)\leq (|a_0|+\cdots+|a_d|)|z+1|^d$. Let $G(z)=F(z)/(z+1)^{d}$ for $z\in\mathbb{K}$. Then $G$ is a bounded analytic function on $\mathbb{K}$ and $Z(G)=Z(F)$. Now define
\begin{equation*} H(z) = G\Big(\dfrac{1+z}{1-z}\Big)\quad\quad (|z|<1).\end{equation*}
Then $H$ is a bounded analytic function on the unit disk. We have $H(\theta)=0$ for all $\theta=\frac{r-1}{r+1}$ with $r\in Z(G)$. Since
\begin{equation*}
\sum\limits_{H(\theta)=0}(1-|\theta|)\geq\sum\limits_{r\in Z(G)}\Big(1-\big|\dfrac{r-1}{r+1}\big|\Big) = \sum\limits_{r\in Z(G)}\dfrac{2}{r+1} = \sum\limits_{r\in Z(F)}\dfrac{2}{r+1} =\infty,
\end{equation*}
The corollary to Theorem 15.23 in \cite{Rudin1987} shows that $H$ is identically zero on the unit disk. Thus $G$ is identically zero in $\mathbb{K}$, which implies that $F$ is identically zero in $\mathbb{K}$.
Now suppose that the conclusion of the proposition holds whenever $n\leq N$ for some integer $N\geq 1$. Consider the case $n=N+1$. Since $Z(F)$ does not have property (P), $\widetilde{Z(F)}_{j}$ does not have property (P) for some $1\leq j\leq N+1$. Without loss of generality, we may assume that $j=N+1$. For any $\tilde{r}$ in $\widetilde{Z(F)}_{N+1}$, put $M_{\tilde{r}}=\{s\in\mathbb{N}_{*}:(\tilde{r},s)\in Z(F)\}$. Then $\sum\limits_{s\in M_{\tilde{r}}}\frac{1}{s+1}=\infty$. Put $F_{\tilde{r}}(\zeta)=F(\tilde{r},\zeta)$ for $\zeta\in\mathbb{K}$. Then $F_{\tilde{r}}$ is analytic in $\mathbb{K}$ with $Z(F_{\tilde{r}})=M_{\tilde{r}}$ and $|F_{\tilde{r}}(\zeta)|\leq p(|(\tilde{r},\zeta)|)$ for all $\zeta\in\mathbb{K}$. Since $M_{\tilde{r}}$ does not have property (P), the proposition in the case $n=1$ implies that $F_{\tilde{r}}(\zeta)=0$ for all $\zeta\in\mathbb{K}$. Hence we have $F(\tilde{r},\zeta)=0$ for all $\zeta\in\mathbb{K}$ and all $\tilde{r}\in\widetilde{Z(F)}_{N+1}$. Since $\widetilde{Z(F)}_{N+1}$ does not have property (P), the induction hypothesis shows that $F(\tilde{z},\zeta)=0$ for all $\zeta\in\mathbb{K}$ and all $\tilde{z}\in\mathbb{K}^{N}$. Thus $F$ is identically zero on $\mathbb{K}^{N+1}$.
\end{proof}
\begin{lemma}\label{lemma-1} For any function $f\in L^{1}(\mathbb{B}_n,\mathrm{d}\nu)$ and any $l\in\mathbb{Z}^n$, put
\begin{equation*}
Z(f,l) = \{m\in\mathbb{N}^n: m+l\succeq 0\text{ and }\int\limits_{\mathbb{B}_n}f(z)z^{m+l}\bar{z}^{m}\mathrm{d}\nu=0\}.
\end{equation*}
If $Z(f,l)$ does not have property (P) then it is the set of all $m\in\mathbb{N}^n$ with $m+l\succeq 0$.
\end{lemma}
\begin{proof}
Suppose $l=(l_1,\ldots,l_n)$ where $l_j\in\mathbb{Z}$ for $j=1,\ldots,n$. Put $l^{*}=(|l_1|,\ldots,|l_n|)$, $l^{+}=\frac{1}{2}(l^{*}+l)$ and $l^{-}=\frac{1}{2}(l^{*}-l)$. Then $l^{+}, l^{-}\succeq 0$ and $l=l^{+}-l^{-}$. Also, for any $m\in\mathbb{N}^n$, we have $m+l\succeq 0$ if and only if $m-l^{-}\succeq 0$. For $m\in Z(f,l)$, put $k=m-l^{-}$, then $k\succeq 0$ and
\begin{align}\label{eqn-1}
0 = \int\limits_{\mathbb{B}_n}f(z)z^{k+l^{+}}\bar{z}^{k+l^{-}}\mathrm{d}\nu & = \int\limits_{\mathbb{B}_n}f(z)z^{l^{+}}\bar{z}^{l^{-}}|z_1|^{2k_1}\cdots|z_n|^{2k_n}\mathrm{d}\nu\\
& = \int\limits_{\substack{\mathbb{B}_n\\ |z_1|,\ldots,|z_n|>0}}f(z)z^{l^{+}}\bar{z}^{l^{-}}|z_1|^{2k_1}\cdots|z_n|^{2k_n}\mathrm{d}\nu.
\end{align}
For any $\zeta=(\zeta_1,\ldots,\zeta_n)\in\bar{\mathbb{K}}^n$, define
\begin{equation*}
F(\zeta) = \int\limits_{\substack{\mathbb{B}_n\\ |z_1|,\ldots,|z_n|>0}}f(z)z^{l^{+}}\bar{z}^{l^{-}}|z_1|^{2\zeta_1}\cdots|z_n|^{2\zeta_n}\mathrm{d}\nu.
\end{equation*}
Here for a complex number $w$ and a real number $t>0$, $t^{w}=\exp(w\log t)$, where $\log$ is the principal branch of the logarithmic function. Since $|t^{w}|\leq 1$ for all $0<t<1$ and $w\in\mathbb{C}$ with $\Re(w)\geq 0$, the function $F$ is well-defined, bounded, and in fact continuous on $\bar{\mathbb{K}}^n$. Now an application of Morera's Theorem shows that $F$ is analytic on $\mathbb{K}^n$.
Next, \eqref{eqn-1} shows that $F(k)=0$ for all $k\in Z(f,l)-l^{-}$. Since $Z(f,l)$ does not have property (P), $Z(F)=\{r\in\mathbb{N}_{*}^n: F(r)=0\}$ does not have property (P) either. Proposition \ref{prop-1} and the continuity of $F$ on $\bar{\mathbb{K}}^n$ now imply that $F(\zeta)=0$ for all $\zeta\in\bar{\mathbb{K}}^n$. In particular, \eqref{eqn-1} holds for all $k\in\mathbb{N}^n$. The conclusion of the lemma then follows.
\end{proof}
\begin{corollary}\label{cor-1} Suppose $f\in L^{1}(\mathbb{B}_n,\mathrm{d}\nu)$ such that for all $l\in\mathbb{Z}^n$ the set $Z(f,l)$ (as in Lemma \ref{lemma-1}) does not have property (P). Then $f(z)=0$ for almost all $z\in\mathbb{B}_n$.
\end{corollary}
\begin{proof}
Lemma \ref{lemma-1} shows that $Z(f,l)=\mathbb{N}^n\cap(\mathbb{N}^n-l)$ for all $l\in\mathbb{Z}^n$. This implies that $\displaystyle\int\limits_{\mathbb{B}_n}f(z)z^{m}\bar{z}^k\mathrm{d}\nu=0$ for all $m,k\in\mathbb{N}^n$. Since the span of $\{z^m\bar{z}^k: m,k\in\mathbb{N}^n\}$ is dense in $C(\bar{\mathbb{B}}_n)$ we conclude that $f(z)=0$ for almost all $z\in\mathbb{B}_n$.
\end{proof}
\begin{corollary}\label{cor-2} Let $\gamma=(\gamma_1,\ldots,\gamma_n)$ be an $n$-tuple of integers and let $f$ be in $L^{1}(\mathbb{B}_n,\mathrm{d}\nu)$. Then the following statements hold true.
\begin{enumerate}
\item If for almost all $z\in\mathbb{B}_n$, $f(\mathrm{e}^{\mathrm{i}\gamma_1\theta}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n\theta}z_n)=f(z)$ for almost all $\theta\in\mathbb{R}$, then whenever $l=(l_1,\ldots,l_n)\in\mathbb{Z}^n$ with $\gamma_1 l_1+\cdots+\gamma_n l_n\neq 0$, we have $\displaystyle\int\limits_{\mathbb{B}_n}f(z)z^{m+l}\bar{z}^{m}\mathrm{d}\nu(z)=0$ for all $m\in\mathbb{N}^n$ with $m+l\succeq 0$.
\item If the set $Z(f,l)=\displaystyle\{m\in\mathbb{N}^n: m+l\succeq 0\text{ and }\int\limits_{\mathbb{B}_n}f(z)z^{m+l}\bar{z}^{m}\mathrm{d}\nu=0\}$ does not have property (P) whenever $l=(l_1,\ldots,l_n)\in\mathbb{Z}^n$ with $\gamma_1 l_1+\cdots+\gamma_n l_n\neq 0$ then for almost all $z\in\mathbb{B}_n$, for almost all $\theta\in\mathbb{R}$, we have $f(\mathrm{e}^{\mathrm{i}\gamma_1\theta}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n\theta}z_n)=f(z)$.
\end{enumerate}
\end{corollary}
\begin{proof}
Define $\displaystyle
g(z) = \dfrac{1}{2\pi}\int\limits_{0}^{2\pi}f(\mathrm{e}^{\mathrm{i}\gamma_1 t}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n t}z_n)\mathrm{d}t,
$
for $z\in\mathbb{B}_n$ such that the integral on the right hand side is defined. Since $f\in L^{1}(\mathbb{B}_n,\mathrm{d}\nu)$, $g(z)$ is defined for almost all $z\in\mathbb{B}_n$ and for such $z$, $g(\mathrm{e}^{\mathrm{i}\gamma_1\theta}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n\theta}z_n)=g(z)$ for all $\theta\in\mathbb{R}$. Now for $l\in\mathbb{Z}^n$ and $m\in\mathbb{N}^n$ with $m+l\succeq 0$,
\begin{align*}
&\int\limits_{\mathbb{B}_n}g(z)z^{m+l}\bar{z}^m\mathrm{d}\nu(z)\\
&\quad\quad\quad = \int\limits_{\mathbb{B}_n}\big\{\dfrac{1}{2\pi}\int\limits_{0}^{2\pi}f(\mathrm{e}^{\mathrm{i}\gamma_1 t}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n t}z_n)\mathrm{d}t\big\}z^{m+l}\bar{z}^m\mathrm{d}\nu(z)\\
&\quad\quad\quad = \dfrac{1}{2\pi}\int\limits_{0}^{2\pi}\big\{\int\limits_{\mathbb{B}_n}f(\mathrm{e}^{\mathrm{i}\gamma_1 t}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n t}z_n)z^{m+l}\bar{z}^m\mathrm{d}\nu\big\}\mathrm{d}t\\
&\quad\quad\quad = \dfrac{1}{2\pi}\int\limits_{0}^{2\pi}\big\{\int\limits_{\mathbb{B}_n}f(z_1,\ldots,z_n)z^{m+l}\bar{z}^m\mathrm{d}\nu\big\}\mathrm{e}^{-\mathrm{i}(\gamma_1 l_1+\cdots+\gamma_n l_n)t}\mathrm{d}t\\
&\quad\quad\text{(by the invariance of the measure $\nu$ under the action of the $n-$torus)}\\
&\quad\quad\quad = \big(\dfrac{1}{2\pi}\int\limits_{0}^{2\pi}\mathrm{e}^{-\mathrm{i}(\gamma_1 l_1+\cdots+\gamma_n l_n)t}\mathrm{d}t\big)\big(\int\limits_{\mathbb{B}_n}f(z_1,\ldots,z_n)z^{m+l}\bar{z}^m\mathrm{d}\nu\big)\\
&\quad\quad\quad = \begin{cases}
0 & \text{if } \gamma_1 l_1 + \cdots + \gamma_n l_n \neq 0\\
\displaystyle\int\limits_{\mathbb{B}_n}f(z_1,\ldots,z_n)z^{m+l}\bar{z}^m\mathrm{d}\nu & \text{if } \gamma_1 l_1+\cdots+\gamma_n l_n = 0.
\end{cases}
\end{align*}
If for almost all $z\in\mathbb{B}_n$, for almost all $\theta\in\mathbb{R}$, $f(\mathrm{e}^{\mathrm{i}\gamma_1\theta}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n\theta}z_n)=f(z)$ then $f(z)=g(z)$ for almost all $z\in\mathbb{B}_n$. The above computations then show that $\displaystyle\int\limits_{\mathbb{B}_n}f(z)z^{m+l}\bar{z}^{m}\mathrm{d}\nu(z)=0$ for all $m\in\mathbb{N}^n$ with $m+l\succeq 0$, whenever $\gamma_1 l_1 + \cdots + \gamma_n l_n \neq 0$.
Now suppose $Z(f,l)$ does not have property (P) whenever $\gamma_1 l_1 + \cdots + \gamma_n l_n \neq 0$. Then from the above computations, for all $l\in\mathbb{Z}^n$, the set of all $m\in\mathbb{N}^n$ with $m+l\succeq 0$ and $\displaystyle\int\limits_{\mathbb{B}_n}(f(z)-g(z))z^{m+l}\bar{z}^m\mathrm{d}\nu(z)=0$ does not have property (P). Corollary \ref{cor-1} now shows that $f(z)=g(z)$ for almost all $z\in\mathbb{B}_n$. Hence for almost all $z\in\mathbb{B}_n$, we have $f(\mathrm{e}^{\mathrm{i}\gamma_1\theta}z_1,\ldots,\mathrm{e}^{\mathrm{i}\gamma_n\theta}z_n)=f(z)$ for almost all $\theta\in\mathbb{R}$.
\end{proof}
For $\zeta=(\zeta_1,\ldots,\zeta_n)\in\mathbb{C}^n$ we write $\Sigma\zeta$ for $\zeta_1+\cdots+\zeta_n$. If $m=(m_1,\ldots,m_n)\in\mathbb{N}^n$ is a multi-index then we use the more common notation $|m|$ for $m_1+\cdots+m_n$ instead of $\Sigma m$.
For any bounded measurable function $g$ on $\mathbb{B}_n$, any $m\in\mathbb{N}^n$, and $\alpha>-1$, define
\begin{equation*}
\omega_{\alpha}(g,m) = \langle T_g e_{m}, e_{m}\rangle_{\alpha} = \int\limits_{\mathbb{B}_n}g(z)e_{m}(z)\bar{e}_{m}(z)\mathrm{d}\nu_{\alpha}(z).
\end{equation*}
The following theorem characterizes all $l\in\mathbb{Z}^n$ such that the set $\{m\in\mathbb{N}^n: m+l\succeq 0\text{ and } \omega_{\alpha}(g,m+l)=\omega_{\alpha}(g,m)\}$ does not have property (P), when $g$ has a special form.
\begin{proposition}\label{prop-2}
Suppose $g(z)=|z_1|^{2s_1}\cdots|z_n|^{2s_n}h(|z|)$ for $z\in\mathbb{B}_n$, where $s_1,\ldots, s_n\geq 0$ and $h:[0,1)\rightarrow\mathbb{C}$ is a bounded measurable function. Assume that $g$ is not a constant function on $\mathbb{B}_n$. Then for $l=(l_1,\ldots,l_n)\in\mathbb{Z}^n$ with $\Sigma l=0$ and $s_1l_1=\cdots=s_nl_n=0$, we have $\omega_{\alpha}(g,m+l)=\omega_{\alpha}(g,m)$ for all $m\in\mathbb{N}^n$ with $m+l\succeq 0$. Conversely, if $l=(l_1,\ldots,l_n)\in\mathbb{Z}^n$ such that the set $\{m\in\mathbb{N}^n: m+l\succeq 0 \text{ and } \omega_{\alpha}(g,m+l)=\omega_{\alpha}(g,m)\}$ does not have property (P) then $\Sigma l=0$ and $s_1 l_1=\cdots=s_nl_n=0$.
\end{proposition}
\begin{proof}
We first notice that when $n=1$, if $g(z_1)=|z_1|^{2s_1}h(|z_1|)$ then we may rewrite $g(z_1)=\tilde{h}(|z_1|)$ where $\tilde{h}(t)=t^{2s_1}h(t)$ for $0\leq t<1$. For this reason we always assume that $s_1=0$ if $n=1$.
We next recall the following formula. For any $\lambda=(\lambda_1,\ldots,\lambda_n)\in\mathbb{C}^n$ with $\Re(\lambda_1),\ldots,\Re(\lambda_n)>-1$, we have
\begin{equation}\label{eqn-3}
\int\limits_{\mathbb{S}_n}|\zeta_1|^{2\lambda_1}\cdots|\zeta_n|^{2\lambda_n}\mathrm{d}\sigma(\zeta) = \dfrac{\Gamma(n)\Gamma(\lambda_1+1)\cdots\Gamma(\lambda_n+1)}{\Gamma(n+\Sigma\lambda)}.
\end{equation}
The case $\lambda\in\mathbb{N}^n$ is in Lemma 1.11 in \cite{Zhu2005} and in fact the same argument works also for the general case.
Now for $\lambda=(\lambda_1,\ldots,\lambda_n)\in\mathbb{C}^n$ with $\Re(\lambda_1),\ldots,\Re(\lambda_n)>-1$, we have
\begin{align}
& \int\limits_{\mathbb{B}_n}|z_1|^{2\lambda_1}\cdots|z_n|^{2\lambda_n}h(|z|)\mathrm{d}\nu_{\alpha}(z)\notag\\
& =2nc_{\alpha}\int\limits_{0}^{1}r^{2n-1}\Big\{\int\limits_{\mathbb{S}_n}|r\zeta_1|^{2\lambda_1}\cdots|r\zeta_n|^{2\lambda_n}\mathrm{d}\sigma(\zeta)\Big\}h(r)(1-r^2)^{\alpha}\mathrm{d}r\notag\\
& = 2nc_{\alpha}\dfrac{\Gamma(n)\Gamma(\lambda_1+1)\cdots\Gamma(\lambda_n+1)}{\Gamma(n+\Sigma\lambda)}\int\limits_{0}^{1}r^{2n+2\Sigma\lambda-1}h(r)(1-r^2)^{\alpha}\mathrm{d}r\notag\\
& = c_{\alpha}\dfrac{\Gamma(n+1)\Gamma(\lambda_1+1)\cdots\Gamma(\lambda_n+1)}{\Gamma(n+\Sigma\lambda)}\int\limits_{0}^{1}r^{n+\Sigma\lambda-1}h(r^{1/2})(1-r)^{\alpha}\mathrm{d}r\notag\\
& = \dfrac{\Gamma(n+\alpha+1)}{\Gamma(\alpha+1)}\dfrac{\Gamma(\lambda_1+1)\cdots\Gamma(\lambda_n+1)}{\Gamma(n+\Sigma\lambda)}\int\limits_{0}^{1}r^{n+\Sigma\lambda-1}h(r^{1/2})(1-r)^{\alpha}\mathrm{d}r,\label{eqn-500}
\end{align}
since $c_{\alpha}=\dfrac{\Gamma(n+\alpha+1)}{\Gamma(n+1)\Gamma(\alpha+1)}$.
Put $s=(s_1,\ldots,s_n)$. For any $w\in\mathbb{C}$ with $\Re(w)\geq 1$, define
\begin{equation}\label{eqn-501}
H(w) = \dfrac{\Gamma(w+\alpha+1)}{\Gamma(w+\Sigma s)}\int\limits_{0}^{1}r^{w+\Sigma s-1}h(r^{1/2})(1-r)^{\alpha}\mathrm{d}r.
\end{equation}
Arguing as in the proof of Lemma \ref{lemma-1} we see that $H$ is analytic on the half plane $\Re(w)>1$ and is continuous on $\Re(w)\geq 1$. By the asymptotic behavior of the Gamma function at infinity and the boundedness of $h$, there is a polynomial $p$ such that $|H(w)|\leq p(|w|)$ for all $\Re(w)\geq 1$.
For $\zeta=(\zeta_1,\ldots,\zeta_n)\in\mathbb{C}^n$ with $\Re(\zeta_j)\geq 0$, define
\begin{align*}
F(\zeta) & = \dfrac{1}{\Gamma(\alpha+1)}\prod_{j=1}^{n}\dfrac{\Gamma(\zeta_j+s_j+1)}{\Gamma(\zeta_j+1)}H(n+\Sigma\zeta).
\end{align*}
Then $F$ is analytic in the interior of its defining domain and for any $m=(m_1,\ldots,m_n)$ in $\mathbb{N}^n$, we have
\begin{align*}
& \omega_{\alpha}(g,m)\\
& = \langle T_g e_m, e_m\rangle_{\alpha}\\
& = \dfrac{\Gamma(n+|m|+\alpha+1)}{\Gamma(n+\alpha+1)\prod_{j=1}^{n}\Gamma(m_j+1)}\int\limits_{\mathbb{B}_n}g(z)z^{m}\bar{z}^{m}\mathrm{d}\nu_{\alpha}(z)\\
& = \dfrac{\Gamma(n+|m|+\alpha+1)}{\Gamma(n+\alpha+1)\prod_{j=1}^{n}\Gamma(m_j+1)}\int\limits_{\mathbb{B}_n}|z_1|^{2(s_1+m_1)}\cdots|z_n|^{2(s_n+m_n)}h(|z|)\mathrm{d}\nu_{\alpha}(z)\\
& = \dfrac{1}{\Gamma(\alpha+1)}\prod_{j=1}^{n}\dfrac{\Gamma(m_j+s_j+1)}{\Gamma(m_j+1)}H(n+|m|)\quad\quad\text{(by \eqref{eqn-500} and \eqref{eqn-501})}\\
& = F(m).
\end{align*}
Suppose $l=(l_1,\ldots,l_n)\in\mathbb{Z}^n$ such that $s_1l_1=\cdots=s_nl_n=0$ and $\Sigma l=0$. Then for all $m\in\mathbb{N}^n$ with $m+l\succeq 0$ we have $\dfrac{\Gamma(m_j+s_j+1)}{\Gamma(m_j+1)}=\dfrac{\Gamma(m_j+l_j+s_j+1)}{\Gamma(m_j+l_j+1)}$ for $1\leq j\leq n$, and $|m+l|=|m|+\Sigma l=|m|$. Hence we have $\omega_{\alpha}(g,m+l)=F(m+l)=F(m)=\omega_{\alpha}(g,m)$ for all such $m$.
Conversely, suppose $l=(l_1,\ldots,l_n)\in\mathbb{Z}^n$ such that the set $\{m\in\mathbb{N}^n: m+l\succeq 0 \text{ and } \omega_{\alpha}(g,m+l)=\omega_{\alpha}(g,m)\}$ does not have property (P). Then the set $\{m\in\mathbb{N}^n: m+l\succeq 0 \text{ and } F(m+l)=F(m)\}$ does not have property (P). By Proposition \ref{prop-1}, we see that $F(\zeta+l)=F(\zeta)$ for all $\zeta\in\mathbb{C}^n$ with $\Re(\zeta_j)>\max\{0,-l_j\}, j=1,\ldots, n$. This implies that for such $\zeta$,
\begin{equation}\label{eqn-301}
\prod_{j=1}^{n}{\dfrac{\Gamma(\zeta_j+s_j+l_j+1)}{\Gamma(\zeta_j+l_j+1)}}H(n+\Sigma\zeta+\Sigma l)
= \prod_{j=1}^{n}{\dfrac{\Gamma(\zeta_j+s_j+1)}{\Gamma(\zeta_j+1)}}H(n+\Sigma\zeta).
\end{equation}
We now show that $s_1 l_1=0$. If $s_1=0$, there is nothing to show. So suppose $s_1>0$ (then we must have $n\geq 2$). We will show that $l_1=0$. Assume for contradiction that $l_1\neq 0$. We consider here only the case $l_1>0$. The case $l_1<0$ can be handled in a similar fashion. Since $l_1\geq 1$, equation \eqref{eqn-301} gives
\begin{align}\label{eqn-302}
& (\zeta_1+s_1+1)\cdots(\zeta_1+s_1+l_1)\prod_{j=2}^{n}\dfrac{\Gamma(\zeta_j+s_j+l_j+1)}{\Gamma(\zeta_j+l_j+1)} H(n+\Sigma\zeta+\Sigma l)\notag\\
&= (\zeta_1+1)\cdots (\zeta_1+l_1)\prod_{j=2}^{n}\dfrac{\Gamma(\zeta_j+s_j+1)}{\Gamma(\zeta_j+1)}H(n+\Sigma\zeta).
\end{align}
Now choose $\tilde{m}=(m_2,\ldots, m_n)$ such that $m_j\geq\max\{1,1-l_j\}$ for all $j=2,\ldots,n$ and such that $\displaystyle\int\limits_{0}^{1}r^{n+|\tilde{m}|+\Sigma l+\Sigma s-2}h(r^{1/2})(1-r)^{\alpha}\mathrm{d}r\neq 0$. It is possible to do so since $h$ is not identically zero on $[0,1)$. Then with $\zeta=(\zeta_1,\tilde{m})$, \eqref{eqn-302} gives
\begin{align*}
& (\zeta_1+s_1+1)\cdots(\zeta_1+s_1+l_1)\prod_{j=2}^{n}\dfrac{\Gamma(m_j+s_j+l_j+1)}{\Gamma(m_j+l_j+1)} H(n+\zeta_1+|\tilde{m}|+\Sigma l)\\
&= (\zeta_1+1)\cdots (\zeta_1+l_1)\prod_{j=2}^{n}\dfrac{\Gamma(m_j+s_j+1)}{\Gamma(m_j+1)} H(n+\zeta_1+|\tilde{m}|),
\end{align*}
for all $\zeta_1\in\mathbb{C}$ with $\Re(\zeta_1)>0$. Since each side of the above identity is in fact an analytic function of $\zeta_1$ on $\Re(\zeta_1)>\max\{1-n-|\tilde{m}|-\Sigma l,1-n-|\tilde{m}|\}$ (which contains $-1$), the identity still holds true for $\zeta_1=-1$. Therefore, we get
\begin{equation*}
s_1(s_1+1)\cdots (s_1+l_1-1)\prod_{j=2}^{n}\dfrac{\Gamma(m_j+s_j+l_j+1)}{\Gamma(m_j+l_j+1)} H(n+|\tilde{m}|+\Sigma l-1) = 0,
\end{equation*}
which is a contradiction because the left hand side is nonzero by the choice of $m_2,\ldots,m_n$. Thus we have $l_1=0$. Similarly, we have $s_j l_j=0$ for any $j=2,\ldots,n$. Now equation \eqref{eqn-301} becomes $
H(n+\Sigma\zeta+\Sigma l)= H(n+\Sigma\zeta)$ for all $\zeta\in\mathbb{C}^n$ with $\Re(\zeta_j)>\max\{0,-l_j\}, j=1,\ldots,n$. Let $w=n+\Sigma\zeta$. Then $\Re(w)>\max\{n,n-\Sigma l\}$ and $H(w+\Sigma l)=H(w)$.
We now show that $\Sigma l=0$. Assume for contradiction that $\Sigma l\neq 0$. By changing $w$ to $w+\Sigma l$ if necessary, we may assume that $\Sigma l>0$. Since $H$ is periodic and analytic in $\Re(w)>n$, it extends to a periodic entire function on $\mathbb{C}$ which we still denote by $H$. Now for a complex number $w$ with $\Re(w)\leq n$, choose a number $k\in\mathbb{N}$ such that $\Re(w)+k(\Sigma l)\leq n< \Re(w)+(k+1)(\Sigma l)$. Then we have
\begin{align*}
|H(w)| & = |H(w+(k+1)(\Sigma l))|\leq p(|w+k(\Sigma l)+\Sigma l|)\leq q(|w+k(\Sigma l)|),
\end{align*}
for some polynomial $q$ with nonnegative coefficients. Now since $k(\Sigma l)<n-\Re(w)\leq n+|w|$, we have $q(|w+k(\Sigma l)|)\leq q(|w|+k(\Sigma l))\leq q(n+2|w|)$. Hence $|H(w)|\leq q(n+2|w|)$ when $\Re(w)\leq n$. Now when $\Re(w)>n$, we have $|H(w)|\leq p(|w|)$. In general, for any $w\in\mathbb{C}$, $|H(w)|\leq q(n+2|w|)+p(|w|)$. Since $H$ is entire, it must be a polynomial. But $H$ is also periodic, so it must be a constant function. We conclude that there is a constant $c$ so that
\begin{equation*}
\dfrac{\Gamma(w+\alpha+1)}{\Gamma(w+\Sigma s)}\int\limits_{0}^{1}r^{w+\Sigma s-1}h(r^{1/2})(1-r)^{\alpha}\mathrm{d}r=H(w) = c
\end{equation*}
for all $w\in\mathbb{C}$ with $\Re(w)\geq 1$.
Suppose $\Sigma s=0$ (hence $s_1=\cdots=s_n=0$). Then from the identity
\begin{equation*}
1 = \dfrac{\Gamma(w+\alpha+1)}{\Gamma(w)\Gamma(\alpha+1)}\int\limits_{0}^{1}r^{w-1}(1-r)^{\alpha}\mathrm{d}r,
\end{equation*}
we conclude that $\displaystyle\int\limits_{0}^{1}\Big(h(r^{1/2})-\dfrac{c}{\Gamma(\alpha+1)}\Big)r^{w-1}(1-r)^{\alpha}\mathrm{d}r=0$ for all $w\in\mathbb{C}$ with $\Re(w)\geq 1$. Thus, $h(t)=\dfrac{c}{\Gamma(\alpha+1)}$ for almost all $t\in [0,1)$. This shows that $g(z)=h(|z|)$ is a constant function on $\mathbb{B}_n$, which is a contradiction.
Now suppose $\Sigma s>0$. Then for $w=u\in\mathbb{R}$ with $u\geq 1$, we have
\begin{align*}
|c| & = \Big|\dfrac{\Gamma(u+\alpha+1)}{\Gamma(u+\Sigma s)}\int\limits_{0}^{1}r^{u+\Sigma s-1}h(r^{1/2})(1-r)^{\alpha}\mathrm{d}r\Big|\\
& \leq \|h\|_{\infty}\dfrac{\Gamma(u+\alpha+1)}{\Gamma(u+\Sigma s)}\int\limits_{0}^{1}r^{u+\Sigma s-1}(1-r)^{\alpha}\mathrm{d}r\\
& = \|h\|_{\infty}\dfrac{\Gamma(u+\alpha+1)}{\Gamma(u+\Sigma s)}\dfrac{\Gamma(\alpha+1)\Gamma(u+\Sigma s)}{\Gamma(u+\Sigma s+\alpha+1)}\\
& = \|h\|_{\infty}\dfrac{\Gamma(\alpha+1)\Gamma(u+\alpha+1)}{\Gamma(u+\Sigma s+\alpha+1)}\\
& \approx \|h\|_{\infty}\Gamma(\alpha+1)u^{-\Sigma s},
\end{align*}
by Stirling's formula for the Gamma function. Letting $u\rightarrow\infty$, we get $c=0$. So we have $\displaystyle\int\limits_{0}^{1}r^{w+\Sigma s-1}h(r^{1/2})(1-r)^{\alpha}\mathrm{d}r = 0$ for all $w\in\mathbb{C}$ with $\Re(w)\geq 1$. This implies that $h(r)=0$ for almost all $r\in [0,1)$. Hence $g(z)=0$ for almost all $z\in\mathbb{B}_n$, which is again a contradiction. Thus we have $\Sigma l=0$.
\end{proof}
We are now ready for the proof of Theorem \ref{theorem-2}.
\begin{proof}[Proof of Theorem \ref{theorem-2}]
Since $g(z_1,\ldots,z_n)=g(|z_1|,\ldots,|z_n|)$ for almost all $z\in\mathbb{B}_n$, Theorem 3.1 in \cite{Le-5} shows that the Toeplitz operator $T_g$ is diagonal with respect to the standard orthonormal basis. The eigenvalues of $T_g$ are given by $\omega_{\alpha}(g,m)=\langle T_g e_m, e_m\rangle_{\alpha}$ for $m\in\mathbb{N}^n$. Note that $\omega_{\alpha}(\bar{g},m)=\overline{\omega_{\alpha}(g,m)}$ for all such $m$.
Now $T_fT_g=T_gT_f$ on $\mathcal{P}$ if and only if for all $l\in\mathbb{Z}^n$ and $m\in\mathbb{N}^n$ with $m+l\succeq 0$,
\begin{align}\label{eqn-304}
0 & = \langle (T_fT_g-T_gT_f)e_{m+l}, e_{m}\rangle_{\alpha}\notag\\
& = \langle \omega_{\alpha}(g,m+l) T_f e_{m+l}, e_{m}\rangle_{\alpha} - \langle T_f e_{m+l}, \omega_{\alpha}(\bar{g},m)e_{m}\rangle_{\alpha}\\
& = (\omega_{\alpha}(g,m+l)-\omega_{\alpha}(g,m))\langle T_f e_{m+l}, e_{m}\rangle_{\alpha}.\notag
\end{align}
Suppose $f(\mathrm{e}^{\mathrm{i}\theta}z)=f(z)$ for almost all $\theta\in\mathbb{R}$, almost all $z\in\mathbb{B}_n$, and for $1\leq j\leq n$ with $s_j\neq 0$, $f(z_1,\ldots,z_{j-1},|z_j|,z_{j+1},\ldots,z_n)=f(z)$ for almost all $z\in\mathbb{B}_n$. Let $l=(l_1,\ldots,l_n)$ be in $\mathbb{Z}^n$. If $\Sigma l\neq 0$ or for some $1\leq j\leq n$, $s_jl_j\neq 0$ (hence $s_j\neq 0$ and $l_j\neq 0$) then conclusion (1) of Corollary \ref{cor-2} shows that $\langle T_f e_{m+l}, e_{m}\rangle_{\alpha}=0$ for all $m\in\mathbb{N}^n$ with $m+l\succeq 0$. If $\Sigma l=0$ and $s_1l_1=\cdots=s_n l_n=0$, Proposition \ref{prop-2} shows that $\omega_{\alpha}(g,m+l)=\omega_{\alpha}(g,m)$ for all $m\in\mathbb{N}^n$ with $m+l\succeq 0$. Thus \eqref{eqn-304} holds for all $l\in\mathbb{Z}^n$ and $m\in\mathbb{N}^n$ with $m+l\succeq 0$. Therefore $T_fT_g=T_gT_f$ on $\mathcal{P}$.
Now suppose $T_fT_g=T_gT_f$ on $\mathcal{P}$. Let $l=(l_1,\ldots,l_n)$ be in $\mathbb{Z}^n$ such that $\Sigma l\neq 0$ or $s_j l_j\neq 0$ for some $1\leq j\leq n$. Then Proposition \ref{prop-2} shows that the set $\{m\in\mathbb{N}^n: m+l\succeq 0\text{ and } \omega_{\alpha}(g,m+l)=\omega_{\alpha}(g,m)\}$ has property (P). Since \eqref{eqn-304} holds for all $m\in\mathbb{N}^n$ with $m+l\succeq 0$, we conclude that the set $\displaystyle\{m\in\mathbb{N}^n: m+l\succeq 0 \text{ and }\int\limits_{\mathbb{B}_n}f(z)z^{m+l}\bar{z}^{m}(1-|z|^2)^{\alpha}\mathrm{d}\nu(z)=0\}$ does not have property (P). This is true whenever $l\in\mathbb{Z}^n$ such that $\Sigma l\neq 0$ or $s_j l_j\neq 0$ for some $1\leq j\leq n$. Conclusion (2) of Corollary \ref{cor-2} now implies that for almost all $\theta\in\mathbb{R}$ and almost all $z\in\mathbb{B}_n$, we have $f(z)=f(\mathrm{e}^{\mathrm{i}\theta}z)=f(z_1,\ldots,z_{j-1},\mathrm{e}^{\mathrm{i}\theta}z_{j},z_{j+1},\ldots,z_n)$ for any $1\leq j\leq n$ with $s_j\neq 0$. This shows that $f(\mathrm{e}^{\mathrm{i}\theta}z)=f(z)$ for almost all $\theta\in\mathbb{R}$, almost all $z\in\mathbb{B}_n$, and for $1\leq j\leq n$ with $s_j\neq 0$, $f(z_1,\ldots,z_{j-1},|z_j|,z_{j+1},\ldots,z_n)=f(z)$ for almost all $z\in\mathbb{B}_n$.
\end{proof}
\begin{remark}
If $s_1=\cdots=s_n=0$ so that $g(z)=h(|z|)$ for a non-constant bounded measurable function $h$ on $[0,1)$, then Theorem \ref{theorem-2} shows that for $f\in L^2_{\alpha}$, $T_f$ commutes with $T_g$ if and only if $f(\mathrm{e}^{\mathrm{i}\theta}z)=f(z)$ for almost all $z\in\mathbb{B}_n$, almost all $\theta\in\mathbb{R}$. In the one dimensional case, those functions are exactly radial functions. So we recover {\v{C}}u{\v{c}}kovi{\'c} and Rao's result.
\end{remark}
\begin{remark}
If $g(z)=|z_1|\cdots |z_{n-1}|h(|z|)$ for some bounded measurable function $h$ on $[0,1)$ then Theorem \ref{theorem-2} shows that for $f\in L^{2}_{\alpha}$, $T_f$ commutes with $T_g$ if and only if $f(z)=f(\mathrm{e}^{\mathrm{i}\theta}z)=f(|z_1|,\ldots,|z_{n-1}|,z_n)$ for almost all $\theta\in\mathbb{R}$ and almost all $z\in\mathbb{B}_{n}$. This is equivalent to the condition that $f(z)=f(|z_1|,\ldots,|z_n|)$ for almost all $z\in\mathbb{B}_n$.
\end{remark}
\end{document}
|
\begin{document}
\title{A diffusion-map-based algorithm for gradient computation on manifolds and applications}
\begin{abstract}
We present a technique to estimate the Riemannian gradient of a given function defined on interior points of a Riemannian submanifold in the Euclidean space based on a sample of function evaluations at points in the submanifold. This approach is based on the estimates of the Laplace-Beltrami operator proposed in the diffusion-map theory. Analytical convergence results of the Riemannian gradient expansion are proved.
\textcolor{black}{The methodology provides a new algorithm to compute the gradient in cases where classical methods for numerical derivatives fail. For instance, in classification problems, and in cases where the information is provided in an unknown nonlinear lower-dimensional submanifold lying in high-dimensional spaces.
The results obtained in this article connect the theory of diffusion maps with the theory of learning gradients on manifolds.
We apply the Riemannian gradient estimate in a gradient-based algorithm providing a derivative-free optimization method.}
We test and validate several applications, including tomographic reconstruction from an unknown random angle distribution, and the sphere packing problem in dimensions 2 and 3.
\end{abstract}
\begin{keywords}{Diffusion-Maps; Dimensionality reduction; Gradient operator; Gradient descent; Gradient flow; Machine learning; Tomographic reconstruction; Sphere packing}
\end{keywords}
\\
\classifications {Primary: 49N45, 65K05, 90C53, 65J22; Secondary: 94A08, 68T01, 68T20.}
\section{Introduction}
A vast number of iterative minimization algorithms rely on the fact that the negative gradient determines the steepest descent direction. The applications in science, in general, and inverse problems, in particular, abound~\cite{ denoising,gradteo2,paperfundamental}. Some examples of these algorithms are the Gradient Descent and Newton's method~\cite{claudia} which have deep theoretical aspects~\cite{smale, shub, yuan,benarsvaiter}. Although most of the focus in applications concern Euclidean spaces, these methods are also important in the context of Riemannian geometry. See~\cite{shub2,sepul,edelman,smith,bloch} and references therein.
In this article, we address an important task in the aforementioned methods, namely to compute the Riemannian gradient from data or from inexactly computed function values. In many cases such gradient is not easily computable due to the complexity of the function's local behavior. Problems also arise whenever the available information consists of high-dimensional unsorted sample points lying in an unknown nonlinear lower-dimensional submanifold~\cite{muksa}. The latter issue does not allow the tangent space to be efficiently and economically computed from noisy sample points.
Thus, one of the purposes of this article is to confront such difficulties. We emphasize that we focus on giving Riemannian gradient estimates instead of proposing an optimization method. In other words, we compute approximations of the Riemannian gradient of a function using sample points. An important feature of our approximations is that it does not depend on differential conditions of the function. The main tool to compute these estimates is the diffusion-map theory. The latter is a dimensionality reduction methodology that is based on the diffusion process in a manifold. See Refs.~\cite{COIFMAN20065,COIFMAN201479,Coifman7426} for more details.
An important feature of the theory of diffusion maps is that it recovers the Laplace-Beltrami operator when the dataset approximates a Riemannian submanifold of $\mathbb{R}^n$. The diffusion-map theory is based on a symmetric kernel defined on the dataset. The symmetric kernel measures the connectivity between two points. Our approach is based on implementing this theory in the recently developed case of asymmetric kernels~\cite{alvaro}. Compared to symmetric kernels, asymmetric kernels provide more details on how the information is distributed in each direction. This characteristic allows us to know the path with the greatest variations.
\textcolor{black}{In comparison with classical methods where the gradient is numerically computed using the knowledge of the differential structure of the manifold, our approach focuses on cases where the available information consists only of sample points lying in an unknown manifold. In a certain sense, we follow the
paradigm of a data driven computation to solve the problem in the spirit of \cite{GLZ2018}.
}
\textcolor{black}{
The problem we consider here appears, for instance, in the context
of the Learning Gradient Theory~\cite{learning2}. In this framework, one computes the gradient of a function defined on a submanifold and applies it to supervised learning, in algorithms for classification, and dimensionality reduction.}
\textcolor{black}{ However, the estimates in the Learning Gradient Theory are based on the representation theorem for Reproducing Kernel Hilbert Space (RKHS), which requires solving an optimization problem to compute the coefficients in the representation. This, in turn, might be computationally expensive when the sample size is large enough. In the present work, we use the diffusion-map theory and the family of associated kernels to give a closed form for the gradient approximation, thus, improving the computational complexity.}
As an application of our methodology, we use our approach as the main direction in a gradient-based algorithm. See Ref.~\cite{sepul}. The main advantage of using this operator is that it does not depend on some {\it a priori} knowledge of the Riemannian gradient of the function. Furthermore, since the operator is defined as an integral, then it is robust to noise in the data.
We test our proposed gradient-based algorithm in two applications.
Firstly, we apply it to the sphere packing problem in dimensions $2$ and $3$. This problem was addressed numerically,
in Ref.~\cite[Chapter 2]{spheregrassmanian}. Here, an optimization algorithm using the gradient descent technique is proposed to tackle the sphere packing problem on a Grassmannian manifold, in this case, there is a closed form to compute the gradient of the function.
\textcolor{black}{In contradistinction, in the present article, as an experiment, we consider the sphere packing in the Euclidean space. This is more difficult because there is no closed form for the gradient of the objective function
due to the singularities in the ambient space. In fact, the objective function is not differentiable. In our approach, we reformulate the sphere packing problem as an optimization problem over the special linear group, and we use the proposed methodology to find a computational solution.}
\textcolor{black}{To analyze the performance of the methodology, we test and compare the proposed algorithm with the derivative-free solvers (\textsc{PSO} and \textsc{Nelder-Mead}) implemented in the \textsc{Manopt toolbox}, described in Refs. \cite{manopt1,manopt2}.}
Secondly, we apply the proposed methodology to the tomographic reconstruction problem from samples of unknown angles.
This post-processing algorithm is parallelizable. It also has a similar flavor to the algorithm developed in Refs. \cite{MZ2001,MR2002m} since we are trying to solve a high dimensional optimization problem with a swarm of computed auxiliary data. In the latter case, this is done with the approximation to the roots of a high-degree polynomial.
Our reconstruction method is based on using the diffusion maps for a partition of the dataset, instead of considering the complete database as proposed in Ref. \cite{angucoif}. We remark that we reconstruct the image except for a possible rotation and reflection.
Compared to traditional reconstruction methods Refs. \cite{angucoif,angudesco}, our method does not assume the hypothesis that the distribution of the angles is previously known, which makes it a more general and practical method for numerical implementations. In addition, our method runs faster and more efficiently than the method proposed in Ref. \cite{angucoif}. In fact, if the number of sample points is $us+r$ with $ r < s < u$, then the complexity of the algorithm proposed in Ref. \cite{angucoif} is $O(u^3 \, s^3)$, while our algorithm runs with complexity $O(u\, s^3)$. On the other hand, the numerical implementation described in Ref. \cite{india} of the methodology proposed in Ref. \cite{angudesco}, uses brute force which is not suitable when the number of sample points is large.
\par This paper is organized as follows, in Section~\ref{diffusion}, we give a brief exposition of the classical representation theory for diffusion distances proposed in Refs.~\cite{COIFMAN20065, COIFMAN201479, Coifman7426}, and we state our main result in Theorem~\ref{teo1}.
In Section~\ref{gradientflowsect}, we review facts about flows defined over manifolds, and we show how to use the flow generated by the approximations to find minimizers. In Section~\ref{applications}, we show some experiments related to the sphere packing problem, and we also show the effectiveness of our tomographic reconstruction method when the angles are unknown. Finally, in Appendices~\ref{ape1} and \ref{ape2}, we cover the technical details of the proof of the main result.
\section{Diffusion-Maps}
\label{diffusion}
In this section, we review some facts on diffusion-map theory. We refer the reader to Refs.~\cite{COIFMAN20065,COIFMAN201479,Coifman7426} for more details. Diffusion-maps is a nonlinear dimensionality reduction method that is based on the diffusion process over datasets.
In diffusion-map theory, we assume that our dataset $X=\{{x_i} \}_{i=1}^{k}$ satisfies $X \subset \mathcal{M} \subset\mathbb{R}^n$, where $\mathcal{M}$ is a $d$-dimensional Riemannian submanifold of the ambient space $\mathbb{R}^n$. In this case the dimension $d$ of $\mathcal{M}$ is assumed to be much smaller than $n$.
In our approach, we use asymmetric vector-valued kernels as in Ref.~\cite{alvaro}. The main advantage of using these kernels is that we have a more specific description of the distribution of the dataset in certain directions. Based on the expansion for the Laplace-Beltrami operator proposed in Ref.~\cite{COIFMAN20065} we recover the Riemannian gradient. Firstly, we consider the vector-valued kernel
$$ \overline{K}_t:\mathcal{M} \times \mathcal{M} \to \mathbb{R}^n ,$$
defined as
$$ \overline{K}_t(x,y)= (y-x) e^{\frac{-\| y-x \|^2}{2 t^2}}. $$
We fix the exponent $\delta \in (1/2,1)$,
and let $d_t(x)$ be defined by
$$d_t(x)=\int_{U(x,t^\delta)} e^{\frac{-\| y-x \|^2}{2 t^2}} dy,$$
where
\begin{equation}
\label{conjuntopequ}
U(x,t)=\{ y\in \mathcal{M} | \|y-x\| \le t \}.
\end{equation}
Here, the parameter $\delta$ has to be in $(1/2,1)$ to guarantee convergence of the estimates as shown in Lemma \ref{lemaprinci}. We consider the Markov normalized kernel given by
$$ \rho_t (x,y) = \frac{\overline{K}_t(x,y)}{d_t(x)} .$$
For a function $f$, we define the operator
\begin{equation}
\overline{P}_{t}f (x)=\int_{U(x,t^\delta)} \rho_t(x,y) (f(y)-f(x)) dy.
\label{kerneloperator}
\end{equation}
We now show that this operator approximates the Riemannian gradient of a given function on some Riemannian submanifold. The technical details of the proof are given in Appendices~\ref{ape1} and~\ref{ape2}.
\begin{thm} \label{teo1}
Let $\mathcal{M}$ be a Riemannian submanifold of $\mathbb{R}^n$ and assume that the function $f$ is smooth, and $x$ is an interior point of $\mathcal{M}$. Then, the following estimate holds
\begin{equation}
\overline{P}_{t} f (x) =\nabla f(x) \, t^{2}+O(t^{4 \delta }),
\end{equation}
where $\nabla f$ is the Riemannian gradient of $f$. In particular, we have that
\begin{equation}
\lim_{t\to 0}\frac{\overline{P}_{t} f (x)}{ t^{2}} =\nabla f(x).
\label{ecuacionprincipal}
\end{equation}
\end{thm}
Note that the operator $\overline{P}_{t} $ does not depend on differentiability conditions. Furthermore, since the operator is defined as an integral one, then it is robust to noise perturbation. Considering these characteristics, we use this operator as a substitute for the Riemannian gradient as the main direction of a gradient-based algorithm on manifolds detailed in Ref.~\cite{sepul,sato}.
\section{Flows and optimization methods on submanifolds}
\label{gradientflowsect}
In this section, we review some facts about flows defined on submanifolds and we show how the flow generated by the vector field $\overline{P}_{t} f(\cdot)$ can be used in optimization methods.
\par Assume that $h:\mathcal{M} \to \mathbb{R}^n$ is a continuous function defined on the submanifold $\mathcal{M} \subset \mathbb{R}^n$. We say that a curve $b$ starts at $x_0$, if $b(0) = x_0$. The Peano existence theorem guarantees that for all $x_0 \in \mathcal{M}$, there exists a smooth curve $c_{h,x_0}:(-\varepsilon, \varepsilon) \to \mathcal{M}$ starting at $x_0$, which is a solution of
\begin{equation}
\label{gradientflowequ}
\begin{aligned}
c_{h,x_0}'(s) &= - h(c_{h,x_0}(s)).\\
\end{aligned}
\end{equation}
We refer the reader to Ref.~\cite{libroedo} for a complete background about ordinary differential equations. We observe that assuming only the continuity condition, the uniqueness of the curve is not guaranteed. Since the solution of Eq.~(\ref{gradientflowequ}) may not be unique, we can concatenate solutions as follows. Let $ c_{h,x_0}$ be a solution of Eq.~(\ref{gradientflowequ}) starting at the point $x_0$. For a fix $s_1$ in the domain of $ c_{h,x_0}$, we define $x_1=c_{h,x_0}(s_1)$. If $ c_{h,x_1}$ is a solution of Eq.~(\ref{gradientflowequ}) starting in $x_1$, we define a new curve $c_{h,x_0,x_1}$ as
$$ c_{h,x_0,x_1}(s) = \left\{\begin{array}{lr}
c_{h,x_0}(s), & \text{for } s \le s_1\\
c_{h,x_1}(s-s_1), & \text{for } s_1< s\\
\end{array} \right. \mbox{. }$$
Proceeding recursively, we obtain a piecewise differentiable curve $c_{h,x_0,x_1,x_2 \cdots}(s)$ starting at $x_0$, and satisfying Eq.~(\ref{gradientflowequ}) (except in a discrete set). See Figure \ref{concadenaop} for a graphic description. In this case, we say that the curve $c_{h,x_0,x_1,x_2 \cdots}(s)$ is a piecewise solution of Eq.~(\ref{gradientflowequ}). We focus on curves which are solutions (except in a discrete set) of Eq.~(\ref{gradientflowequ}), because these curves allow updating the direction in which we look for stationary points.
\begin{figure}
\caption{Piecewise curve obtained by concatenating four curves.}
\label{concadenaop}
\end{figure}
Suppose that $f:\mathcal{M} \to \mathbb{R}$ defines a smooth function. In this case we consider the vector field $h=\nabla f$. If $c_{h,x_0,x_1,x_2 \cdots}$ is a piecewise solution of Eq.~(\ref{gradientflowequ}) starting at $x_0$, then, for all $s$ (except in a discrete set), we have that
\begin{equation}
\| c_{h,x_0}'(s) \|^2 = - \frac{d}{ds} f (c_{h,x_0}(s)).
\label{energiagradiente}
\end{equation}
Therefore, the function $ f (c_{h,x_0}(\cdot))$ is decreasing. \textcolor{black}{Thus, we can use the flow $c$ to find a local minimum for the function $f$.}
\subsection{Lipschitz functions}
We recall that $f$ is a locally Lipschitz function if for all $x \in \mathcal{M}$ there exists a neighborhood $x \in U\subset \mathcal{M}$ and a positive constant $C$, such that for all $y \in U$ it holds that
$$ | f(x)-f(y)| \le C \|x-y\|_{L^2}.$$
We also recall that the Sobolev space $H^{1}(0,T,\mathcal{M})$ is defined as the set of all square integrable functions from $[0,T]$ to $\mathcal{M}$ whose weak derivative has also finite $L^2$ norm.
\par Our goal is to use the gradient approximation in Theorem \ref{teo1} to find minimal points of locally Lipschitz functions. Recall that Rademacher's theorem states that for a locally Lipschitz function $f$, the gradient operator $ \nabla f$ exists almost everywhere. See Ref.~\cite{evans10} for more details. However, for a locally Lipschitz function $ f $, the gradient $\nabla f$ may not exist for all points. In this case, it is not possible to define the gradient flow.
\par To address this problem, we propose to use the flow generated with $ {\overline{P}_{t}f (x)}$ defined in Eq.~(\ref{kerneloperator}) instead of the gradient. The operator $\overline{P}_{t}f$ is defined as an integral, and thus it is continuous. This fact guarantees the existence of a flow associated with $ \frac{\overline{P}_{t}f (x)}{t^2}$ for arbitrarily small positive $t$.
\par Now we show that at the points where the function is smooth, this flow approximates a curve for which the function decreases with time. To do that, we first prove a technical result.
\begin{prop}
Suppose that $f$ is continuously differentiable in an open neighborhood of $x_0$. We define the function $J:[0,T] \times \overline{B(x_0, R) \cap \mathcal{M}} \to \mathbb{R}^n$ as
$$ J(t,x) = \left\{\begin{array}{lr}
\frac { \overline{P}_{t}f (x)} {t^2}, & \text{for } t > 0\\
\nabla f (x), & \text{for } t=0\\
\end{array} \right. \mbox{, }$$
where $B(x_0, R)$ is the ball in $\mathbb{R}^n$ with center $x_0$ and radius $R$. Then, for small enough numbers $T$ and $R$, the function $J$ is uniformly continuous. In particular, there exists a positive constant $M$ such that for all $(t,x) \in (0,T] \times \overline{B(x_0, R) \cap \mathcal{M}}$ the following estimate holds.
\begin{equation}
\label{estimaflow}
\Big\| \frac{ \overline{P}_{t}f (x)} {t^2} \Big\| \le M.
\end{equation}
\begin{proof} Since the set $ [0,T] \times \overline{B(x_0, R) \cap \mathcal{M}}$ is compact, it is enough to show that $J$ is continuous. Firstly, we show that $J$ is continuous on $(0,T] \times \overline{B(x_0, R) \cap \mathcal{M}}$. For that, we claim that for a continuous vector-valued function $\omega: (0,T] \times \overline{B(x_0, R) \cap \mathcal{M}} \times \overline{B(x_0, R) \cap \mathcal{M}} \to \mathbb{R}^m $, the operator
$$ \Omega(t,x)= \int_{U(x,t^\delta)} \omega(t,x,y) \, dy,$$
is continuous. In fact, we observe that
\begin{equation}
\label{omegagra}
\Omega(t,x)-\Omega(t_1,x_1)= \int_{U(x,t^\delta)}\big(\omega(t,x,y)-\omega(t_1,x_1,y)\big) \, dy + \int_{G(t,t_1,x,x_1)} \omega(t_1,x_1,y) dy,
\end{equation}
where
$$G(t,t_1,x,x_1)= \big(U(x_1,t_{1}^\delta) \setminus U(x,t^\delta)\big) \cup \big(U(x,t^\delta) \setminus U(x_1,t_{1}^\delta)\big).$$
On the other hand, a straightforward computation shows that
$$\lim_{(t_1,x_1) \to (t,x)} 1_ {G(t,t_1,x,x_1)} =0,$$
where the convergence is pointwise almost everywhere, therefore
\begin{equation}
\label{funclimi1}
\lim_{(t_1,x_1) \to (t,x)} \int_{G(t,t_1,x,x_1)} \omega(t_1,x_1,y) dy=0.
\end{equation}
In addition, since the function $\omega$ is continuous, then
\begin{equation}
\label{funclimi2}
\lim_{(t_1,x_1) \to (t,x)} \int_{U(x,t^\delta)}\omega(t,x,y)-\omega(t_1,x_1,y) \, dy=0.
\end{equation}
Using Eqs.~\eqref{funclimi1} and \eqref{funclimi2} in Eq.~\eqref{omegagra}, we conclude that $\Omega$ is a continuous function. We apply the previous result to the function $w_1(t,x,y)=e^{\frac{-\| y-x \|^2}{2 t^2}}$ to obtain that $\Omega_1(t,x)=d_t(x)$ is a continuous function. This implies that the function
$$ w_2(t,x,y)= \frac {\rho_t(x,y) (f(y)-f(x))} { t^2},$$
is continuous on $(0,T] \times \overline{B(x_0, R) \cap \mathcal{M}}$.
Again, we apply the same result to the function $w_2(t,x,y)$ to conclude that $J(t,x)$ is a continuous function on $(0,T] \times \overline{B(x_0, R) \cap \mathcal{M}}$.
\par Moreover, using Estimate~(\ref{ecuafinal}) of the proof of Theorem~\ref{ecuacionprincipal} and Lemma~\ref{lemma2}, we conclude that the function $J$ is continuous for all points of the form $(0,x)$. This proves our result.
\end{proof}
\label{propauxi}
\end{prop}
\par The estimate of Proposition~\ref{propauxi} states that for a fixed $x_0$, and small $T$, the family of curves $\{c_{h(t_n),x_0}\}_{t_n}$ is uniformly bounded on the Sobolev space $H^{1}(0,T,\mathcal{M})$. Thus, the Rellich--Kondrachov theorem states that for any sequence $t_n \to 0$, there exists a subsequence $t_{n_k} \to 0$ such that $c_{h(t_{n_k}),x_0}$ converges to some curve $c$ in the $L^2$-norm. Observe that by the Arzel\`a--Ascoli theorem, we can also suppose that the sequence $c_{h(t_n),x_0}$ converges uniformly to $c$. Finally, we prove the main result in this section.
\begin{prop} Assume the same assumptions and notations of Proposition~\ref{propauxi}. Then, for $t_1<t_2$ we have that
$$ f(c(t_1))\ge f(c(t_2)). $$
\begin{proof}
We claim that $\frac{\overline{P}_{t}f (c_{h(t_n),x_0}(\cdot))}{t^2}$ converges pointwise to $\nabla f (c(\cdot))$, where $ c $ is the curve previously described. In fact, for all $s,$ we have by Proposition~\ref{propauxi} that
$$ \lim_{n \to \infty}\frac{\overline{P}_{t}f (c_{h(t_n),x_0}(s))}{t^2}-\nabla f (c_{h(t_n),x_0}(s))=0.$$
The continuity of the gradient guarantees that
$$\lim_{n \to \infty} \nabla f (c_{h(t_n),x_0}(s))= \nabla f (c (s)). $$
The above estimates prove our claim. Using inequality \eqref{estimaflow} together with the dominated convergence theorem, we obtain that
\begin{equation}
\label{convergenciagradi}
\lim_{n \to \infty} \int_0^T \left \| \frac{\overline{P}_{t}f (c_{h(t_n),x_0}(s))}{t^2}- \nabla f (c (s)) \right \| ^2 ds = 0.
\end{equation}
On the other hand, since $c_{h(t_n),x_0}(I)$ is a solution of Eq.~(\ref{gradientflowequ}), then
$$\begin{array}{rcl} 0 & \ge & \langle \frac{\overline{P}_{t}f (c_{h(t_n),x_0}(s))}{t^2} ,c_{h(t_n),x_0}'(s) \rangle \\ \,&\ge & \, \langle\frac{\overline{P}_{t}f (c_{h(t_n),x_0}(s))}{t^2}- \nabla f (c (s)) ,c_{h(t_n),x_0}'(s)\rangle +\langle \nabla f (c (s)),c_{h(t_n),x_0}'(s)-c'(s) \rangle\, \\ \, &+ & \langle \nabla f (c (s)),c'(s) \rangle.
\end{array} $$
Using the weak convergence assumption, together with Eq.~(\ref{convergenciagradi}), we conclude that for all points $t_1<t_2$, the following inequality holds
$$0 \ge \int_{t_1}^{t_2} \langle\nabla f (c (s)),c'(s) \rangle ds= f(c(t_2))-f(c(t_1)).$$
\end{proof}
\label{proposifuerte}
\end{prop}
\textcolor{black}{ The previous result establishes that the flow generated by $\frac{\overline{P}_{t}f (x)}{t^2}$ approximates a curve $c$ for which the function $f$ is decreasing.}
\section{Algorithm Development}
\label{applications}
\textcolor{black}{In this section we propose a computational algorithm to approximate the Riemannian gradient of a function defined on a Riemannian submanifold of the Euclidean space using a set of sample points. We use these approximations as principal directions in gradient-based algorithms as described in Ref.~\cite{sepul}.} If the function is not differentiable at a point $x$, we say that $x$ is a singularity. Here, we assume that the singularity points form a discrete set.
\par Theorem~\ref{teo1} states that the operator $\overline{P}_{t} f (x)$ can be used to approximate the Riemannian gradient.
An important task is to compute the integrals involving the operator
$\overline{P}_{t}$, defined in Eq.~(\ref{kerneloperator}). In practical applications, we only have access to a finite set of sample points $x_1, x_2, x_3, \cdots, x_m$ on $U(x,t^{\delta})$, which are the realizations of i.i.d random variables with probability density function {\it (PDF) } $q$. However, the integral in Eq.~\eqref{kerneloperator} does not depend on the {\it (PDF) } $q$. To address this issue, for a fixed $x$, we consider the normalized points
$$ (x_i-x) (f(x_i)-f(x)) \, \,e^{\frac{-\| x_i-x \|^2}{2 t^2}} \, \, / q(x_i),$$
($i=1,\cdots, m$) which are realizations of i.i.d random variables regarding the {\it PDF } $q(x)$. In that case, the Law of Large Numbers {\it LLN } guarantees that
$${\overline{P}_{t} f (x)}= \lim_{m \to \infty} \frac{1}{m \, \,d_t(x)}\sum_{i=1}^m (x_i-x) \, \,(f(x_i)-f(x)) \, \,e^{\frac{-\| x_i-x \|^2}{2 t^2}} \, \, / q(x_i), $$
where $d_t(x)$ can be computed similarly using the {\it LLN }
$$ d_t(x)= \lim_{m \to \infty} \frac{1}{m} \sum_{i=1}^m e^{\frac{-\| x_i-x \|^2}{2 t^2}} \, \, / q(x_i) .$$
\textcolor{black}{The following result establishes a connection between the tolerance of the approximation involving the finite sums and the parameters $\delta$, $t$ and $m$. }
\begin{prop} \label{estimaparamefull}
\textcolor{black}{ Let $x$ be a fixed point in $\mathcal{M}$, and $t$ a positive number. Assume that $q(x)$ is a {\it PDF } on $U(x,t^{\delta})$, and $X_1, X_2, X_3, \cdots, X_m$ are i.i.d multivariate random variables regarding $q$, and that there exists a positive constant $M$ such that
$$q(X_i)>M,$$
for $1 \le i \le m$. Define
$$ S^{1}_{m,t}= \frac{1}{m} \sum_{i=1}^m (X_i-x) \, \,(f(X_i)-f(x)) \, \,e^{\frac{-\| X_i-x \|^2}{2 t^2}} / q(X_i),$$
and
$$ S^{2}_{m,t}= \frac{1}{m} \sum_{i=1}^m e^{\frac{-\| X_i-x \|^2}{2 t^2}} \, \, / q(X_i) .$$
For a positive constant $C_1$ and $2<u<4 \delta$, we define the set
$$ A_{t,m}(C_1)= \{\|S^{1}_{m,t} / ( t^{2}S^{2}_{m,t}) - \nabla f(x) \|\le C_1 t^u \}, $$
where $m$ and $t$ are the approximation parameters. Thus, there exist positive constants $C_1$ and $W_4$ such that the probability of the set $A_{t,m}(C_1)$ is bounded below by
\begin{equation}
\label{paramecontrodes}
\mathbb{P}(A_{t,m}(C_1))\ge 1 - \frac{W_4}{( m e^{-t^{2(\delta-1)}/2} t^{2+d+u})^{2}}.
\end{equation} }
\end{prop}
\begin{proof}
\textcolor{black}{Observe that
\begin{equation}
\label{desiguparame1}
\begin{array}{rcl}
\|S^{1}_{m,t} / ( t^{2}S^{2}_{m,t}) - \nabla f(x) \| & \le & \|S^{1}_{m,t} / ( t^{2}S^{2}_{m,t}) -{\overline{P}_{t} f (x)}/ {t^{2}}\| + \\ & & \, \| {\overline{P}_{t} f (x)}/ {t^{2}}- \nabla f(x)\|.
\end{array}
\end{equation}
Since $\| X_i-x \|<t^{\delta}$, we obtain that
$$ \|S^{2}_{m,t} d_t(x) \| > W_1 e^{-t^{2(\delta-1)}/2},$$
where $W_1$ is a positive constant which does not depend on $t$. In addition, by Eq. \eqref{estimativanormali} we have that
$$ \| d_t(x) \| > W_2 t^{d},$$
where $W_2$ is a positive constant. If we define
$$I_{t}=\int_{U(x,t^\delta)} (y-x) (f(y)-f(x))\,e^{\frac{-\| y-x \|^2}{2 t^2}} \, dy,$$
there exists a positive upper bound $W_3$ satisfying $\|I_{t}\| \le W_3$ for all $t$ small enough.
On the other hand,
\begin{equation}
\label{desipara1}
\begin{array}{rcl}
\|S^{1}_{m,t} / ( t^{2}S^{2}_{m,t}) -{\overline{P}_{t} f (x)}/ {t^{2}}\| \le \frac{1}{t^{2}} ( \| S^{1}_{m,t} -I_t \| / (W_1 e^{-t^{2(\delta-1)}/2}) + \\ \|I_t\| \|d_t(x)-S^{2}_{m,t} \| / (W_2 W_1 t^{d} e^{-2t^{(\delta-1)} }).
\end{array}
\end{equation}
We define the sets
$$B^{1}_{t,m}(W_1)=\{\| S^{1}_{m,t} -I_t \| \ge (W_1 e^{-t^{2(\delta-1)}/2}) t^{2+u}\},$$
and
$$B^{2}_{t,m}(W_2)=\{\| d_t(x)-S^{2}_{m,t} \| \ge (W_2 W_1 t^{d} e^{-t^{2(\delta-1)}/2}) t^{2+u}\}.$$
The Chebyshev's inequality guarantees that
$$\mathbb{P}(B^{1}_{t,m}(W_1) )\le \frac{\sigma_1^2}{m (W_1 e^{-t^{2(\delta-1)}/2}t^{2+u})^{2}},$$
and
$$\mathbb{P}(B^{2}_{t,m}(W_2) )\le \frac{\sigma_2^2}{m (W_2 W_1 t^{d} e^{-t^{2(\delta-1)}/2} t^{2+u} )^{2}},$$
where $\sigma_1^2$ and $\sigma_2^2$ are the respective variances in each case. Therefore,
\begin{equation}
\label{desigfina}
\mathbb{P}( B^{1}_{t,m}(W_1)^\complement \cap B^{2}_{t,m}(W_2)^\complement)\ge 1 - \frac{W_4}{( m e^{-t^{2(\delta-1)}/2} t^{2+d+u})^{2}},
\end{equation}
for a proper positive constant $W_4$.
By Theorem \ref{teo1} and Inequalities \eqref{desiguparame1} and \eqref{desipara1}, we have that the following inequality holds
$$ \|S^{1}_{m,t} / ( t^{2}S^{2}_{m,t}) - \nabla f(x) \| \le W_5 t^u,$$
in the set $B^{1}_{t,m}(W_1)^\complement \cap B^{2}_{t,m}(W_2)^\complement$, where $W_5$ is a proper positive constant. The proof is concluded using the previous inequality together with Estimate \eqref{desigfina}. }
\end{proof}
\textcolor{black}{As a consequence of the fast decay of the exponential function, we obtain the following result:}
\begin{corollary}
Under the same assumptions of Proposition \ref{estimaparamefull}, we have the inequality
\begin{equation}
\mathbb{P}(A_{t,m}(C_1))\ge 1 - W_4\frac{ e^{t^{\delta-1}}}{ m^2}.
\end{equation}
\end{corollary}
\textcolor{black}{Thus, the convergence rate does not depend on the dimension of the submanifold or the dimension of the ambient space. In this case, convergence is controlled by parameters $t$ and $m$, where $t$ is the approximation parameter and $m$ is the number of sample points.}
In particular, when the {\it PDF } is the function
\begin{equation}
\, \, q(y)= e^{\frac{-\| y-x \|^2}{2 t^2}} \, \,/d_t(x),
\label{gausiandistri}
\end{equation}
we can approximate $\overline{P}_{t} f (x)$ using $\mathcal{V}$, where
\begin{equation}
\mathcal{V}= \frac{1}{m} \sum_{i=1}^m (x_i-x) \, \, (f(x_i)-f(x)).
\label{estimator}
\end{equation}
This vector is analogous to the weighted gradient operator defined for graphs. See Ref.~\cite{gradientgrafo} for more details.
\par \textcolor{black}{Proposition \ref{estimaparamefull} states that once we have chosen the parameters $\delta$ and $t$, the value of $m$ must be greater than $(e^{-t^{2(\delta-1)}/2} t^{2+d+u})^{2}$ to guarantee a proper control in Inequality \eqref{paramecontrodes}. The parameter $t$ controls how much we approximate the true gradient. Needless to say, a choice of an extremely small $t$ would lead to numerical
instabilities, and thus $t$ in a certain sense would work as a regularization parameter. In such a scenario, we consider taking the parameter $\delta$ close to $1$ and $t$ moderately small to avoid instabilities generated by selecting the parameter $m$. We shall call $t$ the {\it gradient approximation parameter} and it will be provided as an input to the Algorithm~\ref{algoritgradie}. }
\begin{algorithm}[H]
\begin{flushleft}
\textbf{input} Sample points $x_1, x_2, x_3 \cdots x_m$ on $U(x,t^{\delta})$ with {\it PDF} $q$, and gradient approximation parameter $t$. \\
\begin{enumerate}
\item \textbf{for} $i = 1 $ to $m$ \textbf{do}
\begin{itemize}
\item $c_i \gets e^{\frac{-\| x_i-x \|^2}{2 t^2}} \, \, / q(x_i)$
\end{itemize}
\item \textbf{end for}
\item $d_{t} \gets \sum_{i=1}^m c_i $
\item $ \mathcal{V} \gets \frac{1}{d_{t}} \sum_{i=1}^m (x_i-x) \, \,(f(x_i)-f(x)) \, \, c_i $
\end{enumerate}
\textbf{return} $\mathcal{V}/t^2 $ which is an approximation for the gradient $\nabla f (x)$ \\
\end{flushleft}
\caption{ Approximate Gradient Sampling Algorithm}
\label{algoritgradie}
\end{algorithm}
\textcolor{black}{In Appendix \ref{numericalcomparison}, we explore the numerical consistency of Proposition \ref{estimaparamefull}, and we also compare the result with the learning gradient approach \cite{muksa}.}
\par We apply Algorithm~\ref{algoritgradie} in a gradient-based optimization method. Intuitively, Proposition~\ref{proposifuerte} says that the energy associated with the gradient decreases along the curve $c$. Therefore, we can use this curve to find a better approximation for local minimizers, ultimately leading to a derivative-free optimization method. \textcolor{black}{ The proposed algorithm is useful in situations where it is not straightforward to compute the gradient of a function.}
\par {Using Proposition~\ref{proposifuerte}, we have that the flow generated by
\begin{equation}
\label{flujoaproximado}
Dir(x)=\frac{\overline{P}_{t}f (x)}{t^2},
\end{equation}
approximates a curve along which the function $f$ decreases. }
This suggests that if we use the direction $Dir(x)$ defined in Eq.~(\ref{flujoaproximado}) as the main direction in a gradient-based algorithm, then in a certain way we are approximating the gradient descent method.
The gradient-based optimization method generated by the direction $Dir(x)$ is described by
$$ x_ {k+1}= \beta_{x_{k}} (x_k - \lambda Dir(x)),$$
where $\lambda$ is some relaxation parameter which defines the step size and $\beta_{x}$ is a local retraction of $\mathcal{M}$ around the point $x$.
\par We recall that a local retraction
$\beta_x$ consists of a locally defined
smooth map from a local neighbourhood around $x$ onto the
manifold $\mathcal{M}$, such that
it coincides with the identity when
restricted to $\mathcal{M}$. In other words,
$\beta_x \circ \iota = I_{A}$,
where $A$ is an open neighbourhood of the point $x$ in the topology induced by $\mathcal{M}$, and $\iota$ is the inclusion map from $A$ into the ambient space~\footnote{In the framework of matrix groups or more generally Riemannian submanifolds of $\mathbb{R}^n$ a retraction function is also used in \cite{sepul}.}.
\par The parameter $\lambda$ must be regularly reduced to avoid instabilities in our iteration. We propose to reduce the {\it relaxation parameter} $\lambda$ by a step-scale factor $s_f$ after $l$ consecutive numerical iterations. \textcolor{black}{ This procedure is similar to Armijo point rule described in Ref. \cite{sepul} }. We shall call
$l$ the {\it sub-iteration control number}.
\par We update the size $ \lambda $ of the step such that after a certain number of iterations, it decreases to a pre-conditioned proportion. We do this since the interval for which the curve is defined can be limited, and iterating with a fixed size would generate instabilities in the algorithm. Therefore, if we take smaller step sizes as the number of iterations increases, we obtain better estimates for the minimizer. As the number of iterations increases, we get closer to a local minimum. For this reason, our stopping criterion is achieved when
$$ |f(x_k)-f(x_{k+1})|\le \epsilon,$$
for a certain tolerance $\epsilon$. The latter will be called the {\it termination tolerance on the function value} and will be provided as an input parameter. Results on the convergence of this algorithm, as well as stopping criteria are described in Ref.~\cite{sepul}.
\par We summarize the above discussion in Algorithm~\ref{algoprinno}.
\begin{algorithm}[H]
\begin{flushleft}
\textbf{input} Initial guess $x_0$, gradient approximation parameter $t$, relaxation parameter $\lambda$, sub-iteration control number $l$, termination tolerance $\epsilon$, and step-scale factor $s_f$. \\
\textbf{initialization}\\
$k \gets 0 $\\
$counter \gets 0$\\
$x_{min} \gets x_0$ \\
$x_{-1} \gets x_0$ \\
\textbf{while} \quad $| f(x_{k-1})-f(x_k) | \ge \epsilon$ \textbf{or} $k=0$
\end{flushleft}
\begin{enumerate}
\item $x_{k+1} \gets \beta_{x_k}( x_k- \lambda \frac{\overline{P}_{t} f (x_k)}{ t^{2}})$
\item \textbf{if} $f(x_{k+1}) < f(x_{min})$ \textbf{do}
\begin{itemize}
\item $x_{min} \gets x_{k+1}$
\end{itemize}
\item \textbf{end if}
\item $k \gets k+1$
\item \textbf{if} $l < counter$ \textbf{do}
\begin{itemize}
\item $counter \gets 0$
\item $x_k \gets x_{min}$
\item $\lambda \gets \lambda/s_f$
\end{itemize}
\item \textbf{end if}
\item $counter \gets counter+1$
\end{enumerate}
\begin{flushleft}
\textbf{end while}\\
\textbf{return} $x_{min} $ \\
\end{flushleft}
\caption{Diffusion-map-based optimization}
\label{algoprinno}
\end{algorithm}
\subsection{High-dimensional datasets}
\textcolor{black}{
In many optimization problems, the dataset consists of sample points lying in an unknown lower-dimensional submanifold embedded in a high-dimensional space. We propose to use the dimensional reduction method and then, Algorithm \ref{algoprinno} to solve the optimization problem in the embedded space. This will be done without directly involving the {\it a priori} knowledge of the manifold.}
\par \textcolor{black}{ To be more specific, we assume that the optimization problem under consideration consists in minimizing the cost function $f$ over the dataset $X=\{{x_i} \}_{i=1}^{k}$. Regarding the dataset, we suppose that $X \subset \mathcal{M} \subset\mathbb{R}^n$, where $n$ is a large number, and $\mathcal{M}$ is a lower-dimensional Riemannian submanifold.
Since the information contains a large number of irrelevant data that make the computing process inefficient, we use the diffusion-maps approach to embed our dataset in a lower-dimensional space. This embedding process allows us to work only with the most important features, and thus, we obtain a better computational performance of the optimization algorithm. We denote the embedded points by
\begin{equation}
y_i=\psi_{m}^t(x_i),
\label{embededata}
\end{equation}
where $\psi_{m}^t$ is the diffusion-map.
We apply Algorithm \ref{algoprinno} to the dataset $Y=\{{y_{i}} \}_{i=1}^{k}$, and the function $\tilde{f}$. Here, the function $\tilde{f}$ is defined as
$\tilde{f}(y_i)=f(x_i),$
for all $x_i \in X$, and $y_i$ the associated point (\ref{embededata}). In this case, we use the retraction $\beta_x$, defined as the projection on $Y$, that is,
$$ \beta_x(z)= \underset{y_i \, \, \in Y}{\arg\min} \, \, \|z-y_i\|. $$}
\section{Numerical Experiments and Applications}
The following experiments were implemented
in \textsc{Matlab}\xspace software, using a desktop computer with the following configuration: Intel i5 9400 4.1 GHz processor, and 16 GB RAM.
\subsection{Sphere packing problem in dimensions 2 and 3}
The sphere packing problem \textcolor{black}{in the Euclidean space} poses the following question: How can one arrange non-overlapping congruent balls as densely as possible? This problem has an exact solution in dimensions $1, 2, 3, 8$, and $24$. See Refs.~\cite{mari8,mari245}. The one-dimensional sphere
packing problem is the interval packing problem on the line, which is trivial. The two and
three-dimensional cases are far from trivial. In the two-dimensional case the hexagonal packing gives the largest density; see Figure \ref{final2d}.
The three-dimensional case of packing spheres in $\mathbb{R}^3$ was solved by Hales in $2005$ and he gave a complex proof, which makes intensive use of computers~\cite{10.2307/20159940}. In this case, the
pyramid arrangement of equally sized spheres filling space is the optimal solution; see Figure \ref{fianl3d}.
In 2017, Viazovska solved the problem in dimensions eight and twenty-four with coworkers in the latter. See Refs.~\cite{mari8,mari245}.
\par \textcolor{black}{In this experiment, we reformulate the sphere packing problem as an optimization problem over a manifold, and we use the proposed
methodology to find a computational solution.}
\par We now discuss the problem in more detail. We denote by $Vol$ the volume form associated with the Lebesgue measure, and for $x \in \mathbb{R}^n$ and $r$ a positive real number, we denote by $B (x,r)$
the ball in $\mathbb{R}^n$ with center $x$ and radius $r$.
\par How do we define a sphere packing in the $n$ dimensional space? To this end, we assume that $C\subset \mathbb{R}^n$ is a discrete set of points such that $2r \le\| x-y \| $, for any two distinct $x,y \in C$, where $r$ is a positive real number. Then, the union
$$ S=\bigcup_{x\in C} B(x,r),$$
is a sphere packing, and its density $\Delta_{S}$ is defined as
$$\Delta_{S}=\limsup_{r\to \infty} \frac{Vol(S \cap B(0,r))}{Vol \, (B(0,r))}.$$
Intuitively, the density of a sphere packing is the fraction of space covered by the spheres of
the packing. The sphere packing problem consists in knowing what is the supremum $\Delta_{n}$ over all possible packing densities in $\mathbb{R}^n$. The number $\Delta_{n}$ is called the $n$ dimensional sphere packing constant.
\par One important way to create a sphere packing is to start with a lattice $\Lambda \subset \mathbb{R}n$, and center the spheres at the points of $\Lambda$, with radius half the length of the shortest non-zero vectors in $\Lambda$. Such packing is called lattice packing. A more general notion than lattice packing is periodic packing. In periodic packings, the spheres are centered on the points in the
union of finitely many translates of a lattice $\Lambda$. Not every sphere packing is a lattice packing, and, in all sufficiently large dimensions, there are packings denser than every lattice packing. In contrast, it is proved in Ref.~\cite{Groemer1963} that
periodic packings get arbitrarily close to the greatest packing density. Moreover, in Ref.~\cite{Groemer1963} it is shown that for
every periodic packing $P$ of the form
$$ P=\bigcup_{i=1}^k \bigcup_{x\in \Lambda} (x_i+ B(x,r)), $$
where $\Lambda$ is a lattice, its density is given by
$$ \Delta_{P}=k \frac{Vol \,(B(0,r))}{Vol \, (\Lambda)},$$
where $r=\min_{x,y \in P} \|x-y\|$.
\par Observe that the density packing is invariant under scaling, that is, for a lattice $\Lambda$ and a positive constant $\alpha$ we have $\Delta_{\alpha \Lambda}=\Delta_{\Lambda}$. Thus, without loss of generality and normalizing if necessary, we can assume that the volume of the lattice is $Vol \, (\Lambda)=1$. If $b_1, \cdots b_n$ is a basis for $\Lambda$, then our problem can be reformulated as
\begin{equation}
\begin{alignedat}{3}
&\!\max_{b_1, \cdots, b_n} &\qquad& Vol \, ( B(0,1)) \, \left(\frac{g(b_1, \cdots, b_n)}{2}\right)^n\\
&\text{subject to} & &\det \, (b_1, \cdots, b_n)=1.\\
\end{alignedat}
\label{optipro}
\end{equation}
where $\det(\cdot)$ is the determinant function, and the function $g(b_1, \cdots, b_n)$ is defined as the minimum value of $\| z_1\,b_1+ \cdots+ z_n b_n \|_{2}$ over all possible $(z_1, \cdots, z_n) \in \mathbb{Z}^n \setminus \{0\}$.
\par Since the function $g$ is defined as a minimum, then this function is non-differentiable at least in the set of orthonormal matrices. In fact, if we consider an orthonormal set $b_1, \cdots, b_n$, then $g(b_1, \cdots, b_n)=1$. In that case, the smooth curve defined as
$$c(t)=(tb_1, \frac{1}{t} b_2, b_3, \cdots, b_n),$$
for $t>0$, satisfies
$$ g(c(t)) = \left\{\begin{array}{lr}
\frac { 1} {t}, & \text{for } t \ge 1\\
t & \text{for } t < 1\\
\end{array} \right. \mbox{. }$$
Since $g(c(t))$ is non-differentiable, then $g$ is not differentiable in $(b_1, \cdots, b_n)$.
\par To apply our approach, we first prove that the function $ g $ is locally Lipschitz. We write the matrices $A$ and $B$ in the column form $A=[a_1,\cdots, a_n]$ and $B=[b_1,\cdots, b_n]$, and the special linear group as $SL(n)=\{A \, | \, \det(A)=1 \}$. Since the inverse of a matrix is a continuous function on $SL(n)$, for $A \in SL(n)$ there exists an open set $ U \ni A $ and a positive constant $D$ such that for all $B \in U$
$$\|B^{-1}\|_{2} \le D. $$
Assume that $g(a_1,\cdots, a_n)=\|A \,\vec{z} \|_{2}$ and $g(b_1,\cdots, b_n)=\|B \,\vec{z_2} \|_{2}$ for $\vec{z}, \vec{z_2} \in \mathbb{Z}^n \setminus \{0\}$. In this case $g(b_1,\cdots, b_n)\le \|B \,\vec{z} \|_{2}$. Then, we have that
\begin{align*}
g(b_1,\cdots, b_n)- g(a_1,\cdots, a_n) & \le \|( A-B) \|_{2} \,\|\vec{z}\|_{2} \\
&\le \|A^{-1}\|_{2} \|A-B\|_{2} \|A \,\vec{z}\|_{2}.
\end{align*}
Minkowski's theorem for convex sets \cite{minkowsk} guarantees that for any matrix $A$ with $\det(A)=1$, the estimate $g(A)\le \sqrt{n}$ is satisfied. Thus, we obtain that
$$g(b_1,\cdots, b_n)- g(a_1,\cdots, a_n)\le \sqrt{n}D\|A-B\|_{2}.$$
By symmetry, the above inequality is still valid if we change the order of $A$ and $B$. This proves that $g$ is locally Lipschitz.
\par In dimensions $2$ and $3$ the solutions of the problem in Eq.~ \eqref{optipro} are $\Delta_{2}=\frac{\pi }{2 \sqrt{3}}$ and $\Delta_{3}=\frac{\pi}{3 \sqrt{2}}$, respectively. In these dimensions the maximizers are the hexagonal lattice, Figure \ref{final2d}, and the pyramid lattice packing, Figure \ref{fianl3d}.
\par \textcolor{black} { Observe that the problem in Eq.~\eqref{optipro} can be considered as an optimization problem on the manifold $SL(n)$. We use our approach to find the maximizers in dimensions $2$ and $3$. Since maximizing the function $g$ is equivalent to minimizing $-g$, then we apply our approach to the function $-g$.
We use Algorithm \ref{algoprinno} to minimize the function $-g$, and thus Algorithm \ref{algoritgradie} to compute $\overline{P}_{t}f (x)$. In this experiment, we use the {\it PDF} function $q$ defined as in Eq~(\ref{gausiandistri}) to compute the gradient. In this case, the approximation is given by Eq.~\eqref{estimator}. We generate a total of $m=20$ sample points from the normal distribution for the parameter $\delta=0.99$ using the \textsc{Matlab}\xspace function {\it normrnd}, and then projected to the manifold $SL(n)$ using the retraction given by
\begin{equation}
\beta_A(b_1,\cdots, b_n)=\frac{\left(\mathrm{sign}(\det(B)) \, b_1,b_2,\cdots, b_n\right)}{|\det(B)|^{\frac{1}{n}}}.
\label{retracspehere}
\end{equation}
Since $\Delta_n\le 1$, then, we take a small initial step size to get a better performance of our methodology. Our initial guess $x_0$, is the identity matrix and initial parameters $t=10^{-5}, \,\, \lambda=0.1, \, \, l=10, \, \,\epsilon=10^{-10}, \, \, s_f=1.1$.
We note that these are the parameters for which we obtain better results.}
\par We use the Exhaustive Enumeration Algorithm proposed in Ref.~\cite{schnorr1994lattice} to compute the function $g$. The implementation of this algorithm is provided in the GitHub repository~\cite{softwa} using \textsc{Matlab}\xspace.
In Figures \ref{final2d} and \ref{fianl3d}, we plot the final step of each execution of the proposed algorithm in dimensions $2$ and $3$. Observe that in all executions, the final step approximates the optimal sphere packing illustrated in Figures~\ref{final2d} and \ref{fianl3d} in each dimension (up to rotations). This fact was verified by calculating the error as shown in Figure~\ref{erroresdel}.
\begin{figure}
\caption{Plot of the absolute error ($AE$) generated by five executions using Algorithm~\ref{algoprinno}.}
\label{erroresdel}
\end{figure}
\par \textcolor{black}{ We now compare the proposed algorithm with the \textsc{PSO} and \textsc{Nelder-Mead}, for that, we run five different executions for the different algorithms. In Figure~\ref{erroresdel}, we plot the absolute error $(AE)$ of approximating $\Delta_{2}$ and $\Delta_{3}$ for the iteration value $x_n$. Each color represents a different execution. The \textsc{PSO} and \textsc{Nelder-Mead} algorithms are implemented in the \textsc{Manopt toolbox} using default parameters. We implement the PSO algorithm with $40$ particles. }\\
\textcolor{black}{In addition, we test the proposed method to compute an approximation for the densities $\Delta_4$ and $\Delta_5$ using the previous setting. Although the problem remains unsolved in these dimensions, the best packing densities in the literature \cite{conway_a._bannai_1993,cohn2022} are $0.6168502750680849 \cdots$ for $n=4$ and $0.4652576133092586 \cdots$ for $n=5$. Through the execution of the different algorithms, for both cases, the best packing density is obtained using the proposed methodology. In fact, the Algorithm~\ref{algoprinno} for the case $n=4$, gives an optimal packing density equal to $0.616825892885318$ and for $n=5$, gives $0.465218060094373$.}
\textcolor{black}{Thus, as evidence, we observe that the proposed methodology outperforms the \textsc{PSO} and \textsc{Nelder-Mead} derivative-free algorithms, for dimensions greater than $2$.}
\textcolor{black}{We emphasize that the proposed methodology focuses on cases where in each iteration the only information available is a set of sample points lying in an unknown manifold. In such case, the solvers \textsc{PSO} and \textsc{Nelder-Mead} cannot be executed.}
\begin{figure}
\caption{Plot of the best packing density generated by ten executions using Algorithm~\ref{algoprinno}.}
\label{bestdensity}
\end{figure}
\begin{figure}
\caption{Plot of the final lattice packing step of five executions to approximate the density $\Delta_{2}$.}
\label{final2d}
\end{figure}
\begin{figure}
\caption{Plot of the final lattice packing step of five executions to approximate the density $\Delta_{3}$.}
\label{fianl3d}
\end{figure}
\subsection{ Tomographic reconstruction from unknown random angles}
\textcolor{black}{ Tomographic reconstruction is a widely studied problem in the field of inverse problems. Its goal is to reconstruct an object from its angular projections. This problem has many applications in medicine, optics and other areas. We refer the reader to Refs.~\cite{radon3,radon1,radon2,natterer} for more details.}
\par \textcolor{black}{Classical reconstruction methods are based on the fact that the angular position is known. See Ref.~\cite{radon3}. In contrast, there are many cases for which the angles of the projections are not available, for instance, when the object is moving. The latter is a nonlinear inverse problem, which can be more difficult when compared to the classical linear inverse problem.}
\par \textcolor{black} {Now, we explain the problem in more detail. Suppose that $f: \mathbb{R}^2 \to \mathbb{R}_{\ge 0} $ describes the density of an object, and let $\theta$ be an angle. We define the one-dimensional tomographic projection over the angle $\theta$ as
$$\mathbb{P}_\theta f (x)=\int f(R_{\theta}(x,y)) \, dy,$$
where $R_{\theta}(x,y)$ is the counterclockwise rotation of the two-dimensional vector $(x,y)$ with respect to the angle $\theta$. Since
\[ \int |\mathbb{P}_{\theta_i}f(x) | \, dx =\int f(x,y) \, dy \, dx, \]
then, normalizing if necessary, we also assume that $\|\mathbb{P}_{\theta_i}f \|_{L^1}=1$.}
\textcolor{black}{ The problem under consideration consists in reconstructing the density $ f $ with the knowledge of projections $\mathbb{P}_{\theta_1} f, \mathbb{P}_{\theta_2} f, \cdots \mathbb{P}_{\theta_k} f$, where the angles $ \theta_1, \theta_2, \cdots \theta_k $ are unknown. If through some method the rotations are known, then we can obtain the density function $ f $ using classical reconstruction methods.}
\par \textcolor{black}{In Ref. \cite{angucoif} an approach using the graph Laplacian is proposed to deal with this problem. However, the difficulty in using the previous approach is that it assumes {\it a priori} the knowledge of the distribution of the angles $\{ \theta_i \}_{i=1}^k$}. That is, it is necessary to assume the Euclidean distance between two consecutive angles. We use our methodology to tackle the latter problem, the road-map of our approach is established in Algorithm \ref{algorittomogra}. Let $DS$ be the dataset defined as the set of all tomographic projections
\begin{equation}
\label{dataset}
DS= \{\mathbb{P}_{\theta_i} f \}_{i=1}^k.
\end{equation}
If we assume that the density function $f$ has compact support, then a straightforward computation gives
\begin{align}
\int \mathbb{P}_{\theta_i}f (x) \, x \, dx & =\int \int \langle(x,y) , (f(R_{\theta}(x,y),0)) \rangle dx dy \nonumber \\
& = \int \int \langle (x,y) , R_{\theta}(f(x,y) , 0)) \rangle dx dy \nonumber\\
& = \langle \tilde{V} , R_{\theta_i} \,( 1 , 0 ) \rangle,
\label{equacionrelacionrado}
\end{align}
where $\tilde{V}$ is the two-dimensional vector
$$\tilde{V}=(\int \int x f(x,y) \, dx dy , \int \int y f(x,y) \, dx dy ).$$
For practical purposes, we consider the discretization of the projection $\mathbb{P}_{\theta_i}f $ as the multidimensional vector given by
$$\overline{\mathbb{P}_{\theta_i}}f= ( \mathbb{P}_{\theta_i} f (x_1) , \mathbb{P}_{\theta_i} f (x_2) , \cdots , \mathbb{P}_{\theta_i} f (x_l) ),$$
where $x_1 < x_2 < \cdots < x_l $ are equally spaced fixed points on the $x$ axis that describe the projection onto the angle $\theta_i$. See Figure~\ref{figutomogrejem}.
\begin{figure}
\caption{Tomography of an object.}
\label{figutomogrejem}
\end{figure}
\par Let $X$ be the multidimensional vector
$$X=(x_1, x_2 , \cdots , x_l).$$
The discretization of the integrals in Eq.~\eqref{equacionrelacionrado} gives
\begin{equation}
\frac{1}{h} \, \langle \,\overline{\mathbb{P}_{\theta_i}} f , X \rangle \approx \langle \tilde{V} , R_{\theta_i} \,( 1 , 0 ) \rangle ,
\label{eqprinang}
\end{equation}
where $ h $ is the distance between two consecutive points. Equation~\eqref{eqprinang} allows to estimate, except for a possible sign and translation, the angle $\theta_i$. Namely, if the two-dimensional vector $\tilde{V}$ has angle $\tilde{\theta}$, then, we recover $\theta_i$ using the expression
\begin{equation}
\cos \, (\theta_i-\tilde{\theta}) \approx \frac{1}{ h \, \,\|\tilde{V} \|_{2} } \langle \overline{\mathbb{P}_{\theta_i}} f , X \rangle .
\label{cosenang}
\end{equation}
In this case, we use Eq.~(\ref{eqprinang}) to compute the value $\|\tilde{V} \|$ as
\begin{equation}
\|\tilde{V} \|_{2} \approx \max_{\theta_i} \left | \frac{1}{ h } \langle \overline{\mathbb{P}_{\theta_i}} f , X \rangle \right |.
\label{angulomaxi}
\end{equation}
We remark that in this approach we do not compute the two-dimensional vector $\tilde{V}$, instead, we compute the norm $\|\tilde{V} \| $ using Eq.~\eqref{angulomaxi}. Observe that to solve the optimization problem in Eq.~\eqref{angulomaxi} it is sufficient to assume that ${\theta_i} \in [0, \pi]$.
\par Once we solve the previous optimization problem, we use Eq.~(\ref{eqprinang}) to calculate the angle $\theta_i-\tilde{\theta}$. Observe that if we do not determine the sign of the $\theta_i-\tilde{\theta}$, then a flipping effect appears on the reconstructed object, resulting in an image with many artifacts. We apply our gradient estimates to determine the sign of the angle. For that, we assume that the angles are distributed on the interval $ I=[0, \pi]$, and consider the numbers
\begin{equation}
m_1= \min_{i} |\theta_{i}-\tilde{\theta}| \qquad \text{and} \qquad M_1=\max_{i} |\theta_{i}-\tilde{\theta}|.
\label{angupeque}
\end{equation}
Since the maximum of the optimization problem in Eq.~\eqref{angulomaxi} is reached for some $\theta_i$, then $m_{1}=0$ or $M_{1}=\pi$. Without loss of generality, it is enough to consider the case $m_1=0$. In fact, if $M_{1}=\pi$, then we reflect the angles over the $y$-axis. Furthermore, changing the order if necessary we assume that
\begin{equation}
0=|\theta_{1}-\tilde{\theta}| < |\theta_{2}-\tilde{\theta}|< \cdots < |\theta_{k}-\tilde{\theta}|.
\label{anguorga}
\end{equation}
\par \textcolor{black}{ We observe that our dataset $(DS)$ defined as in Eq.~(\ref{dataset}) lies in the curve $c(I)$, which is parameterized by
$$ c(\theta)=\mathbb{P}_{\theta}f,$$
and in our case this parametrization is unknown. The main idea in our algorithm is to use the gradient flow of the function $g$ on the manifold $c(I)$, where $g:c(I) \to \mathbb{R} $ is defined as
\begin{equation}
g(Y)= \frac{1}{ h } \langle Y, X \rangle.
\label{levantamiento1}
\end{equation}
The importance of the gradient flow in our method lies in the fact that in a local neighborhood of the vector associated with the angle $0$, the gradient flow divides the dataset into two different clusters that determine the sign of the associated angles. This fact is proved using the approximation \eqref{cosenang} and the fact that the derivative of $\it{cosine}$ is an odd function on the real line. }
\par \textcolor{black}{ Before initializing our algorithm we divide the indices $\tilde{A}=\{i\}_{i=1}^k$ as follows. We select a fixed number $s$, which represents the size of the partition, and we consider the decomposition $k=us+r$, where $u$ and $r$ are non-negative integers with $r<s$. Then, we define the sets
\begin{equation}
\tilde{A}_i=\{is+1, is+2, \cdots, (i+1)s\},
\label{indice1}
\end{equation}
for $i \in \{0, 1, 2, \cdots, u-1\}$, and
\begin{equation}
\tilde{A}_u=\tilde{A} \, \, \setminus \bigcup_{i=0}^{u-1} \tilde{A}_i.
\label{indice2}
\end{equation}
We use the partition $\{\tilde{A}_i\}_{i=0}^{u}$ to represent the local geometry of the dataset. For that, we consider the subset $DS_i$ of $DS$, defined as
\begin{equation}
DS_i=\{\mathbb{P}_{\theta_j}f \mid j \in \tilde{A}_i\}.
\label{dataparti}
\end{equation}
The first step in our algorithm is to determine the sign of angles in a local neighborhood of $0$, for that, we use the diffusion-map algorithm to embed the dataset $\overline{DS}_1=DS_1 \cup DS_2 \cup DS_3$ into the two-dimensional space $\mathbb{R}^2$. We endow this embedded dataset with the counting measure. Once the dataset is embedded, we proceed to compute the approximation for $\overline{P}_{1} \tilde{g}$ as described in Algorithm \ref{algoritgradie}. Here, we select the points $x_1, x_2, x_3 \cdots x_m$ as the $m$ closest points to $x$.
Since we only are interested in the direction induced by the gradient, then we propose to reduce the computational cost of the execution using the approximation
\begin{equation}
\overline{\mathcal{V}}= \sum_{i=1}^m (x_i-x) \, \,(\tilde{g}(x_i)-\tilde{g}(x)) \, \,e^{\frac{-\| x_i-x \|^2}{2 }},
\label{normaestima}
\end{equation}
where, the function $\tilde{g}$ is such that for each two-dimensional embedded point $y \in \mathbb{R}^2$ associated with vector $Y \in DS $, the value of $\tilde{g} (x)$ is defined as
\begin{equation}
\tilde{g}(y)=g(Y).
\label{levantamiento2}
\end{equation}
The two-dimensional representation of the dataset allows determining the sign of the angles $\theta_{i}-\tilde{\theta}$ regarding the orientation of the flow generated by the function $\tilde{g}(y)$. This is done by observing that locally the set of gradient vectors associated with positive angles and the set of gradient vectors associated with negative angles are separated by a hyperplane. Since $\theta_{2}-\tilde{\theta}$ is the smallest nonzero angle, then we use its gradient to define a hyperplane that separates the sets mentioned above. To be more specific, we separate the sets according to the sign of the inner product of its gradient with the gradient associated with $\theta_{2}-\tilde{\theta}$. We remark that in the first step we only classify the sign of angles associated with points lying in $DS_1 \cup DS_2$, to avoid instabilities generated by computing the gradient of the boundary points lying in $DS_3$.}
\par \textcolor{black}{The second step is to proceed inductively to determine the sign of the remaining angles as follows. Assume that for some $i \ge 2$ the sign of the angles associated with points lying in the set $DS_{i}$ is determined, and consider the dataset $\overline{DS}_i=DS_i \cup DS_{i+1}$. As in the first step, we use diffusion-maps to embed this dataset into $\mathbb{R}^2$. Observe that the function $g$ has no critical points on $\overline{DS}_i$. Then, the two-dimensional representation is divided at most into two clusters, in which each cluster represents the set of points with the same sign. We determine the sign of each cluster according to the sign of angles associated with points in $DS_{i}$ lying in the corresponding cluster. For practical purposes, we define the sign of each angle $\theta_{i}-\tilde{\theta}$ as the sign of the angle previously determined with the closest two-dimensional representation. We run this step until all the signs are determined. We summarize this reconstruction method in Algorithm \ref{algorittomogra}. We remark that the choice of the parameters $s$ and $m$ has to be modestly small to avoid instabilities in our algorithm.}
\begin{algorithm}[]
\begin{flushleft}
\textbf{input} Tomographic projections $DS= \{Y_i\}_{i=1}^k$, where $Y_i=\mathbb{P}_{\theta_i} f$, size of the partition $s$. \\
\begin{enumerate}
\item Normalize the dataset $DS$ such that $\|\mathbb{P}_{\theta_i}f \|_{L^1}=1$ for all $i$.
\item Compute $\|\tilde{V} \|_{2} $ solving the optimization problem in Eq.~\eqref{angulomaxi}.
\item Determine the angles $\theta_{i}-\tilde{\theta}$ using Eq.~(\ref{cosenang}).
\item Compute $M_1$ as in Eq.~(\ref{angupeque}).
\item If $M_1=\pi$, then we proceed to reflect the angles $\tilde{\theta_i}$ over the $y$-axis.
\item Construct $DS_i$ following Eqs.~(\ref{indice1}),~(\ref{indice2}), and~(\ref{dataparti}).
\item Use the diffusion-map approach to embed the dataset $DS_1 \cup DS_2 \cup DS_3$ into $\mathbb{R}^2$.
\item Compute $\overline{P}_{1} \tilde{g}$ using the approximation~(\ref{normaestima}), where $\tilde{g}$ is defined in Eqs.~(\ref{levantamiento1}) and~(\ref{levantamiento2}).
\item Determine the sign of the angles $\tilde{\theta_i}$ associated with points in $DS_1 \cup DS_2 $, according to the sign of the inner product of the associated gradient with the gradient associated with $\theta_2$.
\item \textbf{for} $j = 2 $ to $s$ \textbf{do}
\begin{itemize}
\item Use the diffusion-map approach to embed the dataset $DS_j \cup DS_{j+1}$ into $\mathbb{R}^2$.
\item Determine the sign of each angle $ \tilde{\theta_i}$ in $DS_{j+1}$ as the sign of angle previously determined with the closest two-dimensional representation.
\end{itemize}
\item \textbf{end for}
\item Reconstruct the signed angles.
\end{enumerate}
\end{flushleft}
\caption{ Tomographic reconstruction from unknown random angles}
\label{algorittomogra}
\end{algorithm}
The computational complexity of all the embeddings is $O(us^3)$, which corresponds to the complexity of the eigenvalue decomposition. On the other hand, the complexity of all gradient computations is $O(s)$, and the computational complexity of the other procedures described in Algorithm \ref{algorittomogra} is $O(s)$. Thus, Algorithm \ref{algorittomogra} runs with a $O(us^3)$ complexity which improves the $O(u^3s^3)$ complexity of the algorithm proposed in Ref.~\cite{angucoif}.
\par \textcolor{black}{We test our algorithm on the tomographic reconstruction of two objects. The first is the Shepp–Logan phantom, and the second is a computed tomography of a knee taken from Ref.~\cite{radiografi}. See Figure \ref{figtomogr}. In this experiment, we generate $k=2 \times 10^3$ random points uniformly distributed in $[0, \pi]$. The parameters used in Algorithm \ref{algorittomogra} are $s=20$, and $m=10$. The tomographic projections $\mathbb{P}_{\theta_1} f, \mathbb{P}_{\theta_2} f, \cdots \mathbb{P}_{\theta_k} f$ are computed using \textsc{Matlab}\xspace's \texttt{radon} function. We add random noise to these projections, for that, we consider the dataset of the form
\begin{equation}
\label{tomografialevelerro}
\mathbb{P}_{\theta_i}^{\varepsilon} f=\mathbb{P}_{\theta_i} f+ \eta W,
\end{equation}
where $W$ is white noise and $\eta$ is the noise level. Our purpose is to recover the density $f$, using only the measurements $\mathbb{P}_{\theta_i}^{\varepsilon} f$, regardless of their respective angles. }
\par \textcolor{black}{To illustrate how Algorithm \ref{algorittomogra} works, we plot the two essential steps in the method. In Figure \ref{twoembedd}, we plot the first two-dimensional embedding and their respective gradient approximation defined in Eq.~(\ref{normaestima}). Points with blue color are associated with positive angles and those with red color with negative angles. Furthermore, in Figure \ref{ste}, we plot the second two-dimensional embedding of our method. We observe that our method performs effectively in dividing the dataset into two different clusters according to the sign of the corresponding angle.}
\par In Figures \ref{reconstrufan} and \ref{reconstruknee}, we plot the reconstructed images of the Shepp–Logan phantom and the knee tomography, respectively. Here, the samples of the angles are uniformly distributed over $[0 , \pi ]$. We consider different levels of additive order error $\eta$ as represented in Eq.~\eqref{tomografialevelerro}.
We remark that we obtained similar results to those shown using multiple executions of our method. To measure the effectiveness of our method, we compare the $L^2$ error generated when our algorithm is implemented. The computed $L^2$ error is shown in Tables \ref{taberrorfan} and \ref{taberrorknee}.
Observing the computational error and image quality, we conclude that our reconstruction algorithm works efficiently with relatively low computational cost.
\begin{figure}
\caption{Picture of the Shepp–Logan phantom (a), and a knee sample image (b). Source for the latter: Ref.~\cite{radiografi}.}
\label{figtomogr}
\end{figure}
\begin{figure}
\caption{Plot of the first two-dimensional embedding (left), and their associated gradient approximation (right). In this experiment, the angle sample is uniformly distributed on $[0,\pi]$. Each color represents a different sign. Figure (a) corresponds to the Shepp–Logan phantom, and Figure (b) to the image of the knee. }
\label{twoembedd}
\end{figure}
\begin{figure}
\caption{Plot of the second two-dimensional embedding (left). Figure (a) corresponds to the Shepp–Logan phantom, and Figure (b) to the image of the knee. }
\label{ste}
\end{figure}
\begin{figure}
\caption{Reconstructed Shepp–Logan phantom for several additive errors $\eta$ as in Eq.~\eqref{tomografialevelerro}.}
\label{reconstrufan}
\end{figure}
\begin{figure}
\caption{Reconstructed knee tomography for several additive errors $\eta$ as in Eq.~\eqref{tomografialevelerro}.}
\label{reconstruknee}
\end{figure}
\section{Conclusions}
In this work, we recover the gradient operator defined on Riemannian submanifolds of the Euclidean space from random samples in a neighborhood of the point of interest. Our methodology is based on the estimates of the Laplace-Beltrami operator proposed in the diffusion maps approach. The estimates do not depend on the intrinsic parametrization of the submanifold. This feature is useful in cases where it is not feasible to identify the submanifold in which the dataset is lying. \textcolor{black}{The proposed method gives a closed form of the gradient representation in the learning gradient theory. This improves the numerical implementation and the accuracy of the approximations. }A natural continuation of the present work
would be to incorporate information of the cotangent bundle and deal with a duality version of our results, in this case, the aforementioned approach would be very handy.
\begin{table}
\begin{tabularx}{1\textwidth} {
| >{\centering\arraybackslash}X
| >{\centering\arraybackslash}X
| >{\centering\arraybackslash}X | }
\hline
Value of $\eta$ & With determination of the sign & Without determination of the sign \\
\hline
0 & 0.0814 & 0.2087 \\
0.05 & 0.0816 & 0.2101 \\
0.1 & 0.0824 & 0.2129 \\
\hline
\end{tabularx}
\caption{Error of the reconstructed Shepp–Logan phantom. We use the $L^2$ norm to compute the errors. Here, the sample angles are uniformly distributed over $[0 , \pi ]$.}
\label{taberrorfan}
\end{table}
\begin{table}
\begin{tabularx}{1\textwidth} {
| >{\centering\arraybackslash}X
| >{\centering\arraybackslash}X
| >{\centering\arraybackslash}X | }
\hline
Value of $\eta$ & With determination of the sign & Without determination of the sign \\
\hline
0 & 0.1001 & 0.1411 \\
0.05 & 0.1053 & 0.1425 \\
0.1 & 0.1114 & 0.1445 \\
\hline
\end{tabularx}
\caption{Error of the reconstructed knee tomography. We use the $L^2$ norm to compute the errors. Here, the sample angles are uniformly distributed over $[0 , \pi ]$.}
\label{taberrorknee}
\end{table}
Furthermore, this circle of ideas could be conjoined with the techniques proposed in Ref. \cite{PWD2020}.
\par \textcolor{black}{We conclude that the operator $\overline{P}_{t}f (x)$ locally approximates a smoothness version of the gradient of $f$. In fact, integrating by parts gives}
$$\overline{P}_{t}f (x) = \frac{2 t^2}{d_t(x)} \left( \int_{U(x,t^\delta)} \nabla f(y) e^{\frac{-\| y-x \|^2}{2 t^2}} dy + O( t^{\delta (d-1)}) \right ).$$
\textcolor{black}{The question of whether $\overline{P}_{t}f (x)$ is a global approximation of some smoothness gradient remains open and it could be investigated in future work.}
\par We apply our methodology in a step size algorithm as an optimization method on manifolds. This optimization method is effective in cases where it is difficult to compute the gradient of a function. As an application, we used our method to find an approximation to the sphere packing problem in dimensions 2 and 3, for the lattice packing case. Moreover, we use our approach to reconstruct tomographic images where the projected angles are unknown. The latter does not depend on {\it a priori} knowledge of the distribution of the angles, and its execution is computationally feasible.
\par \textcolor{black}{A natural follow-up is to apply this methodology to the dimension reduction of high-dimensional datasets}.
\par Due to the promising results obtained, another natural follow-up would be to implement our algorithm in the case of periodic lattice packing to obtain computational estimates for the sphere packing constant in several dimensions.
\par \textcolor{black}{ In addition, we plan to implement the gradient estimates in the reinforcement learning methodology, as well as implement the proposed method for other image reconstruction problems as well as integrate with other processing techniques such as the one described in Ref.~\cite{ZMSG2003}. }
\pagenumbering{arabic}
\setcounter{page}{1}
\begin{appendices}
\section{Numerical comparison with learning gradients}
\label{numericalcomparison}
\textcolor{black}{ In this section, we verify the consistency of Proposition \ref{estimaparamefull} and also compare the proposed algorithm with the learning gradient approximation \cite{muksa}. We recall that given a sample set $\{x_i \}_i$ and a function $f$ in the manifold $\mathcal{M}$, the learning gradient method computes an approximation $\vec{f}$ for the gradient using the sample points $x_i$ as}
\begin{equation}
\label{RKHS}
\vec{f}=\sum_{i} C_i K_{t}( \cdot, x_i),
\end{equation}
\textcolor{black}{
where $K$ is the Gaussian kernel $K_{t}( x, y)=e^{-\|x-y\|^2 / 2 t^2}$, and the coefficients $C_i$ are determined by solving the optimization problem}
\begin{equation}
\label{learninggradientformula}
\arg \max \sum_{i,j} w_{i,j} \left( f(x_j)-f(x_i)- \vec{f}(x_i) \cdot (x_j-x_i) \right)^2 +\lambda \| \vec{f} \|_{L_2}^2
\end{equation}
\textcolor{black}{
where $w_{i,j}=K(x_i,x_j)$. According to the theoretical results \cite{muksa}, to guarantee the convergence of the approximation the value for $\lambda$ is given by $\lambda=t^{d+3}$, where $d$ is the dimension of the manifold $\mathcal{M}$. The main implementation difference between the learning gradient method and the proposed methodology lies in the fact that we compute a closed form for the coefficients in the representation \eqref{RKHS} using the Markov normalization associated with Gaussian kernels. Thus, we avoid the costly computation of solving the optimization problem \eqref{learninggradientformula}. }
\textcolor{black}{
We test the learning gradient and the proposed methodology to compute the gradient of the function $f: M \to \mathbb{R}$ defined as}
\begin{equation*}
f(x)= \langle x , AA^{T} x \rangle,
\end{equation*}
\textcolor{black}{
where $A$ is a square matrix with random entries, and $\langle \cdot,\cdot \rangle$ is the dot product in the Euclidean space. Here, the manifold $M$ is the curve $(c(t),c(t),c(t)) \in \mathbb{R}^9$ parameterized by}
$$ c(t)=(\cos{2 \pi t}, \sin{2 \pi t}, \cos{4 \pi t}) \in \mathbb{R}^3,$$
\textcolor{black}{
where $t \in [0,1]$. In this example, we consider random points $t_i$ on $[0,1]$ and the set of sample points for which we compute the gradient approximation is defined as $$x_i=(c(t_i),c(t_i),c(t_i)).$$
We test both algorithms for different sample sizes $m$ and approximation parameters $t$. In Table \ref{tab1}, we compute the mean squared error (\textit{MSE}) of each approximation method in a logarithmic scale. We remark that since this result is probabilistic, several executions were carried out to obtain similar results without altering the conclusions concerning the tolerance of the approximation involving the several parameters.
In this experiment, we use $\delta=0.9$ and the parameter $t$ modestly small. Observe that for a fixed $t$, the \textit{MSE} error decreases when the number of sample points $m$ increases, which is consistent with the result of Proposition \ref{estimaparamefull}. In addition, observe that the proposed methodology gives a less \textit{MSE} error than the learning gradient method. This fact shows the consistency of the method with the theoretical development in this article.}
\begin{table}[H]
\centering
\begin{tabular}{|l|l|l|l|}
\hline
$t$ & $m$ & Proposed methodology & Learning gradient \\
\hline
1 & 100 & 4.13 & 4.8 \\
1 & 200 & 4.04 & 5.27 \\
1 & 300 & 3.8 & 5.63 \\
1 & 400 & 3.99 & 5.07 \\ \hline
0.5 & 100 & 3.23 & 5.51 \\
0.5 & 200 & 3.69 & 5.72 \\
0.5 & 300 & 3.25 & 5.4 \\
0.5 & 400 & 3.41 & 5.68 \\ \hline
0.1 & 100 & 2.45 & 5.41 \\
0.1 & 200 & 2.98 & 6.38 \\
0.1 & 300 & 2.66 & 5.95 \\
0.1 & 400 & 2.28 & 6.02 \\ \hline
0.05 & 100 & 3.11 & 4.93 \\
0.05 & 200 & 3.28 & 5.91 \\
0.05 & 300 & 2.4 & 5.1 \\
0.05 & 400 & 2.14 & 5.94 \\ \hline
\end{tabular}
\caption{Mean squared error of the gradient approximation for the proposed method and the learning gradient in logarithmic scale. Here, $m$ is the number of sample points and $t$ is the approximation parameter.}
\label{tab1}
\end{table}
\section{Review of differential geometry}
\label{ape1}
We review some facts of differential geometry. We refer the reader to Ref.~\cite{do1992riemannian} for a more detailed description. Given an interior point $x \in \mathcal{M}$, there exists a positive real number $\varepsilon$ such that the map $\psi=\exp_{x} \circ \,T : B(0,\varepsilon)\subset \mathbb{R}^d \to \mathcal{M}$ is a local chart. Here, $\exp_{x}$ is the exponential map at the point $x$, and $T:\mathbb{R}^d \to T_{x} \mathcal{M}$ is a rotation from $\mathbb{R}^d$ onto $T_{x} \mathcal{M}$, both sets considered subsets of $\mathbb{R}^n$. The chart $\psi$ defines the normal coordinates at point $x$.
\par Given a smooth function $f \in C^{\infty} (\mathcal{M})$, the gradient operator $\nabla f (x)\in T_{x} \mathcal{M}$ is given in normal coordinates by
$$ \nabla f(x)= \sum_{i=1}^d \frac{\partial f}{\partial x_i} T(e_i).$$
Here, $e_i$ is the standard basis in $\mathbb{R}^d$. Now, we recall some estimates that use normal coordinates that are useful when estimating approximations for differential operators. The Taylor series of $\psi$ around the point $0$ is given by
\begin{equation}
\psi(v)= x+ T (v)+\frac{1}{2} D^2\psi_0(v,v)+ O(\| v\|^3).
\label{taylorexpo}
\end{equation}
Let $v \in B(0,\varepsilon)\subset \mathbb{R}^d$, and consider the geodesic $\gamma_{T(v)}$, with initial tangent vector $T(v) \in T_{x} \mathcal{M}$, then using Estimate~(\ref{taylorexpo}) we obtain
$$\gamma_{T(v)}(t)=x+T(v)\,t+\frac{1}{2} D^2\psi_0\,(v,v) t^2+O(\|v\|^3)t^3. $$
Since the covariant derivative of a geodesic vanishes, then $\gamma''_{{T(v)}}$ is orthogonal to $T_{x} \mathcal{M}$. Thus, we have the following estimates
\begin{equation}
\|\psi(v)-x\|^2= \|T(v)\|^2+O(\|v\|^4),
\label{estimativaorden2}
\end{equation}
and
\begin{equation}
\mathcal{P}_x(\psi(v)-x)= T(v)+O(\|v\|^3),
\label{estimativaorden3}
\end{equation}
where $\mathcal{P}_x$ is the orthogonal projection on $T_{x} \mathcal{M}$.
Using the Estimates~(\ref{estimativaorden2}) and~(\ref{estimativaorden3}), we obtain that there exist positive constants $M_1$ and $M_2$ such that for $\|v\|$ small
$$\|v\|-M_2 \|v\|^3 \le \|\psi(v)-x\|\le M_1 \|v\|.$$
Thus, if $\|v\|^2 \le \frac{1}{2M_2}$ we have
$$\frac{1}{2}\|v\| \le \|\psi(v)-x\|\le M_1 \|v\|.$$
This says that for $t$ small
\begin{equation}
\label{condilocal}
B(0,t/{M_1}) \subseteq \psi^{-1} (U(x,t^\delta)) \subseteq B(0,2 t).
\end{equation}
\section{Expansion of the gradient operator}
\label{ape2}
Here, we show the technical details of the proof of Theorem \ref{teo1}. The main idea is to use the Taylor expansion of the function $f$ around the point $x$.
\begin{lem}
\label{lemaprinci}
Assume that $\frac{1}{2} < \delta<1$, and let $K:\mathcal{M} \times \mathcal{M} \to \mathbb{R}^m$ be a vector value kernel. Define
$$P_{t}(x)=\int_{U(x,t^\delta)} K(x,y) \, e^{\frac{-\| y-x \|^2}{2 t^2}} dy,$$
where $U(x,t^\delta)$ is defined as in Eq.~(\ref{conjuntopequ}). Assume that for $t$ small, the function $\psi:B(0,2t^\delta) \to \mathcal{M} $ defines normal coordinates in a neighborhood of $x$, and let $S$ be a vector value function defined in $\mathbb{R}^d$ such that
$$K(x,\psi(v))-S(v)=O(\|v\|^r),$$
and
$$K(x,y)=O(\|x-y\|^s).$$
Then, we have
$$ P_{t}(x)=O( (e^{C_2 t^{4\delta-2}} -1) t^{s+d}+ t^{r+d})+\int_{\psi^{-1} (U(x,t^\delta))} S(v) \, e^{\frac{-\| T(v)\|^2}{2 t^2}} dv.$$
\end{lem}
\begin{proof}
Using Eq.~(\ref{condilocal}), we assume that for $t$ small, the set $U(x,t^\delta)$ lies in the image of a normal chart $\psi:B(0,2t^{\delta}) \to \mathcal{M} $ centered in $x$. Thus,
$$\begin{array}{rcl} \int_{U(x,t^\delta)} K(x,y) \, e^{\frac{-\| y-x \|^2}{2 t^2}} dy & = & \int_{\psi^{-1} (U(x,t^\delta))} K(x,\psi(v)) e^{\frac{-\| \psi(v)-x \|^2}{2 t^2}} dv \\ \, & = & \int_{\psi^{-1} (U(x,t^\delta))} K(x,\psi(v)) (e^{\frac{-\| \psi(v)-x \|^2}{2 t^2}}-e^{\frac{-\| T(v) \|^2}{2 t^2}}) dv \\ \, & + & \int_{\psi^{-1} (U(x,t^\delta))} (K(x,\psi(v))-S(v)) e^{\frac{-\| T(v)\|^2}{2 t^2}} dv
\\ \, & + & \int_{\psi^{-1} (U(x,t^\delta))} S(v) \, e^{\frac{-\| T(v)\|^2}{2 t^2}} dv.
\end{array} $$
We now estimate
$$A=\int_{\psi^{-1} (U(x,t^\delta))} K(x,\psi(v)) (e^{\frac{-\| \psi(v)-x \|^2}{2 t^2}}-e^{\frac{-\| T(v) \|^2}{2 t^2}}) dv. $$
Using Eq.~(\ref{estimativaorden2}), and the inequality $ |e^x-1|\le e^{|x|}-1$ we obtain
$$\begin{array}{rcl} |e^{\frac{-\| \psi(v)-x \|^2}{2 t^2}}-e^{\frac{-\| T(v) \|^2}{2 t^2}}| & = & e^{\frac{-\| T(v) \|^2}{2 t^2}} | e^{\frac{O(\|v\|^4)}{2 t^2}} -1| \\ \, & \le & e^{\frac{-\| T(v) \|^2}{2 t^2}} ( e^{\frac{C_1\|v\|^4}{2 t^2}} -1).
\end{array} $$
Therefore, by Equation~(\ref{condilocal}) we obtain
$$\begin{array}{rcl} \| A\| & \le & C_3 \, \, t^s ( e^{C_2 t^{4\delta-2}} -1) t^d \int_{\mathbb{R}^d} \| v \|^s e^{-\| v \|^2 /2} dv \\ \, & = & O( (e^{C_2 t^{4\delta-2}} -1) t^{s+d} ).
\end{array} $$
On the other hand, by assumption we have
$$\int_{\psi^{-1} (U(x,t^\delta))} ( K(x,\psi(v))-S(v)) e^{\frac{-\| T(v)\|^2}{2 t^2}} dv=O( t^{r+d} \,).$$
\end{proof}
\begin{lem} \label{lemma2}
Under the same assumptions of Lemma \ref{lemaprinci}, we define
$$E=\int_{\psi^{-1} (U(x,t^\delta))} Q(v) e^{\frac{-\| T(v)\|^2}{2 t^2}} g(v)dv,$$
where $g$ is a smooth function and $Q$ is a homogeneous polynomial of degree $l$. Then, we have
$$E=\int_{\mathbb{R}^d} Q(v) e^{\frac{-\| T(v)\|^2}{2 t^2}} (g(0)+\sum \frac{\partial g}{\partial v_i} (0) v_i) dv\,+ O(t^{d+l} e^{-M_2 t^{2(\delta-1)}}+t^{d+2+l}).$$
\end{lem}
\begin{proof}
Using the Taylor expansion of $g$ around $0$ we have
$$E=\int_{\psi^{-1} (U(x,t^\delta))} Q(v) e^{\frac{-\| T(v)\|^2}{2 t^2}} (g(0)+\sum \frac{\partial g}{\partial v_i} (0)\, v_i+ O(\|v\|^2) )dv .$$
Let $B$ be defined as
$$ B= \|\int_{\mathbb{R}^d \setminus \psi^{-1} (U(x,t^\delta))} Q(v) e^{\frac{-\| T(v)\|^2}{2 t^2}} (g(0)+\sum \frac{\partial g}{\partial v_i} (0)\, v_i) dv\|.$$
Using Eq.~(\ref{condilocal}) and the fast decay of the exponential function, we obtain that
$$ B \le C_4 t^{d+l} e^{-M_2 t^{2(\delta-1)}} \int_{\mathbb{R}^d \setminus B(0 , t^{\delta-1}/M_1)} P(\|v\|) e^{\frac{-\| T(v)\|^2}{4}} dv,$$
for a certain polynomial $P$. Therefore, we have
$$B = O(t^{d+l} e^{-M_2 t^{2(\delta-1)}} ),$$
for a proper constant $M_2$. Finally, we observe that
$$\int_{\psi^{-1} (U(x,t^\delta))} Q(v) e^{\frac{-\| T(v)\|^2}{2 t^2}} O(\|v\|^2) dv=O(t^{d+2+l} ).$$
\end{proof}
We recall the following computations related to the moments of the normal distribution that are useful in proving Theorem~\ref{teo1}.
For all index $i$
$$\int_{\mathbb{R}^d} v_i e^{\frac{-\| T(v)\|^2}{2 t^2}}dv=0,$$
and
$$\int_{\mathbb{R}^d} v_i^2 e^{\frac{-\| T(v)\|^2}{2 t^2}}dv=(2 \pi)^{\frac{d}{2}}\, t^{d+2},$$
moreover, if $i \neq j$ then
$$\int_{\mathbb{R}^d} v_i \, v_j e^{\frac{-\| T(v)\|^2}{2 t^2}}dv=0.$$
\begin{lem}
\label{lemanor}
Under the same assumptions of Lemmas \ref{lemaprinci} and \ref{lemma2} we have
\begin{equation}
\label{estimativanormali}
d_t(x)= (2 \pi)^{\frac{d}{2}} t^d+O(t^{d+4 \delta -2}).
\end{equation}
\end{lem}
\begin{proof}
We apply Lemmas \ref{lemaprinci} and \ref{lemma2} to the functions $K(x,y)=1$, $S(v)=1$, $Q(v)=1$, and $g(x)=1$. We use the parameters $r=2$, $s=0$ and $l=0$. Using the exponential decay we obtain the following estimate
$$d_t(x)= (2 \pi)^{\frac{d}{2}} t^d+O(t^{d+4 \delta -2}).$$
\end{proof}
\begin{proof}[Proof of Theorem \ref{teo1}]
We apply Lemmas \ref{lemaprinci} and \ref{lemma2} to the functions $K(x,y)= (y-x)(f(y)-f(x))$, $S(v)=T(v)(f(\psi(v))-f(x))=\sum v_i (f(\psi(v))-f(x)) T(e_i) $ , $Q(v)=v_i $ and $g(v)=(f(\psi(v))-f(x))$. Since $\psi(v)-x-T(v)=O(\|v\|^2)$ and $f(\psi(v))-f(x)=O(\|v\|^1)$, then the parameters that we use are $r=3$, $s=2$ and $l=1$. Again, using the exponential decay we have that
\begin{equation}
\int_{U(x,t^\delta)} \overline{K}(x,y) \, e^{\frac{-\| y-x \|^2}{2 t^2}} dy=(2 \pi)^{\frac{d}{2}}\, t^{d+2}\sum \frac{\partial f}{\partial v_i} (0)\, T(e_i)+O(t^{d+4 \delta}).
\label{ecuafinal}
\end{equation}
Finally we use Eq.~(\ref{estimativanormali}) of Lemma \ref{lemanor} to conclude the result.
\end{proof}
\end{appendices}
Received xxxx 20xx; revised xxxx 20xx.
\textit{E-mail address}, Alvaro Almeida Gomez: \texttt{[email protected]}
\textit{E-mail address}, Antônio J. Silva Neto: \texttt{[email protected]}
\textit{E-mail address}, Jorge P. Zubelli: \texttt{[email protected]}
\end{document}
|
\begin{document}
\title{Delegated Online Search}
\begin{abstract}
In a delegation problem, a \emph{principal} $\ensuremath{\mathcal{P}}$ with commitment power tries to pick one out of $n$ options. Each option is drawn independently from a known distribution. Instead of inspecting the options herself, $\ensuremath{\mathcal{P}}$ delegates the information acquisition to a rational and self-interested \emph{agent} $\ensuremath{\mathcal{A}}$. After inspection, $\ensuremath{\mathcal{A}}$ proposes one of the options, and $\ensuremath{\mathcal{P}}$ can accept or reject.
Delegation is a classic setting in economic information design with many prominent applications, but the computational problems are only poorly understood.
In this paper, we study a natural \emph{online} variant of delegation, in which the agent searches through the options in an online fashion. For each option, he has to irrevocably decide if he wants to propose the current option or discard it, before seeing information on the next option(s). How can we design algorithms for $\ensuremath{\mathcal{P}}$ that approximate the utility of her best option in hindsight?
We show that in general $\ensuremath{\mathcal{P}}$ can obtain a $\Theta(1/n)$-approximation and extend this result to ratios of $\Theta(k/n)$ in case (1) $\ensuremath{\mathcal{A}}$ has a lookahead of $k$ rounds, or (2) $\ensuremath{\mathcal{A}}$ can propose up to $k$ different options. We provide fine-grained bounds independent of $n$ based on two parameters. If the ratio of maximum and minimum utility for $\ensuremath{\mathcal{A}}$ is bounded by a factor $\alpha$, we obtain an $\Omega(\log\log \alpha / \log \alpha)$-approximation algorithm, and we show that this is best possible. Additionally, if $\ensuremath{\mathcal{P}}$ cannot distinguish options with the same value for herself, we show that ratios polynomial in $1/\alpha$ cannot be avoided. If the utilities of $\ensuremath{\mathcal{P}}$ and $\ensuremath{\mathcal{A}}$ for each option are related by a factor $\beta$, we obtain an $\Omega(1 / \log \beta)$-approximation, where $O(\log \log \beta / \log \beta)$ is best possible.
\end{abstract}
\section{Introduction}
The study of delegation problems is a prominent area in economics with numerous applications. There are two parties -- a decision maker (called \emph{principal}) $\ensuremath{\mathcal{P}}$ and an \emph{agent} $\ensuremath{\mathcal{A}}$. $n$ actions or \emph{options} are available to $\ensuremath{\mathcal{P}}$. Each option has a utility for $\ensuremath{\mathcal{P}}$ and a (possibly different) utility for $\ensuremath{\mathcal{A}}$, which are drawn from a known distribution $\ensuremath{\mathcal{D}}$. Instead of inspecting options herself, $\ensuremath{\mathcal{P}}$ delegates the search for a good option to $\ensuremath{\mathcal{A}}$. $\ensuremath{\mathcal{A}}$ sees all realized utility values and sends a signal to $\ensuremath{\mathcal{P}}$. Based on this signal (and $\ensuremath{\mathcal{D}}$), $\ensuremath{\mathcal{P}}$ chooses an option. Both parties play this game in order to maximize their respective utility from the chosen option.
Many interesting applications can be captured within this framework. For example, consider a company that is trying to hire an expert in a critical area. Instead of searching the market, the company delegates the search to a head-hunting agency that searches the market for suitable candidates. Alternatively, consider an investor, who hires a financial consultant to seek out suitable investment opportunities. Clearly, principal and agent might not always have aligned preferences. While the investor might prefer investments with high interest rates, the financial consultant prefers selling the products for which he gets a provision.
In applications such as searching for job candidates or financial investments, availability of options often changes over time, and the pair of agents needs to solve a stopping problem. For example, many lucrative financial investment opportunities arise only within short notice and expire quickly. Therefore, a consultant has to decide whether or not to recommend an investment without exactly knowing what future investment options might become available. Hence, $\ensuremath{\mathcal{A}}$ faces an online search problem, in which the $n$ options are realized in a sequential fashion. After seeing the realization of option $i$, he has to decide whether to propose the option to $\ensuremath{p}incipal$ or discard it. If the option is proposed, $\ensuremath{p}incipal$ decides to accept or reject this option and the process ends. Otherwise, the process continues with option $i+1$.
In the study of delegation problems, $\ensuremath{p}incipal$ usually has commitment power, i.e., $\ensuremath{p}incipal$ specifies in advance her decision for each possible signal, taking into account the subsequent best response of $\ensuremath{\mathcal{A}}$. This is reasonable in many applications (e.g., an investor can initially restrict the investment options she is interested in, or the company fixes in advance the required qualifications for the new employee). Interestingly, although $\ensuremath{p}incipal$ commits and restricts herself in advance, this behavior is usually in her favor. The induced best response of $\ensuremath{\mathcal{A}}$ can lead to much better utility for $\ensuremath{p}incipal$ than in any equilibrium, where both parties mutually best respond. Using a revelation-principle style argument, the communication between $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$ can be reduced to $\ensuremath{\mathcal{A}}$ revealing the utilities of a single option and $\ensuremath{p}incipal$ deciding to accept or reject that option (for a discussion, see, e.g.~\cite{KleinbergK18}).
The combination of online search and delegation has been examined before, albeit from a purely technical angle. Kleinberg and Kleinberg~\cite{KleinbergK18} recently designed approximation algorithms for delegation, using which $\ensuremath{p}incipal$ can obtain a constant-factor approximation to the expected utility of her best option in hindsight. Their algorithms heavily rely on techniques and tools developed in the domain of prophet inequalities. However, they are applied to an \emph{offline} delegation problem. Instead, our model is an extension of~\cite{KleinbergK18} to online search. Interestingly, our results reveal a notable contrast -- in online delegation a constant-factor approximation might be impossible to achieve. In fact, in the worst case the approximation ratio can be as low as $\Theta(1/n)$ and this is tight. Motivated by this sharp contrast, we provide a fine-grained analysis based on two natural problem parameters: (1) the discrepancy of utility for the agent, and (2) the misalignment of agent and principal utilities.
\subsection{Model}
We study \emph{online delegation} between principal $\ensuremath{p}incipal$ and agent $\ensuremath{\mathcal{A}}$ in (up to) $n$ rounds. In every round $i$, an option is drawn independently from a known distribution $\ensuremath{\mathcal{D}}_i$ with finite support $\Omega_i$ of size $s_i$. We denote the options of $\ensuremath{\mathcal{D}}_i$ by $\Omega_i = \{\omega_{i1}, \ldots, \omega_{i,s_i}\}$ and the random variable of the draw from $\ensuremath{\mathcal{D}}_i$ by $O_i$. For every $i \in [n]$ and $j \in [s_i]$, the option $\omega_{ij}$ has probability $\ensuremath{p}_{ij}$ to be drawn from $\ensuremath{\mathcal{D}}_i$. If this option is proposed by $\ensuremath{\mathcal{A}}$ and chosen by $\ensuremath{p}incipal$, it yields utility $\ensuremath{a}_{ij} \ge 0$ for $\ensuremath{\mathcal{A}}$ and $\ensuremath{b}_{ij} \ge 0$ for $\ensuremath{p}incipal$.
We assume that $\ensuremath{p}incipal$ has commitment power. Before the start of the game, she commits to an \emph{action scheme} $\varphi$ with a value $\varphi_{ij} \in [0,1]$ for each option $\omega_{ij}$. $\varphi_{ij}$ is the probability that $\ensuremath{p}incipal$ accepts option $\omega_{ij}$ when it is proposed by $\ensuremath{\mathcal{A}}$ in round $i$. We will sometimes consider \emph{deterministic} action schemes, which we represent using sets $E_i = \{ \omega_{ij} \mid \varphi_{ij} = 1\}$ of \emph{acceptable options} in each round $i \in [n]$.
In contrast to $\ensuremath{p}incipal$, $\ensuremath{\mathcal{A}}$ gets to see the $n$ random draws from the distributions in an online fashion. He has to decide after each round whether he proposes the current option $O_i$ to $\ensuremath{p}incipal$ or not. If he decides to propose it, then $\ensuremath{p}incipal$ decides according to $\varphi$ whether or not she accepts the option. If $\ensuremath{p}incipal$ accepts, the respective utility values are realized; if not, both players receive utility 0. In either case, the game ends after $\ensuremath{p}incipal$ decides. Clearly, both players strive to maximize their expected utility.
Initially, both players know the distribution $\ensuremath{\mathcal{D}}_i$ for every round $i \in [n]$. The sequence of actions then is as follows: (1) $\ensuremath{p}incipal$ decides $\varphi$ and communicates this to $\ensuremath{\mathcal{A}}$; (2) in each round $i$, $\ensuremath{\mathcal{A}}$ sees $O_i \sim \ensuremath{\mathcal{D}}_i$ and irrevocably decides to propose or discard $O_i$; (3) as soon as $\ensuremath{\mathcal{A}}$ decides to propose some option $O_i = \omega_{ij}$, then $\ensuremath{p}incipal$ accepts it with probability $\varphi_{ij}$, and the game ends.
Because $\ensuremath{\mathcal{A}}$ knows the distributions and the action scheme $\varphi$ of upcoming rounds, which modifies his expected utility from proposed options, $\ensuremath{\mathcal{A}}$ essentially faces an online stopping problem which he can solve using backwards induction. Hence, we can assume without loss of generality that all decisions (not) to propose an option by $\ensuremath{\mathcal{A}}$ are deterministic. That is, if the expected utility from the realization in the current round is greater than the expected utility obtained in the upcoming round, propose the current option; otherwise, wait for the next round.
To avoid technicalities in the analysis, we assume that $\ensuremath{\mathcal{A}}$ breaks ties in favor of $\ensuremath{p}incipal$.
Our goal is to design action schemes $\varphi$ with high expected utility for $\ensuremath{p}incipal$. We compare the expected utility to the one in the non-delegated (online) optimization problem, where $\ensuremath{p}incipal$ searches through the $n$ realized options herself. The latter is an elementary stopping problem, for which a classic prophet inequality relates the expected utility of the optimal online and offline strategies by at most a factor of 2~\cite{KrengelS77,KrengelS78}.
We also analyze scenarios with \emph{oblivious} and \emph{semi-oblivious proposals}. In both these scenarios, $\ensuremath{\mathcal{A}}$ reveals only the utility value $\ensuremath{b}_{ij}$ for $\ensuremath{p}incipal$ when proposing an option (but not his own value $\ensuremath{a}_{ij}$). In contrast, when $\ensuremath{p}incipal$ gets to see the utility values of both agents, we term this \emph{conscious proposals}. The difference between semi-oblivious and (fully) oblivious scenarios lies in the prior knowledge of $\ensuremath{p}incipal$. In the semi-oblivious scenario, $\ensuremath{p}incipal$ is fully aware of the distributions, including all potential utility values $\ensuremath{a}_{ij}$ for $\ensuremath{\mathcal{A}}$. In the oblivious scenario, $\ensuremath{p}incipal$ initially observes the probabilities of all options along with her utility values $\ensuremath{b}_{ij}$, but the values $\ensuremath{a}_{ij}$ of $\ensuremath{\mathcal{A}}$ remain unknown to $\ensuremath{p}incipal$ throughout. In the scenarios with restricted discrepancy studied in Section~\ref{sec:max-min-agent-ratio}, $\ensuremath{p}incipal$ is aware of the bound $\alpha = \max_{i,j} \ensuremath{a}_{ij} / \min_{i,j} \ensuremath{a}_{ij}$.
\begin{example}
Consider the following simple example for illustration. We consider deterministic strategies by $\ensuremath{p}incipal$ and conscious proposals. There are two rounds with the options distributed according to Table~\ref{table:example}.
\begin{table}
\centering
\begin{tabular}{c||cc|cc}
round $i$ & \multicolumn{2}{c|}{1} & \multicolumn{2}{c}{2} \\ \hline
option $\omega_{ij}$ & $\omega_{11}$ & $\omega_{12}$ & $\omega_{21}$ & $\omega_{22}$ \\
value-pair $(a_{ij},b_{ij})$ & (3,1) & (3,8) & (2,4) & (16,4) \\
probability $p_{ij}$ & 0.75 & 0.25 & 0.75 & 0.25 \\
\end{tabular}
\caption{An example instance}
\label{table:example}
\end{table}
For the benchmark, we assume that $\ensuremath{p}incipal$ can see and choose the options herself. The best option is $\omega_{12}$. If this is not realized in round 1, the option realized in round 2 is the best choice. Note that this optimal choice for $\ensuremath{p}incipal$ can be executed even in an online scenario, where she first sees round 1 and gets to see round 2 only after deciding about round 1. The expected utility of this best (online) choice for $\ensuremath{p}incipal$ is $5$.
Now in the delegated scenario, suppose $\ensuremath{\mathcal{P}}$ accepts option $\omega_{22}$. Then $\ensuremath{\mathcal{A}}$ would always wait for round 2 and hope for a realization of $\omega_{22}$, even if $\omega_{21}$ would not be accepted by $\ensuremath{\mathcal{P}}$. Hence, accepting $\omega_{22}$ leads to an expected utility for $\ensuremath{\mathcal{P}}$ of at most $4$.
In contrast, the optimal decision scheme for $\ensuremath{p}incipal$ is to accept only $\omega_{12}$ and $\omega_{21}$ with an expected utility of $4.25$.
Note that for the (semi-)oblivious scenario, $\ensuremath{\mathcal{P}}$ cannot distinguish the options in round $2$, which decreases her expected utility to at most 4.
This shows that $\ensuremath{p}incipal$ has to strike a careful balance between (1) accepting a sufficient number of high-profit options to obtain a high expected utility overall and (2) rejecting options to motivate $\ensuremath{\mathcal{A}}$ to propose options that are better for $\ensuremath{p}incipal$ in earlier rounds.
$\blacklozenge$
\end{example}
\subsection{Contribution}
In Section~\ref{sec:lb} we show that the worst-case approximation ratio for online delegation is $\Theta(1/n)$ and this is tight. Intuitively, $\ensuremath{\mathcal{A}}$ waits too long and forgoes many profitable options for $\ensuremath{p}incipal$. $\ensuremath{p}incipal$ can only force $\ensuremath{\mathcal{A}}$ to propose earlier if she refuses to accept later options -- this, however, also hurts the utility of $\ensuremath{p}incipal$. The instances require a ratio of maximum and minimum utility values for $\ensuremath{\mathcal{A}}$ that is in the order of $n^{\Theta(n)}$.
We further show that this lower-bound instance can be used for extensions in which (1) $\ensuremath{\mathcal{A}}$ has a lookahead of $k$ rounds, or (2) $\ensuremath{\mathcal{A}}$ can propose up to $k$ options, resulting in tight approximation ratios of $\Theta(k/n)$.
In Section~\ref{sec:max-min-agent-ratio}, we examine the effect of the discrepancy of utility for $\ensuremath{\mathcal{A}}$ using the ratio $\alpha$ of maximum and minimum utility values. We obtain an $\Omega(\log\log \alpha/\log \alpha)$-approximation of the optimal (online) search for $\ensuremath{\mathcal{P}}$, which is tight. The algorithm limits the acceptable options of $\ensuremath{\mathcal{P}}$, partitions them into different bins, and then restricts $\ensuremath{\mathcal{A}}$'s search space to the best possible bin for $\ensuremath{\mathcal{P}}$. The challenge is to carefully design a profitable set of options that should be accepted by $\ensuremath{\mathcal{P}}$ without giving $\ensuremath{\mathcal{A}}$ an incentive to forgo proposing many of these options. Our algorithm shows that even if differences in utility of $\ensuremath{\mathcal{A}}$ are polynomial in $n$, a good approximation for $\ensuremath{\mathcal{P}}$ can be obtained.
Additionally, we consider more challenging \emph{semi-oblivious} and \emph{oblivious} scenarios in which $\ensuremath{\mathcal{P}}$ does not get to see the agent's utility of the proposed option. In the (fully) oblivious case, $\ensuremath{\mathcal{P}}$ is even a~priori unaware of the utility values for $\ensuremath{\mathcal{A}}$ for all options (and thus remains so throughout). In the semi-oblivious case, $\ensuremath{\mathcal{P}}$ knows the prior distributions fully, i.e., for every option the probability and the utility values for \emph{both} agents.
Our Algorithms~\ref{algo:alpha-approx} and~\ref{algo:semiOblivious-approx} achieve $\Omega(1/\alpha)$ and $\Omega(1/(\sqrt{\alpha}\log \alpha))$-approximations for oblivious and semi-oblivious scenarios, respectively. This is contrasted with a set of instances for which any action scheme cannot extract more than an $O(1/\alpha)$- and $O(1/\sqrt{\alpha})$-approximation in the oblivious and semi-oblivious scenarios, respectively. These results highlight the effect of the hiding of $\ensuremath{\mathcal{A}}$'s utilities from $\ensuremath{p}incipal$ (in the proposal, or in the proposal and the prior) -- the achievable approximation ratios increase from logarithmic to polynomial ratios in $\alpha$.
In Section~\ref{sec:principal-agent-ratio}, we consider the misalignment of agent and principal utilities via a parameter $\beta \ge 1$, which is the smallest value such that all utilities of $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$ are related by a factor in $[1/\beta, \beta]$. Limited misalignment also leads to improved approximation results for $\ensuremath{p}incipal$. We show an $\Omega(1/\log \beta)$-approximation of the optimal (online) search for $\ensuremath{p}incipal$. Moreover, every algorithm must have a ratio in $O(\log \log \beta / \log \beta)$. For the agent-oblivious variant, we obtain an $\Omega(1/\beta)$-approximation, whereas every algorithm must have a ratio in $O(1/\sqrt{\beta})$.
\subsection{Related Work}
Holmstrom \cite{Holmstrom77,Holmstrom84} initiated the study of delegation as a bilevel optimi\-za\-tion between an uninformed principal and a privately informed agent. The principal delegates the decision to the agent who himself has an interest in the choice of decision. Since the principal has the power to limit the search space, her optimization problem lies in striking the balance between restricting the space enough such that the second-level optimization by the agent doesn't hurt her too much and allowing a large enough set of potential decisions such that an acceptable decision can be found at all. Holmstrom identified sufficient conditions for a solution to the problem to exist. Subsequent papers \cite{MelumadS91,AlonsoM08} studied the impact of (mis-)alignment of the agent's and the principal's interests on the optimal delegation sets.
In another direction, the model was extended by allowing the principal to set costs for subsets instead of forbidding them \cite{AmadorB13,AmbrusE17}. These costs might be non-monetary, i.e., using different levels of bureaucracy for different subsets of options.
Armstrong and Vickers~\cite{ArmstrongV10} studied the delegation problem over discrete sets of random cardinality with elements drawn from some distribution. They identify sufficient conditions for the search problem to have an optimal solution. A similar model was considered by Kleinberg and Kleinberg~\cite{KleinbergK18}, where the option set searched by the agent consists of $n$ iid draws from a known distribution. Their results include constant-factor approximations of the optimal expected principal utility when performing the search herself rather than delegating it to the agent. For this, they employ tools from online stopping theory. The key difference between their work and our paper is that -- although using tools from online optimization -- they study an \emph{offline} problem while we focus on an \emph{online} version.
Bechtel and Dughmi~\cite{BechtelD21} recently extended this line of research by combining delegation with stochastic probing. Here a subset of elements can be observed by the agent (subject to some constraints), and several options can be chosen (subject to a different set of constraints). They provide constant-factor approximations for several downwards-closed constraint systems.
The study of persuasion, another model of strategic communication, has gained a lot of traction at the intersection between economics and computation in recent years. Here, the informed party (the ``sender'') is the one with commitment power, trying to influence the behavior of the uninformed agent (the ``receiver''). Closely related to our paper is the study of persuasion in the context of stopping problems~\cite{HahnHS20,HahnHS20IJCAI}. These works study persuasion problems in a prophet inequality~\cite{HahnHS20IJCAI} as well as in a secretary setting~\cite{HahnHS20}.
Other notable algorithmic results on persuasion problems concern optimal policies, hardness, and approximation algorithms in the general case~\cite{DughmiX16} as well as in different variations, e.g., with multiple receivers~\cite{BabichenkoB17,BadanidiyuruBX18,DughmiX17,Rubinstein17,Xu20}, with limited communication complexity~\cite{DughmiKQ16,GradwohlHHS21}, or augmenting the communication through payments~\cite{DughmiNPW19}. A more elaborate communication model with evidence was studied recently in the framework of persuasion as well as delegation~\cite{HoeferMP21}.
\section{Impossibility}\label{sec:lb}
\subsection{A Tight Bound}
As a first simple observation, note that $\ensuremath{\mathcal{P}}$ can always achieve an $n$-approximation with a deterministic action scheme, even in the agent-oblivious case. $\ensuremath{\mathcal{P}}$ accepts exactly all options in a single round $i^*$ with optimal expected utility, i.e., $E_{i^*} = \{ \omega_{i^*,j} \mid j \in [s_{i^*}] \}$ for $i^* = \arg \max_{i \in [n]} \ensuremath{\mathbb{E}}[\ensuremath{b}_{ij}]$, and $E_i = \emptyset$ for all $i \neq i^*$. This motivates $\ensuremath{\mathcal{A}}$ to always propose the option from round $i^*$, and $\ensuremath{\mathcal{P}}$ gets expected utility $\ensuremath{\mathbb{E}}[\ensuremath{b}_{i^*,j}]$. By a union bound, the optimal utility from searching through all options herself is upper bounded by $\ensuremath{\mathbb{E}}\left[ \sum_i \ensuremath{b}_{ij} \right] \le n \cdot \ensuremath{\mathbb{E}}[\ensuremath{b}_{i^*,j}]$.
\begin{proposition}
For online delegation there is a deterministic action scheme $\varphi$ such that $\ensuremath{p}incipal$ obtains at least a $1/n$-approximation of the expected utility for optimal (online) search.
\end{proposition}
We show a matching impossibility result, even in the IID setting with $\ensuremath{\mathcal{D}}_i = \ensuremath{\mathcal{D}}$ for all rounds $i \in [n]$, and when $\ensuremath{p}incipal$ gets to see the full utility pair of any proposed option. There are instances in which $\ensuremath{p}incipal$ suffers a deterioration in the order of $\Theta(n)$ over the expected utility achieved by searching through the options herself.
For the proof, consider the following class of instances. The distribution $\ensuremath{\mathcal{D}}$ can be cast as an independent composition, i.e., we independently draw the utility values for $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$. For $\ensuremath{p}incipal$ there are two possibilities, either utility $1$ with probability $1/n$, or utility 0 with probability $1-1/n$. For $\ensuremath{\mathcal{A}}$, there are $n$ possibilities with agent utility of $n^{4\ell}$, for $\ell = 1,\ldots,n$, where each one has probability $1/n$. In combination, we can view $\ensuremath{\mathcal{D}}$ as a distribution over $j=1,\ldots,2n$ options. Options $\omega_j$ for $j=1,\ldots,n$ have probability $1/n^2$ and utilities $(\ensuremath{b}_{j}, \ensuremath{a}_{j}) = (1,n^{4j})$, for $j =n+1,\ldots,2n$ they have probability $1/n-1/n^2$ and utilities $(\ensuremath{b}_{j}, \ensuremath{a}_{j}) = (0,n^{4(j-n)})$.
\begin{theorem}\label{thm:generalLB}
There is a class of instances of online delegation in the IID setting, in which every action scheme $\varphi$ obtains at most an $O(1/n)$-approximation of the expected utility for optimal (online) search.
\end{theorem}
\begin{proof}
For simplicity, we first show the result for schemes $\varphi$ with $\varphi_{ij} = 0$ for all rounds $i \in [n]$ and all $j = n+1, \ldots, 2n$. In the end of the proof we discuss why this can be assumed for an optimal scheme.
Since all options $j \in [n]$ have the same utility for $\ensuremath{p}incipal$, she wants to accept one of them as soon as it appears. If she searches through the options herself, the probability that there is an option of value 1 is $1-(1-1/n)^n \ge 1-1/e$. Her expected utility is a constant. In contrast, when delegating the search to $\ensuremath{\mathcal{A}}$, the drastic utility increase motivates him to wait for the latest round in which a better option is still acceptable by $\ensuremath{p}incipal$. As a result, $\ensuremath{\mathcal{A}}$ waits too long, and removing acceptable options in later rounds does not remedy this problem for $\ensuremath{p}incipal$.
More formally, interpret an optimal scheme $\varphi$ as an $n \times n$ matrix, for rounds $i\in [n]$ and options $j \in [n]$. We outline some adjustments that preserve the optimality of matrix $\varphi$.
Consider the set $S$ of all entries with $\varphi_{ij} \le 1/n$. For each $(i,j) \in S$, the probability that option $j$ is realized in round $i$ is $1/n^2$. When it gets proposed by $\ensuremath{\mathcal{A}}$, then it is accepted by $\ensuremath{p}incipal$ with probability at most $1/n$. By a union bound, the utility obtained from all these options is at most $1 \cdot |S| \cdot 1/n^2 \cdot 1/n \le 1/n$.
Suppose we change the scheme by decreasing $\varphi_{ij}$ to 0 for each $(i,j) \in S$. Then each entry in $\varphi$ is either 0 or at least $1/n$. If $\ensuremath{\mathcal{A}}$ makes the same proposals as before, the change decreases the utility of $\ensuremath{\mathcal{P}}$ by at most $1/n$. Then again, in the new scheme $\ensuremath{\mathcal{A}}$ can have an incentive to propose other options in earlier rounds. Since all options with $\varphi_{ij} \neq 0$ have utility 1 for $\ensuremath{\mathcal{P}}$, this only leads to an increase of utility for $\ensuremath{\mathcal{P}}$. Moreover, in round 1 we increase all acceptance probabilities to $\varphi_{1j} = 1$ for $j \in [n]$. Then, upon arrival of such an option $\omega_{j}$ in round 1, the change can incentivize $\ensuremath{\mathcal{A}}$ to propose this option -- which is clearly optimal for $\ensuremath{\mathcal{P}}$, since this is an optimal option for her. Since the change is in round 1, it introduces no incentive to wait for $\ensuremath{\mathcal{A}}$. As such, it can only increase the utility for $\ensuremath{\mathcal{P}}$.
Now consider any entry $\varphi_{ij} \ge 1/n$. We observe two properties:
\begin{enumerate}
\item Suppose $\varphi_{i'j'} \ge 1/n$ for $i' < i$ and $j' < j$. Then $\ensuremath{p}incipal$ accepts realization $\omega_{j'}$ in round $i'$ with positive probability, but she will also accept the better (for $\ensuremath{\mathcal{A}}$) realization $\omega_{j}$ in a later round $i$ with positive probability. $\ensuremath{\mathcal{A}}$ will not propose $\omega_{j'}$ in round $i'$ but wait for round $i$, since the expected utility in the later round $i$ is at least $n^{4j} \cdot 1/n^2 \cdot \varphi_{ij} \ge n^{4j-3} > n^{4(j-1)} \ge n^{4j'} \cdot \varphi_{i'j'}$, the utility in round $i'$. As such, we assume w.l.o.g.\ that $\varphi_{i'j'} = 0$ for all $i' < i$ and $j' < j$.
\item Suppose $\varphi_{i'j} < \varphi_{ij}$ for $i' < i$. By property 1., all realizations $\omega_{j'}$ with $j' < j$ are not accepted in rounds $1,\ldots,i-1$. Hence, setting $\varphi_{i'j} = \varphi_{ij}$ does not change the incentives for $\ensuremath{\mathcal{A}}$ w.r.t.\ other options, and thus only (weakly) increases the expected utility of $\ensuremath{p}incipal$. By the same arguments, we set $\varphi_{ij'} = \max\{\varphi_{ij'}, \varphi_{ij}\}$ for all inferior options $j' < j$ in the same round $i$.
\end{enumerate}
We apply the previous two observations repeatedly, starting for the entries $\varphi_{in}$ in the $n$-th column for option $\omega_n$, then in column $n-1$, etc. By 1., every positive entry $\varphi_{ij} \ge 1/n$ leads to entries of 0 in all ``dominated'' entries $\varphi_{i'j'}$ with $i' < i$ and $j' < j$. As a consequence, the remaining positive entries form a ``Pareto curve'' in the matrix $\varphi$ or, more precisely, a Manhattan path starting at $\varphi_{1n}$, ending at $\varphi_{n1}$, where for each $\varphi_{ij} \ge 1/n$ the path continues either at $\varphi_{i+1,j} \ge 1/n$ or $\varphi_{i,j-1} \ge 1/n$.
We can upper bound the expected utility of $\ensuremath{p}incipal$ by assuming that all $2n-1$ entries on the Manhattan path are 1 (i.e., $\varphi$ is deterministic) and $\ensuremath{\mathcal{A}}$ proposes an acceptable option whenever possible. The probability that this happens is at most $(2n-1)/n^2 = O(1/n)$ by a union bound. This is an upper bound on the expected utility of $\ensuremath{p}incipal$ and proves the theorem for schemes with $\varphi_{ij} = 0$ for all $i \in [n]$ and $j \ge n+1$.
Finally, suppose $\varphi_{ij} > 0$ for some $j \ge n+1$. Clearly, option $\omega_{j}$ adds no value to the expected utility of $\ensuremath{p}incipal$. Moreover, the fact that $\omega_{j}$ has positive probability to be accepted in round $i$ can only motivate $\ensuremath{\mathcal{A}}$ to refrain from proposing inferior options in earlier rounds. As such, setting $\varphi_{ij} = 0$ only (weakly) increases the utility of $\ensuremath{p}incipal$.
\end{proof}
The lower bound in Theorem~\ref{thm:generalLB} remains robust also in several extensions of the model.
\subsection{Extensions}
\label{sec:extensions}
We discuss two generalizations for which a slight adaptation of the above lower bound is sufficient. First, we consider the case that $\ensuremath{\mathcal{A}}$ has a lookahead. Second, we allow several proposals to be made by $\ensuremath{\mathcal{A}}$.
\subsubsection{Agent with Lookahead}
Consider online delegation when $\ensuremath{\mathcal{A}}$ has a lookahead of $k$ rounds. In round $i$, $\ensuremath{\mathcal{A}}$ gets to see all realized options of rounds $i,i+1,\ldots,\min\{n,i+k\}$. For simplicity, our benchmark here is the expected value of $\ensuremath{p}incipal$ for optimal (non-delegated) \emph{offline} search (i.e., online search with lookahead $n-1$). Note that the expected value for optimal online search is at least 1/2 of this~\cite{KrengelS77,KrengelS78}. Hence, asymptotically all benchmarks of expected utility for optimal offline or online search, with or without lookahead, are the same.
\begin{proposition}
\label{prop:lookahead}
For online delegation with lookahead $k$ there is an action scheme $\varphi$ such that $\ensuremath{p}incipal$ obtains an $\Omega(k/n)$-approximation of the expected utility for optimal (offline) search.
\end{proposition}
Partition the $n$ rounds into $\lceil n/(k+1) \rceil $ parts with at most $k+1$ consecutive rounds each. Suppose we apply (non-delegated) offline search on each part individually. The expected value of offline search on the best of the $O(n/k)$ parts yields an $\Omega(k/n)$-approximation of the expected value of offline search on all $n$ rounds.
To obtain an $\Omega(k/n)$-approximation for the online delegated version, apply online search with $\ensuremath{\mathcal{A}}$ and lookahead of $k$ to the best part of at most $k+1$ consecutive rounds. Due to the lookahead, this results in offline delegated search. In terms of utility for $\ensuremath{p}incipal$, offline delegated search using prophet-inequality techniques~\cite{KleinbergK18} approximates optimal offline search by at least a factor of 1/2. Hence, applying the offline delegation algorithm of~\cite{KleinbergK18} on the best set of $k+1$ consecutive rounds yields an $\Omega(k/n)$-approximation.
Let us show that this guarantee is asymptotically optimal. The argument largely follows the proof of Theorem~\ref{thm:generalLB}. The class of instances is the same. We only explain which parts of the proof must be adapted.
\begin{corollary}\label{thm:lookaheadLB}
There is a class of instances of online delegation with lookahead $k$ in the IID setting, in which every action scheme $\varphi$ obtains at most an $O(k/n)$-approximation of the expected utility for optimal (offline) search.
\end{corollary}
\begin{proof}
Using similar observations as in the proof of Theorem~\ref{thm:generalLB} above, we can again (a) assume w.l.o.g.\ that $\varphi_{ij} = 0$ for all $j=n+1,\ldots,2n$, and (b) assume that $\varphi_{ij} = 0$ or $\varphi_{ij} \ge 1/n$, for all $i=1,\ldots,n$ and $j=1,\ldots,2n$, which deteriorates the expected utility for $\ensuremath{p}incipal$ by at most $O(1/n)$.
Consider the two properties in the proof of Theorem~\ref{thm:generalLB}. For property (1), we extend the idea to entries $\varphi_{i'j'} \ge 1/n$ with $j' < j$ and $i'+ k < i$. In particular, $\ensuremath{\mathcal{A}}$ will decide to wait and not propose option $\omega_{j'}$ in round $i'$ if there is a round $i > i'+ k$ with a better option $\omega_j$ being acceptable (with probability at least $\varphi_{ij} \ge 1/n$). As such, we drop $\varphi_{i'j'}$ to 0 whenever such a constellation arises. Then, whenever an entry remains $\varphi_{i'j'} \ge 1/n$, this means that all entries for better options $j > j'$ in rounds $i = i'+k+1,\ldots,n$ must be $\varphi_{ij} = 0$.
Now for a given option $\omega_{j'}$, consider round $i_{j'} = \arg\min \{i \mid \varphi_{ij'} \ge 1/n\}$. Then, for all better options with $j > j'$, property (1) requires that $\varphi_{ij} = 0$ for all $i \ge i_{j'} + k+1$. As such, for each option $\omega_j$, there can be at most $k$ positive entries ``beyond the Manhattan path'', i.e., $k$ rounds in which $\omega_j$ remains acceptable (with prob.\ at least $1/n$) after the first round when any lower-valued $\omega_{j'}$ becomes acceptable (with prob.\ at least $1/n$).
Property (2) applies similarly as before. As such, we obtain a Manhattan path with $2n-1$ entries, and in addition there can be up to $nk$ entries with $\varphi_{ij} \ge 1/n$, i.e., a total of at most $(k+2)n - 1$ entries. We again upper bound the expected utility of $\ensuremath{p}incipal$ by assuming that all these entries are 1 and $\ensuremath{\mathcal{A}}$ proposes an acceptable option whenever possible. The probability that this happens is at most $((k+2)n-1)/n^2 = O(k/n)$ by a union bound, and this implies the upper bound on the expected utility of $\ensuremath{p}incipal$.
\end{proof}
\subsubsection{Agent with $k$ Proposals}
Now consider the case when $\ensuremath{\mathcal{A}}$ can propose up to $k$ options in $k$ different rounds. In this case, the definition of an action scheme becomes more complex -- rather than a single matrix, $\varphi$ turns into a decision tree. For each round $i$, consider the \emph{history} $H_i = (h_1,\ldots,h_{i-1})$. For every round $j = 1,\ldots,i-1$, the entry $h_j$ indicates whether or not there was a proposal by $\ensuremath{\mathcal{A}}$ in round $j$, and if so, which option was proposed. Now given a round $i$ and a history $H_i$, an action scheme yields a value $\varphi_{ij}(H_i) \in \{0,1\}$ indicating whether or not $\ensuremath{\mathcal{P}}$ accepts option $\omega_j$ when being proposed in round $i$ after history $H_i$. As before, $\ensuremath{\mathcal{P}}$ commits to an action scheme anticipating the induced rational behavior of $\ensuremath{\mathcal{A}}$. A simple backward induction shows that there is always an optimal proposal strategy for $\ensuremath{\mathcal{A}}$ that is deterministic. For simplicity, we also restrict attention to deterministic action schemes for $\ensuremath{\mathcal{P}}$.
\begin{proposition}
For online delegation with $k$ proposals there is a deterministic action scheme $\varphi$ such that $\ensuremath{p}incipal$ obtains an $\Omega(k/n)$-approximation of the expected utility for optimal (offline) search.
\end{proposition}
The scheme is related to the approach in the previous section. Select the best interval $\ell, \ldots, \ell+k-1$ of $k$ consecutive rounds that maximize the expected value of offline search for $\ensuremath{p}incipal$ over these rounds. We observed in the previous section that optimal offline search in these $k$ rounds yields an $\Omega(k/n)$-approximation of optimal offline search over $n$ rounds. We design an action scheme that incentivizes $\ensuremath{\mathcal{A}}$ to propose exactly the $k$ options in rounds $\ell, \ldots, \ell+k-1$, thereby reducing the scenario to (non-delegated) online search for $\ensuremath{p}incipal$ over these rounds. Since the performance of online and offline search are related by a factor of 2, asymptotically we achieve the same performance as offline search over the $k$ rounds.
We set $\varphi_{ij}(H_i) = 0$ for rounds $i < \ell$ and all $j$ and $H_i$, as well as for rounds $i > \ell + k -1$ and all $j$ and $H_i$. For each round $\ell \le i \le \ell+k-1$, we set $\varphi_{ij}(H_i) = 0$ for all options $j$ whenever the history reveals that in at least one of the rounds $\ell,\ldots,i-1$ there was no proposal from $\ensuremath{\mathcal{A}}$. Otherwise, if $H_i$ reveals that there was a proposal in each of these rounds, we set $\varphi_{ij}(H_i)$ as in an optimal online (non-delegated) search over rounds $\ell,\ldots,\ell+k-1$.
In this action scheme, $\ensuremath{p}incipal$ immediately terminates the search whenever she did not receive a proposal from $\ensuremath{\mathcal{A}}$ in one of the $k$ rounds, leaving both agents with a utility of 0. This creates an incentive for $\ensuremath{\mathcal{A}}$ to submit a proposal in each of the $k$ rounds, since this is the only possibility to obtain a positive utility value.
Let us show that the approximation guarantee is asymptotically optimal. The argument uses and extends the result of Theorem~\ref{thm:generalLB}. The class of instances is the same.
\begin{theorem}\label{thm:kProposalLB}
There is a class of instances of online delegation with $k$ proposals in the IID setting, in which every deterministic action scheme $\varphi$ obtains at most an $O(k/n)$-approximation of the expected utility for optimal (offline) search.
\end{theorem}
\begin{proof}
We analyze the process by splitting the evolution of the process into at most $k$ non-overlapping \emph{phases}. Let $i_{\ell}$ be the (random) round in which $\ensuremath{\mathcal{A}}$ makes the $\ell$-th proposal, for $\ell = 1,\ldots,k$. For completeness, we set $i_0 = 0$. Phase $\ell$ is the set of rounds $\{i_{\ell-1} + 1,\ldots, i_{\ell}\}$. $\ensuremath{p}incipal$ can accept an option in at most one of the phases.
Thus, by linearity of expectation, the expected utility of $\ensuremath{p}incipal$ is upper bounded by the sum of expected utilities that $\ensuremath{p}incipal$ obtains in each phase. In the rest of the proof, we will show that in each phase, the expected utility for $\ensuremath{p}incipal$ is at most $O(1/n)$. Hence, the total expected utility of $\ensuremath{p}incipal$ is $O(k/n)$, which proves the approximation guarantee.
Towards this end, consider a single phase $\ell$. We condition on the \emph{full history} of the process before phase $\ell$, i.e., we fix all options drawn as well as decisions of $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$ that have led to the $(\ell-1)$-th proposal in round $i_{\ell-1}$.
We denote this full history by $H^f$. During phase $\ell$ (conditioned on $H^f$), we want to interpret the process as a single-proposal scenario analyzed in Theorem~\ref{thm:generalLB}. In particular, by fixing the history and the starting round of phase $\ell$, the histories $H_i$ within phase $\ell$ are also fixed. As such, during phase $\ell$, the scheme $\varphi$ can be seen as an action scheme for a single-proposal scenario with $n-i_{\ell-1}$ rounds.
Now let us define an auxiliary single-proposal instance with $n$ rounds. In this instance, we assume $\ensuremath{p}incipal$ sets $\varphi'_{ij} = 0$ for all options $j=1,\ldots,2n$ in the first $i=1,\ldots,i_{\ell-1}$ rounds and then in rounds $i = i_{\ell-1}+1,\ldots,n$ applies $\varphi'_{ij} = \varphi_{ij}(H_i)$ (where $H_i$ is composed of $H^f$ and no proposal in rounds $i_{\ell-1}+1,\ldots,i-1$).
Then $\ensuremath{p}incipal$ behaves in the auxiliary instance exactly as in phase $\ell$ of the $k$-proposal instance. In contrast, $\ensuremath{\mathcal{A}}$ does not necessarily show the same behavior. In the auxiliary instance, $\ensuremath{\mathcal{A}}$ gets utility 0 if the proposal is rejected.
In phase $\ell$ of the $k$-proposal instance, however, proposing an option that gets rejected can be profitable for $\ensuremath{\mathcal{A}}$. After rejection, phase $\ell + 1$ is reached and better expected utility for $\ensuremath{\mathcal{A}}$ might be achievable in upcoming rounds (since the scheme could result in more favorable behavior of $\ensuremath{p}incipal$ when the $\ell$-th reject decision happens in round $i$).
In the auxiliary instance, we model this property by a \emph{reject bonus} for $\ensuremath{\mathcal{A}}$ -- whenever a proposal is rejected in any round $i \ge i_{\ell-1}+1$,
then ($\ensuremath{p}incipal$ receives no utility and) $\ensuremath{\mathcal{A}}$ receives the conditional expected utility from optimal play in the remaining rounds of the $k$-proposal instance, conditioned on $\varphi$ and history $H_{i+1}$ composed of $H_{i}$ and the rejected proposal in round $i$. It is straightforward to see that in the auxiliary instance with reject bonus, the interaction between $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$ exactly proceeds as in phase $\ell$ of the $k$-proposal instance.
Consider the auxiliary single-proposal instance with reject bonus for any given phase $\ell$.
We prove that the expected utility for $\ensuremath{p}incipal$ does not decrease when the following \textit{adjustments} are made: (1) the reject bonus is reduced to 0, and (2) we set $\varphi'_{ij} = 0$ for all rounds $i$ and options $j \ge n+1$ (i.e., the ones with $b_{j} = 0$). We prove the statement by induction over the rounds.
Clearly w.l.o.g.\ there are no proposals in rounds $i \le i_{\ell-1}$. We can assume that (1) and (2) hold for all these rounds. Now consider round $i = i_{\ell-1} + 1$. When rejecting an option, or when accepting an option of value 0, the utility for $\ensuremath{p}incipal$ is 0. For these options, adjustments (1) and (2) in round $i$ change the utility for $\ensuremath{\mathcal{A}}$ to 0, as well. When facing such an option in round $i$, the adjustments incentivize $\ensuremath{\mathcal{A}}$ to wait for potentially better subsequent options. It weakly increases the expected utility of $\ensuremath{p}incipal$.
Towards an induction, suppose the statement is true after the adjustments (1) and (2) in all rounds $i_{\ell-1} + 1 \le i' \le i$. Now consider round $i+1$. First, condition on the event that in both instances (with and without adjustments on round $i+1$), we reach round $i+1$. As argued above, the adjustments in round $i+1$ imply that $\ensuremath{\mathcal{A}}$ has less incentive to propose an option of value 0 for $\ensuremath{p}incipal$ in round $i+1$ and more incentive to wait for subsequent rounds. Hence, the utility for $\ensuremath{p}incipal$ (conditioned on reaching round $i+1$) does not decrease.
Note, however, that the probability of reaching round $i+1$ also changes by the adjustments. For every $i_{\ell-1} < i' < i+1$, removing the reject bonus and reducing the set of acceptable options in round $i+1$ lead to a reduction in expected utility for $\ensuremath{\mathcal{A}}$ from the rounds \emph{after} round $i'$. This increases the probability that $\ensuremath{\mathcal{A}}$ will propose an option in some round $i'$ \emph{before} $i+1$. It decreases the probability of reaching round $i+1$. Nevertheless, this is again good news for $\ensuremath{p}incipal$: Since by hypothesis $\ensuremath{p}incipal$ accepts only options of utility 1 and there is no reject bonus in all rounds $i' \le i$, any proposal in these rounds is accepted and results in optimal utility for $\ensuremath{p}incipal$. Overall, $\ensuremath{p}incipal$ only profits from the adjustments (1) and (2) in round $i+1$. By induction, this holds when the adjustments are made in all rounds.
After the adjustments, the auxiliary instance is a standard single-proposal instance studied in the context of Theorem~\ref{thm:generalLB}. This shows that the expected utility obtained by $\ensuremath{p}incipal$ is in $O(1/n)$.
As a consequence, the conditional expected utility for $\ensuremath{p}incipal$ in phase $\ell$ (conditioned on each $H^f$) is upper bounded by $O(1/n)$. Hence, the overall expected utility from phase $\ell$ is at most $O(1/n)$. The expected utility from $k$ phases is upper bounded by $O(k/n)$. This proves the theorem.
\end{proof}
\section{Discrepancy of Agent Utilities}\label{sec:max-min-agent-ratio}
\subsection{Conscious Proposals}
The lower bound instances in Theorem~\ref{thm:generalLB} rely on an exponentially large ratio of agent utilities between 1 and $n^{O(n)}$. Is such a drastic discrepancy necessary to show a substantial lower bound? Can we obtain better approximation results for instances with a smaller ratio of the maximum and minimum utility values for $\ensuremath{\mathcal{A}}$?
We first assume that $\ensuremath{a}_{ij} > 0$ for all options (see Remark~\ref{rem:loglogWithZero} below for how to extend our results to the case when $a_{ij} = 0$ is allowed). Let $\alpha = \max\{ \ensuremath{a}_{ij} \mid i \in [n], j \in [s_i] \} / \min\{ \ensuremath{a}_{ij} \mid i \in [n], j \in [s_i] \}$. W.l.o.g.\ we scale all utility values to $\ensuremath{a}_{ij} \in [1,\alpha]$, where both boundaries $\alpha$ and $1$ are attained by at least one option. We say that the agent has \emph{$\alpha$-bounded utilities}.
\begin{algorithm}[t]\DontPrintSemicolon
\caption{$\Omega(\log \log \alpha / \log \alpha)$-Approximation}\label{algo:log-alpha-loglog-alpha-approx}
\KwIn{$n$ distributions $\ensuremath{\mathcal{D}}_1, \dots, \ensuremath{\mathcal{D}}_n$}
\KwOut{Action Scheme $\varphi$}
Let $Q = $ RestrictOptions$(\ensuremath{\mathcal{D}}_1, \dots, \ensuremath{\mathcal{D}}_n,1/2)$.\;
\uIf{$Q$ spans only a single round}{
Set $B_1 = Q$.}
\Else{
Construct $c = \lceil \log_2 \alpha \rceil$ classes $\mathcal{C}_1, \dots, \mathcal{C}_c$ such that $\mathcal{C}_k = \{(i,j) \in Q \mid \ensuremath{a}_{ij} \in [2^{k-1},2^k)\}$ for all $k = 1, \dots, c - 1$ and $\mathcal{C}_{c} = \{ (i,j) \in Q \mid a_{ij} \in [2^{c-1},2^c]\}$. \label{algo-loglog-log:constructClasses} \\
Set $b=1$, and $s = c$. Open bin 1 and set $B_1 = \emptyset$. \\
\For{$k = c$ \textnormal{down to} $1$}{\label{algo-loglog-log:startFor}
\If{$2^{k-1} < 2^s \cdot \sum_{(i,j) \in B_b \cup \mathcal{C}_k} p_{ij}$ }{ \label{step10}
set $b = b+1$ and $s=k$. \tcp*{$\sum_{(i,j) \in Q} p_{ij} < 1/2$, so no open bin stays empty}
Open the new bin $b$ and set $B_b = \emptyset$}
Add class $\mathcal{C}_k$ to bin $B_b = B_b \cup \mathcal{C}_k$.}
}\label{algo-loglog-log:endFor}
$b^* = \arg \max_{b} \sum_{(i,j) \in B_b} p_{ij} \ensuremath{b}_{ij}$, the bin with highest utility for $\ensuremath{p}incipal$.\\
Set $\varphi_{ij} = 1$ for all $(i,j) \in B_{b^*}$ and $\varphi_{ij} = 0$ otherwise.\\
\textbf{return} $\varphi$
\end{algorithm}
We use Algorithm~\ref{algo:log-alpha-loglog-alpha-approx} to compute a good action scheme.
Intuitively, we partition the best options for $\ensuremath{p}incipal$ that add up to a total probability mass of roughly $1/2$ into $O(\log \alpha / \log \log \alpha)$ many bins. Each bin is constructed in a way such that $\ensuremath{\mathcal{A}}$ is incentivized to propose the first option he encounters from that particular bin. The algorithm determines the best bin for $\ensuremath{p}incipal$ and deterministically restricts the acceptable options to the ones from this bin.
The algorithm uses the subroutine RestrictOptions$(\ensuremath{\mathcal{D}}_1,\dots,\ensuremath{\mathcal{D}}_n,m)$ (Algorithm~\ref{algo:semiOblivious-restrict}), which considers all possible options in descending order of principal value until a total mass of $m$ is reached. These options are then collected in the set $Q = \{(i,j) \mid b_{ij} \ge b_{i'j'} \ \forall (i',j') \notin Q\}$ such that $\sum_{(i,j) \in Q} p_{ij} < m$. The options in the set $Q$ are compared with the set consisting solely of the first option to surpass the combined mass of $m$. Whichever gives the higher expected utility for principal is then returned by the subroutine as $Q$. This ensures that $\sum_{(i,j) \in Q \cup B} p_{ij} \cdot \ensuremath{b}_{ij} \ge m/2 \cdot \ensuremath{\mathbb{E}}_{\omega_{ij} \sim \ensuremath{\mathcal{D}}_i}[\max_{i \in [n]} \ensuremath{b}_{ij}]$. We formally prove this in Lemma~\ref{lem:restrictOptions} below.
\begin{algorithm}[t]\DontPrintSemicolon
\caption{\label{algo:semiOblivious-restrict} RestrictOptions}
\KwIn{$n$ distributions $\ensuremath{\mathcal{D}}_1,\dots, \ensuremath{\mathcal{D}}_n$, value $m$ restricting the mass}
\KwOut{Set $Q$ of good options for $\ensuremath{p}incipal$}
Set $Q = \emptyset$, $p=p^*=0$, $U = \bigcup_{i=1}^n \bigcup_{j=1}^{s_i} \{ (i,j) \}$.\\
\While{$p < m$}{\label{restrictOptions:startWhileQB}
$U^* = \emptyset$, $p^* = 0$ \\
\For{$k = 1, \dots, n$}{\label{restrictOptions:startForSamePrincValue}
Let $U_k^* = \{ (k,j) \in U \mid \ensuremath{b}_{kj} \ge \ensuremath{b}_{i'j'}$ for all $(i',j') \in U \}$ be the options in round $k$ from the set of all remaining options with the best utility for $\ensuremath{p}incipal$\\
Set $p_k = \sum_{(i,j) \in U^*_k} p_{ij}$\\
\lIf{\label{restrictOptions:startIfCombinedMass}$p^* + p_k < m$}{add $U^* = U^* \cup U_k^*$, update $p^* = p^* + p_k$}
\uElse{ \lIf{$p_k > p^*$}{ set $U^* = U_k^*$}
\textbf{break for-loop}}\label{restrictOptions:endIfCombinedMass}
}\label{restrictOptions:endForSamePrincValue}
Set $p^* = \sum_{(i,j) \in U^*} p_{ij}$ and $\ensuremath{b}^* = \ensuremath{b}_{ij}$ for $(i,j) \in U^*$\\
\tcc*{Note that all options in $U^*$ have the same value for $\ensuremath{p}incipal$.}
\textbf{if} $p + p^* > m$ \textbf{then} set $B = U^*$; \textbf{else} add $Q = Q \cup U^*$ \\
Remove $U = U \setminus U^*$, update $p = p + p^*$.\\
}\label{restrictOptions:endWhileQB}
Set $\ensuremath{b}_Q = \sum_{(i,j) \in Q} p_{ij}\ensuremath{b}_{ij}$ and $\ensuremath{b}_B = p^* \ensuremath{b}^*$ \label{restrictOptions:computeValues}\\
\textbf{if} $\ensuremath{b}_Q < \ensuremath{b}_B$ \textbf{then} set $Q = B$ \label{restrictOptions:chooseBetterSet} \\
\textbf{return} $Q$
\end{algorithm}
If the set $Q$ returned by RestrictOptions only spans a single round $i$, the agent will always be incentivized to propose an acceptable option in round $i$. For this scenario, the algorithm creates a single bin $B_1$. Otherwise, the set $Q$ is divided into $c = \lceil \log_2 \alpha \rceil$ classes depending on their agent utility in line~\ref{algo-loglog-log:constructClasses}.
The lowest and highest agent utilities in any given class differ by at most a factor of 2. More precisely, classes $\mathcal{C}_1, \dots, \mathcal{C}_c$ are constructed such that $\mathcal{C}_k = \{(i,j) \in Q \mid a_{ij} \in [2^{k-1},2^k)\}$ for $k=1,\dots,c-1$ and $\mathcal{C}_c = \{(i,j) \in Q \mid a_{ij} \in [2^{c-1},2^c]\}$.
These classes are then (lines~\ref{algo-loglog-log:startFor}-\ref{algo-loglog-log:endFor}) combined into bins such that (1) the bins are as big as possible and (2) $\ensuremath{\mathcal{A}}$ optimizes his own expected utility by proposing the first option he encounters from any bin -- assuming that only options from this bin are allowed. Classes are either added to a bin completely or not at all. Let $s$ be the index of the class with the highest agent utilities currently considered, i.e., the first class to be added to the current bin $B_b$. We consider the classes by decreasing agent utility values, i.e., with indices $k=s,s-1, \dots$. While $2^{k-1} \ge 2^s \cdot \sum_{(i,j) \in B_b \cup \mathcal{C}_k} p_{ij}$, a rational $\ensuremath{\mathcal{A}}$ will always propose the first option available from the current bin if that is the only allowed bin as it has a higher utility than what $\ensuremath{\mathcal{A}}$ can expect from later rounds.
Before stating the main result of the section, we analyze the subroutine RestrictOptions.
\begin{lemma}\label{lem:restrictOptions}
The subroutine RestrictOptions$(\ensuremath{\mathcal{D}}_1,\dots,\ensuremath{\mathcal{D}}_n,m)$ with distributions $\ensuremath{\mathcal{D}}_1, \dots, \ensuremath{\mathcal{D}}_n$ and a parameter $0 < m \le 1$ as input identifies $Q$, the best set of options for $\ensuremath{p}incipal$, such that
\[\sum_{(i,j) \in Q} p_{ij}\ensuremath{b}_{ij} \ge m/2 \cdot \ensuremath{\mathbb{E}}_{\omega_{ij}\sim \ensuremath{\mathcal{D}}_i}[\max_{i \in [n]} \ensuremath{b}_{ij}] \]
and either (1) the combined mass $\sum_{(i,j) \in Q} p_{ij} < m$ or (2) all options in $Q$ arrive in the same round.
\end{lemma}
\begin{proof}
RestrictOptions first restricts the possible set of options to $Q \cup B$ consisting of the best options for $\ensuremath{p}incipal$, with a union probability mass of at least $m$ in the while-loop in lines~\ref{restrictOptions:startWhileQB}-\ref{restrictOptions:endWhileQB}.
Inside this while-loop, the options with the highest utility for $\ensuremath{p}incipal$ are identified using the for-loop in lines~\ref{restrictOptions:startForSamePrincValue}-\ref{restrictOptions:endForSamePrincValue}. This loop ensures that no more than a combined mass of $m$ is considered for a set of options from different rounds with the same (currently highest) utility for $\ensuremath{p}incipal$. Should such a set with a higher combined mass than $m$ exist, the if/else-statement in lines~\ref{restrictOptions:startIfCombinedMass}-\ref{restrictOptions:endIfCombinedMass} picks the better part of this set while ensuring that either the combined mass of the set is at most $m$ or only options from a single round are considered.
Hence, by line~\ref{restrictOptions:endWhileQB} it holds that
\[ 1/m \cdot \sum_{(i,j) \in Q \cup B} p_{ij}\ensuremath{b}_{ij} \ge \ensuremath{\mathbb{E}}_{\omega_{ij} \sim \ensuremath{\mathcal{D}}_i}[\max_{i \in [n]} \ensuremath{b}_{ij}] \enspace.\]
In line~\ref{restrictOptions:computeValues}, the utility for $\ensuremath{p}incipal$ from the sets $Q$ and $B$ is calculated, in line~\ref{restrictOptions:chooseBetterSet}, the better one for $\ensuremath{p}incipal$ is chosen. This means that at most another factor of 2 is lost, which means that in total, the set $Q$ which is returned by RestrictOptions guarantees
\[ 2/m \cdot \sum_{(i,j) \in Q} p_{ij}\ensuremath{b}_{ij} \ge \ensuremath{\mathbb{E}}_{\omega_{ij} \sim \ensuremath{\mathcal{D}}_i}[\max_{i \in [n]} \ensuremath{b}_{ij}] \enspace.\]
\end{proof}
Theorem~\ref{theo:loglog-log} is our main result in this section. We prove it below using Lemmas~\ref{lem:OneOverClasses} and~\ref{lem:numberOfBins}.
\begin{theorem}\label{theo:loglog-log}
If the agent has $\alpha$-bounded utilities, there is a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(\log \log \alpha/\log \alpha)$-approximation of the expected utility for optimal (online) search.
\end{theorem}
\begin{lemma}
\label{lem:OneOverClasses}
Let $\ell$ be the number of bins opened in Algorithm~\ref{algo:log-alpha-loglog-alpha-approx}. Then the scheme computed by the algorithm obtains at least an $1/(8\ell)$-approximation of the expected utility of the best option for $\ensuremath{p}incipal$ in hindsight.
\end{lemma}
\begin{proof}
By Lemma~\ref{lem:restrictOptions}, we know that $Q$ satisfies
\[
4 \cdot \sum_{(i,j) \in Q } p_{ij} \ensuremath{b}_{ij} \ge \ensuremath{\mathbb{E}}_{\omega_{ij} \sim \ensuremath{\mathcal{D}}_i} [\max_{i \in [n]} \ensuremath{b}_{ij} ] = \ensuremath{\operatorname{OPT}}\enspace.
\]
Now consider the construction of the bins.
Suppose
we split $Q$ into $\ell$ bins $B_1, B_2, \ldots, B_{\ell}$. In the end, we choose the best one $B_{b^*}$ among the $\ell$ bins $B_1,\ldots,B_{\ell}$, so
\[
\sum_{(i,j) \in B_{b^*}} p_{ij} \ensuremath{b}_{ij} \ge \frac{1}{\ell} \sum_{(i,j) \in Q} p_{ij} \ensuremath{b}_{ij} \ge \frac{1}{4\ell} \cdot \ensuremath{\operatorname{OPT}}\enspace.
\]
The action scheme restricts attention to $B_{b^*}$ and accepts each proposed option $\omega_{ij}$ from the bin with probability 1. Let $k^- = \min \{ k \mid \mathcal{C}_k \subseteq B_{b^*}\}$ be the class of smallest index in $B_{b^*}$, and $k^+$ the one with largest index, respectively. Now suppose the agent learns in round $i$ that an option $\omega_{ij}$ with $(i,j) \in B_{b^*}$ arrives in this round. We claim that $\ensuremath{\mathcal{A}}$ will then decide to propose this option. This is obvious if all options in $B_{b^*}$ are only realized in round $i$. Otherwise, the agent might want to wait for an option in a later round.
If $\ensuremath{\mathcal{A}}$ proposes, then his utility is $\ensuremath{a}_{ij}$. Otherwise, if he waits for another option from $B_{b^*}$ in a later round, then a union bound shows that the expected utility is at most
\begin{align*}
\sum_{(i',j') \in B_{b^*}, i' > i} p_{i'j'} \cdot \ensuremath{a}_{i'j'} \le \sum_{(i',j') \in B_{b^*}, i' > i} p_{i'j'} 2^{k^+} < 2^{k^+} \cdot \sum_{(i',j') \in B_{b^*}} p_{i'j'} \le 2^{k^- - 1} \le \ensuremath{a}_{ij} \enspace,
\end{align*}
where the second-to-last inequality is a consequence of the construction of the bin. Hence, the first option from the bin that is realized also gets proposed by $\ensuremath{\mathcal{A}}$ and accepted by $\ensuremath{p}incipal$.
Now for each option $(i,j) \in B_{b^*}$, the probability that this option is proposed and accepted is the combination of two
independent events: (1) no other option from $B_{b^*}$ was realized in any of the rounds $i' < i$, (2) option $\omega_{ij}$ is realized in round $i$. The probability for event (2) is $p_{ij}$. For event (1), we define $m_i = \sum_{(i,j) \in B_{b^*}} p_{ij}$. With probability $\prod_{i' < i} (1-m_{i'})$, no option from $B_{b^*}$ is realized in rounds $i' < i$. Note that $\sum_{i=1}^n m_i \le 1/2$. The term $\prod_{i=1}^n (1-m_i)$ is minimized for $m_1 = 1/2$ and $m_{i'} = 0$ for $1 < i' \le n$. Thus $\prod_{i=1}^n (1-m_i) \ge 1/2$, i.e., the probability of event (1) is at least 1/2.
Overall, by linearity of expectation, the expected utility of $\ensuremath{p}incipal$ when using $\varphi$ is at least
\[
\sum_{(i,j) \in B_{b^*}} \frac 12 \cdot p_{ij} \cdot \ensuremath{b}_{ij} \ge \frac{1}{2} \sum_{(i,j) \in B_{b^*}} p_{ij} \ensuremath{b}_{ij} \ge \frac{1}{8\ell} \cdot \ensuremath{\operatorname{OPT}}
\]
and this proves the lemma.
\end{proof}
\begin{lemma}\label{lem:numberOfBins}
Let $\ell$ be the number of bins opened in Algorithm~\ref{algo:log-alpha-loglog-alpha-approx}. It holds that $\ell = O(\log \alpha / \log \log \alpha)$.
\end{lemma}
\begin{proof}
Consider some bin $B$ and the mass $p_B = \sum_{(i,j) \in B} p_{ij}$. We want to argue that at most $O(c/\log c)$ bins are opened. To do so, we first condition on having $\ell$ open bins and strive to lower bound the number of classes in these $\ell$ bins.
Consider a bin $B$ starting at $\mathcal{C}_s$. The algorithm adds classes to $B$ until $2^{k-1} < 2^s p_{B}$. Thus, $s-k+1 > \log_2(1/p_{B})$, i.e., the number of classes in $B$ is lower bounded by $\log_2(1/p_B)$.
Now consider two bins $B_i$ and $B_{i+1}$ and condition on $q = p_{B_i} + p_{B_{i+1}}$. Together the bins contain at least $\log_2(1/p_{B_i}) + \log_2(1/(q-p_{B_i}))$ classes. Taking the derivative with respect to $p_{B_i}$, we see that this lower bound is minimized when $p_{B_i} = q/2 = p_{B_{i+1}}$. Applying this balancing step repeatedly, the lower bound on the number of classes in all bins becomes smallest when $p_{B_i} = p_{B_j}$ for all bins $B_i, B_j$. Thus, when opening $\ell$ bins, we obtain the smallest lower bound on the number of classes in these bins by setting $p_{B_i} = \frac{1}{\ell} \cdot \sum_{(i,j) \in Q} p_{ij} < 1/(2\ell)$ for all bins $B_i$. Conversely, when opening $\ell$ bins, we need to have at least $\ell \log_2 (2\ell)$ classes in these bins.
Now, since we need to put $c$ classes into the bins, we need to ensure that for the number $\ell$ of open bins we have
$\ell (\log_2 \ell + 1) \le c$, since otherwise the $\ell$ bins would require more than $c$ classes in total. This implies that $c = \Omega( \ell \log_2 \ell )$ and, hence, $\ell = O(c / \log c) = O(\log \alpha / \log \log \alpha)$.
\end{proof}
Observe that the approximation ratio of this algorithm is tight in general. Consider the instances in Theorem~\ref{thm:generalLB} with $\alpha = n^{O(n)}$. The theorem shows that every scheme can obtain at most a ratio of $O(1/n) = O(\log \log \alpha / \log \alpha)$.
\begin{remark}\rm\label{rem:loglogWithZero}
If there are options with utility 0 for $\ensuremath{\mathcal{A}}$, the maximum ratio between the lowest and highest utility for $\ensuremath{\mathcal{A}}$ becomes unbounded. Still, if the maximum ratio between the lowest and highest \emph{non-zero} utility for $\ensuremath{\mathcal{A}}$ is bounded by $\alpha$, an $\Omega(\log \log \alpha / \log \alpha)$-approximation can be achieved with a slight modification of Algorithm~\ref{algo:log-alpha-loglog-alpha-approx}. Suppose there are any options with $\ensuremath{a}_{ij} = 0$ in $Q$, then construct another bin $B_{-1}$ which consists of all options with 0 utility for $\ensuremath{\mathcal{A}}$ in the set $Q$. If $B_{-1}$ is the bin that is chosen as the best bin in the algorithm, the agent will not receive any utility and, due to tie-breaking in favor of $\ensuremath{p}incipal$, can be assumed to execute an online search for $\ensuremath{p}incipal$. Using standard prophet inequality results, this yields a $1/2$-approximation for $\ensuremath{p}incipal$ within this bin. If bin $B_{-1}$ is not the best bin, the analysis from the theorem can be applied.
\end{remark}
\begin{corollary}
\label{cor:loglogWithZero}
If the agent has $\alpha$-bounded utilities for all options with strictly positive utility,
there is a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(\log \log \alpha/\log \alpha)$-approximation of the expected utility for optimal (online) search.
\end{corollary}
\subsection{Oblivious Proposals}
In the previous section, we considered algorithms for $\ensuremath{p}incipal$ when she learns the utility pair for the proposed option. In this section, we show that (fully) oblivious proposals can be a substantial drawback for $\ensuremath{p}incipal$. Obviously, the lower bound in Theorem~\ref{thm:generalLB} remains intact even for oblivious proposals, when $\ensuremath{p}incipal$ does not learn the utility value of the proposed option for $\ensuremath{\mathcal{A}}$. For oblivious proposals and $\alpha$-bounded agent utilities, we can significantly strengthen the lower bound. In contrast to the logarithmic approximation guarantee above, we provide a linear lower bound in $\alpha$ for oblivious proposals.
\begin{theorem}
\label{thm:fullyObliviousLB}
There is a class of instances of online delegation with $\alpha$-bounded utilities for the agent and oblivious proposals, in which every action scheme $\varphi$ obtains at most an $O(1/\alpha)$-approximation of the expected utility for optimal (online) search.
\end{theorem}
\begin{proof}
Consider the following class of instances. In $\ensuremath{\mathcal{D}}_i$, there are two options with the following probabilities and utilities: $\omega_{i1}$ with $p_{i1} = 1-1/n$ and $(\ensuremath{b}_{i1},\ensuremath{a}_{i1}) = (0,1)$, as well as $\omega_{i2}$ with $p_{i2} = 1/n$ and $(\ensuremath{b}_{i2},\ensuremath{a}_{i2}) = (1,x_i)$, where $x_i \in \{1,\alpha\}$ and $\alpha \in [1,n]$. In the first rounds $i=1,\ldots,i^*-1$ we have $x_i = 1$, then $x_i = \alpha$ for rounds $i=i^*,\ldots,n$. The expected utility when $\ensuremath{p}incipal$ performs (undelegated) online search is $1 - (1-1/n)^n \ge 1-1/e$.
Clearly, $\ensuremath{p}incipal$ has an incentive that $\ensuremath{\mathcal{A}}$ proposes any profitable option $\omega_{i2}$ as soon as possible. As in the proof of Theorem~\ref{thm:generalLB}, we can assume that all $\varphi_{i1} = 0$ in an optimal scheme -- this option yields no value for $\ensuremath{p}incipal$ and could only raise the incentive to wait for $\ensuremath{\mathcal{A}}$.
Due to oblivious proposals, $\ensuremath{p}incipal$ has to choose $\varphi$ without being aware of the value of $i^*$. For our impossibility result, we adjust $i^*$ to the scheme $\varphi$ chosen by $\ensuremath{p}incipal$: Set $i^* \in \{1,\ldots,n\}$ to the largest number such that $\sum_{i=i^*}^n \varphi_{i2} \ge e \cdot n/\alpha$, or $i^*=1$ if no such number exists.
First, suppose that $i^* = 1$. Then, even if we make $\ensuremath{\mathcal{A}}$ propose \emph{every} option $\omega_{i2}$ as soon as it arises, a union bound implies that the expected utility of $\ensuremath{p}incipal$ is upper bounded by $\sum_{i=1}^n \frac{1}{n} \cdot \varphi_{i2} \le \frac{e}{\alpha} + \frac{1}{n}$. Hence, $\ensuremath{p}incipal$ obtains only an $O(1/\alpha)$-approximation, for any $\alpha \in [1,n]$.
Now suppose that $i^* > 1$. Consider an optimal scheme $\varphi$ for $\ensuremath{p}incipal$. If $\omega_{i2}$ arises in round $i$, $\ensuremath{\mathcal{A}}$ decides if it is more profitable to propose $i$ or wait for a later round. Indeed, we show that $\ensuremath{\mathcal{A}}$ never proposes $\omega_{i2}$ in any round $i < i^*$. Consider the expected utility from proposing the first option $\omega_{k2}$ arising in rounds $k = i^*,\ldots,n$. This is
\begin{align*}
\alpha \cdot\left( \sum_{k=i^*}^n \frac{1}{n} \left(1-\frac{1}{n}\right)^{k-i^*} \varphi_{k2} \right)& = \alpha \cdot \frac{1}{n} \cdot \sum_{k=i^*}^n\left(1-\frac{1}{n}\right)^{k-i^*}\varphi_{k2} \\
&> \frac{\alpha}{n} \cdot \frac{1}{e} \sum_{k=i^*}^n \varphi_{k2} \ge \frac{\alpha}{en} \cdot \frac{en}{\alpha} = 1 \ge \varphi_{i2}\enspace,
\end{align*}
i.e., strictly larger than the expected utility $\varphi_{i2}$ from proposing $\omega_{i2}$ in round $i < i^*$. Hence, $\ensuremath{\mathcal{A}}$ only proposes in rounds $k = i^*,\ldots,n$. Now even if $\ensuremath{\mathcal{A}}$ would be able to propose \emph{every} option $\omega_{k2}$ in rounds $k=i^*,\ldots,n$, a union bound implies that the expected utility of $\ensuremath{p}incipal$ from these rounds is upper bounded by $\sum_{k=i^*}^n \frac{1}{n} \cdot \varphi_{k2} \le \frac{e}{\alpha} + \frac{1}{n}$. For any $\alpha \in [1,n]$, this implies $\ensuremath{p}incipal$ obtains an $O(1/\alpha)$-approximation.
\end{proof}
\begin{algorithm}[t]\DontPrintSemicolon
\caption{$\Omega(1/\alpha)$-Approximation for Oblivious Proposals}\label{algo:alpha-approx}
\KwIn{$n$ distributions $\ensuremath{\mathcal{D}}_1, \dots, \ensuremath{\mathcal{D}}_n$}
\KwOut{Action Scheme $\varphi$}
Let $Q = $ RestrictOptions$(\ensuremath{\mathcal{D}}_1,\dots,\ensuremath{\mathcal{D}}_n,1/(2\alpha))$. \\
Set $\varphi_{ij} = 1$ for all $(i,j) \in Q$. \\
\Return $\varphi$
\end{algorithm}
\begin{theorem}
\label{thm:fullyObliviousAlgo}
If the agent has $\alpha$-bounded utilities and makes oblivious proposals, there is a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(1/\alpha)$-approximation of the expected utility for optimal (online) search.
\end{theorem}
\begin{proof}
The proof follows along the lines of Lemma~\ref{lem:OneOverClasses} above. By Lemma~\ref{lem:restrictOptions}, we have $4 \alpha \cdot \sum_{(i,j) \in Q} p_{ij} \ensuremath{b}_{ij} \ge \ensuremath{\operatorname{OPT}}$,
the expected value of the best option in hindsight.
The action scheme accepts each proposed option $\omega_{ij}$ from the set $Q$ with probability 1. Note that $Q$ satisfies either that $\sum_{(i,j) \in Q} p_{ij} < 1/(2\alpha)$ or all options in $Q$ arrive in the same round $i$.
In the latter case, $\ensuremath{\mathcal{A}}$ will propose any option $\omega_{ij}$ with $(i,j) \in Q$ he encounters in round $i$. In a later round $i' > i$, $\ensuremath{p}incipal$ will not accept any option.
Hence, let us consider the former case that $Q$ satisfies $\sum_{(i,j) \in Q} p_{ij} < 1/(2\alpha)$. Suppose the agent learns in round $i$ that an option $\omega_{ij}$ with $(i,j) \in Q$ arrives. We claim that $\ensuremath{\mathcal{A}}$ will propose this option. If $\ensuremath{\mathcal{A}}$ proposes, then the expected utility is $\ensuremath{a}_{ij}$. Otherwise, if he waits for another option from $Q$ in a later round, then a union bound shows that the expected utility is at most
\begin{align*}
&\sum_{(i',j') \in Q,\ i' > i} p_{i'j'} \cdot \ensuremath{a}_{i'j'} \le \sum_{(i',j') \in Q,\ i' > i} p_{i'j'} \cdot \alpha \cdot \ensuremath{a}_{ij} \le \ensuremath{a}_{ij}\enspace,
\end{align*}
where the first inequality is due to $\alpha$-bounded utilities, and the second inequality follows since $\sum_{(i,j) \in Q} p_{ij} \le 1/(2\alpha)$ by construction. Hence, the first option from $Q$ that is realized also gets proposed by $\ensuremath{\mathcal{A}}$ and accepted by $\ensuremath{p}incipal$.
Now, for each option $(i,j) \in Q$, the probability that this option is proposed and accepted is the combination of two independent events: (1) no other option from $Q$ was realized in any of the rounds $i' < i$, (2) option $\omega_{ij}$ is realized in round $i$. The probability of event (2) is $p_{ij}$. For the probability of event (1), we define $m_i = \sum_{j\,:\,(i,j) \in Q} p_{ij}$. With probability $\prod_{i' < i} (1-m_{i'})$ no option from $Q$ is realized in rounds $i' < i$. Note that $\sum_{i=1}^n m_i \le 1/(2\alpha)$. The term $\prod_{i=1}^n (1-m_i)$ is minimized for $m_1 = 1/(2\alpha)$ and $m_{i'} = 0$ for all $i' > 1$. Thus $\prod_{i=1}^n (1-m_i) \ge 1- 1/(2\alpha)$, i.e., the probability of event (1) is at least $1-1/(2\alpha) \ge 1/2$.
By linearity of expectation, the expected utility of $\ensuremath{p}incipal$ when using $\varphi$ based on $Q$ is at least
\[
\sum_{(i,j) \in Q} \frac{1}{2} \cdot p_{ij} \cdot \ensuremath{b}_{ij} \ge \frac{1}{8\alpha} \ensuremath{\operatorname{OPT}} \enspace.
\]
\end{proof}
In contrast to Corollary~\ref{cor:loglogWithZero}, the result of Theorem~\ref{thm:fullyObliviousAlgo} does \emph{not} generalize to the case when $\ensuremath{\mathcal{A}}$ has options with utility 0, and $\alpha$ is the ratio of maximum and minimum non-zero utility. Even in the semi-oblivious scenario (discussed in the next section), all algorithms must have a ratio in $O(1/n)$, even when all utilities for $\ensuremath{\mathcal{A}}$ are $a_{ij} \in \{0,1\}$.
\subsection{Semi-Oblivious Proposals}
In this section, we analyze semi-oblivious proposals, where $\ensuremath{p}incipal$ has full a~priori information about the prior, but she does not learn the utility value of $\ensuremath{\mathcal{A}}$ upon a proposal. The additional information about the prior can indeed help to improve the approximation ratio from $\Theta(1/\alpha)$ to $\Omega(1/(\sqrt{\alpha} \log \alpha))$, but not to a logarithmic bound as shown for conscious proposals in Theorem~\ref{theo:loglog-log}. In particular, we start by showing the following limit on the approximation ratio.
\begin{theorem}\label{thm:semiObliviousLB}
There is a class of instances of online delegation with IID options, $\alpha$-bounded utilities for the agent, and semi-oblivious proposals, in which every action scheme $\varphi$ obtains at most an $O(1/\sqrt{\alpha})$-approximation of the expected utility for optimal (online) search.
\end{theorem}
An IID instance with three different options suffices. One option is bad for both $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$, but has a very high probability of $1-1/n$. The remaining two options provide the same (good) utility for $\ensuremath{p}incipal$, one of which is good and the other one bad for $\ensuremath{\mathcal{A}}$. The combined probability of both options is $1/n$. Since $\ensuremath{p}incipal$ cannot distinguish between the two good options, in each of the rounds she has to decide to either accept both or reject both. While $\ensuremath{p}incipal$ would like to accept any of the good options, $\ensuremath{\mathcal{A}}$ has an incentive to wait and propose only the option that is good for both. Overall, this implies that every achievable approximation ratio for $\ensuremath{p}incipal$ must be in $O(1/\sqrt{\alpha})$.
\begin{proof}[Proof of Theorem~\ref{thm:semiObliviousLB}]
Consider the following class of IID instances with $\ensuremath{\mathcal{D}}_i = \ensuremath{\mathcal{D}}_j = \ensuremath{\mathcal{D}}$. In $\ensuremath{\mathcal{D}}$, there are three options with the following probabilities and utilities: $\omega_1$ with $p_1 = 1-1/n$ and $(\ensuremath{b}_1,\ensuremath{a}_1) = (0,1)$, $\omega_2$ with $p_2 = 1/n - 1/(n\sqrt{\alpha})$ and $(\ensuremath{b}_2,\ensuremath{a}_2) = (1,2)$, and $\omega_3$ with $p_3 = 1/(n\sqrt{\alpha})$ and $(\ensuremath{b}_3,\ensuremath{a}_3)= (1,\alpha)$, for any $\alpha \in [2,n^2]$.
Note that $\ensuremath{p}incipal$ cannot distinguish between the latter options when they are proposed. Thus, in each round $i$, $\ensuremath{p}incipal$ accepts option $\omega_1$ with probability $\varphi_{i1}$ and options $\omega_2$ and $\omega_3$ with $\varphi_{i2}$. As in the proof of Theorem~\ref{thm:generalLB}, we can assume that all $\varphi_{i1} = 0$ in an optimal scheme -- this option yields no value for $\ensuremath{p}incipal$ and could only raise the incentive to wait for $\ensuremath{\mathcal{A}}$.
Consider any optimal scheme $\varphi$ for $\ensuremath{p}incipal$. To obtain an upper bound on the utility of $\ensuremath{p}incipal$, we assume that $\ensuremath{\mathcal{A}}$ always proposes $\omega_3$ whenever it is realized\footnote{Note that due to the differences in acceptance probabilities $\varphi_{i2}$, he might actually have an incentive to wait for a later round, in which the probability that $\ensuremath{p}incipal$ accepts is higher.}. For $\omega_2$, he evaluates whether or not it is profitable to wait for a later round. Suppose $\ensuremath{\mathcal{A}}$ proposes $\omega_2$ in round $i$. A necessary condition for this is that the expected utility from proposing $\omega_3$ in subsequent rounds is smaller, i.e.,
\begin{equation}\label{eq:necessaryConditionAgentProposal}
2 \cdot \varphi_{i2} \ge \alpha \cdot\left( \sum_{k=i+1}^n \frac{1}{n\sqrt{\alpha}} \left(1-\frac{1}{n\sqrt{\alpha}}\right)^{k-i-1} \varphi_{k2} \right) = \frac{\sqrt{\alpha}}{n} \cdot \sum_{k=i+1}^n\left(1-\frac{1}{n\sqrt{\alpha}}\right)^{k-i-1}\varphi_{k2} \enspace.
\end{equation}
If this condition is fulfilled, we set $\delta_i = 1$. Otherwise, we set $\delta_i = 0$. Then, using a union bound, the utility of $\ensuremath{p}incipal$ from $\varphi$ can be upper bounded by
\begin{equation}
\label{eq:LBprincUB}
\sum_{i = 1}^n \varphi_{i2} \left(\frac{1}{n\sqrt{\alpha}} + \delta_i \left(\frac{1}{n}-\frac{1}{n\sqrt{\alpha}}\right) \right) \enspace.
\end{equation}
Consider the first round $i_s$ in which $\delta_{i_s} = 1$.
Combining \eqref{eq:necessaryConditionAgentProposal} with the fact that $\varphi_{i_s,2} \le 1$, this means that
\[
2 \ge \frac{\sqrt{\alpha}}{n} \cdot \sum_{k=i_s + 1}^n\left(1-\frac{1}{n\sqrt{\alpha}}\right)^{k-i_s-1}\varphi_{k2} \enspace,
\]
which implies
\[
\sum_{k=i_s+1}^n \varphi_{k2} < \frac{2n}{\sqrt{\alpha}} \cdot \left(1-\frac{1}{n\sqrt{\alpha}}\right)^{-n} < \frac{n}{\sqrt{\alpha}} \cdot 2e^{1/\alpha} \enspace.
\]
Using \eqref{eq:LBprincUB} and our assumption that $\alpha \in [2,n^2]$, the utility of $\ensuremath{p}incipal$ is upper bounded by
\[
\frac{1}{n} \sum_{i = 1}^n \varphi_{i2} \left(\frac{1}{\sqrt{\alpha}} + \delta_i \left(1-\frac{1}{\sqrt{\alpha}}\right) \right) \le \frac{1}{n} \left(\frac{i_s - 1}{\sqrt{\alpha}} + 1 + 2e^{1/\alpha} \frac{n}{\sqrt{\alpha}} \right) = O(1/\sqrt{\alpha}) \enspace.
\]
\end{proof}
For semi-oblivious proposals we design a more elaborate algorithm. The resulting action scheme provides an $\Omega(1/(\sqrt{\alpha} \log \alpha))$-appro\-xi\-mation for $\ensuremath{p}incipal$.
Our algorithm uses two subroutines, depending on the expected utility for $\ensuremath{\mathcal{A}}$ (for pseudocode see Algorithm~\ref{algo:semiOblivious-approx}).
\begin{algorithm}[t]\DontPrintSemicolon
\caption{$\Omega(1/(\sqrt{\alpha}\log \alpha))$-Approximation for Semi-Oblivious Proposals}\label{algo:semiOblivious-approx}
\KwIn{$n$ distributions $\ensuremath{\mathcal{D}}_1, \dots, \ensuremath{\mathcal{D}}_n$}
\KwOut{Action Scheme $\varphi$}
Set $U = \bigcup_{i=1}^n \bigcup_{j=1}^{s_i} \{ (i,j) \}$.\\
Partition $U$ into $U_L = \{(i,j) \in U \mid \sum_{\stackrel{k=1}{b_{ik} = b_{ij}}}^{s_i} p_{ik}a_{ik} < \sqrt{\alpha} \sum_{\stackrel{k=1}{b_{ik} = b_{ij}}}^{s_i} p_{ik}\}$ and $U_H = U \setminus U_L$. \\
\For{$k=1,\dots,n$}{
$\ensuremath{\mathcal{D}}_k^{(L)} \leftarrow \ensuremath{\mathcal{D}}_k$, $\ensuremath{\mathcal{D}}_k^{(H)} \leftarrow \ensuremath{\mathcal{D}}_k$\;
In $\ensuremath{\mathcal{D}}_k^{(L)}$ set utilities of every option $(k, j) \in U_H$ to 0 for $\ensuremath{p}incipal$ and 1 for $\ensuremath{\mathcal{A}}$ \;
In $\ensuremath{\mathcal{D}}_k^{(H)}$ set utilities of every option $(k, j) \in U_L$ to 0 for $\ensuremath{p}incipal$ and $\sqrt{\alpha}$ for $\ensuremath{\mathcal{A}}$.
}
Set $\varphi_L = AlgoLow(\ensuremath{\mathcal{D}}^{(L)}_1,\dots, \ensuremath{\mathcal{D}}^{(L)}_n)$, \quad $\varphi_H = AlgoHigh(\ensuremath{\mathcal{D}}^{(H)}_1, \dots, \ensuremath{\mathcal{D}}^{(H)}_n)$. \\
\textbf{return} $\varphi_L$ or $\varphi_H$ whichever yields better expected utility for $\ensuremath{p}incipal$
\end{algorithm}
Consider all options with the same utility for $\ensuremath{p}incipal$ in a single round. This set of options has \emph{low agent expectation} if the conditional expected utility for $\ensuremath{\mathcal{A}}$ in this set of options is less than $\sqrt{\alpha}$. Otherwise, it has \emph{high agent expectation}.
For the first subroutine, we concentrate on all options with low agent expectation. Hence, this subroutine is called AlgoLow (Algorithm~\ref{algo:semiOblivious-low-expec}).
\begin{algorithm}[t]\DontPrintSemicolon
\caption{AlgoLow}\label{algo:semiOblivious-low-expec}
\KwIn{$n$ distributions $\ensuremath{\mathcal{D}}_1,\dots, \ensuremath{\mathcal{D}}_n$, where in every distribution individually, the set of options with the same value for $\ensuremath{p}incipal$ has an expectation for $\ensuremath{\mathcal{A}}$ of less than $\sqrt{\alpha}$}
\KwOut{Action Scheme $\varphi$}
Set $Q = RestrictOptions(\ensuremath{\mathcal{D}}_1, \dots, \ensuremath{\mathcal{D}}_n, 1/2)$.\\
Set $\ell = 1$, $\ensuremath{b}_1 = p_1 = 0$, $\mathcal{C}_1 = \emptyset$ \\
\For{ $k =1, \dots, n$}{\label{algoLow:startForClasses}
Set $p^* = \sum_{(k,j) \in Q} p_{kj}$\\
\textbf{if} $p_\ell + p^* > 1/\sqrt{\alpha}$ \textbf{then} set $\ell = \ell +1, \mathcal{C}_\ell = \{(k,j) \in Q\}, p_\ell = p^*$;\; \quad \textbf{else} add $\mathcal{C}_{\ell} = \mathcal{C}_{\ell} \cup \{(k,j) \in Q\}$
}\label{algoLow:endForClasses}
Set $\ensuremath{b}_{\ell'} = \sum_{(i,j)\in \mathcal{C}_{\ell'}} p_{ij}\ensuremath{b}_{ij}$ for all $1 \le \ell' \le \ell$.\\
Choose $\ell^*$ such that $\ensuremath{b}_{\ell^*} \ge \ensuremath{b}_{\ell'}$ for all $1 \le \ell' \le \ell$. \\
Set $\varphi_{ij} = 1$ for all $(i,j) \in \mathcal{C}_{\ell^*}$.\\
\textbf{return} $\varphi$
\end{algorithm}
Other options are considered to receive a utility of 0 for $\ensuremath{p}incipal$ and, thus, are excluded from consideration. The scheme $\varphi_L$ achieves an $\Omega(1/\sqrt{\alpha})$-approximation in the instance $\ensuremath{\mathcal{D}}^{(L)}$, where only options with low agent expectation generate value for $\ensuremath{p}incipal$.
Similarly, for options with high agent expectation we describe procedure AlgoHigh (Algorithm~\ref{algo:semiOblivious-high-expec}).
\begin{algorithm}[t]\DontPrintSemicolon
\caption{AlgoHigh}\label{algo:semiOblivious-high-expec}
\KwIn{$n$ distributions $\ensuremath{\mathcal{D}}_1,\dots, \ensuremath{\mathcal{D}}_n$, where in every distribution individually, the set of options with the same value for $\ensuremath{p}incipal$ has an expectation for $\ensuremath{\mathcal{A}}$ of at least $\sqrt{\alpha}$}
\KwOut{Action Scheme $\varphi$}
Set $Q = RestrictOptions(\ensuremath{\mathcal{D}}_1, \dots, \ensuremath{\mathcal{D}}_n, 1/4)$.\\
\For{$k = 0, \dots, \lfloor \log_2 \sqrt{\alpha} \rfloor -1 $}{
Set $\mathcal{C}_k = \{ (i,j) \in Q \mid \frac{\sum_{(i,j') \in Q, \ensuremath{b}_{ij} = \ensuremath{b}_{ij'}}p_{ij'}a_{ij'}}{\sum_{(i,j') \in Q, \ensuremath{b}_{ij} = \ensuremath{b}_{ij'}}p_{ij'}} \in \left[\sqrt{\alpha} \cdot 2^k, \sqrt{\alpha} \cdot 2^{k+1}\right) \}$ \\
Set $\ensuremath{b}_k = \sum_{(i,j) \in \mathcal{C}_k} p_{ij}\ensuremath{b}_{ij}$.\\
}
Set $\mathcal{C}_{\lfloor \log_2 \sqrt{\alpha} \rfloor} = \{ (i,j) \in Q \mid \frac{\sum_{(i,j') \in Q, \ensuremath{b}_{ij} = \ensuremath{b}_{ij'}}p_{ij'}a_{ij'}}{\sum_{(i,j') \in Q, \ensuremath{b}_{ij} = \ensuremath{b}_{ij'}}p_{ij'}} \in \left[\sqrt{\alpha} \cdot 2^{\lfloor \log_2 \sqrt{\alpha}\rfloor}, \alpha\right] \}$ \\
Set $\ensuremath{b}_k = \sum_{(i,j) \in \mathcal{C}_k} p_{ij}\ensuremath{b}_{ij}$.\\
Choose $k$ such that $\ensuremath{b}_k \ge \ensuremath{b}_{k'}$ for all $k' = 0, \dots, \lfloor \log_2 \sqrt{\alpha} \rfloor$.
Set $\varphi_{ij} = 1$ for all $(i,j) \in \mathcal{C}_k$.\\
\textbf{return} $\varphi$
\end{algorithm}
The scheme $\varphi_H$ achieves an $\Omega(1/(\sqrt{\alpha}\log_2{\alpha}))$-approximation in the instance $\ensuremath{\mathcal{D}}^{(H)}$, where only options with high agent expectation generate value for $\ensuremath{p}incipal$. In the end, we choose the better scheme for $\ensuremath{p}incipal$, thereby forfeiting at most another factor 2 of her optimal expected utility. Overall, our Algorithm obtains a ratio of $\Omega(1/(\sqrt{\alpha}\log \alpha))$.
\begin{theorem}
\label{thm:semiObliviousAlgo}
If the agent has $\alpha$-bounded utilities and makes semi-oblivious proposals, there is a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(1/ (\sqrt{\alpha}\log \alpha))$-approximation of the expected utility for optimal (online) search.
\end{theorem}
AlgoLow and AlgoHigh use the procedure RestrictOptions with a parameter $m = 1/2$ and $m=1/4$, respectively. For a formal description of the subroutine see Algorithm~\ref{algo:semiOblivious-restrict} above.
Let us give a brief intuition for AlgoLow. The algorithm leverages the low expectation for $\ensuremath{\mathcal{A}}$ by restricting the number of rounds from which options are accepted. More precisely, it partitions the set $Q$ computed by RestrictOptions into $O(\sqrt{\alpha})$ many classes according to contiguous time intervals of rounds. The action scheme $\varphi$ then accepts only options from the best class for $\ensuremath{p}incipal$. The overall probability that any acceptable option arrives turns out to be high enough (to obtain a $O(1/\sqrt{\alpha})$-approximation for $\ensuremath{p}incipal$) and low enough (such that $\ensuremath{\mathcal{A}}$ wants to propose the first acceptable option rather than wait for a better one later on).
\begin{lemma}\label{lem:obliviousLowExpec}
If the agent has $\alpha$-bounded utilities, makes semi-oblivious proposals, and all options have low agent expectation, AlgoLow (Algorithm~\ref{algo:semiOblivious-low-expec}) constructs a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(1/\sqrt{\alpha})$-approximation of the expected utility for optimal (online) search.
\end{lemma}
\begin{proof}
The set $Q$ returned by $RestrictOptions(\ensuremath{\mathcal{D}}_1,\dots,\ensuremath{\mathcal{D}}_n,1/2)$ guarantees $4 \cdot \sum_{(i,j) \in Q} p_{ij}\ensuremath{b}_{ij} \ge \ensuremath{\operatorname{OPT}}$ by Lemma~\ref{lem:restrictOptions}.
When splitting the set $Q$ into classes in the beginning of the algorithm, it is guaranteed that no class spanning more than a single round has a combined probability mass greater than $1/\sqrt{\alpha}$.
This means that whenever a new class is opened, the combined mass of the previous and the current class is greater than $1/\sqrt{\alpha}$. Hence, there are at most $2 \cdot \sqrt{\alpha}$ many classes in total.
Now assume class $\mathcal{C}$ is chosen by the algorithm and some acceptable option arrives in round $i$. From the assumption that utilities are $\alpha$-bounded, we know that this option has an agent value of at least 1. By a union bound, the probability that any additional acceptable option from $\mathcal{C}$ arrives in a future round is at most $1/\sqrt{\alpha}$ (as all classes that consist of a higher mass than $1/\sqrt{\alpha}$ only have options from a single round). The conditional expectation for $\ensuremath{\mathcal{A}}$ for any acceptable option in a future round is at most $\sqrt{\alpha}$. Hence, $\ensuremath{\mathcal{A}}$ proposes the option in round $i$.
Similar to Algorithm~\ref{algo:log-alpha-loglog-alpha-approx}, the probability that an option $(i,j)$ from the chosen class is proposed is the combination of two independent events: (1) no other option from this class was proposed in a prior round $i' < i$ and (2) $(i,j)$ is realized in round $i$. If the chosen class only consists of a single round, the probability for (1) is trivially 1; otherwise, we can use the same argument as in the proof of Lemma~\ref{lem:OneOverClasses} to see that the probability that round $i$ is reached is at least $1/2$. This means that $\ensuremath{p}incipal$ achieves an expected utility of at least $1/2 \cdot \sum_{(i,j) \in \mathcal{C}} p_{ij}\ensuremath{b}_{ij}$.
As there are at most $2\cdot \sqrt{\alpha}$ many classes
and the algorithm chooses the best one for $\ensuremath{p}incipal$, by running AlgoLow, she will achieve an expected utility of
\[ \frac{1}{2} \cdot \sum_{(i,j) \in \mathcal{C}} p_{ij}\ensuremath{b}_{ij} \ge \frac{1}{4\sqrt{\alpha}} \cdot \sum_{(i,j) \in Q} p_{ij}\ensuremath{b}_{ij} \ge \frac{1}{16\sqrt{\alpha}} \ensuremath{\operatorname{OPT}} = \Omega\left(\frac{1}{\sqrt{\alpha}}\right)\ensuremath{\operatorname{OPT}} \enspace.
\]
\end{proof}
AlgoLow classifies options only based on utility for $\ensuremath{p}incipal$ and time intervals. AlgoHigh instead uses an approach similar to Algorithm~\ref{algo:log-alpha-loglog-alpha-approx}, namely classifying good options for $\ensuremath{p}incipal$ by their utility for $\ensuremath{\mathcal{A}}$. Since in the semi-oblivious scenario, options from a single round $i$ with the same utility for $\ensuremath{p}incipal$ cannot be distinguished, the algorithm classifies options by their expected utility for $\ensuremath{\mathcal{A}}$ such that the expectation for $\ensuremath{\mathcal{A}}$ of all options in a single class differs by no more than a factor 2. Finally, the algorithm identifies the best one of these $O(\log \alpha)$ many classes.
\begin{lemma}\label{lem:semiObliviousHighExpec}
If the agent has $\alpha$-bounded utilities, makes semi-oblivious proposals, and all options have high agent expectation, AlgoHigh (Algorithm~\ref{algo:semiOblivious-high-expec}) constructs a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(1/(\sqrt{\alpha}\log\alpha))$-approximation of the expected utility for optimal (online) search.
\end{lemma}
\begin{proof}
Using $RestrictOptions(\ensuremath{\mathcal{D}}_1,\dots,\ensuremath{\mathcal{D}}_n,1/4)$, the algorithm first identifies the best options for $\ensuremath{p}incipal$. By Lemma~\ref{lem:restrictOptions}, it holds that $8 \cdot \sum_{(i,j) \in Q} p_{ij}\ensuremath{b}_{ij} \ge \ensuremath{\operatorname{OPT}}$.
The set $Q$ is then further partitioned into $\lfloor \log_2 \sqrt{\alpha}\rfloor + 1$ smaller classes depending on their conditional expectation for $\ensuremath{\mathcal{A}}$, namely such that the conditional expectation for $\ensuremath{\mathcal{A}}$ of the elements in a class differs by at most a factor 2. Then, the class $\mathcal{C}$ such that $\sum_{(i,j)\in \mathcal{C}} p_{ij}\ensuremath{b}_{ij} \ge \sum_{(i,j) \in \mathcal{C}'} p_{ij}\ensuremath{b}_{ij}$ for all classes $\mathcal{C}'$ is chosen. This means that
\[(\lfloor \log_2 \sqrt{\alpha}\rfloor + 1) \cdot \sum_{(i,j) \in \mathcal{C}} p_{ij}\ensuremath{b}_{ij} \ge \sum_{(i,j) \in Q} p_{ij}\ensuremath{b}_{ij} \ge 1/8 \cdot \ensuremath{\operatorname{OPT}} \enspace.
\]
We denote by $E$ the lower bound for the expected $\ensuremath{\mathcal{A}}$ utility of the interval of the chosen class $\mathcal{C}$. Recall that all utilities for $\ensuremath{\mathcal{A}}$ are in the interval $[1,\alpha]$. This means that with a probability of at least $E/(2\alpha-E) \ge E/(2\alpha)$, a random element from $\mathcal{C}$ has an agent utility of at least $E/2$ -- otherwise, an expected utility of at least $E$ for $\ensuremath{\mathcal{A}}$ would not be possible. Since the probability that another allowed option in a later round arrives is at most 1/4 due to the choice of $m=1/4$ for the call to $RestrictOptions$ and the expectation conditional on arrival of an allowed option is at most $2E$, $\ensuremath{\mathcal{A}}$ always proposes the first option with a utility of at least $E/2$. This in turn means that the agent will propose the first element from $\mathcal{C}$ he encounters with a probability of at least $E/(2\alpha)$. Since $E \ge \sqrt{\alpha}$, the probability that $\ensuremath{\mathcal{A}}$ proposes the first allowed element is at least $1/(2\sqrt{\alpha})$.
In total, this means that $\ensuremath{p}incipal$ achieves an expected utility of at least
\[\frac{1}{2\sqrt{\alpha}} \cdot \sum_{(i,j) \in \mathcal{C}} p_{ij}\ensuremath{b}_{ij} \ge \frac{1}{2\sqrt{\alpha}} \cdot \frac{1}{8 \cdot (\lfloor \log_2 \sqrt{\alpha}\rfloor + 1)}\cdot \ensuremath{\operatorname{OPT}} = \Omega\left(\frac{1}{\sqrt{\alpha}\log_2\alpha} \right) \ensuremath{\operatorname{OPT}} \enspace. \]
\end{proof}
In contrast to Corollary~\ref{cor:loglogWithZero}, the result of Theorem~\ref{thm:semiObliviousAlgo} does \emph{not} generalize to the case when $\ensuremath{\mathcal{A}}$ has options with utility 0, and $\alpha$ is the ratio of maximum and minimum non-zero utility. A simple adaptation of the proof of Theorem~\ref{thm:semiObliviousLB} shows that in this case all algorithms must have a ratio in $O(1/n)$, even when all utilities for $\ensuremath{\mathcal{A}}$ are $a_{ij} \in \{0,1\}$.
We adapt the instance from the proof of Theorem~\ref{thm:semiObliviousLB} as follows. We set $p_1 = 1-1/n$ and $(b_1,a_1) = (0,0)$, $p_2 = 1/n-1/n^2$ and $(b_2, a_2) = (1,0)$, $p_3 = 1/n^2$ and $(b_3,a_3) = (1,1)$. Note that $\alpha = 1$ here, as there is only a single non-zero utility value for $\ensuremath{\mathcal{A}}$.
Consider any deterministic scheme for $\ensuremath{p}incipal$. Clearly, $\ensuremath{\mathcal{A}}$ does not want to propose any option of value 0 for him until the last round in which options $\omega_2$ and $\omega_3$ are acceptable. By a union bound, the overall probability to propose an option of value 1 for $\ensuremath{p}incipal$ is at most $(n-1) \cdot 1/n^2 + 1/n < 2/n$, so the expected utility of $\ensuremath{p}incipal$ is in $O(1/n)$. By searching through the options herself, $\ensuremath{p}incipal$ obtains a value of at least $1-(1-1/n)^n \ge 1-1/e$. Hence, every deterministic scheme is $O(1/n)$-approximate, even in this case with $\alpha = 1$. A similar argument shows this result also for randomized schemes.
\section{Misalignment of Principal and Agent Utility}\label{sec:principal-agent-ratio}
In this section, we consider performance guarantees based on the amount of misalignment of principal and agent utility. Formally, let $\beta \ge 1$ be the smallest number such that
\[
\frac{1}{\beta} \cdot \frac{\ensuremath{a}_{ij}}{\ensuremath{a}_{i'j'}} \le \frac{\ensuremath{b}_{ij}}{\ensuremath{b}_{i'j'}} \le \beta \cdot \frac{\ensuremath{a}_{ij}}{\ensuremath{a}_{i'j'}}
\]
for any two options $\omega_{ij}$ and $\omega_{i'j'}$ in the instance. We assume that the preference of $\ensuremath{p}incipal$ between any pair $\omega_{ij}, \omega_{i'j'}$ of options is shared by $\ensuremath{\mathcal{A}}$ -- up to a factor of at most $\beta$. We term this $\beta$-bounded utilities.
For most of the section, we assume that all utility values are strictly positive. Suppose we choose an arbitrary realization $\omega_{i'j'}$. Divide all utility values of $\ensuremath{p}incipal$ for all realizations by $\ensuremath{b}_{i'j'}$, and all utility values of $\ensuremath{\mathcal{A}}$ by $\ensuremath{a}_{i'j'}$. Note that this adjustment neither affects the incentives of the players nor the approximation ratios of our algorithms. Considering $\omega_{ij}$ with the adjusted utilities, we see that $1/\beta \cdot \ensuremath{b}_{ij}/\ensuremath{a}_{ij} \le 1 \le \beta \cdot \ensuremath{b}_{ij}/\ensuremath{a}_{ij}$, and thus $1/\beta \le \ensuremath{b}_{ij}/\ensuremath{a}_{ij} \le \beta$ for all $\omega_{ij}$. This condition turns out to be convenient for our analysis.
Our main idea is to use $O(\log \beta)$ clusters $\mathcal{C}_k$ to group all the options that have a utility ratio between $2^k$ and $2^{k+1}$, i.e.,
\[
\mathcal{C}_k = \{ \omega_{ij} \in \Omega \mid 2^k \le \ensuremath{b}_{ij}/\ensuremath{a}_{ij} < 2^{k+1} \}
\]
for $k = \lfloor \log 1/\beta \rfloor, \ldots, \lceil \log \beta \rceil$. Our deterministic scheme restricts the acceptable options to a single cluster $\mathcal{C}_{k^*}$. Note that here $\ensuremath{p}incipal$ is assumed to see $\ensuremath{a}_{ij}$ upon a proposal. The principal determines the cluster $k^*$, such that the best response by $\ensuremath{\mathcal{A}}$ (i.e., his optimal online algorithm applied with the options from that cluster) delivers the largest expected utility for $\ensuremath{p}incipal$.
\begin{theorem}
If principal and agent have $\beta$-bounded utilities, there is a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(1/\log \beta)$-approximation of the expected utility for optimal (online) search.
\end{theorem}
\begin{proof}
Consider any cluster $\mathcal{C}_k$. We denote by $\ensuremath{b}(\ensuremath{\mathcal{A}}, k)$ and $\ensuremath{a}(\ensuremath{\mathcal{A}}, k)$ the expected utility for $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$ when $\ensuremath{p}incipal$ uses $\mathcal{C}_k$ to determine $\varphi$. Now consider a hypothetical algorithm for $\ensuremath{p}incipal$ that observes all realizations and chooses the best option from $\mathcal{C}_k$ for $\ensuremath{p}incipal$ if possible. If there is no such option, it obtains a utility of 0. Let $\ensuremath{b}(\ensuremath{p}incipal, k)$ and $\ensuremath{a}(\ensuremath{p}incipal, k)$ be the expected utility of the hypothetical algorithm for $\ensuremath{p}incipal$ and $\ensuremath{\mathcal{A}}$, respectively. Clearly, $\ensuremath{b}(\ensuremath{p}incipal, k) \ge \ensuremath{b}(\ensuremath{\mathcal{A}}, k)$ and $\ensuremath{a}(\ensuremath{\mathcal{A}}, k) \ge \ensuremath{a}(\ensuremath{p}incipal, k)$, but also, by definition of $\mathcal{C}_k$,
\[
\ensuremath{b}(\ensuremath{\mathcal{A}}, k) \ge \ensuremath{a}(\ensuremath{\mathcal{A}}, k) \cdot 2^k \ge \ensuremath{a}(\ensuremath{p}incipal, k) \cdot 2^k \ge \ensuremath{b}(\ensuremath{p}incipal, k) / 2
\]
Now consider the best option for $\ensuremath{p}incipal$ in hindsight. The best-option-algorithm for cluster $\mathcal{C}_k$ picks the best option in hindsight if it comes from cluster $\mathcal{C}_k$. Otherwise, it returns a value of 0. Let $\ensuremath{b}^*_k$ be the expected utility of this algorithm for $\ensuremath{p}incipal$, and let $OPT$ be the expected utility of the best option in hindsight for $\ensuremath{p}incipal$. Then
\[
OPT = \sum_{k= \lfloor \log 1/\beta \rfloor}^{ \lceil \log \beta \rceil} \ensuremath{b}^*_k \le \sum_{k= \lfloor \log 1/\beta \rfloor}^{ \lceil \log \beta \rceil} \ensuremath{b}(\ensuremath{p}incipal, k) \le \sum_{k= \lfloor \log 1/\beta \rfloor}^{ \lceil \log \beta \rceil} \ensuremath{b}(\ensuremath{\mathcal{A}}, k) \cdot 2 \enspace.
\]
Hence, since the scheme chooses the cluster $k^*$ that maximizes $\ensuremath{b}(\ensuremath{\mathcal{A}}, k^*)$, we obtain an $\Omega(1/\log \beta)$-approximation.
\end{proof}
By treating all options of utility 0 for $\ensuremath{\mathcal{A}}$ in a separate class and ignoring all options of utility 0 for $\ensuremath{p}incipal$, we can again adapt the performance guarantee also to instances, in which all utility pairs of $\ensuremath{\mathcal{A}}$ and $\ensuremath{p}incipal$ with strictly positive entries are $\beta$-bounded.
\begin{corollary}
If principal and agent have $\beta$-bounded utilities for the set of options with only strictly positive utilities, there is a deterministic action scheme such that $\ensuremath{p}incipal$ obtains an $\Omega(1/\log \beta)$-approximation of the expected utility for optimal (online) search.
\end{corollary}
The bound in Theorem~\ref{thm:generalLB} for conscious proposals can be applied rather directly to this case, i.e., when treating the 0-utility options for $\ensuremath{p}incipal$ in a separate class. Also the bounds for oblivious and semi-oblivious proposals in Theorems~\ref{thm:fullyObliviousLB} and~\ref{thm:semiObliviousLB} apply directly, since in these instances $\beta = \Theta(\alpha)$. This implies that any algorithm has a ratio in $O(\log \log \beta/ \log \beta)$ for conscious proposals, in $O(1/\sqrt{\beta})$ for semi-oblivious proposals, and in $O(1/\beta)$ for oblivious proposals. Finally, it is trivial to obtain a $\Omega(1/\beta)$-approximation for $\ensuremath{p}incipal$ in case of $\beta$-bounded utilities and oblivious proposals -- simply accept every option proposed by $\ensuremath{\mathcal{A}}$. The bound on the ratio is a simple consequence of $\beta$-boundedness. As such, note that $\ensuremath{p}incipal$ is not required to know $\beta$ to obtain the approximation.
\end{document}
|
\begin{document}
\date{\today}
\title{Resonances in open quantum systems}
\author{
Hichem Eleuch$^{1}$\footnote{email: [email protected]} and
Ingrid Rotter$^{2}$\footnote{email: [email protected]}}
\address{
$^1$ Institute for Quantum Science and Engineering,
Texas A$\&$M University, College Station, Texas 77843, USA}
\address{
$^2$ Max Planck Institute for the Physics of Complex Systems,
D-01187 Dresden, Germany }
\vspace*{1.5cm}
\begin{abstract}
The Hamilton operator of an open quantum system is non-Hermitian.
Its eigenvalues are, generally,
complex and provide not only the energies but also the lifetimes
of the states of the system.
The states may couple via the common environment of scattering
wavefunctions into which the system is embedded. This causes an
{\it external mixing} (EM) of the states. Mathematically,
EM is related to the existence of singular (the so-called exceptional)
points (EPs).
The eigenfunctions of a non-Hermitian operator are biorthogonal,
in contrast to the orthogonal eigenfunctions of a Hermitian operator.
A quantitative measure for the ratio between biorthogonality and
orthogonality is the phase rigidity of the wavefunctions.
At and near an EP, the phase rigidity takes its minimum value.
The lifetimes of two nearby eigenstates of a quantum system
bifurcate under the influence of an EP. At the parameter value of
maximum width
bifurcation, the phase rigidity approaches the value one, meaning that
the two eigenfunctions become orthogonal. However, the eigenfunctions
are externally mixed at this parameter value.
The S-matrix and therewith
the cross section do contain, in the one-channel case, almost no
information on the EM of the states.
The situation is completely different in the case with two (or more)
channels where the resonance structure is strongly influenced by
the EM of the states and interesting features of non-Hermitian
quantum physics are revealed. We provide numerical
results for two and three nearby eigenstates of a non-Hermitian
Hamilton operator which are embedded in one common continuum
and are influenced by two adjoining EPs. The results are discussed.
They are of interest for an experimental test of the non-Hermitian
quantum physics as well as for applications.
\end{abstract}
\pacs{\bf }
\maketitle
\section{Introduction}
\label{intr}
In experiments \cite{yacobi,schuster,heiblum}
on Aharonov-Bohm rings containing a quantum dot in one arm,
both the phase and magnitude of the transmission amplitude
$T=|T|~e^{i\beta}$ of the dot can be extracted. The obtained results
did not fit into the general understanding of the transmission process.
As a function of the plunger gate voltage $V_g$, a series of well-separated
transmission peaks of rather similar width and height has been observed
and, according to expectations, the transmission phases $\beta(V_g)$
increase continuously by $\pi$ across every resonance.
In contrast to expectations, however,
$\beta$ always jumps sharply downwards by $\pi$ in each valley
between any two successive peaks. These jumps, called
{\it phase lapses in multi-level systems}, were observed
in a large succession of valleys for every many-electron dot studied.
They have been discussed in many theoretical papers,
including in a Focus Issue in New J. Phys. 2007.
In spite of much effort, the experimental
results could not be explained in Hermitian quantum physics.
Using the non-Hermitian formalism of quantum physics,
it was however possible, in 2009, to explain \cite{muro}
convincingly the experimentally observed phase lapses
(see also the discussion of this problem in
Sect. 4.3.2 of the recent review \cite{ropp}).
This example shows the meaning which non-Hermitian quantum physics
{\it can} have for the description of a concrete physical system
that is open, in contrast to a closed (or almost closed) system that
is well described in the framework of Hermitian quantum physics.
Another example which shows the meaning of a non-Hermitian Hamilton
operator for a concrete quantum system, is
the description of laser-induced continuum structures in atoms
more than 15 years ago
\cite{marost12,marost3}. In these papers the motion of the complex
eigenvalues of the non-Hermitian Hamiltonian
is traced as a function of the field strength for different field
frequencies and atomic parameters. Level repulsion in
the complex plane is shown to occur at a critical field intensity.
With further increasing intensity, the complex energies move
differently. This effect is called {\it resonance trapping} according
to similar results obtained earlier in nuclear physics \cite{ro91}.
Recently, non-Hermitian Hamilton operators are used for the description
or prediction of different phenomena in quantum physics,
e.g. \cite{moiseyev}. We mention here only a few of many examples
\cite{atabek,gilary,jaouadi}.
A non-Hermitian Hamilton operator describing an open quantum system,
may play an important role also
in explaining well-known puzzles of quantum physics.
The natural environment of a localized quantum mechanical system
is the extended continuum of scattering wavefunctions in which
the system is embedded. This environment can be
changed by means of external forces, however it can never be deleted.
It exists at all times and is completely independent of any observer.
For this reason, radioactive dating can be used in geologic studies.
According to this statement, the properties of an open quantum
system can be described by means of
two projection operators each of which is related to
one of the two parts of the function space. The localized part of
the quantum system is basic for spectroscopic studies.
Mathematically, the localized part of the open quantum system
is a subsystem that is related to another subsystem.
The Hamiltonian of the (localized) system is
therefore non-Hermitian while the Hamiltonian of the total system
consisting of the two subsystems, is Hermitian \cite{comment3}.
In the standard Hermitian description of a localized quantum system,
the system is considered to be closed; the Hamiltonian is Hermitian;
the eigenstates are discrete.
Their decay is described by tunneling of particles into
the function space of scattering states into which the system
is embedded. The tunneling time can be calculated. It is small and
could be measured experimentally only recently \cite{tunnel}. The
experimental results have shown that the tunneling time is
extremely short, which does not correspond to the expectations of
standard Hermitian quantum physics. They agree, however, with the
conclusions obtained when the system is considered to be an open
system described by a non-Hermitian Hamilton operator. In this case,
the eigenvalues ${\cal E}_i$ of the system are complex and the
lifetime of the states is given by Im(${\cal E}_i$). There is no need
to consider any tunneling time.
In a similar manner, the problem of the Schr\"odinger cat
does not exist when the system is considered to be an open quantum
system. The price to be paid for this
is to describe the system by a non-Hermitian Hamilton operator
and to solve the involved mathematical problems.
Usually, the calculations with a non-Hermitian Hamiltonian
give results for observable values of the quantum system which differ
only little from those obtained with a Hermitian Hamiltonian,
especially in relation to the uncertainties involved in the
comparison with experimental data. There are however exceptions from
this rule. These exceptions
arise from the mathematical existence of singular points.
One example are the so-called {\it exceptional points} (EPs) which
are known in mathematics for many years \cite{kato}.
Consider a family of operators of the form
\begin{eqnarray}
\label{kato1}
T(\kappa)=T(0)+\kappa T'
\end{eqnarray}
where $\kappa$ is a scalar parameter,
$T(0)$ is an unperturbed operator and
$\kappa T'$ is a perturbation.
Kato \cite{kato} has shown that the
number of eigenvalues of $T(\kappa)$ is independent of $\kappa$, as
expected, however
with the exception of some special values of $\kappa$. The
corresponding points in the parameter space are called
EPs. Here, (at least) two eigenvalues coalesce. An example is
\begin{eqnarray}
\label{kato2}
T(\kappa) = \left(
\begin{array}{cc}
1 & \kappa \\
\kappa & -1
\end{array} \right)
\end{eqnarray}
in which the two values
$\kappa = + i$ and $\kappa = - i$ result in the
same eigenvalue $0$.
Now the following questions arise:
What is the behavior of the eigenfunctions of the non-Hermitian
Hamilton operator under the influence of an EP?
Can EPs be observed directly in experimental results?
These questions are answered only partly in the literature up to now
although their
influence onto the dynamics of open quantum systems is
well known (for references see the review \cite{top}).
The meaning of EPs has been studied in the literature during the last twenty
years or so, in classical as well as in quantum physics. We will not discuss
here the problems of classical physics. Instead we refer to the
excellent book \cite{kirillov} and to the collection of articles
on spectral analysis, stability and bifurcation in modern nonlinear
physical systems \cite{kir_pel}. Related problems are studied also in
molecular physics, see the recent paper \cite{past} where the
references to older papers can be found.
In quantum physics the problems are studied, unfortunately,
in a confusing and often contradictory manner. We will not
enumerate here the references to the different papers.
They will rather be cited in those sections of the present paper
in which they can be discussed consistently \cite{comment4}.
The non-Hermiticity of the Hamiltonian is introduced mostly by adding
a non-Hermitian part to the Hermitian Hamiltonian that is known to
describe the system quite well, for references see the review \cite{top}.
It appears therefore more or less as a perturbation that is able to
describe the changes of the system properties under special
conditions, i.e. under the influence of an EP. Although this
treatment gives mostly reliable results, the question remains open
what are the properties of a Hamiltonian which is really
non-Hermitian, i.e. when the non-Hermiticity does {\it not} appear
as some perturbation.
The aim of the present paper is to find an answer to
this question in a mathematically exact manner, however by
keeping in mind that points in the continuum are of measure zero and
cannot be observed directly. It is important therefore to point to
observable signatures of the EPs \cite{comment2} occurring in
physical values, by means of which their existence can be proven. This is,
e.g., avoided level crossing and formation of different time scales
in the two-level case. Similar signatures exist in the three and
more level cases. Most interesting is the
so-called {\it external mixing (EM)} of the
states via the common continuum into which the system is embedded.
By definition, an EM of the states can occur {\it only} when the
system is open. It is therefore one of the characteristic values of
the non-Hermitian physics of open quantum systems \cite{comment3}.
Many years ago, EM has been shown to play an important role
in the open quantum mechanical nuclear system (continuum shell model
in contrast to the standard shell model) \cite{ro91}.
Today we know that it
characterizes the main features of the influence of EPs
on the dynamics of an open quantum system, see the recent review
\cite{ropp} on experimental and theoretical results.
Experimentally, an example of EM has been provided
a few years ago in a mesoscopic system. It
has been shown in \cite{bird1} that two distinct quantum
states are coupled through a common continuum.
In a further experiment, the authors were
able to show that EM survives even under conditions of strongly
nonequilibrium transport in the system \cite{bird2}.
The present paper is organized as follows. First we consider the
Hamiltonian which describes the properties of an open
quantum system. By definition, an
open quantum system is localized in space and embedded
in the continuum of scattering wavefunctions
due to which the states of the system become resonance states
and have, generally, a finite lifetime.
This Hamiltonian is non-Hermitian.
In Sects. \ref{eigf2} and \ref{eigf3}, respectively,
we consider the eigenvalues and
eigenfunctions of a $2\times 2$ and of a $3\times 3$
non-Hermitian Hamilton operator.
The eigenstates are coupled via one common
continuum and show the typical EM.
The eigenfunctions of a non-Hermitian Hamilton operator
are biorthogonal, and their phases are
not rigid in approaching an EP.
We show in the next section \ref{nonl} that the
EPs cause nonlinear effects in an open quantum system.
They can be traced best in the resonance structure of the
scattering cross section under the condition that
it is influenced by two adjoining EPs.
The further results given in Sect. \ref{smatr1c} show that,
in the one-channel case, the resonance structure of the cross section
is almost independent of the EM.
It is therefore impossible to infer, in this case, the
existence of EPs from a study of the resonance structure of the cross
section. The situation is different in the
case with two (or more) channels as discussed in Sect. \ref{smatr2c}.
Here, EPs and EM cause interesting observable effects.
In Sect. \ref{disc}, we summarize and discuss the results obtained
in the present paper. We conclude the paper in
the last section \ref{concl} with some general remarks on EPs and,
above all, on the eigenfunctions of a non-Hermitian Hamilton
operator. By doing this, we hope to stimulate experimental studies in
order to prove, on the one
hand, the theoretical results, and to use, on the other hand, the
rich possibilities they provide for applications.
\section{Eigenvalues and eigenfunctions of a $2\times 2$
non-Hermitian Hamiltonian}
\label{eigf2}
Let us consider the $2\times 2$ non-Hermitian matrix
\begin{eqnarray}
\label{ham2}
{\cal H}^{(2)} =
\left( \begin{array}{cc}
\varepsilon_{1} \equiv e_1 + \frac{i}{2} \gamma_1 & ~~~~\omega \\
\omega & ~~~~\varepsilon_{2} \equiv e_2 + \frac{i}{2} \gamma_2 \\
\end{array} \right) \; .
\end{eqnarray}
Here, $\varepsilon_i$ are the complex eigenvalues of the basic
non-Hermitian operator \cite{comment1}.
The $\omega$ stand for the coupling matrix elements of the two
states via the common environment \cite{comment3}. Their mathematical
expression is derived in Sect. 3 of \cite{top}. They
are complex where Re($\omega$) is the principal value integral
and Im($\omega$) is the residuum \cite{top}.
The imaginary part is responsible for coherent processes
occurring in the system, while the real part contains
decoherences. The non-Hermitian matrix
\begin{eqnarray}
\label{ham0}
{\cal H}_0^{(2)} =
\left( \begin{array}{cc}
\varepsilon_{1} \equiv e_1 + \frac{i}{2} \gamma_1 & 0 \\
0 & ~~~~\varepsilon_{2} \equiv e_2 + \frac{i}{2} \gamma_2 \\
\end{array} \right)
\end{eqnarray}
describes the system without any
mixing of its states via the environment. In other words, $\omega =0$
corresponds to vanishing EM of the eigenstates.
In this paper, our main interest is in the effects caused by
$\omega$. Most visible are the
changes in the widths of the states: the
original widths $\gamma_i$ of the states
turn into the widths $\Gamma_i$ of the eigenstates of ${\cal H}^{(2)}$
due to $\omega \ne 0$.
\subsection{Eigenvalues of ${\cal H}^{(2)}$ }
\label{eigv}
The eigenvalues ${\cal E}_i \equiv E_i + \frac{i}{2} \Gamma_i$
of ${\cal H}^{(2)}$ are, generally, complex:
\begin{eqnarray}
\label{eig1}
{\cal E}_{1,2} \equiv E_{1,2} + \frac{i}{2} \Gamma_{1,2} =
\frac{\varepsilon_1 + \varepsilon_2}{2} \pm Z
\end{eqnarray}
with
\begin{eqnarray}
\label{eig2}
Z \equiv \frac{1}{2} \sqrt{(\varepsilon_1 - \varepsilon_2)^2 + 4
\omega^2}
\; .
\end{eqnarray}
Here, $E_i$ is the energy and $\Gamma_i$ the width of the eigenstate $i$.
The properties of the ${\cal E}_i$ trajectories as a function of a
certain parameter are well known. They contain
\begin{description}
\item[{\it ~Level repulsion:}]
two states repel each other in accordance with Re$(Z)$;
\item[{\it ~Width bifurcation:}]
the widths of two states bifurcate in accordance with Im$(Z)$;
\item[{\it ~Avoided level crossing:}]
two discrete (or narrow resonance) states avoid crossing
\cite{landau,zener}
because $ (\varepsilon_1 - \varepsilon_2)^2 + 4 \omega^2
>0$ and therefore always $Z\ne 0$;
\item[{\it ~Appearance of an EP:}]
two states cross when $Z=0$.
\end{description}
Altogether, the crossing scenario that is caused by an EP
in non-Hermitian quantum physics,
with, generally, complex eigenvalues ${\cal E}_i \equiv
E_i + \frac{i}{2} \Gamma_i$ of the Hamiltonian,
needs to be considered in terms
of a combined behavior of energy ($E_i$) and width
($\Gamma_i$) trajectories of the two states $i=1,~2$.
A level repulsion will generally appear in the
Re$({\cal E}_i) = E_i $ trajectories together with a free crossing
of the Im$({\cal E}_i) \propto \Gamma_i $
trajectories; while a bifurcation of the widths $\Gamma_i$
is accompanied, generally, by a free crossing of the energy
trajectories $E_i $. The last case
was illustrated, some years ago, in Figs. 1 to 3
of the paper \cite{mudiisro}.
Sometimes, the crossing phenomenon in non-Hermitian quantum physics
is called {\it avoided level crossing in the complex plane},
e.g. \cite{burosa1}. In the present paper we use the
term {\it avoided level crossing} or {\it level repulsion} according
to the standard definition for
the Re$({\cal E}_i) = E_i$ trajectories;
while the term {\it width bifurcation} is used for
the corresponding phenomenon appearing in the Im$({\cal E}_i) \propto
\Gamma_i$ trajectories. We underline once more: {\it both phenomena are
combined} in non-Hermitian quantum physics.
In \cite{pra93}, the case with equal widths $\gamma_1 = \gamma_2$ of
the two states and with imaginary coupling $\omega = i
\,\omega_0$ is solved analytically. As a result, two EPs appear, see
Eqs. (14)-(16) and
Fig. 1.a-d in \cite{pra93}. Between the two EPs, the widths
bifurcate up to a maximum value. In the present paper, we
consider complex $\omega_i$ where only one EP can be seen clearly.
Nevertheless, also the second EP has some influence onto the
dynamical properties of the system (see the numerical
results given in the present paper).
When $\omega = 0$, the energies $\varepsilon_i$ vary smoothly as a
function of any parameter. According to (\ref{eig1}) and
(\ref{eig2}), $Z= \pm \frac{1}{2}(\varepsilon_1 - \varepsilon_2) $ and
${\cal E}_{1,2} \to \varepsilon_{1,2}$ in this case. This means that no EP
can be related to the Hamiltonian ${\cal H}_0^{(2)}$.
\subsection{Eigenfunctions of ${\cal H}^{(2)}$ }
\label{eigfu}
The properties of the eigenfunctions $\Phi_i$ of a non-Hermitian
operator are less known. They are the following.
\begin{description}
\item[{\it ~~Biorthogonality:}]
The eigenfunctions and eigenvalues of every Hamilton operator
have to fulfill the two conditions
\begin{eqnarray}
\label{eif1}
{\cal H} |\Phi_i\rangle = {\cal E}_i|\Phi_i\rangle \hspace*{1cm}
\langle \Psi_i|{\cal H} = {\cal E}_i \langle \Psi_i|\; .
\end{eqnarray}
A Hermitian operator has real eigenvalues such that
$\langle \Psi_i| = \langle \Phi_i|$ in this case.\\
The eigenvalues of a non-Hermitian operator
are generally complex such that the left and right eigenfunctions
differ from one another,
$\langle \Psi_i| \ne \langle \Phi_i|$.\\
This is valid also for the eigenvalues and eigenfunctions of
the two symmetric operators
${\cal H}^{(2)}$ and ${\cal H}_0^{(2)}$. In this case, the
relation between the left and right eigenfunctions
is given by \cite{mudiisro,savin1,savin2}
\begin{eqnarray}
\label{eif2}
\langle \Psi_i| = \langle \Phi_i^*|\; .
\end{eqnarray}
\item[{\it ~~Normalization:}]
In the case of a Hermitian operator,
$\langle \Phi_i|\Phi_j\rangle$ is real and the eigenfunctions are usually
normalized to $\langle \Phi_i|\Phi_j\rangle =1$.\\
To smoothly describe the transition from a closed system
with discrete states to a weakly open one with narrow resonance states
(described by ${\cal H}^{(2)}$), it is meaningful to use the normalization
\begin{eqnarray}
\label{eif3}
\langle \Phi_i^*|\Phi_j\rangle = \delta_{ij}
\end{eqnarray}
for the eigenfunctions. The value
$\langle \Phi_i^*|\Phi_j\rangle \equiv (\Phi_i|\Phi_j) $ is
however complex such that
the phases of the two eigenfunctions $\Phi_{1,2}$ relative
to one another cannot be rigid. They are rather parameter dependent
since $\langle \Phi_i^*|\Phi_j\rangle$ has to be real,
according to (\ref{eif3}), for every parameter value.
It follows from (\ref{eif3}), that the values of the
standard expressions are changed \cite{top},
\begin{eqnarray}
\label{eif4}
\langle\Phi_i|\Phi_i\rangle =
{\rm Re}~(\langle\Phi_i|\Phi_i\rangle) ~; \quad
A_i \equiv \langle\Phi_i|\Phi_i\rangle \ge 1
\end{eqnarray}
\vspace*{-1.2cm}
\begin{eqnarray}
\label{eif5}
\nonumber
\langle\Phi_i|\Phi_{j\ne i}\rangle =
i ~{\rm Im}~(\langle\Phi_i|\Phi_{j \ne i}\rangle) =
-\langle\Phi_{j \ne i}|\Phi_i\rangle
\end{eqnarray}
\vspace*{-1.4cm}
\begin{eqnarray}
\label{eif6}
|B_i^j| \equiv |\langle \Phi_i | \Phi_{j \ne i}\rangle| ~\ge ~0
\end{eqnarray}
\item[{\it ~~Phase rigidity:}]
The phase rigidity is a quantitative measure for the biorthogonality
of the eigenfunctions. It is defined by \cite{top}
\begin{eqnarray}
\label{eif7}
r_k ~\equiv ~\frac{\langle \Phi_k^* | \Phi_k \rangle}{\langle \Phi_k
| \Phi_k \rangle} ~= ~A_k^{-1}
\end{eqnarray}
by taking into account the normalization (\ref{eif3}).
In Hermitian systems, the eigenfunctions are orthogonal and
$r_k=1$.
In systems with well-separated resonance states, it follows
$r_k\approx 1$; however it is never $r_k= 1$
\cite{top,savin1,savin2}.
Hermitian quantum physics is, in this case, a reasonable
approximation for the description of the open quantum system.
In approaching an exceptional point, it follows $r_k \to 0$
\cite{top}.
The phase rigidity is experimentally studied on microwave billiards
\cite{richter2}. The variation of $r_k $
in approaching the EP is found, indeed.
The experimental result agrees with the relation (\ref{sec8})
discussed below \cite{comment4}.
Our calculations show an interesting unexpected property
for two nearby states with similar values of their widths
$\gamma_i$ \cite{epj1,pra93}:
$r_k\approx 1$ at maximum width bifurcation.
These results will be discussed below in detail.
An analog result is found
for two nearby states with level repulsion
which is caused by an EP \cite{pra93}.
\item[{\it ~~Mixing of the eigenfunctions via the environment (EM):}]
The Schr\"odinger equation for the basic wave functions
$\Phi_i^0 $ with the Hamiltonian (\ref{ham0}) is
\begin{eqnarray}
\label{eif8}
({\cal H}_0^{(2)} - \varepsilon_i) ~| \Phi_i^0 \rangle = 0
\end{eqnarray}
while the Schr\"odinger equation with the full Hamiltonian
(\ref{ham2}) reads
\begin{eqnarray}
\label{eif9}
({\cal H}^{(2)} - {\cal E}_i) ~| \Phi_i \rangle = 0 \; .
\end{eqnarray}
Eq. (\ref{eif9}) can be rewritten in a Schr\"odinger equation with
source term,
\begin{eqnarray}
\label{eif11}
({\cal H}^{(2)}_0 - {\cal E}_i) ~| \Phi_i \rangle = -
\left(
\begin{array}{cc}
0 & \omega \\
\omega & 0
\end{array} \right) |\Phi_i \rangle \; .
\end{eqnarray}
Now, we can use the standard representation of the $\Phi_i$ in the
$\{ \Phi_n^0 \}$
\begin{equation}
\label{eif12}
\Phi_i=\sum \, b_{ij} ~\Phi_j^0 \; ;
\quad \quad b_{ij} = \langle \Phi_j^{0 *} | \Phi_i\rangle
\end{equation}
under the condition that the $b_{ij}$
are normalized by $\sum_j (b_{ij})^2 = 1$, i.e.
\begin{eqnarray}
\label{eif13}
\sum_j (b_{ij})^2
= {\rm Re} [\sum_j(b_{ij} )^2]
= \sum_j \{[{\rm Re} (b_{ij})]^2 - [{\rm Im} (b_{ij})]^2\}
=1 \; .
\end{eqnarray}
We are interested in the probability of EM which is defined by
\begin{eqnarray}
\label{eif14}
\sum_j |b_{ij}|^2
= \sum_j \{[{\rm Re} (b_{ij})]^2 + [{\rm Im} (b_{ij})]^2\} \; .
\end{eqnarray}
From (\ref{eif13}) and (\ref{eif14}) follows
\begin{eqnarray}
\label{eif15}
\sum_j |b_{ij}|^2 \ge 1 \; .
\end{eqnarray}
In the neighborhood of an EP, $\sum_j |b_{ij}|^2 \gg 1$; and
$\sum_j |b_{ij}|^2 \to \infty $
in approaching an EP \cite{epj1}.\\
When the maximum width bifurcation (or level repulsion) is
parametrically reached, the eigenfunctions $\Phi_i$
are almost orthogonal, however EM contained in the wavefunctions
of the eigenstates, is strong \cite{epj1}.\\
\item[{\it ~~Eigenfunctions of ${\cal H}^{(2)}$ at an EP:}]
According to analytical and numerical results
\cite{ro01,marost3,gurosa,berggren}, it is
\begin{eqnarray}
\label{sec8}
\Phi_1^{\rm cr} \to ~\pm ~i~\Phi_2^{\rm cr} \; ;
\quad \qquad \Phi_2^{\rm cr} \to
~\mp ~i~\Phi_1^{\rm cr}
\end{eqnarray}
where $\Phi_i^{\rm cr}$ are the eigenfunctions at an EP.
The EP is however a point in the continuum of scattering wavefunctions
and is therefore of measure zero.
Hints to the existence of an EP can be found in observable
values. These are, above all, avoided level crossing and width
bifurcation which both are caused by an EP.
We mention here that the relations (\ref{sec8}) are in agreement with
experimental results obtained on microwave billiards
\cite{richter1}.
These nice results \cite{comment4} are confirmed
independently from one another
by different authors, e.g. \cite{ro01,marost3,gurosa,berggren}.
\end{description}
The eigenfunction $\Phi_i$ of the non-Hermitian Hamilton operator
$\cal H$ is the main part of the wavefunction of the resonance state
$i$ inside the localized part of the system. The wavefunction of the
resonance state including its tail is given in Eq. (42) in \cite{top}.
\subsection{Numerical results}
\label{num2}
\begin{figure}
\caption{
\footnotesize{
Eigenvalues ${\cal E}
\label{fig1}
\end{figure}
We refer to the analytical results obtained and discussed in \cite{pra93}
for the eigenvalues and eigenfunctions of $N=2$ states. In the
analytical studies $\omega$ is assumed to be either real or imaginary
which is, of course,
seldom realized in realistic systems. Nevertheless, the results of
these studies provide some
insight into the basic features of the eigenvalues and eigenfunctions
of a non-Hermitian operator, above all near to an EP.
In Fig. \ref{fig1}, we show numerical results obtained for systems
under more realistic conditions in which $\omega$ is complex.
The energies $e_i$ are parameter dependent while the $\gamma_i$
as well as the $\omega$ are parameter independent. The difference
between the widths $\gamma_i$ of the two states as well as
$\omega$ are chosen in such a manner that an EP occurs.
In both cases, the phase rigidity $r_i$ approaches zero at the EP;
and is near to one at maximum width bifurcation. The mixing
$|b_{ij}|$ of the two eigenfunctions increases without limit in
approaching the EP; and is finite,
$|b_{ij}| < 1$, in the parameter region of the maximum width bifurcation.
The hint to a second EP can be seen in the eigenvalues as well as
in the eigenfunctions at, respectively, $a=0.68$ in Fig. \ref{fig1} left
and $a=0.0$ in Fig. \ref{fig1} right.
Similar results are obtained when the energies $e_i$
(as well as the $\omega$) are chosen to be
parameter independent while the $\gamma_i$ are parameter dependent,
see e.g. \cite{pra93}.
In our calculations, we choose the coupling strength $\omega$ between
system and environment parameter independent in order to exclude
formally its influence onto the dynamics of the open quantum system.
This allows us to fix the role of nonlinear processes.
To summarize the results of Fig. \ref{fig1}, we state the following.
\begin{description}
\item[~~~~{\it Phase rigidity and mixing of the eigenfunctions
in approaching an EP}]
\begin{eqnarray}
\label{sec9}
r_i \to 0
~~;~~~ |b_{ij}| \to \infty \; ,
\end{eqnarray}
in general
\begin{eqnarray}
\label{sec11}
1 > r_i \ge 0 ~~;~~~ |b_{ij}| > 1 \; .
\end{eqnarray}
\item[~~~~{\it Phase rigidity and mixing of the wavefunctions between two EPs
}]
\begin{eqnarray}
\label{sec12}
r_i \to 1 ~~;~~~ |b_{ij}| < 1
\end{eqnarray}
in approaching maximum width bifurcation.
In the analytically solvable case with imaginary $\omega$ it is \cite{epj1}
\begin{eqnarray}
\label{sec13}
|b_{ij}| \approx 0.7
\end{eqnarray}
meaning that the eigenfunctions
$\Phi_i$ are almost orthogonal
and strongly mixed in the set of
basic wavefunctions \{$\Phi_k^{0}$\}
in approaching maximum width bifurcation.
\end{description}
Similar results are obtained when Re$(\omega) \gg {\rm Im}(\omega)$.
The difference to the results shown in Fig. \ref{fig1} is that now
level repulsion is the main effect caused by the EP \cite{epj1}.
We remark here that the
evolution from $r_k=0$ at the EP to $r_k\approx 1$ at the maximum
width bifurcation
is driven exclusively by the
nonlinear source term of the Schr\"odinger equation (see
Sect. \ref{nonl})
since $\omega = \mathrm{const}$ in our calculations.
When $\omega = 0$, it is ${\cal E}_i = \varepsilon_i$. In this case,
there are no EPs as mentioned in Sect. \ref{eigv}.
\section{Eigenvalues and eigenfunctions of a $3\times 3$
non-Hermitian Hamiltonian}
\label{eigf3}
Let us consider the Hamiltonian
\begin{eqnarray}
\label{nxn1}
{\cal H}^{(N)} =
\left( \begin{array}{cccc}
\varepsilon_{1} & \omega_{12} & \ldots &\omega_{1N} \\
\omega_{21} & \varepsilon_{2} & \ldots & \omega_{2N}\\
\vdots & \vdots & \ddots& \vdots \\
\omega_{N1} & \omega_{N2} & \ldots & \varepsilon_{N} \\
\end{array} \right)
\end{eqnarray}
where
$\varepsilon_{i} \equiv e_i + i/2~\gamma_i$ are the
energies and widths of the $N$ states;
$\omega_{i\,k\ne i}$
are the complex coupling matrix elements of the states $i$ and $k$ via
the common environment; and the $\omega_{i\,k=i}$ denote
the selfenergy of the state $i$ which is
mostly assumed to be included in the $\varepsilon_{i}$ in our
calculations.
The values $\omega_{ik}$ for different $i$ and $k$ differ usually
from one another. It is however a
well-known fact from numerical calculations \cite{top}, that a resonance
state becomes trapped by another
nearby state when its width is somewhat smaller than that of the
nearby state. Finally the widths of most
relatively short-lived states of the system are similar to one
another. These states determine the evolution of the system.
\subsection{Numerical results for $N=3$}
\label{num3}
\begin{figure}
\caption{
\footnotesize{
Eigenvalues ${\cal E}
\label{fig2}
\end{figure}
In Fig. \ref{fig2} we show the numerical results
obtained for the eigenvalues and eigenfunctions of $N=3$ states by using
parameters similar to those for $N=2$ states in Fig. \ref{fig1}.
The $\omega_{ij} \equiv \omega$ are
chosen to be equal for the different $i$ and $j$.
Above all, they are parameter independent,
similar as the corresponding $\omega$ in Fig. \ref{fig1}.
The comparison of Figs. \ref{fig1} and \ref{fig2} shows that the
main features of the eigenvalues and eigenfunctions are the same
for $N=2$ and $N=3$.
The eigenvalues repel each other in energy and their widths
bifurcate under the influence of an EP; the phase
rigidities approach zero and the mixing of the wavefunctions
increases without limit at and near to an
EP; the phase rigidities approach the value one and the corresponding
almost orthogonal wavefunctions are mixed
when the width bifurcation is maximum. These effects are enhanced
when $N=3$ as compared to those occurring when $N=2$.
As in the two-level case, similar results are obtained
when Re$(\omega) \gg {\rm Im}(\omega)$. In this case, level repulsion
is the main effect caused by the EP \cite{epj2}.
\subsection{Third-order exceptional points}
\label{third}
Hints to third-order EPs (at which three eigenvalues coalesce at one
parameter value) cannot be found in Fig. \ref{fig2}. The
reason is that every EP is a point in
the continuum (with measure zero) which can be identified only by its
influence onto observable values in a finite parameter range around it.
Furthermore, a third-order EP occurring in the system without any EM
of its states via the environment, is shielded due to EM in a
realistic system. It can therefore not be observed in an open quantum
system.
According to the numerical results shown in Fig. \ref{fig2},
we see several second-order EPs in a critical
parameter region around the value at which the
conditions for a third-order EP are mathematically fulfilled.
The observable effect caused by a third-order EP,
is some clustering
of second-order EPs which occurs in a finite
parameter range around the value at which the third-order EP is
mathematically expected. This fact is discussed in detail in \cite{pra93}.
These results show the differences between a formal-mathematical
result and
effects that can really be observed in a physical system. The point
is that two states that cross at an EP
lose (due to the EM of the states)
their individual character
in a finite parameter range around the EP; and
the areas of influence of various
second-order EPs overlap. In this manner, they
amplify, collectively, their impact onto physical values with the
result that, e.g., a third-order EP
is shielded in a physical system.
\section{Schr\"odinger equation with nonlinear source term}
\label{nonl}
The Schr\"odinger equation (\ref{eif9})
can be rewritten in the Schr\"odinger equation (\ref{eif11})
with source term. In this equation,
the coupling $\omega$ of the states $i$ and ${j\ne i}$
via the common environment of scattering wavefunctions (EM)
is contained in the source term.
The source term is nonlinear \cite{top}
\begin{eqnarray}
\label{16}
({\cal H}_0^{(2)} - {\cal E}_i) ~| \Phi_i \rangle =
\sum_{k=1,2} \langle
\Phi_k|W|\Phi_i\rangle \sum_{m=1,2} \langle \Phi_k |\Phi_m\rangle
|\Phi_m\rangle
\end{eqnarray}
since $\langle \Phi_k |\Phi_m\rangle \ne 1$ for $k= m$ and
$\langle \Phi_k |\Phi_m\rangle \ne 0$ for $k\ne m$, see
Eqs. (\ref{eif4}) and (\ref{eif6}).
In (\ref{16}) the definition $W \equiv
-\left(
\begin{array}{cc}
0 & \omega \\
\omega & 0
\end{array} \right)$
is used for convenience.
The most important part of the nonlinear contributions is contained in
\begin{eqnarray}
\label{17}
({\cal H}_0^{(2)} - {\cal E}_n) ~| \Phi_n \rangle =
\langle \Phi_n|W|\Phi_n\rangle ~|\Phi_n|^2 ~|\Phi_n\rangle \; .
\end{eqnarray}
Far from an EP, the source term is (almost) linear since
$\langle \Phi_k|\Phi_{k }\rangle \to 1$ and
$\langle \Phi_k|\Phi_{l\ne k }\rangle = -
\langle \Phi_{l \ne k }|\Phi_{k}\rangle \to 0$.
Near to an EP however, the source term is nonlinear since
$\langle \Phi_k|\Phi_{k }\rangle \ne 1$ and
$\langle \Phi_k|\Phi_{l\ne k }\rangle = -
\langle \Phi_{l \ne k }|\Phi_{k}\rangle \ne 0$.
Due to the EM involved in the source term, the eigenfunctions $\Phi_i$
and eigenvalues ${\cal E}_i$
of ${\cal H}^{(2)}$ contain global features.
The environment of an open quantum system is
the continuum of scattering wavefunctions which
has an infinite number of degrees of freedom. It may cause therefore,
among others, a dynamical phase transition \cite{top,epj2}.
The transition is non-adiabatic \cite{top,ropp,epj2}.
In order to illustrate the nonlinear effects involved in the source
term of the Schr\"odinger equation (\ref{16}) let us consider, as an
example, the resonance part of the $S$ matrix
from which the resonance structure
of the cross section can be calculated,
\begin{eqnarray}
\label{cro}
\sigma (E) \propto |1-S(E)|^2 \; .
\end{eqnarray}
A unitary representation of the resonance part of the
$S$ matrix in the case of two resonance states coupled to a
common continuum of scattering wavefunctions reads \cite{ro03}
\begin{eqnarray}
\label{sm4}
S = \frac{(E-E_1-\frac{i}{2}\Gamma_1)~(E-E_2-\frac{i}{2}\Gamma_2)}{(E-E_1+
\frac{i}{2}\Gamma_1)~(E-E_2+\frac{i}{2}\Gamma_2)}\; .
\end{eqnarray}
Here, the influence of the EPs onto the cross section is contained
in the eigenvalues
${\cal{E}}_i = E_i + i/2~\Gamma_i$. The expression (\ref{sm4})
allows us therefore to receive reliable results also when the phase
rigidity is reduced, $r_k < 1$.
The expression (\ref{sm4}) can be used in order to derive analytically
an expression for the
resonance structure of the S-matrix at an EP
\cite{ro03},
\begin{eqnarray}
\label{sm5}
S = 1-2i\frac{\Gamma_d}{E-E_d+\frac{i}{2}\Gamma_d}-
\frac{\Gamma_d^2}{(E-E_d+\frac{i}{2}\Gamma_d)^2}
\end{eqnarray}
where $E_1=E_2\equiv E_d$ and
$\Gamma_1=\Gamma_2\equiv \Gamma_d$. As a result of interferences,
this expression consists of three terms, one of which is
explicitly nonlinear. The resonance structure (\ref{sm5}) shows
two bumps approximately at the energies $\varepsilon_i$ of the
two resonance states, and an interference minimum between them.
This structure resembles that of two more or less isolated
resonances the energies of which are $\varepsilon_1$ and $\varepsilon_2$.
Many years ago, the resonance structure of the cross section with
two resonance states was calculated as a function of the coupling
strength
between system and environment \cite{mudiisro}.
These calculations are performed by using the standard expression
for the S-matrix with the energies $\varepsilon_i$ replaced by
the eigenvalues ${\cal E}_i$.
The results show a double-hump structure at the
EP (Fig. 9 in \cite{mudiisro})
which corresponds exactly to the expression
(\ref{sm5}) obtained analytically.
Our conclusion from these results is, that nonlinear terms determine
the resonance structure of the cross section in the
neighborhood of an EP.
\section{S-matrix: resonance structure in the one-channel case}
\label{smatr1c}
According to textbooks, the resonance structure of the S-matrix is
well understood when all resonance states are coupled to one and the
same decay channel (this is the so-called {\it one-channel case}).
The resonance structure is calculated by means of the Hermitian
formalism, in which no EPs are involved.
The basic results of theoretical and experimental studies agree under
the condition that the resonances do not overlap, i.e. when they are
well separated from one another in the cross section.
In order to receive a better understanding of the resonance structure
of the cross section also in this simple case, we calculate it with
and without taking into account EM of the resonance states. In the
first case ($\omega \ne 0$), EPs are involved, while in the second
case ($\omega = 0$) EPs do not appear. The Hamiltonian is
non-Hermitian in both cases. We compare the resonance structure of
the S-matrix obtained in the two cases.
We performed calculations with different values of $\omega$ and for
different sets of resonance states. In Figs. \ref{fig3},
\ref{fig4} and \ref{fig5}
we show typical results. They are obtained by choosing
the values of $\omega$
to be the same as in Fig. \ref{fig1} ($N=2$) and in
Fig. \ref{fig2} left ($N=3$), respectively. The $\omega$ are complex
and near to those known from realistic systems, so that
results can be obtained only numerically.
The calculations are performed
with the energies $\varepsilon_i \equiv e_i + \frac{i}{2} \gamma_i$
chosen in Figs. \ref{fig1} and \ref{fig2}, respectively. All states
are coupled to one and the same continuum.
\subsection{Numerical results: resonance structure with $\omega \ne 0$}
\label{num6with}
Using (\ref{cro}) and (\ref{sm4}) for the S-matrix, we calculated the
resonance structure of the cross section with, respectively, two and
three resonance states under different conditions by taking into account EM.
In all cases with $N=2$ resonance states we see a double-hump structure,
while the cross section shows a triple-hump structure when $N=3$. For
examples see Figs. \ref{fig3}.a, \ref{fig4}.a and \ref{fig5}.a.
In Fig. \ref{fig3}.a, the coupling of the states to the continuum is
relatively weak, see the corresponding eigenvalue
pictures Fig. \ref{fig1}.a,b. The resonance part of the S-matrix shows
the typical two-hump structure.
In Fig. \ref{fig4}.a the difference between the two widths
$\gamma_i/2$ is relatively large, see the corresponding eigenvalue
pictures Fig. \ref{fig1}.f,g.
In order to see the influence of an EP, also the coupling
strength $|\omega| $ has to be relatively large in this case.
According to Fig. \ref{fig1}.g, it is
$\sum_{i=1,2} \Gamma_i \approx \Gamma_2$ where $\Gamma_2$ is the width
of the short-lived state $2$. As can be seen from Fig. \ref{fig4}.a, the
cross section shows the characteristic double-hump structure not only
in the very neighborhood of the EP but also beyond this value.
Using Eqs. (\ref{cro}) and (\ref{sm4}) for the S-matrix,
we are able to reproduce the double-hump structure of the cross section
as a function of the coupling strength $\omega$ which is shown in
Fig. 9 of the old paper \cite{mudiisro}. The calculations
in \cite{mudiisro} are performed on
the basis of the standard S-matrix theory,
however with the energies $\varepsilon_i$ replaced by the eigenvalues
${\cal E}_i$. The role of the interference of the
different contributions to the resonance structure is also
shown in Fig. 9 in \cite{mudiisro}.
In Fig. \ref{fig5}.a, we show the results with $N=3$ for the case that
the widths of the three states are similar to one another
and $\omega$ is relatively small, see the
corresponding eigenvalue pictures Figs. \ref{fig2}.a,b.
The cross section shows the
typical three-hump structure at different values of the parameter
$a$ near to the region with several neighbored EPs
as well as beyond it.
The 2D-contour plots of the resonance structure of the cross section
with two levels, calculated with $\omega \ne 0$, are shown in
Figs. \ref{fig3}.c and \ref{fig4}.c.
In both cases, the cross section falls down steeply to its minimum
value between the two EPs.
Here the eigenfunctions are (almost) orthogonal and mixed
in the set of basic wavefunctions $\{\Phi_n^0\}$, see
Fig. \ref{fig1}. As can be seen from Fig. \ref{fig4}.c, the minimum
value appears at the value of maximum width bifurcation (see
Fig. \ref{fig1} right column).
The 2D-contour plot of the resonance structure of the cross section
with three levels, calculated with $\omega \ne 0$, is the same as
that calculated with $\omega = 0$ (see next section
\ref{num6without}).
In any case, the 2D-contour plots of the resonance structure
of the cross section should not be confused with eigenvalue
trajectories that avoid crossing. Furthermore, they are not
symmetric with respect to $E=0$ corresponding to the
eigenvalue pictures Figs. \ref{fig1} and \ref{fig2} (in difference to
Fig. 9 for the cross section in \cite{mudiisro} that is related to
eigenvalue figures which are symmetric with respect to $E=0$).
\begin{figure}
\caption{
\footnotesize{
Cross section with two resonance states. The parameters are the same
as in Fig. \ref{fig1}.}
\label{fig3}}
\end{figure}
\begin{figure}
\caption{
\footnotesize{
Cross section with two resonance states. The parameters are the same
as in Fig. \ref{fig1}.}
\label{fig4}}
\end{figure}
\begin{figure}
\caption{
\footnotesize{
Cross section with three resonance states. The parameters are the same
as in Fig. \ref{fig2}.}
\label{fig5}}
\end{figure}
\subsection{Numerical results: resonance structure with $\omega = 0$}
\label{num6without}
We compare the resonance structure of the S-matrix
obtained in the non-Hermitian formalism with taking into account EM
($\omega \ne 0$) to that obtained without EM
(corresponding to $\omega =0$).
Typical results are shown in Figs. \ref{fig3}.b and \ref{fig4}.b
for $N=2$ and in Fig. \ref{fig5}.b. for $N=3$. These figures have to be
compared with, respectively, Figs. \ref{fig3}.a, \ref{fig4}.a
and \ref{fig5}.a.
As in all our calculations, the resonance
structure of the S-matrix is almost the same for $\omega = 0$ and
$\omega \ne 0$. Differences in the resonance structure of the S-matrix
can be seen only when $\omega$ is large, see Fig. \ref{fig4}.a as
compared to Fig. \ref{fig4}.b. In all other cases, the resonance
structure is typically the same, see for example Fig. \ref{fig3}.a
as compared to Fig. \ref{fig3}.b and Fig. \ref{fig5}.a
as compared to Fig. \ref{fig5}.b.
In Fig. \ref{fig5}.c we
show the 2D-contour plot of the cross section with three resonances,
calculated with $\omega = 0$. It looks like that obtained with
$\omega \ne 0$. Also the results for $N=2$ with $\omega = 0$
are typically the same as those with $\omega \ne 0$ (which are shown
in Figs. \ref{fig3}.c and \ref{fig4}.c).
In the 2D-contour plot of the resonance structure with three levels
(Fig. \ref{fig5}.c), we see two second-order EPs instead of a
third-order EP. This result corresponds to the discussion on
third-order EPs in Sect. \ref{third}.
\subsection{Influence of exceptional points}
\label{infl}
In Figs. \ref{fig3}, \ref{fig4} and \ref{fig5} we have shown the
numerical results obtained for the resonance structure of the
S-matrix when the system is considered, respectively, with EM
($\omega \ne 0$) and without EM ($\omega = 0$).
We considered the most sensitive situation where the resonance
structure is influenced by two adjoining EPs.
EPs and EM appear only when $\omega \ne 0$.
Nevertheless, the resonance structure of the
cross section is almost the same in the two cases.
This result is valid not only when the number of
resonances is two but also when it is larger than two.
This means that the resonance structure of the cross section is almost
independent of EM, i.e. on the
coupling of the states via one common continuum of scattering states.
This unexpected result can be explained in the following manner.
The evolution of the system between the two EPs is driven exclusively by the
nonlinear source term
of the Schr\"odinger equation (\ref{eif11}) since $\omega$ is constant
in our calculations and can therefore not be responsible for the
width bifurcation. Obviously, the
nonlinear source term is able, in the one-channel case, to largely
conserve the resonance structure of the cross section.
Altogether, we have here some type of self-affirmation. Analytical
results for the resonance structure of the cross section
can be obtained, in the one-channel case (with well separated
resonances), when the system is described by a Hermitian
operator the eigenvalues and eigenfunctions
of which are smoothly parameter dependent. These results agree
quite well with those of experimental observations.
The description of the system as a closed system seems therefore
to be justified. In addition,
more complicated cases with e.g. more than one open channel, cannot
be solved analytically in the standard theory. Thus,
the justification of the Hermitian approach for the description
of the system (with well separated resonances) rests solely
on the analytical results obtained for the one-channel case.
Our results for the one-channel case show that this case cannot
be used
in order to prove or disprove the Hermitian quantum physics.
To that purpose, the study of
more complicated cases is needed, see the
next section \ref{smatr2c}.
We mention here that the resonance scattering at third-order EPs is
studied in \cite{heiwu} by using a method that is different from
ours. Also in these calculations,
three peaks appear in the cross section. According to \cite{heiwu},
the ``sprouting out'' of the
three levels under parameter variation depends on the particular
parameter chosen. A similar result is obtained
\cite{epj2,pra93} in the framework of the formalism presented
in the present paper. However, a third-order EP does not appear in our
calculations, see Fig. \ref{fig5}.c. Instead we see several
second-order EPs and hints to them, respectively, which agrees
with the discussion in Sect. \ref{third}.
\section{S-matrix: resonance structure in the two-channel case}
\label{smatr2c}
We will not provide here new numerical results
for the two-channel case. Instead we refer to results obtained
a few years ago \cite{muro,burosa1,burosa2} for the
transmission through a small system (quantum dot).
In order to describe transmission, we have to consider at least
two channels: the entrance and the exit channel. In
\cite{muro}, unexpected experimental results
\cite{yacobi,schuster,heiblum} on the resonance structure of the
transmission could be explained. In the papers
\cite{burosa1,burosa2}, different
calculations are performed for both, a system with a small number of
resonance states as well as for a system with many states. In the
last case, the calculations are performed
first in the tight-binding approach according to the formalism
presented by Datta \cite{datta}.
Then, the non-Hermitian Hamilton operator is diagonalized and the
eigenvalues and eigenfunctions are determined. This formalism is
equivalent to that used in the present paper (see
p. 437 in \cite{knoss}).
The calculations for systems with a small number of states are
performed according to S-matrix theory and using the
tight-binding approach \cite{saburo}.
The results for the two-channel case
are more interesting than those for the one-channel
case discussed in Sect. \ref{smatr1c} since the influence of EPs and
EM can be seen immediately \cite{muro,burosa1,burosa2}.
The resonance structure of the transmission
can be traced back to the eigenvalues of the non-Hermitian operator;
EM of the states can generally not be neglected;
and the phase rigidity is anticorrelated with the transmission
probability. The last property is the most interesting one. It
has no analog in the standard formalism.
The phase rigidity is a theoretical value
characteristic of the non-Hermitian formalism. It can be traced
experimentally in a microwave
billiard \cite{richter2}. It will however be difficult to study
it directly in a realistic system. According to
the above mentioned numerical results \cite{burosa1,burosa2} it
is, however, anticorrelated with an observable value, namely with
the transmission probability.
This anticorrelation of the theoretical value (phase rigidity)
with an observable one (transmission probability)
allows us to really test the non-Hermitian formalism.
Moreover, when this anticorrelation
really exists, it is of high interest for applications.
\section{Discussion of the results and summary}
\label{disc}
A critical consideration of the standard Hermitian formalism
for the description of open quantum systems is possible
by starting from a general non-Hermitian formalism
\cite{comment3} which includes
the Hermitian quantum physics as a limiting case. In the
non-Hermitian formalism, the normalization of the eigenfunctions of
the Hamiltonian can freely be chosen \cite{gurosa}.
If it is chosen by means of (\ref{eif3}),
the non-Hermitian quantum formalism fulfills the condition to
approach, on the one hand, the standard Hermitian quantum physics
under certain conditions (that can be formulated) and to be,
on the other hand, more general than it. The mathematical
consequences are the following.
\begin{itemize}
\item
The phases of the eigenfunctions
relative to one another are not rigid, see Sect. \ref{eigfu}.
This fact agrees with the basic relation (\ref{sec8}) which is valid
in approaching an EP: the two eigenfunctions
$\Phi_1$ and $\Phi_2$ of ${\cal H}^{(2)}$
are (almost) orthogonal to one another when the two eigenstates
$1$ and $2$ are distant from one another, while the orthogonality is
completely lost in approaching an EP.
\item
The eigenstates contain EM and differ therefore from
the original eigenstates. A mixing of the wavefunctions of
only two states may appear at low level density
such that it may be difficult to choose the basic set of
``pure'' wavefunctions. At high level density EM causes a dynamical
phase transition
which is non-adiabatic due to the involved nonlinear processes
\cite{epj2}.
\item
Some well-known unsolved puzzles of standard Hermitian
quantum physics do not appear in the non-Hermitian description
of open quantum systems \cite{comment3}. Among others, the problem of the
Schr\"odinger cat and the short tunneling
time characterizing the decay of the states in the
Hermitian quantum
physics, are not puzzling when the system is considered to be open.
Furthermore, the nonlinear processes involved in the
non-Hermitian formalism, are irreversible, see the discussion around
Fig. 9 in \cite{ropp}.
\end{itemize}
In Sects. \ref{eigf2}, \ref{eigf3} and \ref{smatr1c} of the present
paper, we have shown numerical results obtained in the framework of
non-Hermitian quantum theory for a system that is
coupled to one common channel \cite{comment3}.
We consider systems with $N=2$ and
$N=3$ states in the most sensitive parameter range in which
the dynamics of the system is determined by two EPs.
We have compared the results, obtained for the same situation,
with and without taking into account EM. In the
first case, EPs are caused by the EM of the states. In the
second case, however, neither EPs nor EM appear. Nevertheless,
the resonance structure of the S-matrix is almost the same in the two
cases. This result does not depend on the number of states
taken into account in the calculation.
In our calculations with non-Hermitian Hamiltonian, the coupling
strength $\omega$ between system and environment is chosen to be
fixed. Width
bifurcation of the states may be caused therefore exclusively by the
nonlinear terms contained in
the Schr\"odinger equation at and near to an EP.
These nonlinear terms conserve, obviously, the resonance structure
of the cross section in the one-channel case.
Thus, the one-channel case does not allow us to test the
non-Hermitian formalism.
The situation is completely different when the system is coupled
to two (or more) channels. A prominent example is the transmission
through, e.g., a quantum dot. Here, at least two different channels
are involved: entrance and exit channel. In the present
paper, we do not provide new numerical results. Instead we refer to
some results obtained earlier (see Sect. \ref{smatr2c}).
Most interesting is the anticorrelation between phase rigidity and
transmission probability which can be seen clearly in the results of
different calculations.
Thus, the observation of non-analytical effects in the transmission
through a quantum dot is not in contradiction to the
results known from the standard S-matrix description in the
one-channel case. Quite the contrary,
these effects are characteristic of the non-Hermitian theory of
open quantum systems. They exist also in the one-channel case
where they can, however, not be seen due to their
suppression by the nonlinear terms of the Schr\"odinger equation
near to EPs.
\section{Conclusions}
\label{concl}
The results of the present paper answer the
questions asked in the Introduction.
Although EPs influence the dynamics of open quantum systems, they
cannot be observed directly. In the one-channel case,
the resonance structure of the cross section can be described well
{\it without} taking them into account. The reason for this
unexpected result
are the nonlinear processes caused by the EPs. They restore, in
the one-channel case, the original resonance structure of the cross
section and hide the influence of the EPs onto observable values.
A cursory consideration allows therefore
the conclusion that EPs do not play any role in open quantum systems.
This conclusion can be justified however only in the one-channel
case. The two-channel case is much richer and more interesting.
In the Introduction, we pointed to the so-called phase lapses
observed experimentally in the transmission through a quantum dot.
These unexpected results are explained by means of the existence
of EPs. We mentioned moreover in Sect. \ref{smatr2c} the
results obtained theoretically for the transmission through a
localized quantum system by using the tight-binding approach.
We underline however that, in any case, the results of non-Hermitian
quantum physics differ from those of Hermitian quantum physics
only little in a parameter range that is {\it not} influenced
by EPs. In this parameter range, Fermi's golden rule holds.
The results for the two-channel case show, under different conditions,
an anti-correlation between phase
rigidity and transmission probability, i.e. between an {\it internal}
property of the eigenfunctions of the non-Hermitian Hamilton operator
and an observable value. Based on the non-Hermitian quantum
theory formulated in the present paper,
an experimental test of this relation will
contribute, among others, also to an understanding of the short
tunneling time \cite{tunnel} that is observed experimentally.
The transmission through a small system needs to be studied
in future in more detail, theoretically as well as experimentally.
On the one hand, it allows us to test the non-Hermitian quantum
theory for open quantum systems \cite{comment3},
since it relates the theoretical value phase rigidity to
the observable value of the transmission probability.
On the other hand,
the anticorrelation between these two values will open the door
to important applications.
\end{document}
|
\begin{document}
\title{On some Euler-Mahonian distributions} \date{\today}
\begin{abstract}
We prove that the pair of statistics $(\des,\maj)$ on multiset permutations is equidistributed with the pair $(\stc,\invv)$ on certain quotients of the symmetric group. We define the analogue of the statistic $\stc$ on multiset permutations, whose joint distribution with the inversions equals that of $(\des,\maj)$. We extend the definition of the statistic $\stc$ to hyperoctahedral and even hyperoctahedral groups. Such functions, together with the Coxeter length, are equidistributed, respectively, with $(\ndes,\nmaj)$ and $(\ddes,\dmaj)$.
\end{abstract}
\maketitle
\thispagestyle{empty}
\section{Introduction}
The first result about the enumeration of multiset permutations with respect to statistics now called \emph{descent number} and \emph{major index} is due to MacMahon. Let $\rho=(\rho_1,\ldots,\rho_m)$ be a composition of $N \in \mathbb{N}$. We denote by $S_\rho$ the set of all permutations of the multiset $\{{1}^{\rho_1},\ldots,{m}^{\rho_m}\}$. The
\emph{descent set} $\mathbb{D}es(w)$ of $w = w_1\cdots w_N\in S_\rho$ is
$\mathbb{D}es(w)=\{i \in [N-1] \mid w_i>w_{i+1}\}$. The descent and major index
statistics on $S_\rho$ are
$$\des(w)=|\mathbb{D}es(w)| \quad\textup{ and }\quad\maj(w)=\sum_{i\in\mathbb{D}es(w)} i.$$ Then (\cite[\textsection 462, Vol.~2, Ch.~IV,
Sect.~IX]{MacMahon/16}):
\begin{equation}\label{eq:mmpart}
\sum_{k\geq 0}\left(\prod_{i=1}^m\binom{\rho_i +k}{k}_q\right) x^k=\frac{\sum_{w \in S_\rho}{x^{\des(w)}q^{\maj(w)}}}{\prod_{i=0}^{N}(1-xq^i)} \in \mathbb{Z}[q][[x]],
\end{equation}
where, for $n, k \in \mathbb{N}$ we put
\[\binom{n}{k}_p :=\frac{[n]_p!}{[n-k]_p![k]_p !}, \qquad [n]_p:=\sum_{i=0}^{n-1} p^i, \qquad [n]_p!:=\prod_{i=1}^n [i]_p \quad\mbox{and}\quad [0]_p!:=1.\]
The well known result about the equidistribution, on multiset permutations, of the \emph{inversion number} with the major index, goes also back to MacMahon; Foata and Sch{\"u}tzenberger \cite{FoataSch/78} proved that such equidistribution refines, in the case of the symmetric group, to inverse descent classes.
A pair of statistics that is equidistributed with $(\des,\maj)$ is called Euler-Mahonian.
In \cite{Skandera/01} Skandera introduced an Eulerian statistic, which he called $\stc$, on the symmetric group, and proved that the pair $(\stc,\invv)$ is Euler-Mahonian.
In this note we prove that the joint distribution of $(\stc,\invv)$ on certain quotients of the symmetric group is indeed the same as the distribution of $(\des,\maj)$ on multiset permutations; we use such result to define a statistic $\mstc$ that is Eulerian on multiset permutations and that, together with $\invv$ is equidistributed with the pair $(\des,\maj)$.
The Eulerian polynomial is (essentially) the descent polynomial on the symmetric group~$S_n$. Frobenius proved (see \cite{Frobenius/1910}) that such polynomial has real, simple, negative roots, and that $-1$ features as a root if and only if $n$ is even. Simion proved later that the descent polynomials of permutations of any multiset are also real rooted, with simple, negative roots (see \cite{Simion/84}).
We use our first result of equidistribution to show that on the set of permutations of words in the alphabet $\{1^r, 2^r\}$, the polynomial of the joint distribution of des and maj admits, for odd $r$ a unique \emph{unitary} factor. This factorisation, together with the one of Carlitz's $q$-Eulerian polynomial (the polynomial of the joint distribution of des and maj on the symmetric group) that we show in \cite{CarnevaleVoll/16}, may be considered a refinement of Frobenius' result, and supports a conjecture we made in \cite{CarnevaleVoll/16} and that we translate in Section \ref{stcmulti} in terms of the joint distribution of $(\stc,\invv)$ on quotients of the symmetric group.
Generalisations of MacMahon's result \eqref{eq:mmpart} to signed permutations were first obtained by Adin, Brenti and Roichman in \cite{AdinBrentiRoichman/01} and to even-signed permutations by Biagioli in \cite{Biagioli/03}.
In the last section of this note we define Eulerian statistics $\nstc$ and $\dstc$ that, together with the length, are equidistributed, respectively, with the Euler-Mahonian pairs $(\ndes,\nmaj)$ on the hyperoctahedral group and $(\ddes,\dmaj)$ on the even hyperoctahedral group.
\section{Stc on quotients of the symmetric group and multiset permutations}\label{stcmulti}
For $n,m \in \mathbb{N},$ $m\leq n$ we denote with $[n]:=\{1,\ldots,n\}$ and $[m,n]:=\{m,m+1,\ldots,n\}$. For a permutation $\sigma \in S_n$ we use the one-line notation or the disjoint cycle notation.
The Coxeter length $\ell$ for $\sigma\in S_n$ coincides with the inversion number $\invv(\sigma):=|\{(i,j)\in[n]\times [n] \mid i<j, \,\sigma(i)>\sigma(j)\}|$.
Also, for a (signed) permutation $\sigma\in S_n$ (respectively, $B_n$), we let $\mathcal{I}(\sigma):=\{(i,j)\in [n]\times[n] \mid \sigma(i)>\sigma(j)\}$.
It is well-known that the symmetric group $S_n$ is in bijection with the set of words $w=w_1\cdots w_n $ $\in E_n$ where
\[E_n=\{w=w_1\cdots w_n \mid w_i \in [0,n-i], \mbox{ for } i=1,\ldots, n-1\}. \]
One of such bijections is the Lehmer code, defined as follows.
For $\sigma \in S_n$, $\code(\sigma)=c_1\cdots c_n \in E_n$ where $c_i= |\{j \in [i+1,n] \mid \sigma(i)>\sigma(j)\}|$.
The sum of the $c_i$s gives, for each permutation, the inversion number. The statistic $\stc$, that together with the length constitutes an Euler-Mahonian pair equidistributed with $(\des,\maj)$, is defined as follows (cf. \cite[Definition 3.1]{Skandera/01}):
$\stc(\sigma)=\sta(\code(\sigma))$, where for a word $w\in E_n$ $$\sta(w)=\max\{ r \in [n] \mid\mbox{ there exists a subsequence } w_{i_1}\cdots w_{i_r} >(r-1)(r-2)\cdots 1 \, 0\}$$
that is, the maximal length of a possible staircase subword.
For example let $\sigma=452361 \in S_6$. Then $\code(\sigma) =331110$, $\invv(\sigma)=\sum_i c_i=9$, $\stc(\sigma)=\sta(\code(\sigma))=3$. So defined, the statistic $\stc$ constitutes an Eulerian partner for the inversions on $S_n$, cf. \cite[Theorem 3.1]{Skandera/01}.
\begin{thm}\label{thm:ska}
Let $n \in \mathbb{N}$. Then
\begin{equation*}
\sum_{w\in S_n}x^{\des(w)}q^{\maj(w)}=\sum_{w\in S_n}x^{\stc(w)}q^{\ell(w)}
\end{equation*}
\end{thm}
Given a composition of $N$, the corresponding set of multiset permutations $S_\rho$ is naturally in bijection with certain quotients and inverse descent classes of $S_N$. In particular, for $\rho=(\rho_1,\ldots,\rho_m)$ a composition of $N$, for $i=1,\ldots, m-1$ we let
\begin{equation}\label{eq:subset}
r_i:=\sum_{k=1}^i {\rho _k}\mbox{ and }R:=\{r_i, i\in [m-1]\}\subseteq [N-1].
\end{equation}
We let $S^{R^c} _N$ and $IS^R _N $ denote, respectively, the quotient and the inverse descent class of the symmetric group
\[S^{R^c} _N=\{w \in S_N \mid \mathbb{D}es(w)\subseteq R\}, \qquad IS^R _N:=\{w \in S_N\mid \mathbb{D}es(w^{-1})\subseteq R\}. \]
A natural way to associate a permutation to a multiset permutation is the standardisation. Given $\rho$ a composition of $N$ and a word $w$ in the alphabet $\{{1}^{\rho_1},\ldots,{m}^{\rho_m}\}$, $\stdr(w)$ is the permutation of $S_N$ obtained substituting, in the order of appearance in $w$ from left to right, the $\rho_1$ $1$s with the sequence $1\,2\ldots\rho_1$, the $\rho_2$ $2$s with the sequence $\rho_1+1 \ldots \rho_1+\rho_2$ and so on. So for example if $\rho=(2,3,2)$ and $w=1223132\in S_\rho$, then $\stdr(w)=1346275 \in S_7$.
The following result is due to Foata and Han \cite[Propri\'et\'e 2.2]{FoataHan/04}.
\begin{pro}\label{thm:fh}
Let $n \in \mathbb{N}$, $J\subseteq [n-1]$. Then
\begin{equation}
\sum_{ \substack{\{w\in S_n \mid\\ {\mathbb{D}es(w)=J}\}}}x^{\des(w^{-1})}q^{\maj(w^{-1})}=\sum_{ \substack{\{w\in S_n \mid\\ {\mathbb{D}es(w)=J}\}}}x^{\stc(w)}q^{\ell(w)}
\end{equation}
\end{pro}
\begin{pro}\label{thm:equi}
Let $N \in \mathbb{N}$, $\rho$ a composition of $N$ and $R\subseteq [N-1]$ as in \eqref{eq:subset}. The pair $(\stc,\ell)$ on $S^{R^c} _N$ is equidistributed with $(\des,\maj)$ on $S_\rho$:
\begin{equation}\label{equi}
C_{\rho}(x,q):=\sum_{w \in S_\rho}{x^{\des(w)}q^{\maj(w)}}=\sum_{w \in S^{R^c} _N}{x^{\stc(w)}q^{\ell(w)}}
\end{equation}
\end{pro}
\begin{proof}
The standardisation $\stdr$ is a bijection between $S_\rho$ and $IS^R _N$, and preserves $\des$ and $\maj$, so
\[\sum_{w\in S_\rho}x^{\des(w)}q^{\maj(w)}=\sum_{w\in S_\rho}x^{\des(\stdr(w))}q^{\maj(\stdr(w))}=\sum_{w\in IS^R _N}x^{\des(w)}q^{\maj(w)}.\]
By Proposition \ref{thm:fh} the last term is the desired distribution on $S^{R^c} _N$:
\begin{equation*}
\sum_{w\in IS^R _N}x^{\des(w)}q^{\maj(w)}=\sum_{w\in S^{R^c} _N}x^{\stc(w)}q^{\ell(w)}.
\end{equation*}
\end{proof}
As an application, we prove a result about the bivariate factorisation of the polynomial $C_\rho(x,q)$, that in \cite{CarnevaleVoll/16} is used to deduce analytic properties of some orbit Dirichlet series. We say that a bivariate polynomial $f(x,y)\in \mathbb{Z}[x,y]$ is unitary if there exist integers $\alpha, \beta\geq 0$ and $g\in \mathbb{Z}[t]$ so that $f(x,y)=g(x^\alpha y^\beta)$ and all the complex roots of $g$ lie on the unit circle (see also \cite[Remark 2.9]{CarnevaleVoll/16}).
\begin{pro}
Let $\rho=(r,r)$ where $r \equiv 1 \pmod 2$. Then
\begin{equation}\label{fact}
C_{\rho}(x,q)=(1+xq^{r})\widetilde{C}_{\rho}(x,q),\end{equation}
where $\widetilde{C}_{\rho}(x,q)$ has no
unitary factor.
\end{pro}
\begin{proof}
The polynomial $C_{\rho}(x,1)$, descent polynomial of $S_\rho$, has all real, simple, negative roots (cf. \cite[Corollary 2]{Simion/84}). Thus a factorisation of the form \eqref{fact} implies that $\widetilde{C}_{\rho}(x,q)$ has no
unitary factor.
To prove \eqref{fact} we define an involution $\varphi$ on $S^{R^c} _N$ such that, for all $w\in S^{R^c} _N$, $|\ell(\varphi (w))-\ell(w)|=r$ and $|\stc(\varphi (w))-\stc(w)|=1$.
We first show that when $\rho=(\rho_1,\rho_2)$, the statistic $\stc$ on the corresponding quotient $S_{N}^{\{\rho_1\}}$ has a very simple description: it counts the occurrences of elements $j\in [\rho_1 +1, N]$ in the first $\rho_1$ positions. A permutation $w\in S_{N}^{\{\rho_1\}}$ has at most a descent at $\rho_1$, so its code is of the form $\code(w)=c_1\cdots c_{\rho_1} 0\cdots 0$, with $0\leq c_1\leq\ldots\leq c_{\rho_1}$. The first (possibly) non-zero element of the code is exactly the number of elements of the second block for which the image is in the first block, and this coincides with the length of the longest staircase subword of the code.
Let now $\rho=(r,r)$ and $r$ odd. For $w \in S^{R^c} _N$ we let \[M_w=\{i\in [r] \mid (w^{-1}(i)\leq r \mbox{ and } w^{-1}(i+r)> r) \mbox{ or } (w^{-1}(i)> r \mbox{ and } w^{-1}(i+r)\leq r) \},\]
that is, the set of $i\in [r]$ for which $i$ and $i+r$ are not in the same ascending block. Since $r$ is odd, $M_w$ is non-empty for all $w\in S_\rho$.
We then define $\varphi(w)=((\iota,\iota + r)w)^{R^c}$, where $\iota:=\min\{i \in M_w\}$ and, for $\sigma \in S_N$, $\sigma^{R^c}$ denotes the unique minimal coset representative in the quotient $S^{R^c} _N$. Clearly $\stc(\varphi(w))=\stc(w)\pm 1$.
Suppose now that $w^{-1}(\iota)\leq r$ and $w^{-1}(\iota+r)>r$ (the other case is analogous). Then $$\ell(\varphi(w))=\ell(w)+|\{i \in [r]\mid w(i)>\iota\}|+|\{i \in [r+1,2r]\mid w(i)<\iota+r\}|=\ell(w)+r-\iota+\iota.\qedhere$$
\end{proof}
We reformulate \cite[Conjecture B]{CarnevaleVoll/16} in terms of the bivariate distribution of $(\stc,\ell)$ on quotients of $S_n$.
\begin{con}
Let $\rho$ be a composition of $N$ and $R\subseteq [N-1]$ constructed as in \eqref{eq:subset}. Then $C_\rho (x,q)=\sum_{w\in S^{R^c} _N}{x^{\stc(w)}q^{\ell(w)}}$ has a unitary factor if and only if $\rho=(\rho_1,\ldots,\rho_m)$ where $\rho_1=\ldots=\rho_m=r$ for some odd $r$ and even $m$. In this case
\[\sum_{w\in S^{R^c} _N}{x^{\stc(w)}q^{\ell(w)}}=(1+xq^{\frac{rm}{2}})\widetilde{C}_\rho(x,q)\]
for some $\widetilde{C}_\rho(x,q)\in \mathbb{Z}[x,q]$ with no unitary factors.
\end{con}
Proposition \ref{thm:equi} suggests a natural extension of the definition of the statistic $\stc$ to multipermutations, thus answering a question raised in \cite{Skandera/01}.
For $w\in S_\rho$, $\stdr (w) \in IS^R _N$. So we have a bijection between multiset permutations $S_\rho$ and the quotient $S^{R^c} _N$
\[\istd: S_\rho \rightarrow S^{R^c} _N,\quad \istd(w)=(\stdr (w))^{-1}\]
which is inversion preserving: $\invv(w)=\invv(\istd(w))$.
\begin{dfn}
Let $\rho$ be a composition of $N$. For a multiset permutation $w \in S_\rho$ the \emph{multistc} is
\[\mstc(w):=\stc(\istd(w)).\]
\end{dfn}
The pair $(\mstc,\invv)$ is equidistributed with $(\des,\maj)$ on $S_\rho$, as
\[\sum_{w \in S_\rho} x^{\mstc(w)}q^{\invv(w)}=\sum_{w \in S^{R^c} _N} x^{\stc(w)}q^{\invv(w)}=\sum_{w \in S_\rho} x^{\des(w)}q^{\maj(w)},\]
which together with \eqref{eq:mmpart} proves the following theorem.
\begin{thm}
Let $\rho$ be a composition of $N\in \mathbb{N}$. Then
\[ \sum_{k\geq 0}\left(\prod_{i=1}^m\binom{\rho_i +k}{k}_q\right) x^k=\frac{\sum_{w \in S_\rho}{x^{\mstc(w)}q^{\invv(w)}}}{\prod_{i=0}^{N}(1-xq^i)} \in \mathbb{Z}[q][[x]].\]
\end{thm}
\section{Signed and even-signed permutations}
MacMahon's result \eqref{eq:mmpart} for the symmetric group (i.e. for $\rho_1=\ldots=\rho_m=1$) is often presented in the literature as Carlitz's identity, satisfied by Carlitz's Eulerian polynomial $A_n(x,q):=\sum_{\sigma \in S_n} x^{\des(\sigma)}q^{\maj(\sigma)}$.
This result was extended, for suitable statistics, to the groups of signed and even-signed permutations. The major indices so defined are in both cases equidistributed with the Coxeter length $\ell$. In this section we define type $B$ and type $D$ analogues of the statistic $\stc$, that together with the length satisfy these generalised Carlitz's identities.
\subsection{Eulerian companion for the length on $B_n$}
Let $n\in \mathbb{N}$. The hyperoctahedral group $B_n$ is the group of permutations $\sigma=\sigma_1\cdots \sigma_n$ of $\{\pm 1,\ldots,\pm n\}$ for which $|\sigma|:=|\sigma_1|\ldots |\sigma_n| \in S_n$.
For $\sigma \in B_n$, the negative set and negative statistic are
\[\nset(\sigma)=\{i \in [n] \mid \sigma(i)<0\}\quad \nneg(\sigma)=|\nset(\sigma)|.\]
The Coxeter length $\ell$ for $\sigma$ in $B_n$ has the following combinatorial interpretation (see, for instance \cite{BjoernerBrenti/05}):
\begin{equation*}
\ell(\sigma)=\invv(\sigma)+\nneg(\sigma)+\nsp(\sigma),
\end{equation*}
where $\invv$ is the usual inversion number and $\nsp(\sigma):=|\{(i,j)\in [n]\times[n] \mid i<j,\,\sigma(i)+\sigma(j)<0\}|$ is the number of \emph{negative sum pairs}.
In \cite{AdinBrentiRoichman/01} an Euler-Mahonian pair of the \emph{negative} type was defined as follows.
The negative descent and negative major index are, respectively,
\begin{equation}\label{eq:nstat}
\ndes(\sigma)=\des(\sigma)+\nneg(\sigma), \quad \nmaj(\sigma)=\maj(\sigma)-\sum_{i\in\nset(\sigma)}\sigma(i).\end{equation}
The pair $(\ndes,\nmaj)$ satisfies the following generalised Carlitz's identity, cf. \cite[Theorem 3.2]{AdinBrentiRoichman/01}.
\begin{thm}
Let $n\in \mathbb{N}$. Then
\begin{equation}
\sum_{r\geq 0} [r+1]^n _q x^r = \frac{\sum\limits_{\sigma \in B_n}{x^{\ndes(\sigma)}q^{\nmaj(\sigma)}}}{(1-x) \prod\limits_{i=1}^{n}{(1-x^2 q^{2i})}} \mbox{ in }\mathbb{Z}[q][[x]].\end{equation}
\end{thm}
Motivated by \eqref{eq:nstat} and the well-known fact that the length in type $B$ may be also written as
\begin{equation}
\ell(\sigma)=\invv(\sigma)-\sum_{i \in \nset(\sigma)} \sigma(i),
\end{equation} we define the analogue of the statistic $\stc$ for signed permutations as follows.
\begin{dfn}
Let $\sigma\in B_n$. Then
\[\nstc(\sigma):=\stc (\sigma)+\nneg(\sigma).\]
\end{dfn}
\begin{thm}\label{thm:B}
Let $n\in \mathbb{N}$. Then
\[\sum_{\sigma\in B_n}{x^{\nstc(\sigma)}q^{\ell(\sigma)}}=\sum_{\sigma\in B_n}{x^{\ndes(\sigma)}q^{\nmaj(\sigma)}}.\]
\end{thm}
\begin{proof}
We use essentially the same argument as in the proof of \cite[Theorem 3]{LaiPet/11}. There, the following decomposition of $B_n$ is used.
Every permutation $\tau \in S_n$ is associated with $2^n$ elements of $B_n$, via the choice of the $n$ signs. More precisely, given a signed permutation $\sigma \in B_n$ one can consider the ordinary permutation in which the elements are in the same relative positions as in $\sigma$. We write $\sy(\sigma)=\tau$. Then
\[B_n=\bigcup_{\tau \in S_n} B(\tau)\]
where $B(\tau):=\{\sigma \in B_n \mid \sy(\sigma)=\tau\}$. So every $\sigma\in B_n$ is uniquely identified by the permutation $\tau=\sy(\sigma)$ and the choice of signs $J(\sigma):=\{\sigma(j) \mid j \in \nset(\sigma)\}.$
Clearly, for $\sigma \in B_n$ we have $\mathcal{I}(\sigma)=\mathcal{I}(\sy(\sigma))$, and thus $\stc(\sigma)=\stc(\sy(\sigma))$. So, for $\tau=\sy(\sigma)$
\[x^{\nstc(\sigma)}q^{\ell(\sigma)}=x^{\stc(\tau)}q^{\invv(\tau)}\prod_{j\in J(\sigma)} x q^j.\] The claim follows, as
\[\sum_{\sigma \in B_n} {x^{\nstc(\sigma)}q^{\ell(\sigma)}}=\sum_{\tau \in S_n}x^{\stc(\tau)}q^{\invv(\tau)}\sum_{J\subseteq [n]}\prod_{j\in J} x q^j=A_n(x,q)\prod_{i=1}^n {(1+xq^i)}.\qedhere\]
\end{proof}
\begin{cor}Let $n\in \mathbb{N}$. Then
\begin{equation}
\sum_{r\geq 0} [r+1]^n _q x^r = \frac{\sum\limits_{\sigma \in B_n}{x^{\nstc(\sigma)}q^{\ell(\sigma)}}}{(1-x) \prod\limits_{i=1}^{n}{(1-x^2 q^{2i})}} \mbox{ in }\mathbb{Z}[q][[x]].\end{equation}
\end{cor}
\subsection{Eulerian companion for the length on $D_n$}
The even hyperoctahedral group $D_n$ is the subgroup of $B_n$ of signed permutations for which the negative statistic is even:
\[D_n:=\{\sigma \in B_n \mid \nneg(\sigma) \equiv 0 \pmod 2\}.\]
Also for $\sigma$ in $D_n$ the Coxeter length can be computed in terms of statistics:
\begin{equation}
\ell(\sigma)=\invv(\sigma)+\nsp(\sigma).
\end{equation}
The problem of finding an analogous Euler--Mahonian pair on the group $D_n$ of even-signed permutations was solved in \cite{Biagioli/03},
where type $D$ analogues of the statistics $\des$ and $\maj$ were defined, as follows. For $\sigma \in D_n$
\begin{equation}
\ddes(\sigma)=\des(\sigma)+|\mathbb{D}Neg(\sigma)|\qquad
\dmaj(\sigma)=\maj(\sigma)-\mathfrak{su}m_{i \in \mathbb{D}Neg(\sigma)} \sigma(i)
\end{equation}
where $\mathbb{D}Neg(\sigma):=\{i-1 \in [n]|\sigma(i)<-1\}$.
The following holds (cf. \cite[Theorem 3.4]{Biagioli/03}).
\begin{thm}
Let $n \in \mathbb{N}$. Then
\begin{equation}
\sum_{r\geq 0} [r+1]^n _q x^r = \frac{\sum\limits_{\sigma \in D_n}{x^{\ddes(\sigma)}q^{\dmaj(\sigma)}}}{(1-x)(1-xq^n) \prod\limits_{i=1}^{n-1}{(1-x^2 q^{2i})}} \mbox{ in }\mathbb{Z}[q][[x]].\end{equation}
\end{thm}
\begin{dfn}
Let $\sigma \in D_n$. We set
$$\dstc(\sigma):=\stc (\sigma)+|\mathbb{D}Neg(\sigma)|=\stc(\sigma)+\nneg(\sigma)+\varepsilon(\sigma),$$
where $$\varepsilon(\sigma)=\begin{cases}
-1 \mbox{ if }\sigma^{-1}(1)<0 \\
0\:\:\; \mbox{ otherwise }.
\end{cases}$$
\end{dfn}
We now show that the statistic just defined
constitutes an Eulerian partner for the length on $D_n$, that is, the following holds.
\begin{thm}\label{thm:D}
Let $n\in \mathbb{N}$. Then
\[\sum_{\sigma\in D_n}{x^{\dstc(\sigma)}q^{\ell(\sigma)}}=\sum_{\sigma\in D_n}{x^{\ddes(\sigma)}q^{\dmaj(\sigma)}}.\]
\end{thm}
\begin{proof}
We use, as in \cite{Biagioli/03} the following decomposition of $D_n$. Let \begin{equation}\label{eq:asc}
T_n:=\{\alpha \in D_n \mid \des(\alpha)=0\}=\{\alpha\in D_n \mid \mathcal{I}(\alpha)=\emptyset\}
\end{equation}
then $D_n$ can be rewritten as the following disjoint union:
\begin{equation}\label{eq:ddec}
D_n=\bigcup_{\tau \in S_n}\{\alpha \tau \mid \alpha \in T_n\}.
\end{equation}
For $\alpha \in T_n$ and $\tau \in S_n$ one has:
\begin{equation*}
\ell(\alpha \tau)=\ell(\alpha)+\ell(\tau)=\nsp(\alpha)+\invv(\tau),\quad \nsp(\alpha \tau)=\nsp(\alpha),\quad \dstc(\alpha \tau)=\stc(\tau)+\nneg(\alpha)+\varepsilon(\alpha),
\end{equation*}
the last one follows from the second equality in \eqref{eq:asc}. We thus have
\begin{align*}
\sum_{\sigma\in D_n}{x^{\dstc(\sigma)}q^{\ell(\sigma)}}&=\sum_{\alpha \in T_n}\sum_{\tau \in S_n}x^{\stc(\tau)+\nneg(\alpha)+\varepsilon(\alpha)}q^{\ell(\alpha)+\ell(\tau)}\\
&=\sum_{\alpha \in T_n}x^{\nneg(\alpha)+\varepsilon(\alpha)}q^{\nsp(\alpha)} \sum_{\tau \in S_n}x^{\stc(\tau)}q^{\invv(\tau)} \\
&=\prod_{i=1}^{n-1}(1+x q^i) A_n(x,q)\end{align*}
for the last equality see \cite[Lemma 3.3]{Biagioli/03}.
The result follows, as
\[\sum_{\sigma\in D_n}{x^{\ddes(\sigma)}q^{\dmaj(\sigma)}}=\prod_{i=1}^{n-1}(1+x q^i) A_n(x,q).\qedhere\]
\end{proof}
\begin{cor}Let $n \in \mathbb{N}$. Then
\begin{equation}
\sum_{r\geq 0} [r+1]^n _q x^r = \frac{\sum\limits_{\sigma \in D_n}{x^{\dstc(\sigma)}q^{\ell(\sigma)}}}{(1-x)(1-xq^n) \prod\limits_{i=1}^{n-1}{(1-x^2 q^{2i})}} \mbox{ in }\mathbb{Z}[q][[x]].\end{equation}
\end{cor}
\end{document}
|
\begin{document}
\title{An Explicit Formula for the Spherical Curves with Constant Torsion}
\author{Demetre Kazaras \\ University of Oregon
\and Ivan Sterling \\ St. Mary's College of Maryland}
\date{}
\maketitle
\section{Introduction}
The purpose of this article is to give an explicit formula for all curves of constant torsion $\tau$ in the unit two-sphere $S^2(1)$. These curves and their basic properties have been known since the 1890's, and some of these properties are discussed in the Appendix. Some example curves, computed with a standard ODE package, with $\tau=.1,.5,1,2$ are shown in Figure \ref{peter}. Though their existence and some of their general properties were known, our explicit formulas for them, in terms of hypergeometric functions, are new.
Curves of constant torsion are also of interest because all asymptotic curves on any pseudo-spherical surface (that is, surfaces in $\mathbb{R}^3$ with constant negative Gauss curvature) are of constant torsion. Furthermore, any pair of curves with constant torsion $\pm \tau$, intersecting at one point, define an essentially unique pseudo-spherical surface. A complete classification of curves of constant torsion in $\mathbb{R}^3$, in the context of integrable geometry, is a work in progress and is related to the corresponding unfinished classification of pseudo-spherical surfaces.
The authors would like to thank the referees and the editor for their suggestions to improve the original version of this paper.
\begin{figure}
\caption{The curves of torsion $\tau=.1,.5,1,2$ on the unit 2-sphere}
\label{peter}
\end{figure}
\section{General Setting} \label{two}
\subsection{General Curves in $\mathbb{R}^3$}
Let
\[\gamma:(a,b) \longrightarrow \mathbb{R}^3\]
be a regular (i.e. nonzero speed) $C^\infty$ curve in $\mathbb{R}^3$ with nonzero curvature. The speed $v$, curvature $\kappa$ and torsion $\tau$ of $\gamma$ are given by:
\[v=\Vert \gamma' \Vert,\;\;
\kappa = \frac{\Vert \gamma' \times \gamma'' \Vert}{\Vert \gamma' \Vert^3},\;\;
\tau = \frac{[\gamma' \gamma'' \gamma''']}{\Vert \gamma' \times \gamma'' \Vert^2}. \]
The unit tangent $T$ is given by
\begin{equation} \label{tangent}T=\frac{\gamma'}{v}.\;\; \end{equation}
The unit normal and unit binormal are given by
\begin{equation*} N=\frac{T'}{v \kappa},\;\; B=T \times N. \end{equation*}
These are related by the Frenet formulas:
\begin{equation} \label{frenet}
\begin{array}{ccccc}
T' &=&& v \kappa N &\\
N' &=& - v \kappa T &&+ v \tau B \\
B' &=&&-v \tau N &
\end{array}
\end{equation}
Curves $\gamma$ with prescribed differentiable curvature $\kappa >0$ and torsion $\tau$ can be found by integrating (\ref{frenet}) and (\ref{tangent}). Up to re-parametrization (see below) a curve in $\mathbb{R}^3$ is determined, up to a rigid motion of $\mathbb{R}^3$, by its curvature $\kappa$ and torsion $\tau$.
\subsection{Changing Parametrizations}
Given $\gamma(t)$, then the arc-length function $s(t)$ of $\gamma(t)$ is given by
\[s(t)=\int_a^t \Vert \gamma'(u)\Vert \; du. \]
Note that since $s(t)$ is increasing, it has an inverse $t(s)$. To obtain a unit-speed reparametrization $\gamma^{unit}$ of $\gamma$ we let
\[\gamma^{unit}(s) := \gamma(t(s))\]
We denote the parameter of a unit-speed curve by the letter $s$.
On the other hand, if we are given a unit-speed curve $\gamma^{unit}(s)$, we may wish to find a reparametrization $\gamma$ of $\gamma^{unit}$ by letting $t(s)$ be some special monotone function. In that case we have
\[\gamma(t):=\gamma^{unit}(s(t))\]
where $s(t)$ is the inverse of $t(s)$.
\subsection{Spherical Curves}
$\gamma$ is called a spherical curve if (for some $r>0$) $\gamma(t) \in S^2(r) \; \forall t$. The speed, curvature and torsion of a spherical curve satisfy \cite{G}
\begin{equation} \label{sphereeqn}
\kappa^2 \tau^2(\kappa^2 r^2 -1)=\kappa'^2 v^2.
\end{equation}
\subsection{Effect of Homothety on Curvature and Torsion}
If $\tilde{\gamma}(t):=\lambda \gamma(t)$, then $\tilde{\kappa}(t)=\frac{\kappa(t)}{\lambda}$ and $\tilde{\tau}(t)=\frac{\tau(t)}{\lambda}$. Thus, a curve of constant torsion $\tau_1$ on a sphere of radius $r_1$ corresponds by homothety to a curve of constant torsion $\tau_2=\frac{r_1}{r_2} \tau_1$ on a sphere of radius $r_2$.
In other words, any spherical curve of constant positive torsion corresponds to precisely one spherical curve with $\tau=1$ as well as to precisely one curve of constant positive torsion on the unit sphere. Without loss of generality, we consider only spherical curves of constant positive torsion on the unit sphere.
\subsection{Constant Torsion Unit-Speed Curves on the Unit Sphere}
If $r=1$, $\tau$ is a positive constant, and $\gamma: (a,b) \rightarrow S^2(1)$ is unit-speed, then equation (\ref{sphereeqn}) is an ordinary differential equation in $\kappa$ (notice $\kappa \geq 1$ holds for any curve on the unit sphere):
\begin{equation} \label{grayode} \kappa'^2=\kappa^2 \tau^2 (\kappa^2-1). \end{equation}
The general solution to equation (\ref{grayode}) is given by:
\begin{equation} \label{spherekappa} \kappa = \csc(\tau s + C),
\;\; \frac{-C}{\tau} < s < \frac{-C+\pi}{\tau}. \end{equation}
Notice we use the parameter $s$ instead of $t$ since $\gamma$ is a unit-speed curve. Furthermore, $\kappa(s)$ is decreasing on $(\frac{-C}{\tau}, \frac{-C+\frac{\pi}{2}}{\tau})$.
\subsection{Our Goal}
As mentioned, a unit-speed curve $\gamma$ is determined up to rigid motion by its curvature and torsion. However, in general it is not possible to explicitly solve for $\gamma$ given $\kappa>0$ and $\tau > 0$. Spherical curves of constant torsion provide an interesting and natural example to study. They were considered by classical geometers and the formula (\ref{spherekappa}) was known. Even though the formula for $\kappa$ is so simple, no explicit solutions for $\gamma$ were found. This is most likely because the integration methods that we found necessary were not developed until decades later. By choosing a special re-parametrization and using functions defined in the 1940's, we were successful in obtaining an explicit formula for $\gamma$ involving hypergeometric functions.
\section{Explicit Formulas}
\subsection{The Radius of Curvature Parametrization}
In curve theory parametrization by the curvature is called the ``natural parametrization". In our case, when the natural parametrization is used, the domain of definition lies outside the radius of convergence of the resulting hypergeometric solutions. To avoid having to deal with the problem of analytically continuing hypergeometric functions beyond their radii of convergence we instead parametrize by the reciprocal of the curvature, which is called the radius of curvature.
We seek unit-speed curves $\gamma^{unit}:(\frac{-C}{\tau},\frac{-C+\frac{\pi}{2}}{\tau}) \rightarrow S^2(1)$ of constant torsion $\tau >0$ on the unit sphere. In order to simplify the Frenet equations, we reparametrize $\gamma^{unit}$ by $t(s)=\frac{1}{\kappa(s)}=\sin(\tau s + C)$. Since $\frac{1}{\kappa(s)}$ is increasing on its domain, the inverse $s(t)$, $s:(0,1) \longrightarrow (\frac{-C}{\tau},\frac{-C+\frac{\pi}{2}}{\tau})$, exists and we have
\[\gamma(t)=\gamma^{unit}(s(t))=\gamma^{unit}(\frac{\sin^{-1}(t)-C}{\tau}),
\;\; 0<t<1 .\]
One can recover $\gamma^{unit}$ from $\gamma$ by reversing the process. Note that
\[v= \Vert \gamma' \Vert = \Vert \gamma^{unit'} \Vert |s'(t)|=|s'(t)|
=\frac{1}{\tau \sqrt{1-t^2}}.\]
With $\kappa=\frac 1t$ the Frenet equations (\ref{frenet}) become
\begin{subequations}
\label{natfrenet}
\begin{alignat}{3}
T' &= & \frac{v N}{t} &\label{natfrenet1} \\
N' &=\frac{- v T}{t} &&+ v \tau B \label{natfrenet2} \\
B' &= & -v \tau N & \label{natfrenet3}
\end{alignat}
\end{subequations}
Recall $\gamma' = v T$. Thus as a preliminary step we will compute $T$. Namely, we want to solve (\ref{natfrenet}) for $T$.
From (\ref{natfrenet1}) and (\ref{natfrenet2}) we have
\begin{equation} N=t \sqrt{1-t^2} \tau T', \label{Neqn} \end{equation}
and
\begin{equation} B=\sqrt{1-t^2} N'+\frac{1}{t \tau} T. \label{Beqn} \end{equation}
(\ref{Neqn}) and (\ref{natfrenet3}) yield
\begin{equation} B'=-\tau t T'. \label{Bprime1} \end{equation}
On the other hand differentiating (\ref{Neqn}) yields
\begin{equation} N'= \frac{\tau}{\sqrt{1-t^2}}(t (1-t^2) T''+ (1-2 t^2) T'). \label{Nprime} \end{equation}
Plugging this into (\ref{Beqn}) yields
\begin{equation} B=\frac{-1}{t \tau}(t^2(t^2-1) \tau^2 T''+t(2 t^2-1) \tau^2 T'-T). \label{B} \end{equation}
Hence
\begin{equation} B'=\frac{-1}{t^2 \tau}(t^3(t^2-1)\tau^2T'''+t^2(5t^2-2)\tau^2T''+t(4t^2 \tau ^2-1)T'+T). \label{Bprime2} \end{equation}
Equating (\ref{Bprime1}) and (\ref{Bprime2}) and simplifying we arrive at
\begin{equation} \label{ODET}
t^3(t^2-1)\tau^2T'''+t^2(5t^2-2)\tau^2T''+t(3t^2 \tau ^2-1)T'+T=0.
\end{equation}
This is a third order linear homogeneous differential equations with non-constant coefficients. In general it is not possible to find a closed form solution for such an equation. However, this is one of the special cases where one can find hypergeometric type solutions. These methods were developed in the 1940's, and hence were not available to the classical (1890's) geometers.
\subsection{Initial Conditions}
To arrive at initial conditions for our ODE, we find initial conditions for $T$, $N$, and $B$ and use the Frenet equations (\ref{natfrenet}) to arrive at initial conditions for $T$, $T'$, and $T''$. We let $T=(T_1,T_2,T_3)$, $N=(N_1,N_2,N_3)$, and $B=(B_1,B_2,B_3)$. For $V=T,N,B,T', \mbox{or}\; T''$ we use the notation $V_{i_0} := V_i(t_0)$.
$T$ and $N$ are unit vectors ($\Vert T \Vert =1$ and $\Vert N\Vert=1$) so we have
\[ |T_{1_0}| \leq 1,\;\;|T_{2_0}|\leq \sqrt{1-T_{1_0}^2},\;\;T_{3_0}=\sqrt{1-T_{1_0}^2-T_{2_0}^2}\]
\[ |N_{1_0}|\leq 1,\;\;|N_{2_0}|\leq \sqrt{1-N_{1_0}^2},\;\;N_{3_0}=\sqrt{1-N_{1_0}^2-N_{2_0}^2}\]
Also $T$ is orthogonal to $N$ ($T \cdot N = 0$)
\[N_{1_0}T_{1_0}+N_{2_0}T_{2_0}+N_{3_0}T_{3_0}=0.\]
By $B=T\times N,$ we have
\[B_{1_0}=T_{2_0}N_{3_0}-T_{3_0}N_{2_0},\;\;B_{2_0}=T_{3_0}N_{1_0}-T_{1_0}N_{3_0}\]
\[B_{3_0}=T_{1_0}N_{2_0}-T_{2_0}N_{1_0}.\]
We will, without loss of generality and up to rigid motion, choose $t_0=\frac 12,$ $T_0=(T_{1_0},T_{2_0},T_{3_0})=(1,0,0)$, and $N_0=(0,1,0).$
Now that we have initial conditions for $T$, $N$, and $B$, we will use the Frenet equations to express $T_0$, $T'_0$, and $T''_0$ in terms of $T_0$, $N_0$, and $B_0$
\[T'_0=\frac{v_0 N_0}{t_0},\;\;T''_0=(v'_0 t_0+v_0)N_0-v_0 ^2t_0 ^2T_0+v_0 ^2 \tau t_0 B_0.\]
The set of initial conditions $t_0=\frac 12,$ $T_0=(T_{1_0},T_{2_0},T_{3_0})=(1,0,0)$, and $N_0=(0,1,0)$ yields $T'_0=(0,\frac{4}{\sqrt{3} \tau},0)$ and $T''_0=(\frac{-16}{3 \tau^2},\frac{-16}{3 \sqrt{3} \tau},\frac{8}{3 \tau}).$
\subsection{Solving for $T$ via Hypergeometric functions}
$${}_p F_q(a_1,a_2, . . . , a_p;b_1,b_2, . . .,b_q;t^{a}):=\Sigma_{n=0}^{\infty}\frac{(a_1)_n . . . (a_p)_n}{(b_1)_n . . . (b_q)_n} \frac{t^{an}}{n!}$$
is the Barnes generalized hypergeometric function \cite{E}. Note the use of the Pochhammer symbols $(x)_n:=\frac{\Gamma (x+n)}{\Gamma (x)}$. We will also use
$${}_2 F_1^{reg}(a,b,c,t^{a}):=\frac{{}_2 F_1(a,b;c;t^{a})}{\Gamma[c]}.$$
By direct substitution (see for example section 46 of \cite{R}) it is straightforward to check that the following is a solution to (\ref{ODET}).
\[T=(T_1,T_2,T_3), \;T_j=\sum_{\ell=1}^3 c_{j\ell}S_\ell.\]
Where
\[S_1=i t \;\; {}_3F_2(\frac 12,\frac 12,\frac 32;\frac 32-\frac i{2\tau},\frac 32+\frac i{2\tau};t^2),\]
\[S_2=(-1)^{\frac {-i}{2\tau}}t^{\frac{-i}{2\tau}} \;\; {}_3F_2(1-\frac i{2\tau},-\frac i{2\tau}, -\frac i{2\tau};\frac 12-\frac i{2\tau},1-\frac i {\tau};t^2),\]
\[S_3=(-1)^{\frac {i}{2\tau}}t^{\frac{i}{2\tau}} \;\; {}_3F_2(1+\frac i{2\tau},\frac i{2\tau}, \frac i{2\tau};\frac 12+\frac i{2\tau},1+\frac i {\tau};t^2),\]
the $c_{j\ell}$ are constants, and $i=\sqrt{-1}$. Note that $S_1$ is pure imaginary and that $S_3$ is the complex conjugate of $S_2$. For proper complex constants $c_{j\ell}$, $T$ is a real valued vector function. By plugging in the initial conditions of the last section we can solve for the $c_{j\ell}$.
\subsection{Solving for $\gamma$}
Recall that $\gamma(t)=\int{vT}dt.$ Since we have found $T$ in terms of hypergeometric functions, we must compute the following type of integrals:
\[\int \frac{h(t) \; {}_p F_q(a_1,a_2, . . . , a_p;b_1,b_2, . . .,b_q;t^2)}{\tau\sqrt{1-t^2} } dt:=\int \frac{h(t) \Sigma_{n=0}^{\infty}\frac{(a_1)_n . . . (a_p)_n}{(b_1)_n . . . (b_q)_n} \frac{t^{2 n}}{n!}}{\tau\sqrt{1-t^2 }}dt\]
\[ =\frac{\alpha}{\tau}\Sigma_{n=0}^{\infty}\frac{(a_1)_n . . . (a_p)_n}{(b_1)_n . . . (b_q)_n n! } \int\frac{t^{\beta n}}{\sqrt{1-t^2 }}dt. \]
Where $\alpha, \beta$ are constants. We repeat this process for each $S_\ell$, using the notation $\gamma=(U_1,U_2,U_3)$.
For $S_1$
\[U_1:=\int \frac{S_1}{\tau \sqrt{1-t^2}}dt
=\int \frac{i t \;\; {}_3F_2(\frac 12,\frac 12,\frac 32;\frac 32-\frac i{2\tau},\frac 32+\frac i{2\tau};t^2)}{\tau \sqrt{1-t^2}}dt\]
\[=\Sigma_{n=0}^{\infty} d_{1n}\int \frac{t^{2n+1}}{\sqrt{1-t^2}}dt ,\]
where $d_{1n}=\frac{i(1+\tau^2)\Gamma(\frac{1}{2}+n)^2\Gamma(\frac{3}{2}+n) Sech(\frac{\pi}{2 \tau})}
{2 \sqrt{\pi} \tau^3 n! \Gamma(\frac{3}{2} +n - \frac{i}{2 \tau})
\Gamma(\frac{3}{2} +n + \frac{i}{2 \tau})}.$
For $S_2$
\[U_2:=\int \frac{S_2}{\tau\sqrt{1-t^2}}dt
=\int \frac{(-1)^{\frac {-i}{2\tau}}t^{\frac{-i}{2\tau}} \;\; {}_3F_2(1-\frac i{2\tau},-\frac i{2\tau}, -\frac i{2\tau};\frac 12-\frac i{2\tau},1-\frac i {\tau};t^2)}{\tau\sqrt{1-t^2}}dt\]
\[=\Sigma_{n=0}^{\infty} d_{2n} \int \frac{t^{2n-\frac{i}{\tau}}}{\sqrt{1-t^2}}dt, \]
where $d_{2n}=\frac{e^{\frac{\pi}{2\tau}}2^{-\frac{i}{\tau}}
\Gamma(n-\frac{i}{2 \tau})^2 \Gamma(1+n-\frac{i}{2\tau})
\Gamma(\frac{-i+\tau}{2\tau})^2}{\sqrt{\pi} \tau \Gamma(1+n)
\Gamma(1+n-\frac{i}{\tau})\Gamma(-\frac{i}{2\tau})^2\Gamma(n+\frac{-i+\tau}{2\tau})}.$
For $S_3$
\[U_3:=\int \frac{S_3}{\tau\sqrt{1-t^2}}dt
=\int \frac{(-1)^{\frac {i}{2\tau}}t^{\frac{i}{2\tau}} \;\; {}_3F_2(1+\frac i{2\tau},\frac i{2\tau}, \frac i{2\tau};\frac 12+\frac i{2\tau},1+\frac i {\tau};t^2)}{\tau\sqrt{1-t^2}}dt\]
\[=\Sigma_{n=0}^{\infty} d_{3n} \int \frac{t^{2n+\frac{i}{\tau}}}{\sqrt{1-t^2}}dt, \]
where $d_{3n}=\frac{e^{-\frac{\pi}{2\tau}}2^{\frac{i}{\tau}}
\Gamma(n+\frac{i}{2 \tau})^2 \Gamma(1+n+\frac{i}{2\tau})
\Gamma(\frac{i+\tau}{2\tau})^2}{\sqrt{\pi} \tau \Gamma(1+n)
\Gamma(1+n+\frac{i}{\tau})\Gamma(\frac{i}{2\tau})^2\Gamma(n+\frac{i+\tau}{2\tau})}.$
Once again we are lucky and for each $U_\ell$ we can evaluate the integrals. In each case they are hypergeometric.
\[U_1=\Sigma_{n=0}^{\infty} \frac{n!}{2}d_{1n}\;
{}_2 F_1^{reg}(\frac{1}{2},1+n,2+n,t^2) ,\]
\[U_2=\Sigma_{n=0}^{\infty} \frac{\Gamma(n+\frac{-i+\tau}{2\tau})}{2}d_{2n}\; {}_2 F_1^{reg}(\frac{1}{2},n+\frac{-i+\tau}{2\tau},
\frac{3}{2}+n-\frac{i}{2\tau},t^2) ,\]
\[U_3=\Sigma_{n=0}^{\infty} \frac{\Gamma(n+\frac{i+\tau}{2\tau})}{2}d_{3n}\; {}_2 F_1^{reg}(\frac{1}{2},n+\frac{i+\tau}{2\tau},
\frac{3}{2}+n+\frac{i}{2\tau},t^2).\]
Each of the ${}_2 F_1^{reg}$'s also has a power series.
\[{}_2 F_1^{reg}(\frac{1}{2},1+n,2+n,t^2) = \sum_{m=0}^\infty e_{1m} t^{2m},
\;e_{1m}=\frac{\Gamma(\frac 12 +m)}{(n+m+1)\sqrt{\pi}\Gamma(1+n)\Gamma(1+m)},\]
\begin{multline*}
{}_2 F_1^{reg}(\frac{1}{2},n+\frac{-i+\tau}{2\tau},
\frac{3}{2}+n-\frac{i}{2\tau},t^2) =
\sum_{m=0}^\infty e_{2m} t^{2m},
\\e_{2m}=\frac{2\tau\Gamma(\frac 12 +m)}
{\sqrt{\pi}\Gamma(2n\tau+2m\tau+\tau-i)\Gamma(1+m)\Gamma(n+\frac{-i+\tau}{2\tau})},
\end{multline*}
\begin{multline*} {}_2 F_1^{reg}(\frac{1}{2},n+\frac{i+\tau}{2\tau},
\frac{3}{2}+n+\frac{i}{2\tau},t^2)= \sum_{m=0}^\infty e_{3m} t^{2m},
\\e_{3m}=\frac{2\tau\Gamma(\frac 12 +m)}
{\sqrt{\pi}\Gamma(2n\tau+2m\tau+\tau+i)\Gamma(1+m)\Gamma(n+\frac{i+\tau}{2\tau})}.
\end{multline*}
Thus
\[U_1=\Sigma_{m=0}^\infty \Sigma_{n=0}^\infty \frac{n!}{2}d_{1n} e_{1m} t^{2m+2n+2},\]
\[U_2=\Sigma_{m=0}^\infty \Sigma_{n=0}^\infty \frac{\Gamma(n+\frac{-i+\tau}{2\tau})}{2} d_{2n} e_{2m} t^{2m+2n+2},\]
\[U_3=\Sigma_{m=0}^\infty \Sigma_{n=0}^\infty \frac{\Gamma(n+\frac{i+\tau}{2\tau})}{2} d_{3n} e_{3m} t^{2m+2n+2}.\]
These complicated double sums combine nicely and simplify as follows.
\[U_1=\frac{i}{2\sqrt{\pi}\tau}\Sigma_{k=0}^\infty \frac{\Gamma(\frac12 +k)}{\Gamma(2 +k)}\;{}_4F_3 (\frac12,\frac12,\frac32,-k;\frac12-k,\frac32-\frac{i}{2 \tau}, \frac32+\frac{i}{2 \tau};1)\; t^{2+2k},\]
\begin{multline*}
U_2=\frac{e^{\frac{\pi}{2 \tau}}}{\sqrt{\pi}} \Sigma_{k=0}^\infty \frac{\Gamma(\frac12 +k)}{(-i+(1+2k)\tau) \Gamma(1+k)}\;\; \times
\\ {}_4F_3 (-k,1-\frac{i}{2 \tau},-\frac{i}{2 \tau},-\frac{i}{2 \tau};\frac12-k,\frac12-\frac{i}{2 \tau},1-\frac{i}{\tau};1)
\; t^{1-\frac{i}{\tau}+2k},
\end{multline*}
\begin{multline*}
U_3=\frac{e^{-\frac{\pi}{2 \tau}}}{\sqrt{\pi}} \Sigma_{k=0}^\infty \frac{\Gamma(\frac12 +k)}{(i+(1+2k)\tau) \Gamma(1+k)}\;\; \times
\\ {}_4F_3 (-k,1+\frac{i}{2 \tau},\frac{i}{2 \tau},\frac{i}{2 \tau};\frac12-k,\frac12+\frac{i}{2 \tau},1+\frac{i}{\tau};1)
\; t^{1+\frac{i}{\tau}+2k}.
\end{multline*}
Thus we can write $\gamma=(\gamma_1,\gamma_2,\gamma_3)$ as a power series in $t$ where
\[\gamma_j=\sum_{\ell=1}^3 c_{j\ell}U_\ell.\]
The curve with $\tau=1$ is given in Figure \ref{curvepic}, this time using the explicit formula.
\begin{figure}
\caption{The curve of torsion $\tau=1$ on the unit 2-sphere}
\label{curvepic}
\end{figure}
\section{Appendix}
The purpose of this Appendix is to address questions and issues about the curves raised by the referees and the editor.
For $\tau=0$, the curves are also planar and are precisely the set of circles lying on the sphere. If we consider curves corresponding to solutions (\ref{spherekappa}) with $C=0$, and $\kappa(\frac{\pi}{2\tau}) = \csc(\frac{\pi}{2\tau}\,\tau)=1$, then as $\tau$ varies from $0$ to $\infty$ the curves numerically appear to vary (in a non-uniform way) from an infinitely covered great circle, through a family of spiral ``clothoid" like curves.
The editor pointed out that if we consider those solutions to (\ref{spherekappa}) with $C=0$, and $k(s_0)=\csc(s_0 \tau) > 1$, then the corresponding curves approach a ``small circle" on $S^2(1)$ of constant curvature $k(s_0)$. This is an interesting example of non-uniform convergence. The curves as a whole converge pointwise to an infinitely covered great circle, while it is still possible to find sequences of ``tails" that converge to infinitely covered small circles. This phenomenon is indicated in Figure \ref{finn} where one sees a sequence of curves converging to a small circle. More details of this simple yet interesting behavior will be written up elsewhere.
\begin{figure}
\caption{Curves of constant torsion approaching a circle }
\label{finn}
\end{figure}
We will mention a few of the qualitative properties of these curves. Let us consider the case of curves in $S^2(1)$ with a fixed initial point and varying $\tau$. All curves of constant torsion differ from one of these by a rigid motion. In \cite{C}, p185, it is shown that the curves are embedded, spiral infinitely often about a limiting endpoint, and are reflectionally symmetric through the initial point. (In Figure \ref{peter} we show only the upper-half of the curves.) \cite{C}, p185, also shows that as $\tau$ varies from $0$ to $\infty$, the length varies from $\infty$ to $0$, see also equation (\ref{spherekappa}).
One referee asked if it would be possible to foliate $S^2(1)$ with curves of constant torsion (other than by just using circles). It may be possible to foliate $S^2(1)$, in some convoluted way, by packing $S^2(1)$ with pieces of curves of constant torsion; however our conjecture would be that it is not possible to foliate in any ``reasonable" way. The reasoning is as follows. It seems to be a difficult problem to find an explicit formula for the upper endpoint in terms of $\tau$ and the initial point. Nevertheless, numerically as $\tau$ varies from $0$ to $\infty$ the upper endpoint steadily moves downward from the north pole to the initial point. In particular this would imply that the curves corresponding to an infinitesimal change in $\tau$ would (repeatedly) intersect. It would follow that any foliation of $S^2(1)$ by curves of constant torsion would have to include curves with common endpoints that differ by a rigid motion; a rotation about the upper endpoint. This type of foliation could only work in some radius about the upper endpoint, because the effect of a rotation on the opposite lower endpoint would result in (repeated) intersections. In summary, the numerics strongly indicate that there is no foliation (singular or not) of $S^2(1)$ by curves of constant torsion.
Weiner \cite{W} proved that there exist arbitrarily short closed constant torsion curves in $\mathbb{R}^3$. More recently, Musso \cite{M} studied those curves of constant torsion in $\mathbb{R}^3$, whose normal vectors sweep out elastic curves in $S^2(1)$. Ivey \cite{I} generalizes Musso's results and gives examples of closed constant torsion curves of various knot types. The examples in the current paper complement these known examples.
\end{document}
|
\begin{document}
\title{How do algebras grow?}
\author{Be'eri Greenfeld}
\address{Department of Mathematics, Bar Ilan University, Ramat Gan 5290002, Israel}
\email{[email protected]}
\begin{abstract}
We construct an increasing, submultiplicative, arbitrarily rapid function $f:\mathbb{N}\rightarrow \mathbb{N}$ which is not equivalent to the growth function of any finitely generated algebra, demonstrating the difficulty in characterizing growth functions in an asymptotic language.
\end{abstract}
\maketitle
\section{Introduction}
\subsection{Growth functions} The question of `how do algebras grow?', or, which functions can be realized as growth functions of algebras (associative/Lie/Jordan/other, or algebras having certain additional algebraic properties) is a major problem in the meeting point of several mathematical fields including algebra, combinatorics, symbolic dynamics and more.
In this note we examine growth functions of infinite dimensional, finitely generated associative algebras. Let $F$ be an arbitrary field and let $R$ be such an $F$-algebra. Fixing a finite dimensional generating subspace $R=F[V]$ we define the growth of $R$ to be the function:
$$\gamma_{R,V}(n)=\dim_F V^n$$
This evidently depends on the choice of $V$, but might change only up to the following equivalence relation: $f\sim g$ if $f(n)\leq g(Cn)\leq f(Dn)$ for some $C,D>0$. Therefore when talking about the growth of an algebra we refer to the $\sim$-equivalence class of the function $\gamma_{R,V}(n)$ (for some $V$). We say that $f\preceq g$ if $f(n)\leq g(Cn)$ for some $C>0$. For more on growth functions of algebras, see \cite{KrauseLenagan}.
There are obvious properties necessarily satisfied by such growth functions; they are always:
\begin{itemize}
\item \textit{Increasing} (namely, $f(n)<f(n+1)$); and
\item \textit{Submultiplicative} (namely, $f(n+m)\leq f(n)f(m)$).
\end{itemize}
The main goal in studying the variety of possible growth functions is to investigate to what extent these conditions are in fact sufficient.
\subsection{Former results} Several attempts have been made to realize as wide a variety as possible of such functions as growth functions of associative algebras.
Smoktunowicz and Bartholdi \cite{SmoktunowiczBartholdi} proved that every increasing and submultiplicative function is equivalent to a growth function of an associative algebra, up to a polynomial factor. Namely:
\begin{thm}[{\cite[Theorem~C]{SmoktunowiczBartholdi}}]
Let $f:\mathbb{N}\rightarrow \mathbb{N}$ be submultiplicative and increasing. Then there exists a finitely generated monomial algebra $B$ whose growth function satisfies: $$f(n)\preceq \dim_F B(n)\preceq n^2f(n).$$
\end{thm}
They deduce the following corollary which allows an accurate realization of `sufficiently regular' rapid growth types:
\begin{cor}[{\cite[Corollary~D]{SmoktunowiczBartholdi}}]
Let $f:\mathbb{N}\rightarrow \mathbb{N}$ be submultiplicative, increasing, and such that $f(Cn)\geq nf(n)$ for some $C>0$ and all $n\in \mathbb{N}$. Then there exists an associative algebra of growth $\sim f$.
\end{cor}
It should be mentioned that the above constructions were modified by the author in \cite{BGJalg, BGIsr} to construct prime, primitive and simple algebras of prescribed growth rates; these also yield the existence of finitely generated simple Lie algebras with arbitrary growth functions satisfying the conditions of the above corollary (arguments will appear elsewhere).
Bell and Zelmanov \cite{BellZelmanov} found an additional condition (on discrete derivatives) satisfied by all growth functions; their remarkable achievement is that in fact, \textit{every increasing function satisfying this condition is equivalent to a growth function of an associative algebra}. They proved:
\begin{thm}[{\cite[Theorem~1.1]{BellZelmanov}}]
A growth function of an algebra is asymptotically equivalent to a constant function, a linear function, or a weakly increasing function $F:\mathbb{N}\rightarrow\mathbb{N}$ with the following properties:
\begin{enumerate}
\item $F'(n)\geq n+1$ for all $n\in \mathbb{N}$;
\item $F'(m)\leq F'(n)^2$ for all $m\in \{n,\dots,2n\}$.
\end{enumerate}
Conversely, if $F(n)$ is either a constant function, a linear function, or a weakly increasing function
with the above properties then it is asymptotically equivalent to the growth function of a finitely generated algebra.
\end{thm}
As the writers suggest, one can interpret this theorem as saying that other than the necessary condition that $F'(m)\leq F'(n)^2$ for all $m\in \{n,\dots,2n\}$, which is related to submultiplicativity, the only additional
constraints required for being realizable as a growth function of an algebra are those coming from Bergman's gap theorem \cite{BergmanGap} (which asserts that a super-linear growth function must be at least quadratic) and the elementary ``gap'' that an algebra cannot have strictly sublinear growth that is not constant. However, it seems that there is no natural characterization of whether a given function is equivalent to a function satisfying the above condition on discrete derivatives.
We remark that there exist extremely pathological examples of oscillating growth of algebras: Trofimov \cite{Semigroup} showed that for every $f_{-}(n)\succ n^2$ and $f_{+}\prec \exp(n)$ there exists a $2$-generated semigroup with growth function infinitely often smaller than $f_{-}$ and infinitely often larger than $f_{+}$. This was improved by Belov, Borisenko and Latyshev in \cite{BBL1997}; such examples cannot be found within the class of groups.
\subsection{Our aim} In this note we construct an example emphasizing the difficulty of the fundamental questions of characterizing growth functions of algebras.
Namely, we prove:
\begin{thm}\label{main thm submul}
Let $g:\mathbb{N}\rightarrow\mathbb{N}$ be a subexponential function. Then there exists an increasing, submultiplicative function $f:\mathbb{N}\rightarrow \mathbb{N}$ such that $f\succeq g$ and $f$ is not equivalent to the growth function of any finitely generated algebra.
\end{thm}
We mention that this in particular implies that the function $f$ constructed above is also not equivalent to the growth function of any group; Bartholdi and Erschler \cite{BartholdiErschler} proved that any function $f:\mathbb{N}\rightarrow \mathbb{N}$ which grows uniformly faster\footnote{In the sense that $f(2n)\leq f(n)^2\leq f(\eta n)$ for $n\gg 1$.} than $\exp(n^\alpha)$ is equivalent to a growth function of a group ($\alpha=\log 2/\log \eta\approx 0.7674$ where $\eta$ is the positive root of $X^3-X^2-2X-4$). They also leave open the question of providing a complete characterization of growth functions of groups.
By Bergman's gap theorem, the function $n^{\alpha}$ for $\alpha\in(1,2)$, which is increasing and submultiplicative, is not equivalent to the growth function of an algebra; no similar gap theorem is valid for other polynomially bounded functions. Theorem \ref{main thm submul} shows that the two necessary conditions of being increasing and submultiplicative are \textit{not sufficient even for `sufficiently rapid' functions}, thereby emphasizing the significance of the polynomial factor in \cite[Theorem~C]{SmoktunowiczBartholdi}. This phenomenon hints that there is in fact no characterization in an asymptotic language of growth functions within the class of increasing and submultiplicative functions; this might justify and emphasize the importance of using new characteristics of functions in the attempt to characterize growth functions, such as discrete derivatives as done in \cite{BellZelmanov}.
\section{Preliminary results}
By a result of Bell and Zelmanov \cite[Proposition~2.1]{BellZelmanov}, if $\gamma$ is a growth function of a finitely generated algebra then $\gamma'(m)\leq \gamma'(n)^2$ for every $m\in\{n,\dots,2n\}$, where $\gamma'(n) = \gamma(n)-\gamma(n-1)$. Their proof yields:
\begin{rem}
Assume $\gamma$ is a growth function of an algebra. Let $d \in \mathbb{N}$. Then
$\gamma'(m)\leq \gamma'(n)^d$ for every $m\in\{n,\dots,dn\}$.
\end{rem}
\begin{proof}
We may assume the algebra is monomial, so $\gamma'(n)$ is the number of (nonzero) words of length $n$ in the generators.
But if $n \leq m \leq dn$ then every word of length $m$ is a prefix of a product of $d$ words of length $n$.
\end{proof}
This is used in the next proposition.
\begin{prop} \label{property_growth}
Suppose $f:\mathbb{N}\rightarrow \mathbb{N}$ is equivalent to a growth function $\gamma:\mathbb{N}\rightarrow \mathbb{N}$ of a finitely generated algebra. Then there exists $C\in \mathbb{N}$ such that for all $D\gg 1$, for all $n$ we have: $$f(2CDn)-f(2CDn-C)\leq 2D^2n (f(CDn)-f(Cn-C))^{2D}.$$
\end{prop}
\begin{proof}
Write $\gamma(n)\leq f(Cn)\leq \gamma(Dn)$ for some $C,D$ (we can take $D$ arbitrarily large).
Set $h(n)=f(Cn)$ and $\varphi(n)=\gamma(Dn)$. Then $h(n)\leq \varphi(n)\leq h(Dn)$.
Observe that: $$h'(n)=f(Cn)-f(Cn-C)\leq \gamma(Dn)-\gamma(n-1)=\sum_{k=n}^{Dn} \gamma'(k)\leq Dn\gamma'(n)^D.$$
Note also that:
\begin{eqnarray*}
\gamma'(Dn) & =& \gamma(Dn)-\gamma(Dn-1) \\
& \leq & \gamma(Dn)-\gamma(Dn-D) \\
& = & \varphi(n)-\varphi(n-1) \\
& \leq & h(Dn)-h(n-1).
\end{eqnarray*}
Putting these together, we get that:
\begin{eqnarray*}
f(2CDn)-f(2CDn-C) & = & h'(2Dn) \\
& \leq & 2D^2n\gamma'(2Dn)^D
\\
& \leq & 2D^2n\gamma'(Dn)^{2D} \\
& \leq & 2D^2n (h(Dn)-h(n-1))^{2D} \\ & = & 2D^2n (f(CDn)-f(Cn-C))^{2D},
\end{eqnarray*}
as desired.
\end{proof}
\section{A construction of a submultiplicative function}\label{construction of f}
Let $1 < d_1<d_2<\cdots$ be an increasing sequence, and $n_1,n_2,\dots$ a sequence such that
$$n_1 < d_1 n_1 < n_2 < d_2 n_2 < n_3 < \cdots.$$
Both sequences are to be restricted in the sequel
by conditions of the form ``$d_k$ is greater than a function of $\{d_i,n_i\}_{i=0}^{k-1}$'' and ``$n_k$ is greater than a function of $\{d_i,n_i\}_{i=0}^{k-1}$ and $d_k$''.
\subsection{The interval $[1,n_2]$}
We will define a function $f:\mathbb{N}\rightarrow \mathbb{N}$, first by defining it on the domain $[1,n_2]$:
\begin{itemize}
\item For $x\leq n_1$, take $f(x)=2^x$;
\item For $n_1<x\leq d_1n_1$, take $f(x)=f(x-1)+x+1$;
\item For $d_1n_1<x\leq n_2$, take $f(x)=\lfloor 2^{1/{2d_1}}f(x-1)\rfloor$.
\end{itemize}
Denote $\alpha_1=f(d_1n_1)-f(n_1)
< d_1^2n_1^2$. Since $\alpha_1$ is polynomial with respect to $n_1$ (assuming $d_1$ was fixed), if we take $n_1\gg 1$ then we may assume that $f(d_1n_1) = 2^{n_1}+\alpha_1\leq 2^{n_1+\frac{1}{3}}$.
We will also need the following fact:
\begin{lem}\label{floor}
Given $c>1$ and $\varepsilon>0$, for all $a_0\gg 1$ the sequence $a_{k+1}=\lfloor ca_k \rfloor$ satisfies $c^{k-\varepsilon}a_0\leq a_k\leq c^ka_0$.
\end{lem}
\begin{proof}
By induction $a_k\geq c^ka_0-\frac{c^k-1}{c-1}$, so:
$$a_k-c^{k-\varepsilon}a_0\geq (c^k-c^{k-\varepsilon})a_0-\frac{c^k-1}{c-1}\xrightarrow{a_0\rightarrow \infty}\infty.$$
\end{proof}
Using Lemma \ref{floor} (taking $c = 2^{\frac{1}{2d_1}}$, $\varepsilon = 2^{-3}$), we can take $n_1\gg 1$ so that if $x\geq d_1n_1$ then $f(x)\geq f(d_1n_1)\cdot 2^{\frac{x-d_1n_1}{2d_1}-2^{-3}}$.
It is evident that $f$ is increasing in $[1,d_1n_1]$; it is also increasing in $[d_1n_1,n_2]$ if we only make sure $n_1$ is large enough.
We now turn to prove that $f$ is submultiplicative.
\begin{prop}\label{f submul 0}
The function $f:[1,n_2]\rightarrow \mathbb{N}$ constructed above is submultiplicative.
\end{prop}
\begin{proof}
We first take care of the interval $[d_1n_1+1,n_2]$. Pick $p+q>d_1n_1$ with $p\leq q$ and we must show that $f(p+q)\leq f(p)f(q)$. We assume $d_1>2$ and $n_1\gg 1$ (this will be explicitly explained) and compute that:
\begin{eqnarray*}
f(p+q)&\leq &f(d_1n_1)\cdot 2^{\frac{p+q-d_1n_1}{2d_1}}\\
&\leq & 2^{n_1+\frac{1}{3}+\frac{p+q-d_1n_1}{2d_1}}\\
&=& 2^{\frac{1}{2}n_1+\frac{p+q}{2d_1}+\frac{1}{3}}.
\end{eqnarray*}
Assume $p\leq n_1$, then $q>n_1$. Whether or not $q \leq d_1n_1$, we have that: $$\frac{f(p+q)}{f(q)}\leq 2^{\frac{p}{2d_1}}\leq 2^p=f(p);$$ this follows since the ratio between the values of $f$ at two successive integers in $[n_1,n_2]$ is $\leq 2^{\frac{1}{2d_1}}$ if we only take $n_1\gg 1$. Thus we suppose $n_1<p$ (so in particular $f(q)\geq f(p)\geq 2^{n_1}$).
\begin{itemize}
\item If $d_1n_1\leq p$ then (assuming $n_1>1$):
\begin{eqnarray*}
f(p)f(q)&\geq & 2^{n_1+\frac{p-d_1n_1}{2d_1}-2^{-3}}2^{n_1+\frac{q-d_1n_1}{2d_1}-2^{-3}}\\
&=& 2^{n_1+\frac{p+q}{2d_1}-2^{-2}}\\
&\geq & 2^{\frac{1}{2}n_1+\frac{p+q}{2d_1}+\frac{1}{3}}\geq f(p+q).
\end{eqnarray*}
\item If $q\leq d_1n_1$ then: $$f(p)f(q)\geq 2^{2n_1}\geq 2^{\frac{1}{2}n_1+n_1+\frac{1}{3}}\geq f(p+q),$$ the latter inequality follows since $p+q\leq 2d_1n_1$.
\item If $p<d_1n_1<q$ then: \begin{eqnarray*}
f(p)f(q)& \geq & 2^{n_1}2^{n_1+\frac{q-d_1n_1}{2d_1}-2^{-3}} \\ & =& 2^{\frac{3}{2}n_1+\frac{q}{2d_1}-2^{-3}} \\ & = & 2^{\frac{3}{2}n_1+\frac{p+q}{2d_1}-\frac{p}{2d_1}-2^{-3}}\\
& \geq & 2^{\frac{3}{2}n_1+\frac{p+q}{2d_1}-\frac{1}{2}n_1-2^{-3}}>f(p+q).
\end{eqnarray*}
\end{itemize}
As for submultiplicativity in the interval $[n_1,d_1n_1]$ (note that the interval $[1,n_1]$ is trivial as the function $x\mapsto 2^x$ is submultiplicative), assume $n_1\leq p+q\leq d_1n_1$.
\begin{itemize}
\item If $p>n_1$ then: $$f(p)f(q)\geq f(n_1)^2=2^{2n_1}\geq 2^{n_1}+\alpha_1=f(d_1n_1)\geq f(p+q).$$
\item If $q<n_1$ then: $$f(p)f(q)=2^{p+q}\geq f(p+q).$$
\item If $p\leq n_1\leq q$ then:
\begin{eqnarray*}
f(p+q) & \leq & f(d_1n_1)=2^{n_1}+\alpha_1 \\
&\leq & 2^{n_1+\frac{1}{3}}\leq 2^p2^{n_1}=f(p)f(n_1)\leq f(p)f(q).
\end{eqnarray*}
\end{itemize}
We thus proved that $f:[1,n_2]\rightarrow \mathbb{N}$ is submultiplicative.
\end{proof}
\subsection{Extending $f$ to $\mathbb{N}$}
We now extend $f$ to $\mathbb{N}$ as follows. Suppose $d_1,\dots,d_{k-1}, n_1,\dots,n_{k-1}$ were chosen and suppose $f$ was defined in the domain $[1,n_k]$ (we choose $n_k$ only after $\{d_i,n_i\}_{i=0}^{k-1},d_k$ were fixed).
Assumptions on $d_k,n_k$ will be explicitly made during the proof of submultiplicativity, in order to clarify where these assumptions originate from.
We assume $d_k\geq \frac{n_{k-1}}{2d_1\cdots d_{k-2}}$.
Define:
\begin{itemize}
\item For $n_k<x\leq d_kn_k$, take $f(x)=f(x-1)+x+1$;
\item For $d_kn_k<x\leq n_{k+1}$, take $f(x)=\lfloor 2^{\frac{1}{2d_1\cdots d_k}}f(x-1) \rfloor$.
\end{itemize}
Note that by taking $n_k$ to be large enough we can make sure that $f(x)$ is increasing.
\textbf{Condition (I).} We pick $n_k$ large enough such that for all $d_kn_k\leq x \leq n_{k+1}$ we have that: $$f(x)\geq f(d_kn_k)\cdot 2^{\frac{x-d_kn_k}{2d_1\cdots d_k}-2^{-k-2}}$$ (this is possible by Lemma \ref{floor} applied with $c=2^{\frac{1}{2d_1\cdots d_k}}$ and $\varepsilon=2^{-k-2}$).
\begin{lem} \label{lower bound f}
We can choose $\{n_k\}_{k\geq 1}$ to be sufficiently large such that for all $x\leq d_kn_k$ we have that: $$f(x)\geq 2^{\frac{x}{2d_1\cdots d_k}+1+2^{-k-1}}.$$
\end{lem}
\begin{proof}
Now we prove the assertion by induction on $k$. For $k=1$, in the relevant interval (namely $x\leq d_1n_1$) we have that: $$f(x)\geq 2^{n_1}\geq 2^{\frac{d_1n_1}{2d_1}+1+2^{-2}}$$ so the assertion is true (indeed, we take $n_1\geq 3$). Suppose the claim holds for $x\leq d_kn_k$ and let us prove it for $x\leq d_{k+1}n_{k+1}$; if $x\leq d_kn_k$ this is immediate from the hypothesis. If $d_kn_k<x\leq n_{k+1}$ then by Condition (I): $$f(x)\geq f(d_kn_k)\cdot 2^{\frac{x-d_kn_k}{2d_1\cdots d_{k}}-2^{-k-2}}.$$ We can bound the latter term from below (using what we just proved for $d_kn_k$):
\begin{eqnarray*}
f(d_kn_k)\cdot 2^{\frac{x-d_kn_k}{2d_1\cdots d_{k}}-2^{-k-2}}& \geq & 2^{\frac{d_kn_k}{2d_1\cdots d_{k}}+1+2^{-k-1}}\cdot 2^{\frac{x-d_kn_k}{2d_1\cdots d_{k}}-2^{-k-2}}\\
&= & 2^{\frac{x}{2d_1\cdots d_{k}}+1+2^{-(k+1)-1}}.
\end{eqnarray*}
If $n_{k+1}<x\leq d_{k+1}n_{k+1}$ then (using what we already know for $n_{k+1}$):
\begin{eqnarray*}
f(x)\geq f(n_{k+1})& \geq & 2^{\frac{n_{k+1}}{2d_1\cdots d_{k}}+1+2^{-k-2}}\\
&=& 2^{\frac{d_{k+1}n_{k+1}}{2d_1\cdots d_{k+1}}+1+2^{-(k+1)-1}}\\
&\geq & 2^{\frac{x}{2d_1\cdots d_{k+1}}+1+2^{-(k+1)-1}},
\end{eqnarray*}
as desired.
\end{proof}
We will use the following freely:
\begin{lem}
We can always assume $f(d_kn_k)\leq f(n_k)\cdot{2^\varepsilon}$. More specifically, given $\varepsilon >0$ we can choose $d_k,n_k$ in such a way.
\end{lem}
\begin{proof}
Using Lemma \ref{lower bound f} we see that: $$f(n_k)\geq 2^{\frac{n_k}{2d_1\cdots d_{k-1}}}\gg d_k^2n_k^2\geq f(d_kn_k)-f(n_k).$$
Assuming $\{d_i,n_i\}_{i=0}^{k-1}$ and $d_k$ are fixed, we can let $n_k$ be large enough such that: $$(2^\varepsilon-1)f(n_k)\geq f(d_kn_k)-f(n_k),$$
and the claim follows.
\end{proof}
From now on, we take $n_k$ large enough so that $f(d_kn_k)\leq f(n_k)\cdot 2^{\frac{1}{3}}$.
\begin{prop}\label{f submul}
The function $f:\mathbb{N}\rightarrow \mathbb{N}$ constructed above is submultiplicative.
\end{prop}
\begin{proof}
We now turn to prove that $f$ is submultiplicative in the interval $[n_k+1,n_{k+1}]$ (by the induction hypothesis it is submultiplicative for $[1,n_k]$, where the induction base is Proposition \ref{f submul 0}). As in the basic step, we begin with the interval $[d_kn_k+1,n_{k+1}]$ (without limiting $n_{k+1}$, which can be thought of as infinity). Let $p+q>d_kn_k$ and as before, $p\leq q$. Denote $\beta=f(d_{k-1}n_{k-1})$. Then: $$f(p+q)\leq \beta\cdot 2^{\frac{n_k-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}+\frac{p+q-d_kn_k}{2d_1\cdots d_{k}}+\frac{1}{3}}=\beta\cdot 2^{\frac{p+q-d_kd_{k-1}n_{k-1}}{2d_1\cdots d_k}+\frac{1}{3}}.$$
We divide into cases:
\begin{itemize}
\item \textbf{Suppose $p\leq d_{k-1}n_{k-1}$.} Note that for $t\in [n_k,d_kn_k-1]$ we have that $$\frac{f(t+1)}{f(t)}\leq 1+\frac{t+2}{f(t)}\leq 1+\frac{d_kn_k+1}{2^{\frac{n_k}{2d_1\cdots d_k}}}$$ which we can take to be smaller than $2^{\frac{1}{2d_1\cdots d_k}}$ by letting $n_k$ be large enough. Thus, if in addition we take $d_k\geq \frac{n_{k-1}}{2d_1\cdots d_{k-2}}$ then: $$\frac{f(p+q)}{f(q)}\leq 2^{\frac{p}{2d_1\cdots d_k}}\leq 2^{\frac{d_{k-1}n_{k-1}}{2d_1\cdots d_k}}\leq 2\leq f(p).$$
(Note that the first inequality is evident if $q\geq d_kn_k$, and otherwise follows from the argument in the beginning of this case.)
\item \textbf{If $d_{k-1}n_{k-1}<p\leq n_k$ then $q\geq n_k$ (as $d_k>2$), and assume in addition that $q\leq d_kn_k$.}
Note also that we can choose $d_k>d_{k-1}n_{k-1}+1$ so: $$d_{k-1}d_kn_{k-1}\leq (d_{k-1}n_{k-1}+1)(d_k-1)\leq p(d_k-1)$$ and thus (recalling that $q\leq d_kn_k$):
\begin{eqnarray*}
(\star)\ \ \ pd_k+d_kn_k-2d_{k-1}d_kn_{k-1}&\geq & pd_k-p(d_k-1)-d_kd_{k-1}n_{k-1}+d_kn_k \\
&\geq & p+q-d_{k-1}d_kn_{k-1}.\ \ \ \ \
\end{eqnarray*}
Then, using Condition (I):
\begin{eqnarray*}
f(p)f(q)&\geq &f(p)f(n_k) \\
&\geq &\beta^2\cdot 2^{\frac{p-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}-2^{-(k-1)-2}+\frac{n_k-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}-2^{-(k-1)-2}} \\
&=&\beta^2\cdot 2^{\frac{pd_k+n_kd_k-2d_kd_{k-1}n_{k-1}}{2d_1\cdots d_{k}}-2^{-k}} \\
&\geq & \beta\cdot 2^{\frac{p+q-d_kd_{k-1}n_{k-1}}{2d_1\cdots d_{k}}+\frac{1}{3}} \\
&\geq &f(p+q).
\end{eqnarray*}
(The second-to-last inequality follows from $(\star)$ combined with the fact that $\beta \geq 2$.)
\item \textbf{If $d_{k-1}n_{k-1}<p\leq n_k$ then $q\geq n_k$, and now assume that moreover $q>d_kn_k$.}
Recalling Lemma \ref{lower bound f} we have: $$\frac{f(p+q)}{f(q)}\leq 2^{\frac{p}{2d_1\cdots d_k}}\leq f(p).$$
\end{itemize}
In the remaining cases,
$p>n_k$.
\begin{itemize}
\item \textbf{Assume $p\geq d_kn_k$.}
Note that by Condition (I):
\begin{eqnarray*}
f(p)f(q) & \geq & f(d_kn_k)^2\cdot 2^{\frac{p+q-2d_kn_k}{2d_1\cdots d_k}-2\cdot 2^{-k-2}},\\
f(p+q) & \leq & f(d_kn_k)\cdot 2^{\frac{p+q-d_kn_k}{2d_1\cdots d_k}}.
\end{eqnarray*}
Therefore:
\begin{eqnarray*}
\frac{f(p)f(q)}{f(p+q)}& \geq & f(d_kn_k)\cdot 2^{-\frac{d_kn_k}{2d_1\cdots d_k}-2^{k-1}}\\
&\geq & 2^{\frac{d_kn_k}{2d_1\cdots d_k}+1+2^{-k-1}}\cdot 2^{-\frac{d_kn_k}{2d_1\cdots d_k}-2^{k-1}}>1.
\end{eqnarray*}
(The middle inequality follows by Lemma \ref{lower bound f}.)
\item \textbf{Suppose $n_k\leq p< d_kn_k$. If in addition $q\leq d_kn_k$} then $$f(p+q)\leq f(2d_kn_k)\leq \beta \cdot 2^{\frac{2d_kn_k-d_kd_{k-1}n_{k-1}}{2d_1\cdots d_k}+\frac{1}{3}}$$ and by Condition (I) specified for $x=n_k$:
\begin{eqnarray*}
f(p)f(q)\geq f(n_k)^2 & \geq & \beta^2\cdot 2^{\frac{2n_k-2d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}-2\cdot 2^{-(k-1)-2}}\\
&=& \beta^2\cdot 2^{\frac{2d_kn_k-2d_kd_{k-1}n_{k-1}}{2d_1\cdots d_k}-2^{-k}},
\end{eqnarray*}
and by Lemma \ref{lower bound f} applied for $x=d_{k-1}n_{k-1}$: $$\beta\geq 2^{\frac{d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}+1}=2^{\frac{d_kd_{k-1}n_{k-1}}{2d_1\cdots d_{k}}+1}$$ so:
$$f(p)f(q)\geq \beta\cdot 2^{\frac{2d_kn_k-d_kd_{k-1}n_{k-1}}{2d_1\cdots d_k}+1-2^{-k}}\geq f(p+q).$$
\item \textbf{Suppose $n_k<p<d_kn_k<q$.} Then (applying Lemma \ref{lower bound f} on $x=p$, and Condition (I) on $x=q$):
\begin{eqnarray*}
f(p)f(q)&\geq & 2^{\frac{p}{2d_1\cdots d_k}+1}f(q)\\
&\geq & 2^{\frac{p}{2d_1\cdots d_k}+1}\cdot f(d_kn_k)\cdot 2^{\frac{q-d_kn_k}{2d_1\cdots d_k}-2^{-k-2}}\\
&\geq & f(p+q).
\end{eqnarray*}
\end{itemize}
We thus proved submultiplicativity of $f$ for the interval $[d_kn_k+1,n_{k+1}]$. It remains to show that $f(p+q)\leq f(p)f(q)$ for $p+q\in [n_k+1,d_kn_k]$.
\begin{itemize}
\item \textbf{If $p>n_k$} then (applying Lemma \ref{lower bound f} for $x=n_k$):
\begin{eqnarray*}
f(p)f(q)\geq f(n_k)^2 & \geq & f(n_k)\cdot 2^{\frac{n_k}{2d_1\cdots d_k}+1}\\ & \geq & f(n_k)+d_k^2n_k^2\geq f(d_kn_k)\geq f(p+q),
\end{eqnarray*}
where the inequality $$f(n_k)\cdot 2^{\frac{n_k}{2d_1\cdots d_k}+1}\geq f(n_k)+d_k^2n_k^2$$ follows since if $d_k$ is fixed then the left hand side grows more rapidly than the right hand one (as a function of $n_k$), so in particular we can take $n_k$ large enough such that this inequality holds.
\item \textbf{If $p\leq q<n_k$} then:
\begin{eqnarray*}
f(p+q)& \leq & f(n_k)+(p+q-n_k)^2\\
& \leq & f(n_k)+n_k^2\\
& \leq & f(n_k)\cdot 2^{\frac{1}{2}}\\
& \leq & \beta\cdot 2^{\frac{n_k-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}+\frac{1}{2}}.
\end{eqnarray*}
(The one before last inequality follows since $f$ grows exponentially in the interval $[d_{k-1}n_{k-1}+1,n_k]$, so in particular we can take $n_k$ to be large enough such that $f(n_k)\gg n_k^2$).
\begin{itemize}
\item Suppose in addition that $d_{k-1}n_{k-1}<p$. Then, using Condition (I):
\begin{eqnarray*}
f(p)f(q)&\geq & \beta ^2\cdot 2^{\frac{p+q-2d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}-2\cdot 2^{-(k-1)-2}}\\
&=& \beta^2\cdot 2^{\frac{p+q-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}}\cdot 2^{\frac{-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}-2^{-k}}.
\end{eqnarray*}
But applying Lemma \ref{lower bound f} for $x=d_{k-1}n_{k-1}$ we get $\beta\geq 2^{\frac{d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}+1}$ so:
\begin{eqnarray*}
f(p)f(q) &\geq &\beta\cdot 2^{\frac{p+q-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}+1-2^{-k}} \\
&\geq & \beta\cdot 2^{\frac{n_k-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}+1-2^{-k}} \\
&\geq &\beta\cdot 2^{\frac{n_k-d_{k-1}n_{k-1}}{2d_1\cdots d_{k-1}}+\frac{1}{2}} \\
&\geq & f(p+q).
\end{eqnarray*}
\item Now suppose $p\leq d_{k-1}n_{k-1}$ and $q$ is general (recall $q\leq p+q\leq d_kn_k$). Note that if we make sure that $n_k>2d_{k-1}n_{k-1}$ then it is forced that $q>d_{k-1}n_{k-1}$. Now: $$\frac{f(p+q)}{f(q)}\leq 2^{\frac{p}{2d_1\cdots d_{k-1}}}\leq f(p)$$ where the last inequality follows from Lemma \ref{lower bound f} for $x=p$.
\end{itemize}
\item \textbf{It remains to take care of the case $d_{k-1}n_{k-1}\leq p\leq n_k\leq q$.} But notice that: $$f(p+q)\leq f(d_kn_k)\leq f(n_k)+d_k^2n_k^2\leq 2f(n_k)$$ since $f$ grows exponentially in the interval $[d_{k-1}n_{k-1}+1,n_k]$, so in particular we can take $n_k$ to be large enough such that $f(n_k)\gg d_k^2n_k^2$ (note $d_k$ is already fixed when we choose $n_k$). Now: $$f(p)f(q)\geq 2f(q)\geq 2f(n_k).$$
\end{itemize}
We thus proved that $f:\mathbb{N}\rightarrow \mathbb{N}$ is a submultiplicative function.
\end{proof}
\section{Our construction is not equivalent to any growth function}
Let $f:\mathbb{N}\rightarrow \mathbb{N}$ be the increasing, submultiplicative function constructed in Section \ref{construction of f} with respect to the sequences $\{d_k,n_k\}_{k=1}^\infty$.
\begin{prop} \label{notequiv}
We can choose $\{d_i,n_i\}_{i=0}^{\infty}$ such that the resulting $f$ is not equivalent to any growth function of a finitely generated algebra.
\end{prop}
\begin{proof}
Since the conditions on the $d_k$ and $n_k$ in Section~\ref{construction of f} always require the parameters to be large enough (depending on those previously defined), we assume further that $n_k=km_k$ for $m_k$ to be determined in the sequel.
Suppose $C\in \mathbb{N}$ is given, and let $k=C$. Let $n=m_k+1$ and $D=\lfloor d_k(1-\frac{1}{m_k+1})\rfloor$, and observe that $\frac{1}{2}d_k\leq D\leq d_k$. We are going to contradict the property stated in Proposition \ref{property_growth} with these parameters, namely, we must show that: $$f(2CDn)-f(2CDn-C)>2D^2n(f(CDn)-f(Cn-C))^{2D}.$$
$2CDn\leq 2kd_km_k=2d_kn_k$ and as long as we take $n_k>2k$ we have that
$$2CDn-C\geq 2CDn-CD\geq 2kDm_k\geq kd_km_k=d_kn_k$$
so:
\begin{eqnarray*}
f(2CDn)-f(2CDn-C)& \geq & f(2CDn-C)\cdot (2^{\frac{k}{2d_1\cdots d_k}-2^{-k-2}}-1)\\ & \geq & f(2CDn-C)\cdot \Delta_k
\end{eqnarray*}
where $\Delta_k$ is a value that does not depend on $n_k$ (we are going to take $n_k$ large enough to overcome this factor, which is very small).
Now, substituting our parameters and using Lemma \ref{lower bound f} for $x=d_kn_k$: $$f(2CDn-C)\geq f(d_kn_k)\geq 2^{\frac{d_kn_k}{2d_1\dots d_k}}=q^{n_k}=q^{km_k}$$ where $q=2^{\frac{1}{2d_1\cdots d_{k-1}}}$.
On the other hand, $$n_k=km_k=Cn-C<CDn\leq k\left(d_k\frac{m_k}{m_k+1}\right)(m_k+1)=d_kn_k$$ so (by definition of $f$ in intervals of this type): $$f(CDn)-f(Cn-C)\leq (CDn)^2\leq (kd_k(m_k+1))^2$$ hence:
\begin{eqnarray*}
2D^2n(f(CDn)-f(Cn-C))^{2D}& \leq & 2d_k^2(m_k+1)(kd_k(m_k+1))^{2d_k}\\
&\leq & (m_k+1)^{2d_k+1}\cdot \Gamma_k
\end{eqnarray*}
where $\Gamma_k$ depends only on $k,d_k$ (but not on $m_k$ or equivalently on $n_k$).
Finally, note that as we fix $k,d_1,\dots,d_k$ and let $m_k\rightarrow \infty$ we get:
\begin{eqnarray*}
f(2CDn)-f(2CDn-C)&\geq &(q^k)^{m_k}\cdot \Delta_k \\
&\gg & (m_k+1)^{2d_k+1}\cdot \Gamma_k \\
&\geq & 2D^2n(f(CDn)-f(Cn-C))^{2D}
\end{eqnarray*}
contradicting the property of Proposition \ref{property_growth}. Since this can be done for any $C=k\in \mathbb{N}$, and we can take $d_k$ (and hence $D$) to be arbitrarily large, we proved that $f$ cannot be equivalent to any growth function of a finitely generated algebra.
\end{proof}
\section{Proof of Theorem \ref{main thm submul}}
\begin{prop} \label{superpoly}
Let $g:\mathbb{N}\rightarrow \mathbb{N}$ be an arbitrary subexponential function. Then we can choose $\{d_i,n_i\}_{i=0}^{\infty}$ such that the resulting function $f$ of our construction satisfies $f\succeq g$.
\end{prop}
\begin{proof}
Since $g$ is assumed to be subexponential, there exists $\omega:\mathbb{N}\rightarrow \mathbb{R}$ such that $\omega(n)\xrightarrow{n\rightarrow \infty} 0$ and $g(n)\leq 2^{n\omega(n)}$.
We make sure that $n_k>\max\{m\,|\,\omega(m)\geq \frac{1}{2d_1\cdots d_k}\}$ for all $k\geq 1$.
We claim that for all $x\geq n_1$ we have that $f(x)\geq 2^{x\omega(x)}$. There are two possibilities: either $x\in [n_j,d_jn_j]$ or $x\in [d_jn_j,n_{j+1}]$ for some $j\geq 1$. Let us consider the first case. Then $x\geq n_j$ so by the way we picked $n_j$ we have that $\omega(x)<\frac{1}{2d_1\cdots d_j}$.
By Lemma \ref{lower bound f} we have that: $$f(x)\geq 2^{\frac{x}{2d_1\cdots d_j}}\geq 2^{x\omega(x)}.$$
As for the second case, if $x\in [d_jn_j,n_{j+1}]$ then (again using Lemma \ref{lower bound f} for $d_jn_j$):
\begin{eqnarray*}
f(x) & \geq & 2^{\frac{x-d_jn_j}{2d_1\cdots d_j}-2^{-j-2}}f(d_jn_j)\\
& \geq & 2^{\frac{x}{2d_1\cdots d_j}+1-2^{-j-2}}\\
& \geq & 2^{x\omega(x)}.
\end{eqnarray*}
Thus $f(x)\geq 2^{x\omega(x)}\geq g(x)$ for all $x\gg 1$.
\end{proof}
Finally we have:
\begin{proof}[{Proof of Theorem \ref{main thm submul}}]
The theorem follows since we can take $\{d_k,n_k\}_{k=1}^\infty$ satisfying all conditions required in Propositions \ref{f submul}, \ref{notequiv} and \ref{superpoly}.
\end{proof}
\end{document}
|
\begin{document}
\begin{frontmatter}
\title{On the irregularity of uniform hypergraphs\,\tnoteref{titlenote}}
\tnotetext[titlenote]{This work was supported by the National Nature
Science Foundation of China (Nos.\,11471210, 11571222)}
\author[address1]{Lele Liu}
\ead{[email protected]}
\author[address1]{Liying Kang}
\ead{[email protected]}
\author[address1,address2]{Erfang Shan\corref{correspondingauthor}}
\cortext[correspondingauthor]{Corresponding author}
\ead{[email protected]}
\address[address1]{Department of Mathematics, Shanghai University, Shanghai 200444, P.R. China}
\address[address2]{School of Management, Shanghai University, Shanghai 200444, P.R. China}
\begin{abstract}
Let $H$ be an $r$-uniform hypergraph on $n$ vertices and $m$ edges, and let
$d_i$ be the degree of $i\in V(H)$. Denote by $\varepsilon(H)$ the difference
of the spectral radius of $H$ and the average degree of $H$. Also, denote
\[
s(H)=\sum_{i\in V(H)}\left|d_i-\frac{rm}{n}\right|,~
v(H)=\frac{1}{n}\sum_{i\in V(H)}d_i^{\frac{r}{r-1}}-\left(\frac{rm}{n}\right)^{\frac{r}{r-1}}.
\]
In this paper, we investigate the irregularity of $r$-uniform hypergraph $H$ with
respect to $\varepsilon(H)$, $s(H)$ and $v(H)$, which extend relevant results
to uniform hypergraphs.
\end{abstract}
\begin{keyword}
Uniform hypergraph \sep
Adjacency tensor \sep
Measure of irregularity \sep
Degree sequence
\MSC[2010]
15A42 \sep
05C50
\end{keyword}
\end{frontmatter}
\section{Introduction}
Let $G=(V(G),E(G))$ be an undirected graph with $n$ vertices and $m$ edges without
loops and multiple edges, where $V(G)=[n]:=\{1,2,\ldots,n\}$. A graph $G$ is regular
if all its vertices have the same degree, otherwise it is irregular. In many
applications and problems it is of importance to know how irregular a given graph is.
Various measures of graph irregularity have been proposed and studied, see, for example,
\cite{Bell,Collatz,Henning,Nikiforov:Degree deviation,Nikiforov2007} and references therein.
We first survey some known parameters used as measures of irregularity as well as their
respective properties. In 1957, Collatz and Sinogowitz \cite{Collatz} showed that the
spectral radius $\rho(G)$ of a graph $G$ is greater than or equal to the average degree
$\overline{d}(G)$, and the equality holds if and only if $G$ is regular. The fact allows
us to consider the difference $\varepsilon(G)=\rho(G)-\overline{d}(G)$ as a relevant
measure of irregularity of $G$. The authors also proved that, for $n\leq 5$, the
maximum value of $\varepsilon(G)$ is $\sqrt{n-1}-2+2/n$ and the maximal is attained for
the star $S_n$. Fifty years later, Aouchiche et al. \cite{Aouchiche2008} conjectured that
the most irregular connected graph on $n$ ($n\geq10$) vertices is a pineapple graph.
Recently, this conjecture was proved by Tait and Tobin \cite{Tait2016}. In 1992, Bell
\cite{Bell} suggested making the variance $v(G)$ of the vertex degrees of $G$ as a measure
of the irregularity, i.e.,
\[
v(G)=\frac{1}{n}\sum_{i=1}^nd_i^2-\left(\frac{2m}{n}\right)^2.
\]
The author compared $\varepsilon(G)$ and $v(G)$ for various classes of graphs, and showed
that they are not always compatible. Also, the most irregular graphs according to these
measures were determined for certain classes of graphs. In 2006, Nikiforov
\cite{Nikiforov:Degree deviation} introduced
\[
s(G)=\sum_{i\in V(G)}\left|d_i-\frac{2m}{n}\right|
\]
as a new measure of the irregularity of a graph $G$, and showed several inequalities with
respect to $\varepsilon(G)$, $s(G)$ and $v(G)$ as follows:
\begin{equation}
\label{eq:Nikiforov}
\frac{v(G)}{2\sqrt{2m}}\leq\rho(G)-\frac{2m}{n}\leq
\sqrt{s(G)}.
\end{equation}
In particular, for a bipartite graph $G$ with $m$ edges and partition $V(G)=V_1\dot{\cup} V_2$,
Nikiforov \cite{Nikiforov:Degree deviation} defined
\[
s_2(G)=\sum_{i\in V_1} \left|d_i-\frac{m}{n_1}\right|+
\sum_{i\in V_2} \left|d_i-\frac{m}{n_2}\right|
\]
as a more relevant irregularity parameter than $s(G)$, where $n_1=|V_1|$, $n_2=|V_2|$. Also,
it was proved that
\begin{equation}
\label{eq:Nikiforov for bipartite}
\rho(G)-\frac{m}{\sqrt{n_1n_2}}\leq\sqrt{\frac{s_2(G)}{2}}.
\end{equation}
These irregularity measures as well as other attempts to measure the irregularity of a
graph were studied in several works
\cite{Dimitrov2014,Dimitrov2015,Edwards,Henning,Rautenbach}.
Our work in the present paper is to study the irregularity of uniform hypergraphs.
Denote by $\mathcal{H}(n,m)$ the set of all the $r$-uniform hypergraphs with $n$
vertices and $m$ edges. Let $H\in\mathcal{H}(n,m)$ be an $r$-uniform hypergraph, and
$\rho(H)$ be the spectral radius of $H$. In 2012, Cooper and Dutle
\cite{Cooper:Spectra Uniform Hypergraphs} showed that $\rho(H)\geq rm/n$. It is
clear that the equality holds if and only if $H$ is regular by \cite[Theorem 2]{Qi2013}.
Therefore, the value
\[
\varepsilon(H)=\rho(H)-\frac{rm}{n}
\]
can be viewed as a relevant measure of irregularity of $H$. Denote
\[
s(H)=\sum_{i\in V(H)}\left|d_i-\frac{rm}{n}\right|,
\]
where $d_i$ is the degree of vertex $i$ of $H$. Obviously, $s(H)\geq 0$, with equality
if and only if $H$ is regular. Analogous to the graph case, if $H\in\mathcal{H}(n,m)$
is an $r$-partite $r$-uniform hypergraph with partition $V(H)=V_1\dot{\cup} V_2\dot{\cup}\cdots\dot{\cup} V_r$
and $|V_i|=n_i$, $i\in[r]$, we denote
\[
s_r(H)=\sum_{i\in[r]}\sum_{j\in V_i}\left|d_j-\frac{m}{n_i}\right|.
\]
For an $r$-uniform hypergraph $H\in\mathcal{H}(n,m)$, we also denote
\[
v(H)=\frac{1}{n}\sum_{i=1}^nd_i^{\frac{r}{r-1}}-\left(\frac{rm}{n}\right)^{\frac{r}{r-1}}.
\]
It follows from the Power Mean inequality that $v(H)\geq 0$, with equality if and only if $H$ is
regular.
The main contribution of this paper is proposing some relations among $\varepsilon(H)$, $s(H)$
and $v(H)$, which extend relevant results to uniform hypergraphs. To be precise, we first generalize
\eqref{eq:Nikiforov for bipartite} to $r$-partite $r$-uniform hypergraphs as follows.
\begin{theorem}
\label{thm:Main result-1}
Let $H\in\mathcal{H}(n,m)$ be an $r$-partite $r$-uniform hypergraph with partition
$V(H)=V_1\dot{\cup} V_2\dot{\cup}\cdots\dot{\cup} V_r$. Let $|V_i|=n_i$, $i\in[r]$. Then
\[
\rho(H)-\frac{m}{\sqrt[r]{n_1n_2\cdots n_r}}\leq
\left(\frac{s_r(H)}{2}\right)^{\frac{r-1}{r}}.
\]
\end{theorem}
The main frame of \autoref{thm:Main result-1} is inspired by that of \cite{Nikiforov:Degree deviation}.
By virtue of \autoref{thm:Main result-1} and the direct product operation of hypergraphs,
we obtain the following result concerning $\varepsilon(H)$, $s(H)$ and $v(H)$, which
generalizes the result \eqref{eq:Nikiforov}.
\begin{theorem}
\label{thm:Main result-2}
Let $H\in\mathcal{H}(n,m)$. Then
\[
\frac{r-1}{\sqrt[r]{m}}\left(\frac{\sqrt[r]{r!}}{r^r}\right)^{\frac{1}{r-1}}v(H)
\leq\rho(H)-\frac{rm}{n}
\leq\frac{r}{\sqrt[r]{r!}}\left(\frac{s(H)}{2}\right)^{\frac{r-1}{r}}.
\]
\end{theorem}
\section{Preliminaries}
\label{sec2}
In this section, we first present some necessary notions and definitions of hypergraphs
and tensors which will be used in the sequel.
A hypergraph $H =(V(H),E(H))$ is a pair consisting of a vertex set $V(H)=[n]$, and a
set $E(H)=\{e_1,e_2,\ldots,e_m\}$ of subsets of $V(H)$, the edges of $H$. For a vertex
$i\in V(H)$, the {\em degree} of $i$, denoted by $d_H(i)$ or simply by $d_i$, is the
number of edges containing $i$. A hypergraph is called {\em regular} if all its vertices
have the same degree, otherwise it is {\em irregular}. The minimum and maximum degrees among
the vertices of $H$ are denoted by $\delta(H)$ and $\Delta(H)$, respectively. An $r$-uniform
hypergraph $H$ is called $k$-{\em partite} if its vertex set $V(H)$ can be partitioned into
$k$ sets such that each edge contains at most one vertex from each set. An edge maximal
$k$-partite $r$-uniform hypergraph is called {\em complete $k$-partite}.
Let $H_1$ and $H_2$ be two $r$-uniform hypergraphs. Denote by $H_1\cup H_2$ the {\em union} of
$H_1$ and $H_2$, i.e., $V(H_1\cup H_2)=V(H_1)\cup V(H_2)$ and $E(H_1\cup H_2)=E(H_1)\cup E(H_2)$.
The {\em direct product} $H_1\times H_2$ of $H_1$ and $H_2$ is defined as an $r$-uniform
hypergraph with vertex set $V(H_1\times H_2) = V(H_1)\times V(H_2)$, and
$\{(i_1,j_1),(i_2,j_2),\ldots,(i_r,j_r)\}\in E(H_1\times H_2)$ if and only if
$\{i_1,i_2,\ldots,i_r\}\in E(H_1)$ and $\{j_1,j_2,\ldots,j_r\}\in E(H_2)$.
For positive integers $r$ and $n$, a real {\em tensor} $\mathcal{A}=(a_{i_1i_2\cdots i_r})$ of
order $r$ and dimension $n$ refers to a multidimensional array (also called {\em hypermatrix})
with entries $a_{i_1i_2\cdots i_r}$ such that $a_{i_1i_2\cdots i_r}\in\mathbb{R}$ for all
$i_1$, $i_2$, $\ldots$, $i_r\in[n]$. The following general product of tensors was defined by
Shao \cite{Shao:General product}, which is a generalization of the matrix case.
\begin{definition}[\cite{Shao:General product}]
\label{defn:General product}
Let $\mathcal{A}$ (and $\mathcal{B}$) be an order $r\geq 2$ (and order $k\geq 1$),
dimension $n$ tensor. Define the product $\mathcal{AB}$ to be the following tensor $\mathcal{C}$
of order $(r-1)(k-1)+1$ and dimension $n$
\[
c_{i\alpha_1\cdots\alpha_{r-1}}=\sum_{i_2,\ldots,i_r=1}^na_{ii_2\cdots i_r}
b_{i_2\alpha_1}\cdots b_{i_r\alpha_{r-1}}
~~(i\in [n], \alpha_1,\ldots,\alpha_{r-1}\in [n]^{k-1}).
\]
\end{definition}
From the above definition, if $x=(x_1,x_2,\ldots,x_n)^{\mathrm{T}}$ is a vector, we have
\begin{equation}
\label{eq:Ax equation}
(\mathcal{A}x)_i=\sum_{i_2,\ldots,i_r=1}^na_{ii_2\cdots i_r}x_{i_2}\cdots x_{i_r},~~
i\in [n].
\end{equation}
In 2005, Qi \cite{Qi2005} and Lim \cite{Lim} independently introduced the definition of
eigenvalues of a tensor. Let $\mathcal{A}$ be an order $r$ dimension $n$ tensor,
$x=(x_1,x_2,\ldots,x_n)^{\mathrm{T}}\in\mathbb{C}^n$ be a column vector of dimension $n$.
If there exists a number $\lambda\in\mathbb{C}$ and a nonzero vector $x\in\mathbb{C}^{n}$
such that
\[
\mathcal{A}x=\lambda x^{[r-1]},
\]
then $\lambda$ is called an {\em eigenvalue} of $\mathcal{A}$, $x$ is called an
{\em eigenvector} of $\mathcal{A}$ corresponding to the eigenvalue $\lambda$,
where $x^{[r-1]}$ is the Hadamard power of $x$, i.e.,
$x^{[r-1]}=(x_{1}^{r-1},x_2^{r-1},\ldots,x_{n}^{r-1})^{\mathrm{T}}$. The {\em spectral
radius} of $\mathcal{A}$, denoted by $\rho(\mathcal{A})$, is the maximum modulus of the
eigenvalues of $\mathcal{A}$.
In 2012, Cooper and Dutle \cite{Cooper:Spectra Uniform Hypergraphs}
defined the adjacency tensor $\mathcal{A}(H)$ for an $r$-uniform hypergraph $H$.
\begin{definition}
[\cite{Cooper:Spectra Uniform Hypergraphs}]
Let $H=(V(H),E(H))$ be an $r$-uniform hypergraph on $n$ vertices. The adjacency
tensor of $H$ is defined as the order $r$ and dimension $n$ tensor
$\mathcal{A}(H)=(a_{i_1i_2\cdots i_r})$, whose $(i_1i_2\cdots i_r)$-entry is
\[
a_{i_1i_2\cdots i_r}=\begin{cases}
\frac{1}{(r-1)!}, & \text{if}~\{i_1,i_2,\ldots,i_r\}\in E(H),\\
0, & \text{otherwise}.
\end{cases}
\]
\end{definition}
For an $r$-uniform hypergraph $H$, the spectral radius of $H$, denoted by $\rho(H)$,
is defined to be that of its adjacency tensor $\mathcal{A}(H)$. In general, an
$r$-uniform hypergraph $H$ can be decomposed into components $H_i=(V(H_i),E(H_i))$
for $i=1$, $2$, $\ldots$, $s$. Denote the spectral radii of $H$ and $H_i$ by $\rho(H)$
and $\rho(H_i)$, respectively. Theorem 3.3 in \cite{Qi2014} implies that
\[
\rho(H)=\max_{1\leq i\leq s}\{\rho(H_i)\}.
\]
Friedland et al. \cite{Friedland2013} defined the weak irreducibility of a nonnegative tensor
by using the strong connectivity of a graph associated to the nonnegative tensor. Later,
Yang et al. \cite{Yang2011-2} presented an equivalent definition of the weak irreducibility
from the algebraic point of view.
\begin{definition}[\cite{Yang2011-2}]
Let $\mathcal{A}$ be an order $r$ dimension $n$ tensor. If there exists a nonempty
proper index subset $I\subseteq [n]$ such that
\[
a_{i_1i_2\cdots i_r}=0~~(\forall~i_1\in I,~\text{and at least one of}~i_2,\ldots,i_r\notin I),
\]
then $\mathcal{A}$ is called weakly reducible. If $\mathcal{A}$ is not weakly reducible,
then $\mathcal{A}$ is called weakly irreducible.
\end{definition}
It was proved that an $r$-uniform hypergraph $H$ is connected if and only if its adjacency
tensor $\mathcal{A}(H)$ is weakly irreducible (see \cite{Pearson2014}).
Let $\mathcal{A}=(a_{i_1i_2\cdots i_r})$ be a nonnegative tensor of order $r$ and dimension $n$.
For any $i\in [n]$, we write
\[
r_i(\mathcal{A})=\sum_{i_2,\ldots,i_r=1}^na_{ii_2\cdots i_r}.
\]
The following bound for $\rho(\mathcal{A})$ in terms of $r_i(\mathcal{A})$ was proposed
in \cite{Yang2010}, and the conditions for the equal cases were studied in \cite{Fan2015}.
\begin{lemma}[\cite{Fan2015,Yang2010}]
\label{lem:rho(A) upper bound}
Let $\mathcal{A}$ be a nonnegative tensor of order $r$ and dimension $n$. Then
\begin{equation}
\label{eq:r_i equality}
\min_{1\leq i\leq n}r_i(\mathcal{A})\leq\rho(\mathcal{A})\leq
\max_{1\leq i\leq n}r_i(\mathcal{A}).
\end{equation}
Moreover, if $\mathcal{A}$ is weakly irreducible, then one of the equalities in
\eqref{eq:r_i equality} holds if and only if
$r_1(\mathcal{A})=r_2(\mathcal{A})=\cdots=r_n(\mathcal{A})$.
\end{lemma}
\begin{lemma}[\cite{Shao:General product,Yang2011}]
\label{lem:Same spectra}
Let $\mathcal{A}$ and $\mathcal{B}$ be two order $r$ dimension $n$ tensors.
If there is a nonsingular diagonal matrix $P$ of order $n$ such that
$\mathcal{B}=P^{-(r-1)}\mathcal{A}P$, then $\mathcal{A}$ and $\mathcal{B}$
have the same eigenvalues.
\end{lemma}
\begin{remark}
Let $P=\text{diag}\{p_1,p_2,\ldots,p_n\}$ be a nonsingular diagonal matrix,
and $\mathcal{A}(H)=(a_{i_1i_2\cdots i_r})$ be the adjacency tensor of an
$r$-uniform hypergraph $H$. According to \autoref{defn:General product}, we have
\begin{equation}
\label{eq:PAP}
(P^{-(r-1)}\mathcal{A}(H)P)_{i_1i_2\cdots i_r}=
p_{i_1}^{-(r-1)}a_{i_1i_2\cdots i_r}p_{i_2}\cdots p_{i_r}.
\end{equation}
\end{remark}
\begin{lemma}[\cite{Nikiforov}]
\label{lem:rho<m}
Let $H\in\mathcal{H}(n,m)$ be an $r$-uniform hypergraph. Then
\[
\rho(H)\leq\frac{r}{\sqrt[r]{r!}}m^{\frac{r-1}{r}}.
\]
Moreover, if $H$ is $r$-partite, then
\[
\rho(H)\leq m^{\frac{r-1}{r}},
\]
equality holds if and only if $H$ is complete $r$-partite.
\end{lemma}
The Weyl type inequality for uniform hypergraphs is stated as follows.
\begin{lemma}[\cite{Nikiforov}]
\label{lem:Weyl's inequality}
Let $H_1$ and $H_2$ be $r$-uniform hypergraphs. Then
\[
\rho(H_1\cup H_2)\leq \rho(H_1)+\rho(H_2).
\]
\end{lemma}
\section{Irregularity of uniform hypergraphs}
In this section, we shall prove \autoref{thm:Main result-1} and \autoref{thm:Main result-2}.
Before continuing, we present an upper bound for the spectral radius of an $r$-uniform
hypergraph, which generalizes a result in \cite{Berman2001}. It is noted that
the same result has been proved by Nikiforov \cite{Nikiforov2017}.
Here we add a characterization for the equality.
\begin{lemma}[\cite{Nikiforov2017}]\label{lem:r-partite}
Suppose that $H$ is a connected $r$-uniform hypergraph on $n$ vertices. Then
\begin{equation}
\label{eq:rho(H)<sqrt[r]}
\rho(H)\leq\max_{\{i_1,i_2,\ldots,i_r\}\in E(H)}
\left\{\sqrt[r]{d_{i_1}d_{i_2}\cdots d_{i_r}}\right\},
\end{equation}
with equality if and only if $d_{i_1}d_{i_2}\cdots d_{i_r}$ is a constant
for any $\{i_1,i_2,\ldots,i_r\}\in E(H)$.
\end{lemma}
\begin{proof}
Let $P=\text{diag}\,\{p_1,p_2,\ldots,p_n\}$ be a nonsingular diagonal matrix.
By \eqref{eq:PAP} we have
\begin{align*}
r_i(P^{-(r-1)}\mathcal{A}(H)P) & =
\sum_{i_2,\ldots,i_r=1}^np_i^{-(r-1)}a_{ii_2\cdots i_r}p_{i_2}\cdots p_{i_r}\\
& =\sum_{\{i,i_2,\ldots,i_r\}\in E(H)}p_i^{-(r-1)}p_{i_2}\cdots p_{i_r}.
\end{align*}
Setting $p_i=\sqrt[r]{d_i}$, $i\in [n]$, we see
\begin{align}
r_i(P^{-(r-1)}\mathcal{A}(H)P) & =
\sum_{\{i,i_2,\ldots,i_r\}\in E(H)}\frac{\sqrt[r]{d_id_{i_2}\cdots d_{i_r}}}{d_i} \label{eq:r_i}\\
& \leq\max_{\{i_1,i_2,\ldots,i_r\}\in E(H)}\left\{\sqrt[r]{d_{i_1}d_{i_2}\cdots d_{i_r}}\right\}
\label{eq:rho(H)<max}.
\end{align}
By \autoref{lem:rho(A) upper bound} and \autoref{lem:Same spectra}, we deduce that
\begin{equation}
\label{eq:rho(H)<max1}
\rho(H)=\rho(P^{-(r-1)}\mathcal{A}(H)P) \leq
\max_{1\leq i\leq n}\left\{r_i(P^{-(r-1)}\mathcal{A}(H)P)\right\}.
\end{equation}
Then \eqref{eq:rho(H)<max} and \eqref{eq:rho(H)<max1} imply that
\[
\rho(H)\leq\max_{\{i_1,i_2,\ldots,i_r\}\in E(H)}\left\{\sqrt[r]{d_{i_1}d_{i_2}\cdots d_{i_r}}\right\}.
\]
If the equality in \eqref{eq:rho(H)<sqrt[r]} holds, then the equality in \eqref{eq:rho(H)<max1}
holds. Since $H$ is connected, $\mathcal{A}(H)$ is weakly irreducible. Therefore,
$P^{-(r-1)}\mathcal{A}(H)P$ is also weakly irreducible. By \autoref{lem:rho(A) upper bound},
$r_i(P^{-(r-1)}\mathcal{A}(H)P)$ is a constant, $i\in [n]$. Furthermore, the equality
in \eqref{eq:rho(H)<max} holds. So, $d_{i_1}d_{i_2}\cdots d_{i_r}\equiv c$ is a
constant for any $\{i_1,i_2,\ldots,i_r\}\in E(H)$. Conversely, assume that for any
$\{i_1,i_2,\ldots,i_r\}\in E(H)$, $d_{i_1}d_{i_2}\cdots d_{i_r}\equiv c$ is a constant.
It follows from \autoref{lem:rho(A) upper bound} and \eqref{eq:r_i} that
\[
\sqrt[r]{c}=\min_{1\leq i\leq n}\left\{r_i(P^{-(r-1)}\mathcal{A}(H)P)\right\}
\leq\rho(H)\leq\max_{1\leq i\leq n}
\left\{r_i(P^{-(r-1)}\mathcal{A}(H)P)\right\}=\sqrt[r]{c},
\]
which yields that $\rho(H)=\sqrt[r]{c}$, as desired.
\end{proof}
\begin{remark}
We now consider a similar topic as \autoref{lem:r-partite} which is of independent
interest. Suppose that $H$ is a connected $r$-uniform hypergraph on $n$ vertices. Let
$x=\frac{1}{\sqrt[r]{rm}}(\sqrt[r]{d_1},\sqrt[r]{d_2},\ldots,\sqrt[r]{d_n})^{\mathrm{T}}$
be a column vector. By \cite[Theorem 2]{Qi2013} and AM--GM inequality, we have
\begin{align*}
\rho(H) & \geq x^{\mathrm{T}}(\mathcal{A}x)=\frac{1}{m}\sum_{\{i_1,i_2,\ldots,i_r\}\in E(H)}
\sqrt[r]{d_{i_1}d_{i_2}\cdots d_{i_r}}\\
& \geq\Bigg(\prod_{\{i_1,i_2,\ldots,i_r\}\in E(H)}
\sqrt[r]{d_{i_1}d_{i_2}\cdots d_{i_r}}\Bigg)^{\frac{1}{m}}.
\end{align*}
It follows that
\[
\rho(H)^r\geq\Bigg(\prod_{\{i_1,i_2,\ldots,i_r\}\in E(H)}d_{i_1}d_{i_2}\cdots d_{i_r}\Bigg)^{\frac{1}{m}}.
\]
From the inequality between geometric and harmonic means, we obtain
\begin{equation}
\label{eq:GM-HM}
\rho(H)^r\geq\frac{m}{\displaystyle\sum_{\{i_1,i_2,\ldots,i_r\}\in E(H)}\frac{1}{d_{i_1}d_{i_2}\cdots d_{i_r}}}.
\end{equation}
Clearly, equality in \eqref{eq:GM-HM} holds if and only if $d_{i_1}d_{i_2}\cdots d_{i_r}$
is a constant for any $\{i_1,i_2,\ldots,i_r\}\in E(H)$. The above inequality generalizes a
result in \cite{HoffmanWolfe1995} (see also \cite{SimicStevanovic2003}).
\end{remark}
The following lemma is needed, and the arguments have been used in
\cite{Nikiforov:Degree deviation}.
\begin{lemma}\label{lem:Regular}
Let $H\in\mathcal{H}(n,m)$ be an $r$-uniform hypergraph. Then there exists an
$r$-uniform hypergraph $\widehat{H}\in\mathcal{H}(n,m)$ such that
$\Delta(\widehat{H})-\delta(\widehat{H})\leq 1$
and $\widehat{H}$ differs from $H$ in at most $s(H)$ edges.
\end{lemma}
\begin{proof}
Denote $d=\left\lfloor rm/n\right\rfloor$ for short. We first show that for
any $H\in\mathcal{H}(n,m)$, there exists $H^*\in\mathcal{H}(n,m)$ such that
either $\delta(H^*)=d$ or $\Delta(H^*)=d+1$. If $\delta(H)\leq d-1$ and
$\Delta(H)\geq d+2$, then we select $i$, $j\in V(H)$ such that $d_i=\delta(H)$
and $d_j=\Delta(H)$. Since $d_j>d_i$, there exists an edge $e\in E(H)$ such
that $j\in e$, $i\notin e$ and $e':=(e\backslash\{j\})\cup\{i\}\notin E(H)$.
Denote $H':=H-e+e'$. Clearly, $H'\in\mathcal{H}(n,m)$ and $H'$ differs from
$H$ in two edges. Moreover, we have
\begin{align*}
s(H){-}s(H') & {=} \left|d_i{-}\frac{rm}{n}\right|{+}\left|d_j{-}\frac{rm}{n}\right|
{-}\left|(d_i{+}1){-}\frac{rm}{n}\right|{-}\left|(d_j{-}1){-}\frac{rm}{n}\right|\\
& {=} \left(\frac{rm}{n}{-}d_i\right)\!\!+\!\!\left(d_j{-}\frac{rm}{n}\right)
\!\!-\!\!\left(\frac{rm}{n}{-}(d_i{+}1)\right)\!\!-\!\!\left((d_j{-}1){-}\frac{rm}{n}\right)\!=\!2.
\end{align*}
Repeating the above process, we can get an $r$-uniform hypergraph $H^*\in\mathcal{H}(n,m)$
such that either $\delta(H^*)=d$ or $\Delta(H^*)=d+1$, and $H^*$ differs from $H$ in
$(s(H)-s(H^*))$ edges.
Without loss of generality, we may assume that $\delta(H^*)=d$ (the other case can be
proved similarly). If $\Delta(H^*)\leq d+1$, then $\widehat{H}=H^*$ is the desired
hypergraph. Otherwise, assume that $\Delta(H^*)\geq d+2$. Denote
\begin{align*}
A & =\{i\in V(H^*)\,|\,d_{H^*}(i)=d\}, \\
B & =\{i\in V(H^*)\,|\,d_{H^*}(i)=d+1\},\\
C & =\{i\in V(H^*)\,|\,d_{H^*}(i)\geq d+2\},
\end{align*}
and $|A|=k$, $|B|=s$. Let $i\in A$, $j\in C$ with $d_{H^*}(j)=\Delta(H^*)$. Notice that
$d_{H^*}(j)>d$, then there is an edge $e\in E(H^*)$ such that $j\in e$, $i\notin e$ and
$e'':=(e\backslash\{j\})\cup\{i\}\notin E(H^*)$. Let $H'':=H^*-e+e''$. Then
$H''\in\mathcal{H}(n,m)$ and $H''$ differs from $H^*$ in two edges. Repeating the process at most
$\ell:=\sum_{u\in C}(d_{H^*}(u)-d-1)$ times, we can obtain the desired $r$-uniform
hypergraph $\widehat{H}\in\mathcal{H}(n,m)$. Therefore, $\widehat{H}$ differs from $H$ in at most
$(s(H)-s(H^*)+2\ell)$ edges.
In the following, we will show that $s(H)-s(H^*)+2\ell\leq s(H)$. Consider the $r$-uniform
hypergraph $H^*$, we have
\begin{align*}
\frac{rm}{n} & =\frac{1}{n}
\Bigg(\sum_{i\in A}d_{H^*}(i)+\sum_{i\in B}d_{H^*}(i)+\sum_{i\in C}d_{H^*}(i)\Bigg)\\
& =\frac1n\Bigg(kd+s(d+1)+\sum_{i\in C}d_{H^*}(i)\Bigg)\\
& =\frac1n[kd+s(d+1)+(n-k-s)(d+1)+\ell]\\
& =d+1+\frac{\ell-k}{n}.
\end{align*}
Recall that $d=\lfloor rm/n\rfloor$. Hence $\ell<k$. Furthermore,
\begin{align*}
s(H^*) & =\sum_{i\in A}\left|d_{H^*}(i)-\frac{rm}{n}\right|
+\sum_{i\in B}\left|d_{H^*}(i)-\frac{rm}{n}\right|
+\sum_{i\in C}\left|d_{H^*}(i)-\frac{rm}{n}\right|\\
& =k\left(\frac{rm}{n}-d\right)+s\left(d+1-\frac{rm}{n}\right)+
\sum_{i\in C}\left(d_{H^*}(i)-\frac{rm}{n}\right)\\
& =k\left(1+\frac{\ell-k}{n}\right)+s\cdot\frac{k-\ell}{n}+\ell+
(n-k-s)\cdot\frac{k-\ell}{n}\\
& =2k\left(1-\frac{k-\ell}{n}\right)+2\ell>2\ell,
\end{align*}
then the result follows.
\end{proof}
By applying \autoref{lem:Regular} to each vertex class of an $r$-partite $r$-uniform
hypergraph, we can obtain the following corollary.
\begin{corollary}\label{coro:Regular}
Let $H\in\mathcal{H}(n,m)$ be an $r$-partite $r$-uniform hypergraph. Then
there exists an $r$-partite $r$-uniform hypergraph $\widehat{H}$ such that
$|d_{\widehat{H}}(i)-d_{\widehat{H}}(j)|\leq 1$ for any $i$, $j$ belonging
to the same vertex class and $\widehat{H}$ differs from $H$ in at most $s_r(H)$
edges.
\end{corollary}
In the sequel, we shall prove \autoref{thm:Main result-1}.
For this purpose, we need the following concept.
Let $H$ be an $r$-uniform hypergraph on $n$ vertices and $k_1$, $k_2$, $\ldots$,
$k_n$ be positive integers. Denote $H(k_1,k_2,\ldots,k_n)$ for the $r$-uniform
hypergraph obtained by replacing each vertex $i\in V(H)$ with a set $U_i$
of size $k_i$ and each edge $\{i_1,i_2,\ldots,i_r\}\in E(H)$ with a complete
$r$-partite $r$-uniform hypergraph with vertex classes $U_{i_1}$, $U_{i_2}$,
$\ldots$, $U_{i_r}$. The hypergraph $H(k_1,k_2,\ldots,k_n)$ is called a
{\em blow-up} of $H$.
\begin{lemma}[\cite{Nikiforov}]
\label{lem:Blown-up}
Let $H$ be an $r$-uniform hypergraph on $n$ vertices. Then
\[
\rho(H(k,k,\ldots,k))=k^{r-1}\rho(H).
\]
\end{lemma}
\noindent{\bfseries Proof of \autoref{thm:Main result-1}.}
By \autoref{coro:Regular}, there exists an $r$-partite $r$-uniform hypergraph
$\widehat{H}\in\mathcal{H}(n,m)$ such that $|d_{\widehat{H}}(i)-d_{\widehat{H}}(j)|\leq 1$
for any $i$, $j$ belonging to the same vertex class and $\widehat{H}$ differs
from $H$ in at most $s_r(H)$ edges. Therefore $2|E(H)\backslash E(\widehat{H})|\leq s_r(H)$.
We need the following two claims.
\begin{claim}
\label{claim1}
$\displaystyle\sqrt[r]{n_1n_2\cdots n_r}\geq\sqrt[r]{n/r}$.
\end{claim}
\noindent{\bf Proof of Claim 1.}
Without loss of generality, we assume that $n_1-n_2\geq0$. We will replace the
pair $n_1$ and $n_2$ by $n_1'=n_1+1$ and $n_2'=n_2-1$. Notice that $n_1'$ and $n_2'$
have the same sum as $n_1$ and $n_2$, while their product is smaller. To be precise,
$n_1'n_2'=(n_1+1)(n_2-1)<n_1n_2$, and therefore $(n_1'n_2')n_3\cdots n_r<n_1n_2n_3\cdots n_r$.
Repeating this process, we see that $\sqrt[r]{n_1n_2\cdots n_r}$ attains its minimum when
one of $n_1$, $n_2$, $\ldots$, $n_r$ is $(n-r+1)$ and the others are $1$. It follows that
$\sqrt[r]{n_1n_2\cdots n_r}\geq\sqrt[r]{n-r+1}\geq\sqrt[r]{n/r}$.
The proof of the claim is completed.
\begin{claim}
\label{claim2}
$\displaystyle\rho(\widehat{H})\leq
\frac{m}{\sqrt[r]{n_1n_2\cdots n_r}}+\left(\frac{n}{r}\right)^{1-\frac{1}{r}}$.
\end{claim}
\noindent{\bf Proof of Claim 2.}
Let $\Delta_i=\max\{d_j\,|\,j\in V_i(\widehat{H})\}$, $i\in[r]$, where
$V_1(\widehat{H})$, $V_2(\widehat{H})$, $\ldots$, $V_r(\widehat{H})$
are the vertex classes of $\widehat{H}$. Hence $\Delta_i\leq m/n_i+1$
by \autoref{coro:Regular} and the proof of \autoref{lem:Regular}.
Using \autoref{lem:r-partite} gives
\[
\rho(\widehat{H})\leq\sqrt[r]{\Delta_1\Delta_2\cdots\Delta_r}
\leq\sqrt[\leftroot{-2}\uproot{14}r]{\left(\frac{m}{n_1}+1\right)
\left(\frac{m}{n_2}+1\right)\cdots\left(\frac{m}{n_r}+1\right)}.
\]
It suffices to show that
\[
\prod_{i=1}^r\left(\frac{m}{n_i}+1\right)\leq
\left(\frac{m}{\sqrt[r]{n_1n_2\cdots n_r}}+\left(\frac{n}{r}\right)^{1-\frac{1}{r}}\right)^r.
\]
Denote by $e_0(n_1,n_2,\ldots,n_r)=1$ and
\[
e_j(n_1,n_2,\ldots,n_r)=
\sum_{1\leq i_1<i_2<\cdots<i_j\leq r}n_{i_1}n_{i_2}\cdots n_{i_j},~j=1,2,\ldots,r,
\]
the $j$-th elementary symmetric polynomials in $n_1$, $n_2$, $\ldots$, $n_r$.
By \autoref{claim1} and Maclaurin's inequality, for any $ i\in[r]$ we have
\[
\left(\frac{n}{r}\right)^{1-\frac{1}{r}}\sqrt[r]{n_1n_2\cdots n_r}\geq\frac{n}{r}=
\frac{e_1(n_1,n_2,\ldots,n_r)}{\binom{r}{1}}\geq
\left(\frac{e_i(n_1,n_2,\ldots,n_r)}{\binom{r}{i}}\right)^{\frac{1}{i}},
\]
which yields that
\[
e_i(n_1,n_2,\ldots,n_r)\leq\binom{r}{i}
\left(\frac{n}{r}\right)^{\frac{(r-1)i}{r}}(n_1n_2\cdots n_r)^{\frac{i}{r}}.
\]
Notice that
\[
\prod_{i=1}^r\left(\frac{m}{n_i}+1\right)=
\sum_{i=0}^r\frac{e_i(n_1,n_2,\ldots,n_r)}{n_1n_2\cdots n_r}\cdot m^{r-i}.
\]
Therefore, we obtain that
\begin{align*}
\prod_{i=1}^r\left(\frac{m}{n_i}+1\right) & \leq
\sum_{i=0}^r\binom{r}{i}\left(\frac{m}{\sqrt[r]{n_1n_2\cdots n_r}}\right)^{r-i}
\left(\frac{n}{r}\right)^{\frac{(r-1)i}{r}}\\
& =\left(\frac{m}{\sqrt[r]{n_1n_2\cdots n_r}}+\left(\frac{n}{r}\right)^{1-\frac{1}{r}}\right)^r.
\end{align*}
The proof of the claim is completed.
We will take the proof technique from \cite{Nikiforov:Degree deviation}. Let
$H_1=(V(H),E(H)\cup E(\widehat{H}))$ and $H_2=(V(H),E(H)\backslash E(\widehat{H}))$.
Clearly, $H$ is a subhypergraph of $H_1$, then $\rho(H)\leq\rho(H_1)$.
Therefore, by \autoref{lem:Weyl's inequality}, we have
\[
\rho(H)\leq\rho(H_1)=\rho(H_2\cup\widehat{H})\leq\rho(H_2)+\rho(\widehat{H}).
\]
It follows from \autoref{lem:rho<m} that
\begin{equation}
\label{eq:rho(H)-rho(H')}
\rho(H)-\rho(\widehat{H}) \leq\rho(H_2)\leq|E(H)\backslash E(\widehat{H})|^{\frac{r-1}{r}}
\leq\left(\frac{s_r(H)}{2}\right)^{\frac{r-1}{r}}.
\end{equation}
Finally, by \eqref{eq:rho(H)-rho(H')} and \autoref{claim2}, we obtain
\begin{equation}
\label{eq:Finally}
\rho(H)-\frac{m}{\sqrt[r]{n_1n_2\cdots n_r}}\leq
\left(\frac{s_r(H)}{2}\right)^{\frac{r-1}{r}}+\left(\frac{n}{r}\right)^{1-\frac{1}{r}}.
\end{equation}
Let $H(k,k,\ldots,k)$ be a blow-up of $H$. Clearly,
\[
|V(H(k,k,\ldots,k))|=kn,~|E(H(k,k,\ldots,k))|=k^rm.
\]
Applying \eqref{eq:Finally} for $H(k,k,\ldots,k)$, we have
\begin{align*}
\rho(H(k,k,\ldots,k))-\frac{k^rm}{\sqrt[r]{(kn_1)(kn_2)\cdots (kn_r)}} & \leq
\left(\frac{s_r(H(k,k,\ldots,k))}{2}\right)^{\frac{r-1}{r}}+\left(\frac{kn}{r}\right)^{1-\frac{1}{r}}.
\end{align*}
On the other hand, notice that $\rho(H(k,k,\ldots,k))=k^{r-1}\rho(H)$ by
\autoref{lem:Blown-up} and
\begin{align*}
s_r(H(k,k,\ldots,k))
& =k\sum_{i\in[r]}\sum_{j\in V_i}\left|d_{H(k,\ldots,k)}(j)-\frac{k^rm}{kn_i}\right|\\
& =k^r\sum_{i\in[r]}\sum_{j\in V_i}\left|d_{H}(j)-\frac{m}{n_i}\right|\\
& =k^rs_r(H),
\end{align*}
which implies that
\[
k^{r-1}\rho(H)-\frac{k^{r-1}m}{\sqrt[r]{n_1n_2\cdots n_r}}
\leq\left(\frac{k^rs_r(H)}{2}\right)^{\frac{r-1}{r}}+\left(\frac{kn}{r}\right)^{1-\frac{1}{r}}.
\]
Therefore, we obtain
\[
\rho(H)-\frac{m}{\sqrt[r]{n_1n_2\cdots n_r}}\leq\left(\frac{s_r(H)}{2}\right)^{\frac{r-1}{r}}+
\left(\frac{n}{r}\right)^{1-\frac{1}{r}}\cdot\frac{1}{k^{r+\frac{1}{r}-2}}.
\]
Taking the limit $k\to +\infty$ on both sides of the above inequality, we obtain the desired result.
The proof is completed. \qed
In the following we will give a proof of \autoref{thm:Main result-2} by virtue of
\autoref{thm:Main result-1} and the following result.
\begin{lemma}[\cite{Liu:Bounds concerning degrees}]
\label{lem:rho bound concerning degrees}
Suppose that $H$ is an $r$-uniform hypergraph on $n$ vertices. Let $d_i$ be the degree
of vertex $i$ of $H$, and $\rho(H)$ be the spectral radius of $H$. Then
\[
\rho(H)\geq
\left(\frac1n\sum_{i=1}^nd^{\frac{r}{r-1}}_i\right)^{\frac{r-1}{r}}.
\]
If $H$ is connected and $r\geq 3 $, then the equality holds if and only if $H$ is regular.
\end{lemma}
\noindent{\bfseries Proof of \autoref{thm:Main result-2}.}
We first prove the left-hand inequality. For short, denote $\rho(H)=\rho$. By the AM--GM inequality, we have
\begin{align}\label{eq:AM-MB}
\frac{1}{r\!-\!1}\rho^{\frac{r}{r-1}}\!+\!\left(\frac{rm}{n}\right)^{\frac{r}{r-1}} \!&\! =
\frac{1}{r\!-\!1}\Bigg[\rho^{\frac{r}{r-1}}+\underbrace{\left(\frac{rm}{n}\right)^{\frac{r}{r-1}}+
\cdots+\left(\frac{rm}{n}\right)^{\frac{r}{r-1}}}_{r-1}\Bigg]\nonumber\\
& \geq\frac{r^2m}{(r-1)n}\rho^{\frac{1}{r-1}}.
\end{align}
Therefore, by \autoref{lem:rho bound concerning degrees} and \eqref{eq:AM-MB}, we have
\begin{align*}
v(H) & =\frac{1}{n}\sum_{i=1}^nd_i^{\frac{r}{r-1}}-\left(\frac{rm}{n}\right)^{\frac{r}{r-1}}\\
& \leq\rho^{\frac{r}{r-1}}-\left(\frac{rm}{n}\right)^{\frac{r}{r-1}}\\
& =\frac{r}{r-1}\rho^{\frac{r}{r-1}}-
\left[\frac{1}{r\!-\!1}\rho^{\frac{r}{r-1}}\!+\!\left(\frac{rm}{n}\right)^{\frac{r}{r-1}}\right]\\
& \leq\frac{r}{r-1}\rho^{\frac{1}{r-1}}\left(\rho-\frac{rm}{n}\right).
\end{align*}
Notice that $\rho\leq\frac{r}{\sqrt[r]{r!}}m^{\frac{r-1}{r}}$ by \autoref{lem:rho<m}. Hence
\[
v(H)\leq\frac{r}{r-1}\left(\frac{r}{\sqrt[r]{r!}}\right)^{\frac{1}{r-1}}m^{\frac{1}{r}}
\left(\rho-\frac{rm}{n}\right).
\]
Now we prove the right-hand inequality. Denote by $K_r^r$ the $r$-uniform hypergraph of order $r$ consisting
of a single edge. Let $\widetilde{H}$ be the direct product of $H$ and $K_r^r$,
i.e., $\widetilde{H}=H\times K_r^r$. Clearly, $\widetilde{H}$ is an $r$-partite $r$-uniform
hypergraph with partition
\[
V(\widetilde{H})=\bigcup_{j=1}^r\left(V(H)\times\{j\}\right).
\]
By \autoref{thm:Main result-1} we have
\begin{equation}
\label{eq:rho(tilde{H})-rm/n}
\rho(\widetilde{H})-\frac{|E(\widetilde{H})|}{n}\leq
\left(\frac{s_r(\widetilde{H})}{2}\right)^{\frac{r-1}{r}}.
\end{equation}
Notice that $|E(\widetilde{H})|=r!m$ and $d_{\widetilde{H}}(i,j)=(r-1)!d_i$ for any
$i\in V(H)$, $j\in[r]$. Therefore
\begin{align*}
s_r(\widetilde{H}) & =\sum_{j=1}^r\sum_{(i,j)\in V(\widetilde{H})}
\left|d_{\widetilde{H}}((i,j))-\frac{r!m}{rn}\right|\\
& =r\sum_{i\in V(H)}\left|(r-1)!d_{H}(i)-\frac{r!m}{rn}\right|\\
& =r!s(H).
\end{align*}
By \cite[Claim 4]{Liu:Bounds concerning degrees}, we know $\rho(\widetilde{H})=(r-1)!\rho(H)$.
It follows from \eqref{eq:rho(tilde{H})-rm/n} that
\[
(r-1)!\rho(H)-\frac{r!m}{n}\leq\left(\frac{r!s(H)}{2}\right)^{\frac{r-1}{r}},
\]
and the assertion follows by simple algebra. \qed
\section*{References}
\end{document}
|
\begin{document}
\title[Gaussian bounds for heat kernels on the ball and simplex]
{Gaussian bounds for the heat kernels \\on the ball and simplex: Classical approach}
\author[G. Kerkyacharian]{Gerard Kerkyacharian}
\address{LPSM, CNRS-UMR 7599, and Crest}
\email{[email protected]}
\author[P. Petrushev]{Pencho Petrushev}
\address{Department of Mathematics, University of South Carolina, Columbia, SC 29208}
\email{[email protected]}
\author[Y. Xu]{Yuan Xu}
\address{Department of Mathematics, University of Oregon, Eugene, Oregon 97403-1222}
\email{[email protected]}
\subjclass[2010]{42C05, 35K08}
\keywords{Heat kernel, Gaussian bounds, orthogonal polynomials, ball, simplex}
\thanks{The first author has been supported by ANR Forewer.
The second author has been supported by NSF Grant DMS-1714369.
The third author has been supported by NSF Grant DMS-1510296.}
\thanks{Corresponding author: Pencho Petrushev, E-mail: [email protected]}
\begin{abstract}
Two-sided Gaussian bounds are established for the weighted heat kernels on the unit ball and simplex in ${\mathbb R}^d$
generated by classical differential operators whose eigenfunctions are algebraic polynomials.
\end{abstract}
\date{January 21, 2018}
\maketitle
\section{Introduction}
Two-sided Gaussian bounds have been established for heat kernels in various settings.
For example, Gaussian bounds for the Jacobi heat kernel on $[-1, 1]$
with weight $(1-x)^\alpha(1+x)^\beta$, $\alpha, \beta>-1$,
are obtained in \cite[Theorem 7.2]{CKP} and \cite[Theorem~5.1]{KPX},
and also in \cite{NS} in the case when $\alpha, \beta\ge -1/2$
(see \eqref{gauss-int} below).
In this article we establish two-sided Gaussian estimates for the heat kernels
generated by classical differential operators whose eigenfunctions are algebraic polynomials
in the weighted cases on the unit ball and simplex in ${\mathbb R}^d$.
Such estimates are also established in \cite{KPX} using a general method that utilizes known
two-sided Gaussian estimates for the heat kernels generated by weighted Laplace operators on Riemannian manifolds.
Here we derive these results directly from the Gaussian bounds for the Jacobi heat kernel.
Such a direct method leads to working in somewhat restricted range
for the parameters of the weights (commonly used in the literature).
We next describe our results in detail.
We shall use standard notation. In particular, positive constants will be denoted by
$c, c', \tilde{c}, c_1, c_2, \dots$ and they may vary at every occurrence.
Most constants will depend on parameters that will be clear from the context.
The notation $a\sim b$ will stand for $c_1\le a/b \le c_2$.
The functions that we deal with in this article are assumed to be real-valued.
\subsection{Heat kernel on the unit ball}\label{subsec:ball}
Consider the operator
\begin{equation}\label{D-mu}
{\mathcal D}_\mu:= \sum_{i=1}^d (1-x_i^2)\partial^2_i-2\sum_{1\le i < j \le d}x_i x_j\partial_i\partial_j
- (d+2 \mu)\sum_{i=1}^d x_i \partial_i,
\end{equation}
acting on sufficiently smooth functions on the unit ball
${\mathbb B}^d:=\big\{x\in{\mathbb R}^d: \|x\|<1\big\}$ in ${\mathbb R}^d$
equipped with the measure
\begin{equation}\label{def-meas-ball}
d\nu_\mu = w_\mu(x)dx := (1-\| x\|^2)^{\mu-1/2} dx, \quad \mu\ge 0,
\end{equation}
and the distance
\begin{equation}\label{dist-ball}
d_{\mathbb B}(x,y) := \arccos \big(\langle x, y\rangle + \sqrt{1-\| x\|^2}\sqrt{1-\| y\|^2}\big),
\end{equation}
where $\langle x, y\rangle$ is the inner product of $x, y\in {\mathbb R}^d$
and $\|x\|:= \sqrt{\langle x, x\rangle}$.
As will be shown the operator ${\mathcal D}_\mu$ is symmetric and $-{\mathcal D}_\mu$ is positive in $L^2({\mathbb B}^d, w_\mu)$.
Furthermore, ${\mathcal D}_\mu$ is essentially self-adjoint.
Denote
\begin{equation}\label{def-ball-ball}
B_{\mathbb B}(x, r):=\{y\in {\mathbb R}^d: d_{\mathbb B}(x,y)<r\}
\quad\hbox{and}\quad
V_{\mathbb B}(x, r):= \nu_\mu(B_{\mathbb B}(x, r)).
\end{equation}
As is well known (see, e.g. \cite[Lemma 11.3.6]{DaiX})
\begin{equation}\label{V-ball-ball}
V_{\mathbb B}(x, r) \sim r^d(1-\|x\|^2+r^2)^\mu.
\end{equation}
Denote by ${\mathcal V}_n(w_\mu)$ the set of all algebraic polynomials of degree $n$ in $d$ variables
that are orthogonal to lower degree polynomials in $L^2({\mathbb B}^d, w_\mu)$,
and let ${\mathcal V}_0(w_\mu)$ be the set of all constants.
As is well known (see e.g. \cite[\S2.3.2]{DX}) ${\mathcal V}_n(w_\mu)$, $n=0, 1, \dots$, are eigenspaces of the operator ${\mathcal D}_\mu$,
more precisely,
\begin{equation}\label{eigen-ball}
{\mathcal D}_\mu P=-n(n+d+2\mu-1)P,\quad \forall P\in{\mathcal V}_n(w_\mu).
\end{equation}
Let $P_n(w_\mu; x, y)$ be the kernel of the orthogonal projector onto ${\mathcal V}_n(w_\mu)$.
Then the semigroup $e^{t{\mathcal D}_\mu}$, $t>0$, generated by ${\mathcal D}_\mu$ has a (heat) kernel $e^{t{\mathcal D}_\mu}(x,y)$ of the form
\begin{equation}\label{ball-HK}
e^{t{\mathcal D}_\mu}(x,y)=\sum_{n=0}^\infty e^{-tn(n+2\lambda)}P_n(w_\mu; x, y),\quad \lambda:=\mu + (d-1)/2.
\end{equation}
We establish two-sided Gaussian bounds on $e^{t{\mathcal D}_\mu}(x,y)$:
\begin{theorem}\label{thm:Gauss-ball}
For any $\mu\ge 0$ there exist constants $c_1, c_2, c_3, c_4>0$ such that for all $x, y\in {\mathbb B}^d$ and $t>0$
\begin{equation} \label{gauss-ball}
\frac{c_1\exp\{- \frac{d_{\mathbb B}(x,y)^2}{c_2t}\}}{\big[V_{\mathbb B}(x, \sqrt t) V_{\mathbb B}(y, \sqrt t)\big]^{1/2}}
\le e^{t{\mathcal D}_\mu}(x,y)
\le \frac{c_3\exp\{- \frac{d_{\mathbb B}(x,y)^2}{c_4t}\}}{\big[V_{\mathbb B}(x, \sqrt t) V_{\mathbb B}(y, \sqrt t)\big]^{1/2}}.
\end{equation}
\end{theorem}
\subsection{Heat kernel on the simplex}\label{subsec:simplex}
We also establish two-sided Gaussian bounds for the heat kernel generated by the operator
\begin{equation}\label{def-D-simplex}
{\mathcal D}_\kappa := \sum_{i=1}^d x_i\partial_i^2 - \sum_{i=1}^d\sum_{j=1}^d x_ix_j \partial_i\partial_j
+ \sum_{i=1}^d \big(\kappa_i + \tfrac12 - (|\kappa|+ \tfrac{d+1}{2}) x_i\big) \partial_i
\end{equation}
with $|\kappa|:=\kappa_1+\dots+\kappa_{d+1}$
acting on sufficiently smooth functions on the simplex
\begin{equation*}
{\mathbb T}^d:=\Big\{x \in {\mathbb R}^d: x_1 \ge 0,\dots, x_d\ge 0, |x| \le 1 \Big\},
\quad |x|:= x_1+\cdots+x_d,
\end{equation*}
in ${\mathbb R}^d$, $d\ge 1$, equipped with the measure
\begin{equation}\label{def-meas-simpl}
d\nu_\kappa(x)=w_\kappa(x)dx:=\prod_{i=1}^d x_{i}^{\kappa_i-1/2}(1-|x|)^{\kappa_{d+1}-1/2}dx,
\quad \kappa_i \ge 0,
\end{equation}
and the distance
\begin{equation}\label{def-dist-simpl}
d_{\mathbb T}(x,y) := \arccos \Big(\sum_{i=1}^d \sqrt{x_i y_i} + \sqrt{1-|x|}\sqrt{1-|y|}\Big).
\end{equation}
As will be shown the operator ${\mathcal D}_\kappa$ is symmetric and $-{\mathcal D}_\kappa$ is positive
in the weighted space $L^2({\mathbb T}, w_\kappa)$,
furthermore, ${\mathcal D}_\kappa$ is essentially self-adjoint.
We shall use the notation:
\begin{equation}\label{B-simplex}
B_{\mathbb T}(x, r):=\{y\in{\mathbb T}^d: d_{\mathbb T}(x, y)<r\} \quad\hbox{and}\quad V_{\mathbb T}(x, r):=\nu_\kappa(B_{\mathbb T}(x, r)).
\end{equation}
It is known that
\begin{equation}\label{V-ball-simpl}
V_{\mathbb T}(x, r) \sim r^d (1-|x|+r^2)^{\kappa_{d+1}}\prod_{i=1}^d(x_i+r^2)^{\kappa_i}.
\end{equation}
This equivalence follows e.g. from \cite[(5.1.10)]{DaiX},
see also (4.23)-(4.24) in \cite{KPX}.
Denote by ${\mathcal V}_n(w_\kappa)$ the set of all algebraic polynomials of degree $n$ in $d$ variables
that are orthogonal to lower degree polynomials in $L^2({\mathbb T}^d, w_\kappa)$,
and let ${\mathcal V}_0(w_\kappa)$ be the set of all constants.
As is well known (e.g. \cite[\S2.3.3]{DX}) ${\mathcal V}_n(w_\kappa)$, $n=0, 1, \dots$,
are eigenspaces of the operator ${\mathcal D}_\kappa$, namely,
\begin{equation}\label{sim-eigen-sp}
{\mathcal D}_\kappa P=- n\big(n+|\kappa|+(d-1)/2\big)P,
\quad \forall P\in{\mathcal V}_n(w_\kappa),\;\; n=0, 1, \dots.
\end{equation}
Let $P_n(w_\kappa; x, y)$ be the kernel of the orthogonal projector onto ${\mathcal V}_n(w_\kappa)$ in $L^2({\mathbb T}^d, w_\kappa)$.
The heat kernel $e^{t{\mathcal D}_\kappa}(x,y)$, $t>0$, takes the form
\begin{equation}\label{simplex-HK}
e^{t{\mathcal D}_\kappa}(x,y)=\sum_{n=0}^\infty e^{-tn(n+\lambda_\kappa)} P_n(w_\kappa; x, y),
\quad \lambda_\kappa:= |\kappa|+(d-1)/2.
\end{equation}
\begin{theorem}\label{thm:Gauss-simpl}
For any $\kappa_i\ge 0$, $i=1, \dots, d+1$,
there are constants $c_1,c_2,c_3,c_4>0$ such that for all $x,y \in {\mathbb T}^d$ and $t>0$
\begin{equation}\label{gauss-simplex}
\frac{c_1\exp\{- \frac{d_{\mathbb T}(x,y)^2}{c_2t}\}}{\big[V_{\mathbb T}(x, \sqrt t)V_{\mathbb T}(y, \sqrt t)\big]^{1/2}}
\le e^{t{\mathcal D}_\kappa}(x,y)
\le \frac{c_3\exp\{- \frac{d_{\mathbb T}(x,y)^2}{c_4t}\}}{\big[V_{\mathbb T}(x, \sqrt t)V_{\mathbb T}(y, \sqrt t)\big]^{1/2}}.
\end{equation}
\end{theorem}
\subsection{Method of proof and discussion}\label{subsec:method}
We shall prove Theorems~\ref{thm:Gauss-ball} and \ref{thm:Gauss-simpl}
by using the known two-sided Gaussian bounds on the Jacobi heat kernel on $[-1, 1]$.
We~next describe this result.
The classical Jacobi operator is defined by
\begin{equation}\label{def-Jacobi}
L_{\alpha, \beta}f(x):=\frac{\big[w_{\alpha,\beta}(x)(1-x^2)f'(x)\big]'}{w_{\alpha, \beta}(x)},
\end{equation}
where
\begin{equation*}
w_{\alpha, \beta}(x):=(1-x)^{\alpha}(1+x)^{\beta}, \quad \alpha, \beta>-1.
\end{equation*}
We consider $L_{\alpha, \beta}$ with domain $D(L):={\mathcal P}[-1, 1]$ the set of all algebraic polynomials restricted to $[-1, 1]$.
We also consider $[-1, 1]$ equipped with the weighted measure
\begin{equation}\label{mes-int}
d\nu_{\alpha, \beta}(x) := w_{\alpha, \beta}(x) dx = (1-x)^{\alpha}(1+x)^{\beta} dx
\end{equation}
and the distance
\begin{equation}\label{dist-dist}
\rho(x,y) := |\arccos x - \arccos y|.
\end{equation}
It is not hard to see that the Jacobi operator $L_{\alpha, \beta}$ in the setting described above
is essentially self-adjoint and $-L_{\alpha, \beta}$ is positive in $L^2([-1, 1], w_{\alpha, \beta})$.
We shall use the notation
\begin{equation}\label{int-ball}
B(x, r):=\{y\in [-1,1]: \rho(x, y)<r\} \quad\hbox{and}\quad V(x, r):=\nu_{\alpha, \beta}(B(x, r)).
\end{equation}
As is well known (see e.g. \cite[(7.1)]{CKP})
\begin{equation}\label{measure-ball}
V(x, r)\sim r(1-x+r^2)^{\alpha+1/2}(1+x+r^2)^{\beta+1/2}, \quad x\in [-1, 1], \; 0<r\le \pi.
\end{equation}
It is well known \cite{Sz} that the Jacobi polynomials $P_n^{(\alpha, \beta)}$, $n= 0, 1, \dots$,
are eigenfunctions of the operator $L_{\alpha, \beta}$, namely,
\begin{equation}\label{Jacobi-eigenv}
L_{\alpha, \beta}P_n^{(\alpha, \beta)}= -n(n+\alpha+\beta+1)P_n^{(\alpha, \beta)}, \quad n=0, 1, \dots.
\end{equation}
We consider the Jacobi polynomials $\big\{P_n^{(\alpha, \beta)}\big\}$ normalised in $L^2([-1,1], w_{\alpha, \beta})$.
Then the Jacobi heat kernel $e^{tL_{\alpha, \beta}}(x,y)$, $t>0$, takes the form
\begin{equation}\label{Jacobi-HK}
e^{tL_{\alpha, \beta}}(x,y)=\sum_{n=0}^\infty e^{-tn(n+\lambda)}P_n^{(\alpha, \beta)}(x)P_n^{(\alpha, \beta)}(y),
\quad \lambda:=\alpha+\beta+1.
\end{equation}
\begin{theorem}\label{thm:Gauss-int}
For any $\alpha, \beta>-1$
there exist constants $c_1, c_2, c_3, c_4 >0$ such that for all $x,y \in [-1,1]$ and $t>0$
\begin{equation}\label{gauss-int}
\frac{c_1\exp\{- \frac{\rho(x,y)^2}{c_2t}\}}{\big[V(x, \sqrt t)V(y, \sqrt t)\big]^{1/2}}
\le e^{tL_{\alpha, \beta}}(x,y)
\le \frac{c_3\exp\{- \frac{\rho(x,y)^2}{c_4t}\}}{\big[V(x, \sqrt t)V(y, \sqrt t)\big]^{1/2}}.
\end{equation}
\end{theorem}
This theorem is established in \cite[Theorem~7.2]{CKP} using a general result on heat kernels
in Dirichlet spaces with a doubling measure and local Poincar\'{e} inequality.
The same theorem is also proved in \cite[Theorem~5.1]{KPX}.
In \cite{NS} Nowak and Sj\"{o}gren obtained this result in the case when $\alpha, \beta \ge -1/2$
via a direct method using special functions.
For the proof of Theorem~\ref{thm:Gauss-ball} it will be critical that the kernel $P_n(w_\mu; x, y)$
of the orthogonal projector onto ${\mathcal V}_n(w_\mu)$ in $L^2({\mathbb B}, w_\mu)$ has an explicit representation
in terms of the univariate Gegenbauer polynomials (see \eqref{rep-Pn-ball}-\eqref{rep-Pn-ball-2}).
For the proof of Theorem~\ref{thm:Gauss-simpl} we deploy the well known representation
of the kernel $P_n(w_\kappa; x, y)$ in terms of Jacobi polynomials (see \eqref{rep-Pn-simpl}).
It should be pointed out that our method of proof of estimates \eqref{gauss-ball} and \eqref{gauss-simplex}
works only in the range $\mu \ge 0$ for the weight parameter in the case of the ball
and in the range $\kappa_i\ge 0$, $i=1,\dots, d+1$, in the case of the simplex.
These restrictions on the range of the parameters are determined by the range
for the parameters in the representations of the kernels $P_n(w_\mu; x, y)$ and $P_n(w_\kappa; x, y)$.
Observe that the two-sided estimates on the heat kernels from \eqref{gauss-ball},\eqref{gauss-simplex}
coupled with the general results from \cite{CKP, KP}
entail smooth functional calculus in the settings on the ball and simplex (see \cite{IPX,PX2}),
in particular, the finite speed propagation property is valid.
For more details, see \cite[\S 3.1]{KPX}.
\section{Proof of Gaussian bounds for the heat kernel on the ball}
\label{sec:proof-ball}
We adhere to the notation from \S \ref{subsec:ball}.
Define
\begin{equation*}
D_{i,j}:=x_i\partial_j-x_j\partial_i,\quad 1 \le i \ne j \le d.
\end{equation*}
It is easy to see that
\begin{equation} \label{Dij-theta}
D_{i,j} = \partial_{\theta_{i,j}}
\quad \hbox{with} \quad
(x_i,x_j) = r_{i,j}(\cos \theta_{i,j},\sin \theta_{i,j}).
\end{equation}
Further, define the second order differential operators
$$
D^2_{i,i} := [w_\mu(x)]^{-1} \partial_i \left[(1-\|x\|^2) w_\mu(x) \right]\partial_i,
\quad 1 \le i \le d.
$$
It turns out that the differential operator ${\mathcal D}_\mu$ from \eqref{D-mu} can be decomposed
as a~sum of second order differential operators \cite[Proposition 7.1]{DaiX}:
\begin{equation}\label{decomp}
{\mathcal D}_\mu = \sum_{i=1}^d D^2_{i,i} + \sum_{1\le i < j \le d} D^2_{i,j}
= \sum_{1\le i \le j \le d} D^2_{i,j}.
\end{equation}
The basic properties of the operator ${\mathcal D}_\mu$ are given in the following
\begin{theorem}\label{thm:prop-D}
For $f \in C^2({\mathbb B}^d)$ and $g \in C^1({\mathbb B}^d)$,
\begin{align}\label{rep-Dmu}
& \int_{{\mathbb B}^d} {\mathcal D}_\mu f(x) g(x) w_\mu(x) dx
\\
&= - \int_{{\mathbb B}^d}
\Big[ \sum_{i =1}^d \partial_i f (x)\partial_i g(x) (1-\|x\|^2) +
\sum_{1 \le i< j \le d} D_{i,j} f(x)D_{i,j} g(x) \Big] w_\mu(x) dx. \notag
\end{align}
Consequently, the operator ${\mathcal D}_\mu$ is essentially self-adjoint
and $-{\mathcal D}_\mu$ is positive in $L^2({\mathbb B}^d, w_\mu)$.
\end{theorem}
\begin{proof}
Applying integration by parts in the variable $x_i$ we obtain
\begin{align*}
\int_{{\mathbb B}^d} (D_{i,i}^2 f(x)) g(x) w_\mu(x) dx
& = \int_{{\mathbb B}^d} \left( \partial_i \left[ (1-\|x\|^2) w_\mu(x)
\partial_i f(x) \right] \right) g(x) dx \\
& = - \int_{{\mathbb B}^d} \partial_i f (x)\partial_i g(x) (1-\|x\|^2) w_\mu(x) dx.
\end{align*}
We now handle $D_{i,j}^2$. It is sufficient to consider $D_{1,2}$.
If $d =2$ we switch to polar coordinates and use \eqref{Dij-theta} and
integration by parts for $2\pi$-periodic functions to obtain
\begin{align*}
\int_{{\mathbb B}^2} (D_{i,j}^2 f(x)) g(x) w_\mu(x) dx
& = \int_0^1 r (1-r^2)^{\mu-1/2} \int_{0}^{2\pi}( \partial_\theta^2 f) g d\theta dr \\
& = - \int_0^1 r (1-r^2)^{\mu-1/2} \int_{0}^{2\pi} \partial_\theta f \partial_\theta g d\theta dr \\
& = - \int_{{\mathbb B}^2} D_{i,j} f(x) D_{i,j} g(x) w_\mu(x) dx.
\end{align*}
In dimension $d > 2$ we apply the following integration identity that follows by
a~simple change of variables,
\begin{equation}\label{B-B}
\int_{{\mathbb B}^d} f(x)dx = \int_{{\mathbb B}^{d-2}}
\Big[\int_{{\mathbb B}^2} f\big(\sqrt{1-\|v\|^2}u, v\big) du \Big] (1-\|v\|^2) dv,
\end{equation}
and parametrizing the integral over ${\mathbb B}^2$ by polar coordinates we arrive at
\begin{equation*}
\int_{{\mathbb B}^d} (D_{i,j}^2 f(x)) g(x) w_\mu(x) dx = -\int_{{\mathbb B}^d} D_{i,j} f(x) D_{i,j} g(x) w_\mu(x) dx.
\end{equation*}
The above identities imply \eqref{rep-Dmu}.
We consider the operator ${\mathcal D}_\mu$ with domain $D({\mathcal D}_\mu)= {\mathcal P}({\mathbb B}^d)$ the set of all polynomials on ${\mathbb B}^d$,
which is obviously dense in $L^2({\mathbb B}^d, w_\mu)$.
From \eqref{rep-Dmu} it readily follows that the operator ${\mathcal D}_\mu$ is symmetric and $-{\mathcal D}_\mu$ is positive.
We next show that the operator ${\mathcal D}_\mu$ is essentially self-adjoint,
that is, the completion $\overline{{\mathcal D}}_\mu$ of the operator ${\mathcal D}_\mu$ is self-adjoint.
Let $\{P_{nj}: j=1, \dots, \dim {\mathcal V}_n\}$ be an orthonormal basis of ${\mathcal V}_n={\mathcal V}_n(w_\mu)$ consisting of real-valued polynomials.
Clearly
\begin{equation*}
D({\mathcal D}_\mu)=\Big\{f= \sum_{n, j} a_{nj} P_{nj}: \; a_{nj}\in{\mathbb R},
\;\;\{a_{nj}\}\;\;\hbox{compactly supported}\Big\}, \;\;\hbox{and}
\end{equation*}
\begin{equation*}
{\mathcal D}_\mu f= -\sum_{n, j} a_{nj} n(n+2\lambda) P_{nj}
\quad \hbox{if}\quad f= \sum_{n, j} a_{nj} P_{nj}\in D({\mathcal D}_\mu).
\end{equation*}
We define $\overline{{\mathcal D}}_\mu$ and its domain $D(\overline{{\mathcal D}}_\mu)$ by
\begin{equation*}
D(\overline{{\mathcal D}}_\mu):=\Big\{f= \sum_{n=0}^\infty\sum_{j=1}^{\dim {\mathcal V}_n} a_{nj} P_{nj}:
\; \sum_{n,j} |a_{nj}|^2 <\infty,\;\;\sum_{n,j} |a_{nj}|^2 (n(n+2\lambda))^2<\infty \Big\}
\end{equation*}
and
\begin{equation*}
\overline{{\mathcal D}}_\mu f:= -\sum_{n,j} a_{nj} n(n+2\lambda) P_{nj}
\quad \hbox{if}\quad f= \sum_{n, j} a_{nj} P_{nj}\in D(\overline{{\mathcal D}}_\mu).
\end{equation*}
It is easy to show that $\overline{{\mathcal D}}_\mu$ is the closure of ${\mathcal D}_\mu$
and that $\overline{{\mathcal D}}_\mu$ is self-adjoint.
\end{proof}
\begin{remark}\label{rem:green-ball}
Identity \eqref{rep-Dmu} is the weighted Green's formula on ${\mathbb B}^d$ $($see \cite{KPX}$)$.
\end{remark}
\begin{proof}[Proof of Theorem~\ref{thm:Gauss-ball}]
We shall assume that $0<t\le 1$. In the case $t>1$ the Gaussian bounds \eqref{gauss-ball}
obviously follow from \eqref{gauss-ball} in the case $t=1$.
It is known (see \cite[Thm. 5.2.8]{DX}) that for $\mu > 0$ the kernel $P_n(w_\mu;x,y)$ of
the orthogonal projector onto ${\mathcal V}_n(w_\mu)$ in $L^2({\mathbb B}, w_\mu)$
has the representation
\begin{equation}\label{rep-Pn-ball}
P_n(w_\mu;x,y) = c_\lambda\frac{n+\lambda}{\lambda}
\int_{-1}^1 C_n^\lambda \left(\langle x, y\rangle + u \sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}\right)
(1-u^2)^{\mu-1} du,
\end{equation}
where $C_n^\lambda$ is the Gegenbauer polynomial of degree $n$
and $c_\lambda>0$ is a constant depending only on $\lambda$ and $d$.
The Gegenbauer polynomials $\{C_n^\lambda\}$ are orthogonal in the weighted space $L^2([-1, 1], w_\lambda)$
with $w_\lambda(u) := (1-u^2)^{\lambda-1/2}$
and can be defined by the generating function
\begin{equation*}
(1-2uz+z^2)^{-\lambda}=\sum_{n=0}^\infty C_n^\lambda(u)z^n, \quad |z|<1, \; |u|<1.
\end{equation*}
Using that $C_n^\lambda(1)=\binom{n+2\lambda-1}{n}$ it is easy to show that
\begin{equation}\label{geg}
\int_{-1}^{1}|C_n^\lambda(u)|^2w_\lambda(u)du=\frac{\lambda}{n+\lambda}C_n^\lambda(1).
\end{equation}
In the limiting case $\mu = 0$ the representation of $P_n(w_\mu;x,y)$ takes the form
\begin{align}\label{rep-Pn-ball-2}
P_n(w_0; x,y) = c_d\frac{\lambda+n}{\lambda} & \left[ C_n^\lambda
\left(\langle x,y\rangle +\sqrt{1-\|x\|^2} \sqrt{1-\|y\|^2}\right) \right .\\
& \left . + C_n^\lambda \left (\langle x,y\rangle - \sqrt{1-\|x\|^2} \sqrt{1-\|y\|^2} \right)\right]. \notag
\end{align}
If $\alpha=\beta=\lambda-1/2$ we denote the Jacobi operator by
$L_\lambda:=L_{\lambda-1/2, \lambda-1/2}$
and we have
$L_\lambda f(x) = (1-x^2) f''(x) - (2\lambda+1) x f'(x)$.
We denote by $e^{tL_\lambda}(u,v)$ the Jacobi heat kernel in this case
and by \eqref{Jacobi-HK} and \eqref{geg} we obtain
\begin{equation}\label{gegen-HK}
e^{tL_\lambda}(u,v)
= \sum_{n =0}^\infty e^{-tn(n+2\lambda)}\frac{n+\lambda}{\lambda}\frac{C_n^\lambda(u) C_n^\lambda(v)}{C_n^\lambda(1)},
\quad \lambda:=\mu+(d-1)/2.
\end{equation}
Assume $\mu>0$.
The above, \eqref{ball-HK}, and \eqref{rep-Pn-ball} lead to the representation
\begin{equation} \label{h-kernel-ball2}
e^{t {\mathcal D}_\mu}(x,y) = c_\lambda\int_{-1}^1 e^{t L_\lambda} \left(1,\langle x, y\rangle
+ u \sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}\right)(1-u^2)^{\mu-1} du.
\end{equation}
Note that in the case of Gegenbauer polynomials ($\alpha = \beta = \lambda -1/2$)
by \eqref{measure-ball} it follows that
$V(x,r) \sim r( 1-x^2 + r^2)^\lambda$, $-1 \le x \le 1$,
and hence
\begin{equation}\label{V-ball}
V(1,\sqrt{t})\sim t^{\lambda+1/2}
\quad\hbox{and}\quad
V(z,\sqrt{t}) \sim t^{\lambda+1/2}(1+(1-z^2)/t)^\lambda,
\quad |z|\le 1.
\end{equation}
If $x = \cos \theta$, then $1-x =2 \sin^2 \frac{\theta}{2} \sim \theta^2$
and hence
\begin{equation}\label{dist-1z}
\rho(1,z) = |\arccos 1 - \arccos z| = \arccos z \sim \sqrt{1-z}, \quad -1\le z\le 1.
\end{equation}
From this, \eqref{h-kernel-ball2}, \eqref{gauss-int}, and \eqref{V-ball} we obtain
\begin{equation} \label{main-est}
e^{t {\mathcal D}_\mu}(x,y)
\le c_1\int_{-1}^1 \frac{\exp \big\{-\frac{1-z(u;x,y)}{c_2t}\big\}}
{t^{\lambda+1/2}\big(1+ \frac{1-z(u;x,y)^2}{t}\big)^\lambda}(1-u^2)^{\mu-1} du
\end{equation}
and
\begin{equation} \label{main-est1}
e^{t {\mathcal D}_\mu}(x,y)
\ge c_3\int_{-1}^1 \frac{\exp\big\{-\frac{1-z(u;x,y)}{c_4t}\big\}}
{t^{\lambda+1/2}\big(1+ \frac{1-z(u;x,y)^2}{t}\big)^\lambda}(1-u^2)^{\mu-1} du,
\end{equation}
where $z(u; x,y) := \langle x, y\rangle + u\sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}$.
Since $1 + b \le e^{b}$ for $b \ge 0$, we have
$$
1 \le \Big(1+ \frac{1-z^2}{t}\Big)^\lambda \le \Big(1+ 2\frac{1-z}{t}\Big)^\lambda
\le \exp\Big\{2\lambda \frac{1-z}{t}\Big\},
\quad |z|\le 1.
$$
Therefore, by replacing the constant $c_4$ in \eqref{main-est1} by a smaller constant $c_4'$ we get
\begin{equation} \label{main-est2}
e^{t {\mathcal D}_\mu}(x,y)
\ge \frac{c_3'}{t^{\lambda+1/2}}\int_{-1}^1 \exp\Big\{-\frac{1-z(u;x,y)}{c_4't}\Big\}
(1-u^2)^{\mu-1} du.
\end{equation}
Obviously, from \eqref{main-est} it follows that
\begin{equation} \label{main-est3}
e^{t {\mathcal D}_\mu}(x,y)
\le \frac{c_1}{t^{\lambda+1/2}}\int_{-1}^1\exp\Big\{-\frac{1-z(u;x,y)}{c_2t}\Big\}
(1-u^2)^{\mu-1} du.
\end{equation}
We have
\begin{align*}
1-z(u;x,y) = 1- \langle x, y\rangle - \sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}
+ (1-u)\sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}
\end{align*}
and using the definition of $d_{\mathbb B}(x,y)$ in \eqref{dist-ball} we get
$$
1- z(1;x,y) = 1- \cos d_{\mathbb B}(x,y) = 2 \sin^2 \frac{d_{\mathbb B}(x,y)}{2} \sim d_{\mathbb B}(x,y)^2.
$$
Hence,
$$
1-z(u;x,y) \sim d_{\mathbb B}(x,y)^2 + (1-u) H(x,y), \quad H(x,y): = \sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}.
$$
Consequently,
$$
\exp\Big\{-\frac{1-z(u;x,y)}{c_2t}\Big\}
\le \exp\Big\{-\frac{d_{\mathbb B}(x,y)^2}{c't}\Big\}
\exp\Big\{-\frac{(1-u)H(x,y)}{c't}\Big\}
$$
and
$$
\exp\Big\{-\frac{1-z(u;x,y)}{2c_4t}\Big\}
\ge \exp\Big\{-\frac{d_{\mathbb B}(x,y)^2}{c''t}\Big\}
\exp\Big\{-\frac{(1-u)H(x,y)}{c''t}\Big\}.
$$
These two inequalities along with \eqref{main-est2}-\eqref{main-est3}
imply that in order to obtain the two-sided Gaussian bounds in \eqref{gauss-ball}
it suffices to show that the quantity
\begin{equation}\label{def-At}
A_t(x,y): =\frac{1}{t^{\lambda+1/2}} \int_{-1}^1\exp\Big\{-\frac{(1-u)H(x,y)}{ct}\Big\}
(1-u^2)^{\mu-1}du
\end{equation}
satisfies the following inequalities, for any $\varepsilon>0$,
\begin{align}\label{key-est}
\frac{c^\star}{\big[V_{\mathbb B}(x,\sqrt{t})V_{\mathbb B}(y,\sqrt{t})\big]^{1/2}}
\le A_t(x,y)
\le \frac{c^{\star\star}\exp\Big\{\varepsilon\frac{d_{\mathbb B}(x,y)^2}{t}\Big\}}{\big[V_{\mathbb B}(x,\sqrt{t})V_{\mathbb B}(y,\sqrt{t})\big]^{1/2}}.
\end{align}
Here the constant $c^{\star\star}>0$ depends on $\varepsilon$.
\noindent
{\em Lower bound estimate.}
First, assume that $H(x,y)/ t \ge 1$. Then we have
\begin{align}\label{lower-est}
A_t(x,y)
& \ge \frac{\tilde{c}}{t^{\lambda+1/2}}\int_0^1\exp\Big\{-\frac{(1-u)H(x,y)}{ct}\Big\}(1-u)^{\mu-1} du \notag
\\
& =\frac{\tilde{c}t^\mu}{t^{\lambda+1/2}H(x,y)^\mu}\int_0^{H(x,y)/t}v^{\mu-1}e^{-v/c}dv
\\
& \ge \frac{c_* }{t^{d/2}H(x,y)^\mu} \quad \hbox{with}
\quad c_*=\tilde{c}\int_0^1 v^{\mu-1} e^{-v/c} dv, \notag
\end{align}
where we applied the substitution $v=(1-u)H(x, y)/t$ and used that $\lambda +1/2 = \mu + d/2$.
However,
by \eqref{V-ball-ball}, $V_{\mathbb B}(x,r) \ge cr^d(1-\|x\|^2)^\mu$, which implies
$$
t^{d/2} H(x,y)^\mu = t^{d/2} \big(\sqrt{1-\|x\|^2} \sqrt{1-\|y\|^2}\big)^\mu
\le \big[V_{\mathbb B}(x,\sqrt{t})V_{\mathbb B}(y,\sqrt{t})\big]^{1/2}.
$$
Putting the above together we conclude that $A_t(x,y)$ obeys the lower bound in \eqref{key-est} in this case.
Now, assume that $H(x,y)/t \le 1$.
Then $\exp \big\{-\frac{(1-u)H(x,y)}{ct}\big\} \ge e^{-1/c}$
and we have
$$
A_t(x,y)\ge \frac{\tilde{c}}{t^{\lambda+1/2}} \ge \frac{c^\star}{\left[V_{\mathbb B}(x,\sqrt{t})V_{\mathbb B}(y,\sqrt{t})\right]^{1/2}}.
$$
Here we used that, by \eqref{V-ball-ball}, $V_{\mathbb B}(x,r) \ge cr^{d+2\mu} = cr^{2\lambda+1}$.
Thus, $A_t(x,y)$ again obeys the lower bound estimate in \eqref{key-est}
and this completes its proof.
\noindent
{\em Upper bound estimate.}
Obviously $\exp\big\{-\frac{(1-u)H(x,y)}{ct}\big\} \le 1$
and hence
\begin{equation}\label{At-upper1}
A_t(x,y) \le \frac{c_*}{t^{\lambda+1/2}}= \frac{c_*}{t^{d/2+\mu}}.
\end{equation}
We shall obtain another estimate on $A_t(x,y)$ by breaking the integral in \eqref{def-At}
into two parts: one over $[0,1]$ and the other over $[-1,0]$.
Just as in \eqref{lower-est} applying the substitution $v=(1-u)H(x,y)/t$ we obtain
\begin{align*}
\frac{1}{t^{\lambda+1/2}} \int_0^1\exp\Big\{-\frac{(1-u)H(x,y)}{ct}\Big\}(1-u^2)^{\mu-1}du
\le \frac{c^*\max\{1, 2^{\mu-1}\}}{t^{d/2}H(x,y)^\mu}
\end{align*}
with
$c^*=\int_0^\infty v^{\mu-1} e^{-v/c} dv$.
Here we used that $(1+u)^{\mu-1}\le \max\{1, 2^{\mu-1}\}$.
For the integral over $[-1,0]$ we use the fact that $1-u \ge 1$ for $u \in [-1,0]$
to obtain
\begin{align*}
\frac{1}{t^{\lambda+1/2}} \int_{-1}^0\exp\Big\{-\frac{(1-u)H(x,y)}{ct}\Big\}(1-u^2)^{\mu-1}du
&\le \frac{c_*}{t^{\lambda+1/2}}\exp\Big\{-\frac{H(x,y)}{ct}\Big\}
\\
&\le \frac{\tilde{c}}{t^{\lambda+1/2}}\Big(\frac{t}{H(x, y)}\Big)^\mu
= \frac{\tilde{c}}{t^{d/2}H(x, y)^\mu}.
\end{align*}
Here we used that $v^\mu \le \lfloor \mu+1\rfloor! e^v$, $\forall v>0$,
and $\lambda=\mu+(d-1)/2$.
Together, the above inequalities imply
\begin{equation}\label{At-upper2}
A_t(x,y) \le \frac{c^*}{t^{d/2} H(x,y)^\mu}.
\end{equation}
In turn, \eqref{At-upper1} and \eqref{At-upper2} yield
\begin{equation}\label{At-upper}
A_t(x,y) \le \frac{c_\diamond}{t^{d/2} (t + H(x,y))^\mu}.
\end{equation}
It remains to show that the above estimate implies the upper bound estimate in \eqref{key-est}.
To this end we need the following simple inequalities:
\begin{equation} \label{elementary}
(u+a) (u+b) \le 3 (u^2 +a b) (1+u^{-1} |a-b|), \quad a, b \ge 0, \;\; 0 < u \le 1,
\end{equation}
(see, e.g. \cite[(2.21)]{PX1}) and (see \cite[(4.9)]{PX2})
$$
\big|\sqrt{1-\|x\|^2}- \sqrt{1-\|y\|^2}\big| \le \sqrt{2} d_{\mathbb B}(x,y), \quad x,y \in {\mathbb B}^d.
$$
Together, these two inequalities yield
\begin{equation}\label{combo}
\big(\sqrt{t} + \sqrt{1-\|x\|^2}\big)\big(\sqrt{t} + \sqrt{1-\|y\|^2}\big)
\le c \big(t + H(x,y)\big)\Big(1+ \frac{d_{\mathbb B}(x,y)}{\sqrt{t}}\Big).
\end{equation}
Evidently $1+u\le \varepsilon^{-1}e^{\varepsilon u}$ for $u\ge 0$ and $0<\varepsilon\le 1$,
and hence
\begin{equation}\label{ineq-3}
(1+b)^\mu \le 2^\mu(1+b^2)^{\mu/2} \le 2^\mu \varepsilon^{-\mu/2} e^{\mu\varepsilon b^2},
\quad \forall b\ge 0, \; 0<\varepsilon\le 1.
\end{equation}
Also, from \eqref{V-ball-ball} it follows that
$V_{\mathbb B}(x, r)\sim r^d\big(r+\sqrt{1-\|x\|^2}\big)^{2\mu}$.
From this, \eqref{combo}, and \eqref{ineq-3} it follows that
\begin{align*}
\big[V_{\mathbb B}(x,\sqrt{t})V_{\mathbb B}(y,\sqrt{t})\big]^{1/2}
& \le c t^{d/2} \big(\sqrt{t} + \sqrt{1-\|x\|^2}\big)^\mu \big(\sqrt{t} + \sqrt{1-\|y\|^2}\big)^\mu
\\
& \le c_\varepsilon t^{d/2}\big(t + H(x,y)\big)^\mu \exp\Big\{\mu\varepsilon \frac{d_{\mathbb B}(x,y)^2}{t}\Big\}.
\end{align*}
In turn, this and \eqref{At-upper} yield the upper bound estimate in \eqref{key-est}.
We next consider the case when $\mu=0$.
Now, \eqref{ball-HK}, \eqref{rep-Pn-ball-2}, and \eqref{gegen-HK} yield the representation
\begin{align}\label{rep-D0}
e^{t {\mathcal D}_0}(x,y)
&= c_\lambda e^{t L_\lambda} \left(1,\langle x, y\rangle + \sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}\right)
\\
&+ c_\lambda e^{t L_\lambda} \left(1,\langle x, y\rangle - \sqrt{1-\|x\|^2}\sqrt{1-\|y\|^2}\right).\notag
\end{align}
From this point on the proof follows in the footsteps of the proof when $\mu>0$ from above,
but is much simpler because the integral in \eqref{h-kernel-ball2} is replaced in \eqref{rep-D0} by two terms.
We omit the further details.
The proof of Theorem~\ref{thm:Gauss-ball} is complete.
\end{proof}
\section{Proof of Gaussian bounds for the heat kernel on the simplex}\label{sec:simplex}
In this part we adhere to the notation from \S\ref{subsec:simplex}.
The differential operator ${\mathcal D}_\kappa$ from \eqref{def-D-simplex} can be written in the more symmetric form
\begin{equation}\label{rep-D-simplex}
{\mathcal D}_\kappa = \sum_{i=1}^d U_{i} + \sum_{1\le i < j \le d} U_{i,j},
\end{equation}
where, with the notation $\partial_{i,j} : = \partial_i - \partial_j$,
\begin{align*}
U_{i} & := \frac{1}{w_\kappa(x)} \partial_i (x_i(1-|x|) w_\kappa(x)) \partial_i, \quad 1 \le i \le d,\\
U_{i,j} & := \frac{1}{w_\kappa(x)} \partial_{i,j} (x_i x_j w_\kappa(x)) \partial_{i,j}, \quad 1 \le i < j \le d.
\end{align*}
This decomposition was first established in \cite{BSX} for $w_\kappa(x) = 1$ and later used in \cite{Diz} for $w_\kappa$.
It is easy to verify it directly.
The following basic property of the operator ${\mathcal D}_\kappa$ follows immediately from \eqref{rep-D-simplex} by integration by parts:
\begin{proposition}
For any $f \in C^2({\mathbb T}^d)$ and $g \in C^1({\mathbb T}^d)$,
\begin{align}\label{diver-simplex}
\int_{{\mathbb T}^d} {\mathcal D}_\kappa f(x) \cdot g(x) w_\kappa(x) dx
=&-\int_{{\mathbb T}^d}\Big[ \sum_{i =1}^d \partial_i f (x) \partial_i g (x) x_i (1-|x|)
\\
&+ \sum_{1 \le i < j \le d} \partial_{i,j} f (x) \partial_{i,j} g (x) x_i x_j \Big] w_\kappa(x) dx.\notag
\end{align}
\end{proposition}
Observe that identity \eqref{diver-simplex} is the weighted Green's formula on the simplex ${\mathbb T}^d$ (see \cite{KPX}).
We consider the operator ${\mathcal D}_\kappa$ defined on the set $D({\mathcal D}_\kappa)={\mathcal P}({\mathbb T}^d)$ of all algebraic
polynomials on ${\mathbb T}^d$, which is obviously dense in $L^2({\mathbb T}^d, w_\kappa)$.
From \eqref{diver-simplex} it readily follows that the operator ${\mathcal D}_\kappa$ is
symmetric and $-{\mathcal D}_\kappa$ is positive in $L^2({\mathbb T}^d, w_\kappa)$.
Furthermore, just as in the proof of Theorem~\ref{thm:prop-D} it follows that the operator ${\mathcal D}_\kappa$ is essentially self-adjoint.
\begin{proof}[Proof of Theorem~\ref{thm:Gauss-simpl}]
We may assume that $0<t\le 1$, because the case $t>1$ follows immediately from the case $t=1$.
Recall that we consider in this article the Jacobi polynomials $P_n^{(\alpha, \beta)}$, $n=0,1, \dots$,
normalized in $L^2([-1,1], w_{\alpha, \beta})$.
It is known (see \cite[Theorem 5.3.4]{DX}) that if all $\kappa_i > 0$ the kernel $P_n(w_\kappa;x,y)$
of the orthogonal projector onto ${\mathcal V}_n(w_\kappa)$ in $L^2({\mathbb T}^d, w_\kappa)$ has the following representation
\begin{align}\label{rep-Pn-simpl}
P_n(w_\kappa;x,y) = & c_\kappa P_n^{(\lambda -\frac 12,-\frac 12)}(1) \notag
\\
&\times \int_{[-1,1]^{d+1}}P_n^{(\lambda -\frac 12,-\frac 12)} \left(2z(u;x,y)^2-1\right)
\prod_{i=1}^{d+1} (1-u_i^2)^{\kappa_i-1} du,
\end{align}
where
\begin{equation*}
z(u;x,y) := \sum_{i=1}^{d+1} u_i\sqrt{x_iy_i}, \;\; x_{d+1} := 1-|x|, \;\; y_{d+1} := 1-|y|, \;\; \lambda:=|\kappa|+(d-1)/2,
\end{equation*}
in which $|x| = x_1+ \ldots + x_d$. In the case when some or all $\kappa_i =0$, this identity holds under the
limit $\kappa_i \to 0$, using that
$$
\lim_{\kappa \to 0+} \frac{\int_{-1}^1 f(x) (1-x^2)^{\kappa - 1} dx }{\int_{-1}^1 (1-x^2)^{\kappa - 1} dx}
= \frac12 \left[f(1) + f(-1) \right].
$$
Assume $\kappa_i>0$, $i=1, \dots, d+1$. Combining \eqref{simplex-HK}, \eqref{Jacobi-HK}, and
\eqref{rep-Pn-simpl} we obtain the representation
\begin{equation} \label{h-kernel-simplex2}
e^{t{\mathcal D}_\kappa}(x,y) = c_\kappa\int_{[-1,1]^{d+1}}
e^{tL_{\lambda -\frac 12,-\frac 12}} \left(1, 2z(u;x,y)^2-1\right)\prod_{i=1}^{d+1} (1-u_i^2)^{\kappa_i-1} du.
\end{equation}
Note that from \eqref{def-dist-simpl} we have
$\sum_{i=1}^{d+1} \sqrt{x_iy_i}=\cos d_{\mathbb T}(x, y)$
and hence $|z(u;x,y)|\le 1$.
Just as in \eqref{dist-1z} we obtain
\begin{equation}\label{rho-1z}
\rho(1, 2z^2 -1):= |\arccos 1 - \arccos (2z^2 -1)|
\sim \sqrt{1 - (2z^2 -1)} \sim \sqrt{1-z^2}.
\end{equation}
On the other hand, with $\alpha= \lambda-1/2$ and $\beta=-1/2$ we infer from \eqref{measure-ball} that
$ V(x, r)\sim r(1-x+r^2)^\lambda$ and hence
\begin{equation*}
V(1, \sqrt{t})\sim t^{\lambda+1/2}
\;\;\hbox{and}\;\;
V(2z^2-1, \sqrt{t})\sim t^{1/2}\big(t + 2(1-z^2)\big)^{\lambda}
\sim t^{\lambda+1/2}\Big(1 + \frac{1-z^2}{t}\Big)^{\lambda}.
\end{equation*}
We use these equivalences, \eqref{h-kernel-simplex2}, \eqref{gauss-int}, and \eqref{rho-1z} to obtain
\begin{equation} \label{main-est-TT}
e^{t{\mathcal D}_\kappa}(x,y)
\le c_1\int_{[-1,1]^{d+1}}\frac{\exp \big\{-\frac{1-z(u;x,y)^2}{c_2t}\big\}}
{t^{\lambda+1/2}\big(1+\frac{1-z(u;x,y)^2}{t}\big)^\lambda} \prod_{i=1}^{d+1} (1-u_i^2)^{\kappa_i-1} du
\end{equation}
and
\begin{equation} \label{main-est1-TT}
e^{t{\mathcal D}_\kappa}(x,y)
\ge c_3\int_{[-1,1]^{d+1}}\frac{\exp\big\{-\frac{1-z(u;x,y)^2}{c_4t}\big\}}
{t^{\lambda+1/2}\big(1+\frac{1-z(u;x,y)^2}{t}\big)^\lambda} \prod_{i=1}^{d+1} (1-u_i^2)^{\kappa_i-1} du.
\end{equation}
Just as in the proof of Theorem~\ref{thm:Gauss-ball} by replacing
the constant $c_4$ in \eqref{main-est1-TT} by a smaller constant $c_4'$ we can eliminate the term
$\big(1+ \frac{1-z(u;x,y)^2}{t}\big)^\lambda$ in the denominator.
Thus, it follows that
\begin{equation} \label{main-est2-TT}
e^{t{\mathcal D}_\kappa}(x,y)
\ge \frac{c_3'}{t^{\lambda+1/2}}
\int_{[-1,1]^{d+1}}\exp\Big\{-\frac{1-z(u;x,y)^2}{c_4't}\Big\}\prod_{i=1}^{d+1} (1-u_i^2)^{\kappa_i-1}du.
\end{equation}
By simply deleting that term in \eqref{main-est-TT} we get
\begin{equation*}
e^{t{\mathcal D}_\kappa}(x,y)
\le \frac{c_1}{t^{\lambda+1/2}}
\int_{[-1,1]^{d+1}}\exp\Big\{-\frac{1-z(u;x,y)^2}{c_2t}\Big\}\prod_{i=1}^{d+1} (1-u_i^2)^{\kappa_i-1}du.
\end{equation*}
Evidently,
$$
1 - z(u;x,y)^2 = (1+|z(u;x,y)|)(1 - |z(u;x,y)|) \ge 1-|z(u,x,y)| \ge 1- \sum_{i=1}^{d+1}|u_i| \sqrt{x_iy_i}.
$$
Using the symmetry of the last term above with respect to sign changes of $u_i$,
and that $1-u_i^2\sim 1-u_i$ when $0\le u_i\le 1$,
we conclude that
\begin{equation} \label{main-est3-TT}
e^{t{\mathcal D}_\kappa}(x,y)
\le \frac{c_1'}{t^{\lambda+1/2}}
\int_{[0,1]^{d+1}}\exp\Big\{-\frac{1-z(u;x,y)}{c_2t}\Big\}\prod_{i=1}^{d+1} (1-u_i)^{\kappa_i-1}du.
\end{equation}
Similarly, using that $1-z(u; x,y)^2\le 2(1-z(u; x,y))$
we infer from \eqref{main-est2-TT} that
\begin{equation} \label{main-est4-TT}
e^{t{\mathcal D}_\kappa}(x,y)
\ge \frac{c_3''}{t^{\lambda+1/2}}
\int_{[0,1]^{d+1}}\exp\Big\{-\frac{1-z(u;x,y)}{c_4''t}\Big\}\prod_{i=1}^{d+1} (1-u_i)^{\kappa_i-1}du.
\end{equation}
By the definition of $d_{\mathbb T}(x,y)$ in \eqref{def-dist-simpl} we have
$$
1- \sum_{i=1}^{d+1} \sqrt{x_iy_i} = 1-\cos d_{\mathbb T}(x, y) = 2\sin^2\frac {d_{\mathbb T}(x,y)}{2} \sim d_{\mathbb T}(x,y)^2
$$
and hence
\begin{equation}\label{z-simplex}
1-z(u;x,y) = 1- \sum_{i=1}^{d+1} \sqrt{x_iy_i} + \sum_{i=1}^{d+1} (1-u_i) \sqrt{x_iy_i}
\sim d_{\mathbb T}(x,y)^2 + \sum_{i=1}^{d+1} (1 - u_i) \sqrt{x_iy_i}.
\end{equation}
Consequently,
\begin{equation}\label{exp-1}
\exp\Big\{-\frac{1-z(u;x,y)}{c_2t}\Big\}
\le \exp\Big\{-\frac{d_{\mathbb T}(x,y)^2}{c't}\Big\}
\prod_{i=1}^{d+1} \exp\Big\{-\frac{(1-u_i)\sqrt{x_iy_i}}{c't}\Big\}
\end{equation}
and
\begin{equation}\label{exp-2}
\exp\Big\{-\frac{1-z(u;x,y)}{c_4''t}\Big\}
\ge \exp\Big\{-\frac{d_{\mathbb T}(x,y)^2}{c''t}\Big\}
\prod_{i=1}^{d+1} \exp\Big\{-\frac{(1-u_i)\sqrt{x_iy_i}}{c''t}\Big\}.
\end{equation}
For $x,y\in [0,1]$ and $\kappa>0$, denote
\begin{equation}\label{A-T}
A_t(\kappa; x,y):= \kappa \int_{0}^1\exp\Big\{-\frac{(1-u)\sqrt{xy}}{ct}\Big\}(1-u)^{\kappa-1}du,
\end{equation}
where $c>0$ is a constant.
We claim that for any $0< \varepsilon \le 1$
\begin{equation}\label{est-At}
\frac{c_\diamond t^{|\kappa|}}{\prod_{i=1}^{d+1}(x_i+t)^{\kappa_i/2}(y_i+t)^{\kappa_i/2}}
\le \prod_{i=1}^{d+1} A_t(\kappa_i; x_i,y_i)
\le \frac{c^\diamond t^{|\kappa|}\exp\Big\{\varepsilon \frac{d_{\mathbb T}(x,y)^2}{t}\Big\}}
{\prod_{i=1}^{d+1}(x_i+t)^{\kappa_i/2}(y_i+t)^{\kappa_i/2}},
\end{equation}
where $c^\diamond>0$ depends on $\varepsilon$.
Assume for a moment that the inequalities \eqref{est-At} are valid.
Then by \eqref{main-est3-TT}, \eqref{exp-1}, and the right-hand side inequality in \eqref{est-At} we obtain
\begin{align*}
e^{t{\mathcal D}_\kappa}(x, y)
&\le \frac{c}{t^{|\kappa|+d/2}}\exp\Big\{-\frac{d_{\mathbb T}(x,y)^2}{c't}\Big\}
\frac{t^{|\kappa|}\exp\big\{\varepsilon\frac{d_{\mathbb T}(x,y)^2}{t}\big\}}
{\prod_{i=1}^{d+1}(x_i+t)^{\kappa_i/2}\prod_{i=1}^{d+1}(y_i+t)^{\kappa_i/2}}
\\
&\le \frac{c\exp\big\{-\frac{d_{\mathbb T}(x, y)^2}{2c't}\big\}}{\big[V_{\mathbb T}(x, \sqrt{t})V_{\mathbb T}(y, \sqrt{t})\big]^{1/2}}.
\end{align*}
Here we used that $\lambda=|\kappa|+(d-1)/2$, and $V_{\mathbb T}(x, \sqrt{t})= t^{d/2}\prod_{i=1}^{d+1}(x_i+t)^{\kappa_i}$
and the similar expression for $V_{\mathbb T}(y, \sqrt{t})$,
which follow by \eqref{V-ball-simpl}.
We also used the right-hand side estimate in \eqref{est-At} with $\varepsilon=(2c')^{-1}$.
The above inequalities yield the upper bound estimate in \eqref{gauss-simplex}.
One similarly shows that \eqref{main-est4-TT}, \eqref{exp-2}, and the left-hand side inequality in \eqref{est-At}
imply the lower bound estimate in \eqref{gauss-simplex}.
It remains to prove the estimates in \eqref{est-At}.
We first focus on the lower bound estimate in \eqref{est-At}.
If $\sqrt{xy}/t\le 1$, then
$\exp\big\{-\frac{(1-u)\sqrt{xy}}{ct}\big\} \ge c'>0$
and hence, since $x+t\ge t$ and $y+t\ge t$ imply $t^\kappa \le (x+t)^{\kappa/2}(y+t)^{\kappa/2}$,
\begin{equation*}
A_t(\kappa; x,y) \ge c' \ge \frac{c't^\kappa}{(x+t)^{\kappa/2}(y+t)^{\kappa/2}}.
\end{equation*}
Assume $\sqrt{xy}/t>1$. Then applying the substitution $v=\frac{(1-u)\sqrt{xy}}{t}$
we obtain
\begin{align*}
A_t(\kappa; x,y)= \frac{t^\kappa}{(\sqrt{xy})^\kappa}\int_{0}^{\sqrt{xy}/t}e^{-v/c}v^{\kappa-1}dv
\ge \frac{t^\kappa}{(\sqrt{xy})^\kappa}\int_0^1e^{-v/c}v^{\kappa-1}dv
=\frac{c't^\kappa}{(\sqrt{xy})^\kappa}
\ge \frac{c't^\kappa}{(x+t)^{\kappa/2}(y+t)^{\kappa/2}},
\end{align*}
where the last step uses $\sqrt{xy}\le (x+t)^{1/2}(y+t)^{1/2}$.
Thus in both cases
\begin{equation*}
A_t(\kappa; x,y) \ge \frac{c't^\kappa}{(x+t)^{\kappa/2}(y+t)^{\kappa/2}},
\end{equation*}
which yields the lower bound estimate in \eqref{est-At}.
We now prove the upper bound estimate in \eqref{est-At}.
Clearly
$\exp\big\{-\frac{(1-u)\sqrt{xy}}{ct}\big\}\le 1$ and hence
$A_t(\kappa; x,y)\le c'$.
On the other hand, from above it follows that
\begin{align*}
A_t(\kappa; x,y)
\le \frac{t^\kappa}{(\sqrt{xy})^\kappa}\int_0^\infty e^{-v/c}v^{\kappa-1}dv
=\frac{c''t^\kappa}{(\sqrt{xy})^\kappa}.
\end{align*}
Together, these two estimates yield
\begin{equation*}
A_t(\kappa; x,y) \le \frac{c^\star t^\kappa}{(\sqrt{xy}+t)^\kappa},
\end{equation*}
implying
\begin{equation}\label{est-prod-A}
\prod_{i=1}^{d+1} A_t(\kappa_i; x_i,y_i)
\le \frac{c^\star t^{|\kappa|}}{\prod_{i=1}^{d+1}\big(\sqrt{x_iy_i }+ t\big)^{\kappa_i}}.
\end{equation}
To show that this leads to the desired upper bound estimate,
we need the following simple inequality (see \cite[(2.50)]{IPX})
$$
\left|\sqrt{x_i}-\sqrt{y_i}\right|\le d_{\mathbb T}(x,y),
\quad i=1, \dots, d+1, \;\; x,y \in {\mathbb T}^d.
$$
This along with \eqref{elementary} implies
$$
\big(\sqrt{x_i}+ \sqrt{t}\big)\big(\sqrt{y_i}+\sqrt{t}\big)
\le c\big(\sqrt{x_iy_i}+ t\big)\Big(1+ \frac{d_{\mathbb T}(x,y)}{\sqrt{t}}\Big),
$$
which leads to
\begin{align*}
\prod_{i=1}^{d+1}(x_i+t)^{\kappa_i/2}(y_i+t)^{\kappa_i/2}
& \sim \prod_{i=1}^{d+1}\big(\sqrt{t} + \sqrt{x_i}\big)^{\kappa_i}\big(\sqrt{t} + \sqrt{y_i}\big)^{\kappa_i}
\\
& \le c \prod_{i=1}^{d+1}\big(\sqrt{x_iy_i }+ t\big)^{\kappa_i}
\Big(1+ \frac{d_{\mathbb T}(x,y)}{\sqrt{t}}\Big)^{|\kappa|}
\\
& \le c(\varepsilon) \prod_{i=1}^{d+1}\big(\sqrt{x_iy_i }+ t\big)^{\kappa_i}
\exp\Big\{\varepsilon|\kappa|\frac{d_{\mathbb T}(x,y)^2}{t}\Big\}.
\end{align*}
Here for the last inequality we used \eqref{ineq-3} with $\mu=|\kappa|$.
Together, the above and \eqref{est-prod-A} yield the upper bound estimate in \eqref{est-At}.
We now consider the case when one or more $\kappa_i=0$, $1\le i\le d+1$. In this case,
the kernel representation \eqref{rep-Pn-simpl} holds under the limit. If $\kappa_i =0$, then the integral
over $u_i$ in \eqref{h-kernel-simplex2} is replaced by the average of point evaluations at $u_i=1$ and $u_i=-1$.
It is easy to see that all deductions that lead to \eqref{est-At} are still valid with the realization that \eqref{A-T}
holds under the limit
$$
\lim_{\kappa \to 0+} A_t(\kappa; x,y) =
\lim_{\kappa \to 0+} \kappa \int_{0}^1\exp\Big\{-\frac{(1-u)\sqrt{xy}}{ct}\Big\}(1-u)^{\kappa-1}du =1.
$$
This completes the proof.
\end{proof}
\end{document}
|
\begin{document}
\setcounter{secnumdepth}{4}
\setcounter{tocdepth}{3}
\counterwithin{paragraph}{subsection}
\numberwithin{equation}{subsection}
\title{The wavefront set over a maximal unramified field extension}
\begin{abstract}
Let $(\pi,X)$ be a depth-$0$ admissible smooth complex representation of a $p$-adic reductive group that splits over an unramified extension.
In this paper we develop the theory necessary to study the wavefront set of $X$ over a maximal unramified field extension of the base $p$-adic field.
In the final section we then apply these methods to compute the geometric wavefront set of spherical Arthur representations of split $p$-adic reductive groups.
In this case we see how the wavefront set over a maximal unramified extension can be computed using perverse sheaves on the Langlands dual group.
\end{abstract}
\tableofcontents
\section*{Introduction}
Let $k$ be a finite extension of $\mathbb Q_p$ with finite residue field $\mathbb F_q$ of sufficiently large characteristic, algebraic closure $\bar k$ and let $\mathbf G(k)$ be the $k$-points of a connected reductive group $\mathbf G$ defined over $k$ and split over an unramified extension of $k$.
Let $\mf g$ be the Lie algebra of $\bfG$, and $\mf g(k)$ be the $k$-points of $\mf g$.
For an admissible smooth irreducible representation $(\pi, X)$ of $\bfG(k)$, the wavefront set of $X$, denoted $\mathrm{WF}(X)$, is a harmonic analytic invariant of $X$ of fundamental importance.
Roughly speaking it measures the direction of the singularities of the character distribution $\Theta_X$ of $X$ near the identity.
More precisely: the Harish-Chandra--Howe local character expansion dictates that there exists an open neighbourhood $\mathcal V$ of the identity and coefficients $c_{\mathbb{O}}(X)\in \mathbb{C}$, one for each $\mathbb{O}$ in the collection, $\mathcal N_o(k)$, of nilpotent orbits of $\mf g(k)$, such that
\begin{equation}
\Theta_{\pi}(f) = \sum_{\mathbb{O} \in \mathcal N_o(k)}c_{\mathbb{O}}(X) \hat \mu_{\mathbb{O}}(f\circ \exp), \quad \forall f\in C_c^\infty(\mathcal V)
\end{equation}
where $\hat \mu_\mathbb{O}$ denotes the Fourier transform of the nilpotent orbital integral associated to $\mathbb{O}\in \mathcal N_o(k)$.
The \textit{($p$-adic) wavefront set} is the set (not necessarily a singleton)
$$\mathrm{WF}(X) := \max \{\mathbb{O}:c_{\mathbb{O}}(X)\ne 0\} \subseteq \mathcal N_o(k)$$
where the maximum is taken with respect to the closure ordering on the $k$-rational nilpotent orbits.
Much can be said about $X$ from the wavefront set.
When $\mathrm{WF}(X)$ consists of regular nilpotent elements, a famed result of Rodier \cite{rodier} states that $X$ admits a Whittaker model.
If an orbit in $\mathrm{WF}(X)$ meets a Levi subgroup of $\bfG(k)$, then the work of Moeglin and Waldspurger \cite{waldmoeg} shows that $X$ cannot be supercuspidal.
From an analytic point of view the wavefront set controls the asymptotic growth of the space of vectors fixed by the Moy--Prasad filtration subgroups as you go further along the filtration \cite[Section 5.1]{barbaschmoy}.
The wavefront set is also expected to play an important role in the definition of Arthur packets for $p$-adic groups, making it relevant to the study of automorphic forms and the unitary dual.
This distinguishes the wavefront set as a particularly powerful invariant to study.
However, in practice it is notoriously difficult to compute.
Moeglin and Waldspurger \cite{waldmoeg} have calculated the wavefront set for irreducible smooth representations of $\mathbf{GL}_n$ and for irreducible subquotients of the regular principal series for split classical groups, but little is known in general.
A slightly coarser invariant, which one might hope to have more control over, is the \textit{geometric wavefront set} $^{\bar k}\mathrm{WF}(X)$.
This is defined to be
$$^{\bar k}\mathrm{WF}(X) := \max\{\mathcal N_o(\bar k/k)(\mathbb{O}):c_{\mathbb{O}}(X)\ne 0\} \subseteq \mathcal N_o(\bar k)$$
where $\mathcal N_o(\bar k)$ denotes the set of $\bar k$-rational nilpotent orbits and $\mathcal N_o(\bar k/k)(\mathbb{O})$ denotes the (unique) $\bar k$-rational nilpotent orbit of $\mf g(\bar k)$ that $\mathbb{O}$ lies in (see page 9 for the general definition of $\mathcal N_o$).
Much more is known about the geometric wavefront set.
For classical groups, Moeglin \cite{moeglin} showed that it must always be a special orbit, and in \cite{wso3} and \cite{tempunip}, Waldspurger computed $^{\bar k}\mathrm{WF}(X)$ for anti-tempered and tempered unipotent representations of the pure inner twists of the split form of $SO(2n+1)$.
Moreover, in analogy with real reductive groups and finite groups of Lie type, it is expected that there is a \textit{single} nilpotent orbit in the geometric wavefront set and that all the nilpotent orbits in $\mathrm{WF}(X)$ lie in it.
In this sense, $^{\bar k}\mathrm{WF}(X)$ is a good first approximation for $\mathrm{WF}(X)$.
In the first part of this paper we shall concern ourselves with studying the wavefront set over an intermediate field.
This will result in two new invariants which provide richer information than the geometric wavefront set, but are still more tractable than the $p$-adic wavefront set.
Let $K$ be the maximal unramified extension of $k$ in $\bar k$.
The residue field of $K$ is naturally an algebraic closure of $\mathbb F_q$ so we will write $\barF_q$ for the residue field of $K$.
Define the \textit{unramified wavefront set} to be
$$^K\widetilde{\mathrm{WF}}(X) := \max\{\mathcal N_o(K/k)(\mathbb{O}):c_{\mathbb{O}}(X)\ne 0\} \subseteq \mathcal N_o(K)$$
where $\mathcal N_o(K)$ denotes the set of $K$-rational nilpotent orbits (which we henceforth refer to as \textit{unramified nilpotent orbits}) and $\mathcal N_o(K/k)(\mathbb{O})$ denotes the $K$-rational nilpotent orbit of $\mf g(K)$ that $\mathbb{O}$ lies in.
The motivation for this modification comes from the work of Barbasch and Moy \cite{barbaschmoy} where they relate the coefficients $c_{\mathbb{O}}(X)$ to representations of the reductive quotients of the parahoric subgroups of $\bfG(k)$.
The introduction of the field $K$ is inevitable if one wishes to lift geometric results from the reductive quotients up to the $p$-adic group, and much trouble is taken in \cite{barbaschmoy} to interpret these results over the original field $k$ again.
What we show in this paper is that one should accept the field $K$ as a fact of life - and once one does this many statements take a more natural form and a lot of new structure becomes apparent.
The first main result of this paper - which we now state - illustrates this well.
Let $\mathcal B(\bfG,k)$ denote the Bruhat--Tits building of $\bfG(k)$.
For each face $c$ of $\mathcal B(\bfG,k)$ recall that we have the following short exact sequence
$$1\to \bfU_c(\mf o) \to \bfP_c(\mf o) \to \bfL_c(\mathbb F_q) \to 1.$$
The group $\bfL_c(\mathbb F_q)$ is a finite group of Lie type, and the space of fixed vectors $X^{\bfU_c(\mf o)}$ is a finite dimensional representation of $\bfL_c(\mathbb F_q)$.
As we alluded to earlier, the wavefront set is an invariant which also makes sense for representations of finite groups of Lie type and we write
$$^{\barF_q}\mathrm{WF}(X^{\bfU_c(\mf o)})$$
for the (geometric) wavefront set of the representation $X^{\bfU_c(\mf o)}$ of $\bfL_c(\mathbb F_q)$.
This is a collection of nilpotent orbits of $\bfL_c$ over $\barF_q$ (see Section \ref{sec:kawanaka} for precise details).
Motivated by the work of \cite{barbaschmoy}, \cite{debacker} and \cite{waldsnil} we introduce a lifting map $\mathcal L_c$ from the partially ordered set of $\barF_q$-rational nilpotent orbits of $\bfL_c$ to the partially ordered set of $K$-rational nilpotent orbits of $\bfG(k)$.
The wavefront sets of the representations of the reductive quotients are then related to the wavefront set of $X$ by the following theorem.
\begin{theorem}
\label{thm:localwf}
[Theorem \ref{lem:liftwf}]
Let $(\pi,X)$ be a depth-$0$ representation of $\bfG(k)$.
Then
\begin{equation}
^K\widetilde{\mathrm{WF}}(X) = \max_{c\subseteq \mathcal B(\bfG,k)}\mathcal L_c(\hphantom{ }^{\barF_q}\mathrm{WF}(X^{\bfU_c(\mf o)})).
\end{equation}
In fact one can restrict $c$ to range over the faces (or vertices) of any fixed chamber of $\mathcal B(\bfG,k)$.
\end{theorem}
This easily yields the following corollary.
\begin{corollary}
\label{cor:geomwf}
[Proposition \ref{prop:wfs}, Theorem \ref{lem:liftwf}]
Let $(\pi,X)$ be a depth-$0$ representation of $\bfG(k)$.
Then
\begin{equation}
^{\bar k}\mathrm{WF}(X) = \max_{c\subseteq c_0} \mathcal N_o(\bar k/K) (\mathcal L_c(\hphantom{ }^{\barF_q}\mathrm{WF}(X^{\bfU_c(\mf o)})))
\end{equation}
where $c_0$ is any chamber of $\mathcal B(\bfG,k)$ and $\mathcal N_o(\bar k/K)$ is defined on page 9.
\end{corollary}
This corollary cleanly repackages the main idea of \cite[Section 5.1]{barbaschmoy} (indeed it makes precise \cite[Proposition 5.2]{barbaschmoy}) and in a sense this alone would be a satisfactory conclusion to our foray into unramified territory.
However from a philosophical perspective Theorem \ref{thm:localwf} suggests that the unramified wavefront set is natural in its own right and warrants further investigation.
The next section of this paper investigates the unramified wavefront set closer.
The main practical obstruction to doing this is the partially ordered set $\mathcal N_o(K)$ for which very little is known.
The main result of this section is a natural parameterisation of this set which depends equivariantly on the choice of hyperspecial point.
To make this precise, let us fix some notation.
Let $\bfG_K$ denote the base change of $\bfG$ along $\mathrm{Spec}(K)\to \mathrm{Spec}(k)$ (recall by assumption that $\bfG_K$ is split).
Let $\mathcal B(\bfG,K)$ be the Bruhat-Tits building for $\bfG_K(K)=\bfG(K)$ and let $\bfT_K$ be a maximal $K$-split torus of $\bfG_K$.
A hyperspecial face of $\mathcal B(\bfG,K)$ is a face that contains a hyperspecial point.
Let $\mathscr H$ denote the set of hyperspecial faces of $\mathcal B(\bfG,K)$ and $\bfG(K)\backslash\mathscr H$ denote the set of $\bfG(K)$-orbits of hyperspecial faces.
From the root data attached to $\bfT_K$ we may construct complex reductive groups $G$ (resp. $G^\vee$) with the same (resp. dual) root data.
Let $\mathcal N_{o,c}$ denote the set of all pairs $(\mathbb{O},C)$ where $\mathbb{O}$ is a complex nilpotent orbit of $G$ and $C$ is a conjugacy class of $A(\mathbb{O})$ - the $G$-equivariant fundamental group of $\mathbb{O}$.
Let $Z_G$ denote the center of $G$ and $A(Z_G)$ be the group of components of $Z_G$.
In Section \ref{par:equiv} we define an action of $A(Z_G)$ on $\mathcal N_{o,c}$ and a simply transitive action of $A(Z_G)$ on $\bfG(K)\backslash \mathscr H$.
\begin{theorem}
\label{thm:unramparam}
[Theorem \ref{thm:unramifiedparam},Theorem \ref{thm:naturality},Proposition \ref{prop:equivariance}]
For each orbit $\mathscr O\in \bfG(K)\backslash \mathscr H$ there is a bijection
\begin{equation}
\theta_{\mathscr O,\bfT_K}:\mathcal N_o(K) \xrightarrow{\sim} \mathcal N_{o,c}.
\end{equation}
This map is natural in $\bfT_K$ and $A(Z_G)$-equivariant in $\mathscr O$.
\end{theorem}
We first remark that the set $\mathcal N_{o,c}$ might look familiar to readers for its resemblance to the parameters arising in the Springer correspondence.
In that setting one considers the set of pairs $(\mathbb{O},\rho)$ where $\mathbb{O}$ is a complex nilpotent orbit of $G$ and $\rho$ is an \emph{irreducible representation} of $A(\mathbb{O})$.
The similarity is of course not precise, but certainly suggests some tantalising connections.
More than simply being suggestive however, the set $\mathcal N_{o,c}$ is well studied in its own right.
It naturally arises (non-canonically) as the parameterising set for nilpotent orbits of finite groups of Lie type and in Sommers' work generalising the Bala--Carter theorem for nilpotent orbits \cite{Sommers2001}.
It is also the domain for a powerful extension of the incredibly important Barbasch--Lusztig--Spaltenstein--Vogan duality map $d:\mathcal N_o\to \mathcal N_o^\vee$ going from complex nilpotent orbits of $G$ to complex nilpotent orbits of $G^\vee$.
This extension $d_S:\mathcal N_{o,c}\to \mathcal N_o^\vee$, discovered by Sommers, but also apparent in earlier work by Lusztig, extends the map $d$ in the sense that
$$d_S(\mathbb{O},1) = d(\mathbb{O})$$
where $1$ denotes the trivial conjugacy class, and is notable because in contrast to $d$, the map $d_S$ is surjective.
Fixing an orbit $\mathscr O\in \bfG(K)\backslash \mathscr H$ and using the bijection $\theta_{\mathscr O,\bfT_K}$ from Theorem \ref{thm:unramparam} one can of course interpret the map $d_S$ as a map
$$d_{S,\mathscr O,\bfT_K}:\mathcal N_o(K) \to \mathcal N_o^\vee.$$
Although this map ostensibly depends on $\mathscr O$, we show in Proposition \ref{prop:indpd} that it is in fact \emph{independent} of the choice of $\mathscr O$.
Thus let us discard the $\mathscr O$ from the notation and simply write
$$d_{S,\bfT_K}:\mathcal N_o(K)\to \mathcal N_o^\vee.$$
Having dispensed with the dependence on $\mathscr O$, and given the naturality of $d_{S,\bfT_K}$ in $\bfT_K$, the map $d_{S,\bfT_K}$ certainly seems natural (in the colloquial sense of the word; indeed one might wonder if it admits a more intrinsic construction than the one we have given).
This raises the obvious - and pertinent - question: is $d_S$ an order reversing map?
From a purely philosophical perspective the answer ought to be yes - duality maps \emph{should} be order reversing.
Indeed we provide some evidence to support this belief in Lemma \ref{lem:orderrev}.
However the partial order on $\mathcal N_o(K)$ is difficult to study and the bijection $\theta_{\mathscr O,\bfT_K}$ is not so well suited to give easy answers to this question.
The best we can do in this paper is leave this as a conjecture.
\begin{conjecture}
\label{conj:orderrev}
The map $d_{S,\bfT_K}:\mathcal N_o(K)\to \mathcal N_o^\vee$ is order reversing.
\end{conjecture}
But let us set aside this issue for the moment.
The work of Achar in \cite{achar} strongly suggests that there is merit in \emph{declaring} $d_{S,\bfT_K}$ to be order-reversing.
What we mean by this is we introduce a new order on $\mathcal N_o(K)$ where $d_S$ is order reversing by design.
For $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(K)$ define
$$\mathbb{O}_1\le_A\mathbb{O}_2\quad \text{ if } \quad \mathcal N_o(\bar k/K)(\mathbb{O}_1)\le \mathcal N_o(\bar k/K)(\mathbb{O}_2) \text{ and } d_{S,\bfT_K}(\mathbb{O}_1)\ge d_{S,\bfT_K}(\mathbb{O}_2).$$
This is the \emph{finest} pre-order on $\mathcal N_o(K)$ that makes $\mathcal N_o(\bar k/K)$ order preserving and $d_{S,\bfT_K}$ order reversing.
The naturality of $d_{S,\bfT_K}$ in $\bfT_K$ means that this pre-order is independent of the choice of maximal $K$-split torus $\bfT_K$.
It is not a partial order because there are orbits $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(K)$ such that $\mathbb{O}_1\le_A \mathbb{O}_2$ and $\mathbb{O}_2\le_A \mathbb{O}_1$ but $\mathbb{O}_1 \ne \mathbb{O}_2$.
However, if we define $\mathbb{O}_1\sim_A\mathbb{O}_2$ when $\mathbb{O}_1\le_A\mathbb{O}_2$ and $\mathbb{O}_2\le_A\mathbb{O}_1$ then $\le_A$ descends to a partial order on $\mathcal N_o(K)/\sim_A$ and we can also obtain an explicit parameterisation of this set.
Let $\mathcal N_{o,\bar c}$ denote the set of all pairs $(\mathbb{O},\bar C)$ of complex nilpotent orbits of $G$ and conjugacy classes $\bar C$ of $\bar A(\mathbb{O})$ - Lusztig's canonical quotient of $A(\mathbb{O})$ (in the sense of \cite[Section 5]{Sommers2001}).
Let $\mf Q:\mathcal N_{o,c}\to \mathcal N_{o,\bar c}$ be the map induced by the quotient map $A(\mathbb{O})\to \bar A(\mathbb{O})$ (see the discussion preceding Proposition \ref{prop:indpd} for details).
\begin{theorem}
[Theorem \ref{thm:thetabar}]
Let $\mathscr O\in \bfG(K)\backslash \mathscr H$.
The composition $\mf Q\circ\theta_{\mathscr O,\bfT_K}$ descends to a natural (in $\bfT_K$) bijection
$$\bar\theta_{\bfT_K}:\mathcal N_o(K)/\sim_A \xrightarrow{\sim} \mathcal N_{o,\bar c}$$
which does not depend on $\mathscr O$.
\end{theorem}
Crucially for us, the partial order $\le_A$ on $\mathcal N_{o}(K)/\sim_A$ is considerably easier to compute in practice than the closure ordering on $\mathcal N_o(K)$.
This motivates our next definition.
We define the \emph{canonical unramified wavefront set} to be
$$^K\mathrm{WF}(X):=\max_{c\subseteq \mathcal B(\bfG,k)}[\mathcal L_c(\hphantom{ }^{\barF_q}\mathrm{WF}(X^{\bfU_c(\mf o)}))] \quad (\subseteq \mathcal N_o(K)/\sim_A)$$
where $[\bullet]:\mathcal N_o(K)\to \mathcal N_o(K)/\sim_A$ is the natural quotient map.
The compatibility between $\le_A$ and $\mathcal N_o(\bar k/K)$ ensures that an analogue of Corollary \ref{cor:geomwf} (Theorem \ref{thm:algwf}) holds for $^K\mathrm{WF}(X)$, but $^K\mathrm{WF}(X)$ also has the added benefit that it is frequently (conjecturally always) a singleton.
When $^K\mathrm{WF}(X)$ is a singleton, if we view $^K\mathrm{WF}(X)$ as an element of $\mathcal N_{o,\bar c}$ (via $\bar \theta_{\bfT_K}$) and $^{\bar k}\mathrm{WF}(X)$ as an element of $\mathcal N_o$ (under the natural isomorphism between $\mathcal N_o(\bar k)$ and $\mathcal N_o$), then $^K\mathrm{WF}(X)$ takes the form
$$^K\mathrm{WF}(X) = (\hphantom{ }^{\bar k}\mathrm{WF}(X),\bar C)$$
for some $\bar C\in \bar A(\hphantom{ }^{\bar k}\mathrm{WF}(X))$.
For those familiar with representations of real reductive groups, this is reminiscent of the associated cycle of a representation, but of course differs crucially in that we are dealing with conjugacy classes rather than irreducible representations, and with $\bar A(\hphantom{ }^{\bar k}\mathrm{WF}(X))$ instead of $A(\hphantom{ }^{\bar k}\mathrm{WF}(X))$.
Beyond being the culmination of a series of `natural' considerations, it is not a priori clear what utility this new invariant has.
Although we will not address this matter in this paper, in joint work with Dan Ciubotaru and Lucas Mason-Brown \cite{cmo}, we use the canonical unramified wavefront set to construct anti-tempered Arthur packets for $p$-adic groups.
This approach crucially relies on the information encoded in the canonical unramified wavefront set and fails if one attempts to use the geometric wavefront set instead.
Let us now briefly digress to explain the terminology used.
In the language of partial orders, Conjecture \ref{conj:orderrev} is equivalent to $[\bullet]:\mathcal N_o(K)\to \mathcal N_o(K)/\sim_A$ being a homomorphism.
Under the assumption that this is true then
$$^K\mathrm{WF}(X) = \max\{[\mathcal N_o(K/k)(\mathbb{O})]:c_{\mathbb{O}}(X)\ne 0\}$$
for depth-$0$ representations and so indeed $^K\mathrm{WF}(X)$ is a `wavefront set'.
If we further assume that all the orbits of $^K\widetilde{\mathrm{WF}}(X)$ lie in a single geometric orbit, then $^K\mathrm{WF}(X)$ simply picks out the $\sim_A$ classes of elements in $^K\widetilde{\mathrm{WF}}(X)$ that minimise $d_S$.
Conjecturally there is a unique such class - a `canonical' such class with respect to this property if you will.
Perhaps `distinguished' would have been a better modifier, but that adjective already has an important meaning in the context of nilpotent orbits.
The third and final section of this paper is dedicated to developing the tools needed to compute $^K\mathrm{WF}(X)$ for irreducible representations in the principal block of $\mathrm{Rep}(\bfG(k))$, the category of smooth complex $\bfG(k)$-representations, when $\bfG$ is split over $k$.
Recall that the principal block of $\mathrm{Rep}(\bfG(k))$, which we denote $\mathrm{Rep}_{\bfI}(\bfG(k))$, consists of those representations that are generated by their Iwahori fixed vectors and is equivalent to the category of modules of the Iwahori--Hecke algebra $\mathcal H_{\bfI}$ of $\bfG(k)$.
Moreover, when $X$ is admissible, by the theory of unrefined minimal $K$-types, the representations $X^{\bfU_c(\mf o)}$ are sums of principal series unipotent representations.
The wavefront sets of such representations have a particularly simple expression connected to the Hecke algebra of $\bfL_c(\mathbb F_q)$.
We use the compatibility of these Hecke algebras with the Iwahori--Hecke algebra and a simple deformation argument to obtain an explicit algorithm in Theorem \ref{thm:locwf} for computing $^K\mathrm{WF}(X)$.
Finally, we use the tools developed in this section to compute $^K\mathrm{WF}(X)$ and $^{\bar k}\mathrm{WF}(X)$ for the spherical Arthur representations of a split adjoint group over $k$.
For $\mb G$ a reductive group defined and split over a number field $F$, these are expected to be the spherical representations arising as local factors of irreducible subrepresentations of $L^2_{disc}(\mb G(F)\backslash \mb G(\mathbb A_F))$ (see \cite{as1}, \cite{as2}, \cite{as3}, \cite{as4}, \cite{as5}, \cite{as6}, for proofs in various special cases. See \cite{as7} for a uniform proof that all the spherical Arthur representations arise in this way).
Note that knowledge of the geometric wavefront set of spherical Arthur representations provides valuable structural insight for automorphic representations.
In particular the geometric wavefront set of the local factors bound the Fourier coefficients of the automorphic form (see \cite{gomez2020whittaker}).
We now state our results for the non-archimedean spherical Arthur representations in terms of their Arthur parameters.
Let $\mb G$ be defined and split over the $p$-adic field $k$.
Let $G^\vee$ denote the complex Langlands dual group of $\bfG$, $W_k$ be the Weil group of $k$, and
$$WD_k = W_k\times \mathbf{SL}(2,\mathbb{C})$$
the Weil--Deligne group of $k$.
Let $(\pi,X)$ be the spherical Arthur representation of $\bfG(k)$ lying in the Arthur packet $\psi:WD_k\times\mathbf{SL}(2,\mathbb{C})\to G^\vee$ that is trivial on $WD_k$.
Within this packet, $X$ is the representation corresponding to the trivial representation of $A_\psi$ - the component group of the centraliser of the image of $\psi$.
Let $\psi_0 = \psi\mid_{1\times\mathbf{SL}(2,\mathbb{C})}$ and
\begin{equation}
\ms n = d(\psi_0)\left(\begin{pmatrix}
0 & 1 \\ 0 & 0
\end{pmatrix}\right).
\end{equation}
The nilpotent orbit $\mathbb{O}^\vee:= G^\vee. \ms n$ completely determines the representation $X$ among the spherical Arthur representations and so we refer to $X$ as the spherical Arthur representation with parameter $\mathbb{O}^\vee$.
The final ingredient that we need in order to state the result is a refinement $d_A$ due to Achar \cite{achar} of the duality map $d_S$.
Let $\mathcal N^\vee_{o,c},\mathcal N^\vee_{o,\bar c}$ be the sets $\mathcal N_{o,c},\mathcal N_{o,\bar c}$ defined relative to $G^\vee$ instead of $G$. The duality $d_A$ is a map
\[d_A: \mathcal N^\vee_{o,\bar c}\rightarrow \mathcal N_{o,\bar c}
\]
satisfying certain properties. In particular,
\[d_A(\mathbb{O}^\vee,1)=(d(\mathbb{O}^\vee),\bar C'),
\]
for some class $\bar C'$ which is the trivial class when $\mathbb{O}^\vee$ is special in the sense of Lusztig.
Using the bijection $\bar\theta_{\bfT_K}$ we view $d_A$ as a map $\mathcal N_{o,\bar c}^\vee \to \mathcal N_o(K)/\sim_A$.
Similarly, using the bijection from Lemma \ref{lem:pom} we view $d$ as a map $\mathcal N_o^\vee \to \mathcal N_o(\bar k)$.
\begin{theorem}
[Theorem \ref{thm:arthurwf}]
Let $X$ be the spherical Arthur representation with parameter $\mathbb{O}^\vee\in \mathcal N_o^\vee$.
Then $^K\mathrm{WF}(X)$ is a singleton and
\begin{equation}
\hphantom{ }^{K}\mathrm{WF}(X) = d_A(\mathbb{O}^\vee,1),\quad \hphantom{ }^{\bar k}\mathrm{WF}(X) = d(\mathbb{O}^\vee).
\end{equation}
\end{theorem}
\section{The Wavefront Set}
\paragraph{Basic Notation}
\label{par:basicnotation1}
Let $k$ be a non-archimedean local field of characteristic $0$ with residue field $\mathbb{F}_q$ of sufficiently large characteristic and ring of integers $\mathfrak{o} \subset k$.
\nomenclature{$k$}
\nomenclature{$\mf o$}{}
\nomenclature{$\mathbb F_q$}{}
\nomenclature{$\bar k$}{}
\nomenclature{$K$}{}
\nomenclature{$\mf O$}{}
\nomenclature{$\barF_q$}{}
\nomenclature{$\chi$}{}
Let $\mf p\subset \mf o$ be the maximal ideal of $\mf o$, fix an algebraic closure $\bar{k}$ of $k$ and let $K \subset \bar{k}$ be the maximal unramified extension of $k$ in $\bar{k}$.
Let $\mf O$ be the ring of integers of $K$.
The residue field of $K$ is an algebraic closure for $\mathbb F_q$ so we write $\barF_q$ for the residue field of $K$.
Let $\chi:k\to \mathbb{C}^\times$ be an additive character of $k$ that is trivial on $\mf p$ and non-trivial on $\mf o$.
$\chi$ descends to a character of $\mathbb F_q$ and we will refer to the resulting character also as $\chi$.
\nomenclature{$\bfG$}{}
\nomenclature{$\mf g$}{}
\nomenclature{$\bfG_K$}{}
\nomenclature{$\bfT_K$}{}
\nomenclature{$\bfG_K$}{}
\nomenclature{$X^*(\bfT_K,\bar k)$}{}
\nomenclature{$X_*(\bfT_K,\bar k)$}{}
\nomenclature{$\Phi(\bfT_K,\bar k)$}{}
\nomenclature{$\Phi^\vee(\bfT_K,\bar k)$}{}
Let $\bfG$ be a connected reductive algebraic group defined over $k$, that splits over an unramified extension of $k$.
Let $\mf g$ denote its Lie algebra.
Let $\bfG_K$ denote the base change of $\bfG$ along $$\mathrm{Spec}(K)\to \mathrm{Spec}(k).$$
Note that $\bfG_K$ is a split group.
Let $\bfT_K \subset \mathbf{G}_K$ be a $K$-split maximal torus.
For any field extension $F$ of $k$, we write $\bfG(F)$, $\mf g(F)$ etc. for the $F$ rational points.
\nomenclature{$\mathcal R(\bfG_K,\bfT_K)$}{}
Write $X^*(\bfT_K,\bar k)$ (resp. $X_*(\bfT_K,\bar k)$) for the lattice of algebraic characters (resp. co-characters) of $\mathbf{T}_K$, $\langle,\rangle$ for the canonical pairing between $X^*(\bfT_K,\bar k)$ and $X_*(\bfT_K,\bar k)$, and write $\Phi(\mathbf{T}_K,\bar k)$ (resp. $\Phi^{\vee}(\mathbf{T}_K,\bar k)$) for the set of roots (resp. co-roots) of $\bfG_K$.
Write
$$\mathcal R(\bfG_K,\bfT_K)=(X^*(\mathbf{T}_K,\bar k), \ \Phi(\mathbf{T}_K,\bar k),X_*(\mathbf{T}_K,\bar k), \ \Phi^\vee(\mathbf{T}_K,\bar k), \ \langle \ , \ \rangle)$$
for the absolute root datum of $\mathbf{G}$, and let $W$ be the associated (finite) Weyl group.
\nomenclature{$\bfG_\mathbb{Z}$}{}
\nomenclature{$\bfT_\mathbb{Z}$}{}
Let $\mathbf{G}_\mathbb{Z}$ be the connected reductive algebraic group defined (and split) over $\mathbb{Z}$ with split maximal torus $\bfT_\mathbb{Z}$ such that the root datum of $\bfG_\mathbb{Z}$ with respect to $\bfT_\mathbb{Z}$ is isomorphic to $\mathcal R$.
Let $\mathbf{G}_\mathbb{Z}^\vee$ be the Langlands dual group of $\bfG$, i.e. the connected reductive algebraic group corresponding to the root datum
$$\mathcal R^\vee=(X_*(\mathbf{T}_K,\bar k), \ \Phi^{\vee}(\mathbf{T}_K,\bar k), X^*(\mathbf{T}_K,\bar k), \ \Phi(\mathbf{T}_K,\bar k), \ \langle \ , \ \rangle)$$
defined (and split) over $\mathbb{Z}$.
\nomenclature{$\bfG^\vee_\mathbb{Z}$}{}
\nomenclature{$\bfT^\vee_\mathbb{Z}$}{}
Set $T^\vee=X^*(\bfT_K,\bar k)\otimes_\mathbb{Z} \mathbb{C}^\times$, regarded as a maximal torus in $G^\vee:=\bfG_\mathbb{Z}^\vee(\mathbb{C})$ with Lie algebra $\mathbf{\mathfrak t}^\vee=X^*(\bfT_K,\bar k)\otimes_{\mathbb Z} \mathbb C$, a Cartan subalgebra of the Lie algebra $\mathbf{\mathfrak g}^\vee$ of $\bfG^\vee$.
Define
\begin{align}\label{eq:real}
\begin{split}
T^\vee_{\mathbb R} &=X^*(\bfT_K,\bark)\otimes_{\mathbb Z} {\mathbb R}_{>0}\\
\mathbf{\mathfrak t}_{\mathbb R}^\vee &= X^*(\bfT_K,\bark)\otimes_{\mathbb Z} \mathbb R\\
T^\vee_c &=X^*(\bfT_K,\bark)\otimes_{\mathbb Z} S^1.
\end{split}
\end{align}
There is a polar decomposition $T^\vee=T^\vee_c T ^\vee_{\mathbb R}$.
\nomenclature{$T^\vee$}{}
\nomenclature{$T_\mathbb{R}^\vee$}{}
\nomenclature{$T_c^\vee$}{}
\nomenclature{$\mf t^\vee$}{}
\nomenclature{$\mf t_\mathbb{R}^\vee$}{}
\nomenclature{$\mathrm{Field}_k$}{}
\nomenclature{$\mathcal N(\bullet)$}{}
\nomenclature{$\mathcal N_o(\bullet)$}{}
Let $\mathrm{Field}_k$ denote the category of field extensions of $k$.
Let $\mathcal N$ be the functor from $\mathrm{Field}_k$ to $\mathrm{Set}$ which takes a field extension $F$ of $k$ to the set of nilpotent elements of $\mf g(F)$.
By nilpotence in this context we mean the unstable points (in the sense of GIT) with respect to the adjoint action of $\bfG(F)$, see \cite[Section 2]{debacker}.
For $F$ algebraically closed this coincides with all the usual notions of nilpotence.
Let $\mathcal N_o$ be the functor which takes $F$ to the set of orbits in $\mathcal N(F)$ under the adjoint action of $\bfG(F)$.
For $\ms H\in \bfG(F)$ and $\ms x\in \mf g(F)$ we write $\ms H.\ms x$ for the adjoint action of $\ms H$ on $\ms x$.
We briefly remark how $\mathcal N_o$ behaves on morphisms.
Given field extensions $F_1,F_2\in\mathrm{Field}_k$ and a morphism $F_1\to F_2$ we have natural inclusion maps
$$\mf g(F_1)\to \mf g(F_2) \text{ and } \bfG(F_1)\to \bfG(F_2).$$
Thus given a $\bfG(F_1)$ orbit $\mathbb{O}\subset \mathcal N(F_1)$ we can form the orbit
$$\bfG(F_2).\mathbb{O}\subset \mathcal N(F_2).$$
We define $\mathcal N_o(F_1\to F_2)(\mathbb{O})$ to be this orbit.
When we wish to emphasise the group we are working with we include it as a superscript e.g. $\mathcal N_o^{\bfG_\mathbb{Z}}$.
When $F$ is algebraically closed, we view $\mathcal N_o(F)$ as a partially ordered set with respect to the closure ordering in the Zariski topology.
When $F$ is $k$ or $K$, we view $\mathcal N_o(F)$ as a pre-ordered set with respect to the closure ordering in the topology induced by the topology on $F$.
When $F=k$ it is well known that the pre-order is a partial order \cite[Section 2.5]{debacker_homog}.
When $F=K$ we will show in \ref{cor:partialorder} that the pre-order is a partial order.
For brevity we will write $\mathcal N(F'/F)$ (resp. $\mathcal N_o(F'/F)$) for $\mathcal N(F\to F')$ (resp. $\mathcal N_o(F\to F')$) where $F\to F'$ is a morphism of fields.
Recall the following classical result.
\begin{lemma}[\cite{Pommerening},\cite{Pommerening2}]\label{lem:pom}
Let $F \in \mathrm{Field}_k$ be algebraically closed with good characteristic for $\bfG$.
Then there is a canonical isomorphism of partially ordered sets $\Lambda_{\bfT_K}^F:\mathcal N_o(F)\xrightarrow{\sim}\mathcal N_o^{\bfG_\mathbb{Z}}(\mathbb{C})$.
\nomenclature{$\Lambda_{\bfT_K}^F$}{}
\end{lemma}
\begin{remark}
We include the $\bfT_K$ as a subscript because the definition of the group $\bfG_\mathbb{Z}$ depends on the choice of torus.
However $\Lambda_{\bfT_K}^{F}$ is natural in $\bfT_K$ in an analogous sense to Theorem \ref{thm:naturality}.
\end{remark}
When $F$ is algebraically closed let $\mathcal N_{o,sp}(F)$ denote the set of special orbits in the sense of Lusztig \cite[Definition 13.1.1]{chars}.
\nomenclature{$\mathcal N_{o,sp}(\bullet)$}{}
\paragraph{Buildings, Parahorics and Associated Notation}
\label{sec:buildings}
\nomenclature{$\mathcal B(\bfG,k),\mathcal B(\bfG,K)$}{}
\nomenclature{$\mathcal A(\bfT,k),\mathcal A(\bfT,K)$}{}
\nomenclature{$\mathcal A(c,\mathcal A)$}{}
Let $\mathcal B(\bfG,k)$ (resp. $\mathcal B(\bfG,K)$) denote the (enlarged) Bruhat--Tits building for $\bfG(k)$ (resp. $\bfG(K)$).
We identify $\mathcal B(\bfG,k)$ with the $\Gal(K/k)$-fixed points of $\mathcal B(\bfG,K)$.
We use the notation $c\subseteq \mathcal B(\bfG)$ to indicate that $c$ is a face of $\mathcal B$.
Given a maximal $k$-split torus $\bfT$, write $\mathcal A(\bfT,k)$ for the corresponding apartment in $\mathcal B(\bfG,k)$.
For an apartment $\mathcal A$ of $\mathcal B(\bfG,k)$ and any subset $\Omega\subseteq \mathcal A$ we write $\mathcal A(\Omega,\mathcal A)$ for the smallest affine subspace of $\mathcal A$ containing $\Omega$.
\nomenclature{$\bfP_c^\dagger$}{}
\nomenclature{$\bfP_c$}{}
\nomenclature{$\bfU_c$}{}
\nomenclature{$\bfL^\dagger_c$}{}
\nomenclature{$\bfL_c$}{}
\nomenclature{$\bfp_c$}{}
\nomenclature{$\bfu_c$}{}
\nomenclature{$\bfl_c$}{}
For a face $c\subseteq \mathcal B(\bfG,k)$ there is a group scheme $\bfP_c^\dagger$ defined over $\mathrm{Spec}(\mf o)$ such that $\bfP_c^\dagger(\mf o)$ identifies with the stabiliser of $c$ in $\bfG(k)$. There is an exact sequence \cite[Section 1.2]{reeder}
\begin{equation}
1 \to \bfU_c(\mf o) \to \bfP_c^\dagger(\mf o) \to \bfL_c^\dagger(\mathbb F_q) \to 1,
\end{equation}
where $\bfU_c(\mf o)$ is the pro-unipotent radical of $\bfP_c^\dagger(\mf o)$ and $\bfL_c^\dagger$ is the reductive quotient of the special fibre of $\bfP_c^\dagger$.
Let $\bfL_c$ denote the identity component of $\bfL_c^\dagger$, and let $\bfP_c$ be the subgroup of $\bf P_c^\dagger$ defined over $\mf o$ such that $\bfP_c(\mf o)$ is the inverse image of $\bfL_c(\mathbb F_q)$ in $\bfP_c^\dagger(\mf o)$.
The groups $\bfP_c$ are called \emph{parahoric} subgroups of $\bfG(k)$.
We have analogous short exact sequences
\begin{equation}
1 \to \bfU_c(\mf o) \to \bfP_c(\mf o) \to \bfL_c(\mathbb F_q) \to 1,
\end{equation}
and one on the level of the Lie algebra
\begin{equation}
0 \to \bfu_c(\mf o) \to \bfp_c(\mf o) \to \bfl_c(\mathbb F_q) \to 0.
\end{equation}
When $c$ is a chamber in the building, then we call $\bfP_c$ an \emph{Iwahori subgroup} of $G$.
Let
$$\bfu = \bigcup_{c\subseteq \mathcal B(\bfG,k)}\bfu_c(\mf o)\subseteq \mf g(k), \quad \bfU = \bigcup_{c\subseteq \mathcal B(\bfG,k)}\bfU_c(\mf o)\subseteq \bfG(k).$$
These are the \emph{topologically nilpotent} and \emph{topologically unipotent} elements of $\mf g(k)$ and $\bfG(k)$ respectively.
\nomenclature{$\bfU$}{}
\nomenclature{$\bfu$}{}
\paragraph{Fourier Transforms}
By \cite[Proposition 4.1]{adler_roche}, for $p$ sufficiently large (see the reference for a precise bound for $p$), there exists a symmetric, non-degenerate $\bfG(k)$-invariant bilinear form
\nomenclature{$\ms {B}$}{}
$$\ms B:\mf g(k)\times \mf g(k) \to k$$
such that for every face $c$ of $\mathcal B(\bfG,k)$ we have
$$\bfp_c(\mf o) = \set{\ms X\in \mf g(k): \ms B(\ms X,\ms Y) \in \mf p, \forall Y\in \bfu_c(\mf o)}.$$
Such a bilinear form naturally descends for each face $c$ of $\mathcal B(\bfG,k)$ to a symmetric, non-degenerate $\bfL_c(\mathbb F_q)$-invariant bilinear form
$$\ms B_c:\bfl_c(\mathbb F_q)\times \bfl_c(\mathbb F_q)\to \mathbb F_q.$$
Fix a Haar measure $\mu_{\mf g(k)}$ on $\mf g(k)$.
\nomenclature{$\mathbb{F}T(f),\hat f$}{}
For a function $f\in C_c^\infty(\mf g(k))$ we define \emph{the Fourier transform of $f$} to be
\begin{equation}
\hat f(\ms X) := \mathbb{F}T_{\mf g(k)}(f)(\ms X) := \int_{\mf g(k)}\chi(\ms B(\ms X,\ms Y)) f(\ms Y)d\mu_{\mf g(k)}(\ms Y).
\end{equation}
Let $c$ be a face of $\mathcal B(\bfG,k)$ and $h:\bfl_c(\mathbb F_q)\to \mathbb{C}$ a function.
We define \emph{the Fourier transform of $h$} to be
\begin{equation}
\hat h(\ms x) := \mathbb{F}T_{\bfl_c(\mathbb F_q)}(h)(\ms x) := \sum_{\ms y\in\bfl_c(\mathbb F_q)}\chi(\ms B_c(\ms x,\ms y)) h(\ms y).
\end{equation}
We define $\widetilde h:\mf g(k)\to \mathbb{C}$ to be the function given by
\begin{equation}
\widetilde h(\ms X) =
\begin{cases}
h(\ms X+\bfu_c(\mf o)) & \text{if } \ms X\in \bfp_c(\mf o) \\
0 & \text{ otherwise}.
\end{cases}
\end{equation}
We say \emph{$\widetilde h$ is inflated from $h$} and we have that
$$\mathbb{F}T_{\mf g(k)}(\widetilde h)(\ms X) = \mu_{\mf g(k)}(\bfu_c(\mf o))\cdot \widetilde{\mathbb{F}T_{\bfl_c(\mathbb F_q)}(h)}(\ms X).$$
\paragraph{The Harish-Chandra-Howe Local Character Expansion}
\label{sec:hchlce}
Let $\exp:\bfu\to \bfU$ be the exponential map defined in \cite[Lemma 3.2]{barbaschmoy} and \cite[Section 3.3]{whomg} (the exponential map exists since we are assuming $p$ is sufficiently large. See the references for the precise bounds on $p$).
\nomenclature{$\exp$}{}
The map $\exp:\bfu\to \bfU$ has the property that for every face $c$ of $\mathcal B(\bfG,k)$, we have
$$\exp(\bfu_c(\mf o)) = \bfU_c(\mf o),$$
and $\exp$ descends to the exponential from the nilpotent elements of $\bfl_c(\mathbb F_q)$ to the unipotent elements of $\bfL_c(\mathbb F_q)$.
For a function $f\in C_c^\infty(\bfU)$ let $f\circ \exp$ denote the function in $C_c^\infty(\mf g(k))$ given by
\begin{equation}
f\circ \exp(\ms X) =
\begin{cases}
f(\exp(\ms X)) & \text{if } \ms X \in \bfu \\
0 & \text{if } \ms X\not\in \bfu.
\end{cases}
\end{equation}
For $\mathbb{O}\in \mathcal N_o(k)$ let $\mu_{\mathbb{O}}$ denote the corresponding nilpotent orbital integral.
We have the following result due to DeBacker (building on work by Waldspurger \cite{whomg}).
\begin{theorem}
\cite[Theorem 3.5.2]{debacker_homog}
Let $(\pi,X)$ be a depth-$0$ admissible representation of $\bfG(k)$.
Then there exists $c_{\mathbb{O}}(X)\in \mathbb{C}$ for each $\mathbb{O}\in\mathcal N_o(k)$ such that for $f\in C_c^\infty(\bfU)$ we have
\begin{equation}
\Theta_X(f) = \sum_{\mathbb{O}\in \mathcal N_o(k)} c_{\mathbb{O}}(X)\hat \mu_{\mathbb{O}}(f\circ \exp).
\end{equation}
\end{theorem}
We remark that the local character expansion, and in particular the coefficients $c_\mathbb{O}(X)$, always exists for admissible smooth representations.
The point of this theorem is that for depth-$0$ representations the expansion is valid for functions supported on $\bfU$.
\paragraph{The Wavefront Set of Representations of \texorpdfstring{$p$}{p}-adic Groups}
Let $(\pi,X)$ be a smooth admissible representation of $G$.
\nomenclature{$\mathrm{WF}(X)$}{}
\nomenclature{$^K\widetilde{\mathrm{WF}}(X)$}{}
\nomenclature{$^{\bar k}\mathrm{WF}(X)$}{}
The \emph{($p$-adic) wavefront set} is
$$\mathrm{WF}(X) := \max_{\mathbb{O}:c_\mathbb{O}(X)\ne 0}\mathbb{O},$$
the \emph{unramified wavefront set} is
$$^K\widetilde{\mathrm{WF}}(X) := \max_{\mathbb{O}:c_\mathbb{O}(X)\ne 0}\mathcal N_o(K/k)(\mathbb{O}),$$
and the \emph{geometric wavefront set} is
$$^{\bar k}\mathrm{WF}(X) := \max_{\mathbb{O}:c_\mathbb{O}(X)\ne 0}\mathcal N_o(\bar k/k)(\mathbb{O}).$$
\begin{remark}
In analogy with real groups and finite groups of Lie type it is expected that $^{\bar k}\mathrm{WF}(X)$ consists of a single nilpotent orbit - $\mathbb{O}$ say.
Moreover it is expected that for all $\mathbb{O}'\in \mathrm{WF}(X)$, $\mathcal N_o(\bar k/k)(\mathbb{O}') = \mathbb{O}$ (this is a strictly stronger condition than $^{\bar k}\mathrm{WF}(X)$ being a singleton since a priori there might exist $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(k)$ which are incomparable, but $\mathcal N_o(\bar k/k)(\mathbb{O}_1)<\mathcal N_o(\bar k/k)(\mathbb{O}_2)$).
\end{remark}
\subsection{Lifting Nilpotent Orbits and Closure Relations}
\paragraph{The Lifting Map}
\label{sec:debacker_param}
Let $h$ be the Coxeter number of the absolute Weyl group for $\mathbf G$.
Since we are assuming $p$ is sufficiently large, we in particular require $p>3(h-1)$ so $p$ satisfies the conditions of section \ref{sec:hchlce} and we can apply the results of \cite{debacker} to $\mf g(k)$.
Let $\mathcal A$ be an apartment of $\mathcal B(\bfG,k)$.
For faces $c_1,c_2$ in $\mathcal A$ with $\mathcal A(c_1,\mathcal A) = \mathcal A(c_2,\mathcal A)$ the projection maps
$$\bfP_{c_1}(\mf o)\cap \bfP_{c_2}(\mf o) \to \bfL_{c_1}(\mathbb F_q), \quad \bfP_{c_1}(\mf o)\cap \bfP_{c_2}(\mf o) \to \bfL_{c_2}(\mathbb F_q)$$
are both surjective with kernel $\bfU_{c_1}(\mf o)\cap \bfU_{c_2}(\mf o)$ and so there is an isomorphism
\nomenclature{$i_{c_2,c_1}$}{}
$$i_{c_2,c_1}:\bfL_{c_1}(\mathbb F_q)\to \bfL_{c_2}(\mathbb F_q).$$
We similarly obtain an isomorphism
\nomenclature{$j_{c_2,c_1}$}{}
$$j_{c_2,c_1}:\bfl_{c_1}(\mathbb F_q) \to \bfl_{c_2}(\mathbb F_q)$$
which is compatible with $i_{c_2,c_1}$ in the following sense:
\begin{equation}
j_{c_2,c_1}(\ms h.\ms x) = i_{c_2,c_1}(\ms h).j_{c_2,c_1}(\ms x)
\end{equation}
for all $\ms h\in \bfL_{c_1}(\mathbb F_q),\ms x\in \bfl_{c_1}(\mathbb F_q)$.
For $\ms H \in \bfG(k)$ and $\ms x\in \bfl_c(\mathbb F_q)$ let $\ms H.\ms x$ denote the image of $\ms H.\ms X \in \bf p_{\ms Hc}(\mf o)$ in $\bfl_{\ms Hc}(\mathbb F_q)$ where $\ms X$ is any lift of $\ms x$ to $\bfp_c(\mf o)$.
This is well defined because $\ms H.\bfu_c(\mf o) = \bfu_{\ms Hc}(\mf o)$.
Let
\nomenclature{$I^k$}{}
\nomenclature{$I_d^k$}{}
$$I^k = \{(c,\ms x):c\subseteq \mathcal B(\bfG,k),\ms x \in \mathcal N^{\bfL_c}(\mathbb F_q)\}.$$
Let $I_d^k$ denote the set of pairs $(c,\ms x)\in I^k$ where $\ms x$ is a distinguished nilpotent element of $\bfl_c(\mathbb F_q)$.
For $(c,\ms x)\in I^k$ let $\mathcal C(c,\ms x)$ denote the preimage of $\ms x$ in $\bfp_c(\mf o)$.
\nomenclature{$\sim_k$}{}
\nomenclature{$\mathcal C(c,x)$}{}
For $(c_1,\ms x_1),(c_2,\ms x_2)\in I^k$ we define $(c_1,\ms x_1)\sim_k (c_2,\ms x_2)$ if there exists an $\ms H\in \bfG(k)$ and an apartment $\mathcal A$ such that
$$\mathcal A(c_2,\mathcal A) = \mathcal A(\ms Hc_1,\mathcal A), \text{ and } \ms x_2 = j_{c_2,\ms Hc_1}({\ms H}.\ms x_1).$$
\nomenclature{$\mathcal L_c$}{}
Given a pair $(c,\ms x)\in I^k$ one can attach to it, as in \cite{barbaschmoy} and \cite{debacker}, a well defined \emph{nilpotent orbit $\mathcal L_c(\ms x)\in\mathcal N_o(k)$ called its lift}.
It has the following two useful equivalent characterisations (due to DeBacker in \cite[Lemma 5.3.3]{debacker}):
\begin{enumerate}
\item If $\ms x$ is included into an $\lalg{sl}_2$-triple $\ms x,\ms h, \ms y\in \bfl_c(\mathbb F_q)$, and $\ms X,\ms H,\ms Y\in \bfp_c(\mf o)$ is an $\lalg{sl}_2$-triple such that their images in $\bfl_c(\mathbb F_q)$ are $\ms x,\ms h,\ms y$ respectively (such an $\lalg{sl}_2$-triple always exists), then $\mathcal L_c(\ms x) = {\bfG(k)}.\ms X$.
\item $\mathcal L_c(\ms x)$ is the \emph{unique minimal element} of $\{\mathbb{O} \in \mathcal N_o(k):\mathbb{O} \cap \mathcal C(c,\ms x)\ne \emptyset\}$.
\end{enumerate}
Let $\mathbb{O}\in \mathcal N_o^{\bfL_c}(\mathbb F_q)$.
The nilpotent orbit $\mathcal L_c(\ms x)$ is independent of the choice of $\ms x\in \mathbb{O}$; we write $\mathcal L_c(\mathbb{O})$ for the resulting nilpotent orbit.
\nomenclature{$I_o^k$}{}
Define
$$I_o^k = \{(c,\mathbb{O}):c\subseteq \mathcal B(\bfG,k), \mathbb{O}\in \mathcal N_o^{\bfL_c}(\mathbb F_q)\}$$
and define $I_{o,d}^k$ to be the subset of $I_o^k$ consisting of pairs $(c,\mathbb{O})$ where $\mathbb{O}$ is a distinguished nilpotent orbit of $\bfl_c(\mathbb F_q)$.
\nomenclature{$I^k(\mathbb{O})$}{}
\nomenclature{$I_o^k(\mathbb{O})$}{}
\nomenclature{$I_d^k(\mathbb{O})$}{}
\nomenclature{$I_{o,d}^k(\mathbb{O})$}{}
For $\mathbb{O}\in \mathcal N_o(k)$ write $I^k(\mathbb{O})$ for the set $\set{(c,\ms x)\in I^k:\mathcal L_c(\ms x) = \mathbb{O}}$.
Analogously define $I_o^k(\mathbb{O}), I_d^k(\mathbb{O}), I_{o,d}^k(\mathbb{O})$.
We have the following result due to
Barbasch and Moy \cite[Corollary 3.7]{barbaschmoy} and DeBacker \cite[Theorem 5.6.1]{debacker} classifying the nilpotent orbits of $\mf g(k)$.
\begin{theorem}
\label{thm:nilorbit}
The map $I_d^k\to \mathcal N_o(k), (c,\ms x)\mapsto \mathcal L_c(\ms x)$ descends to a bijective correspondence between $I_d^k/\sim_k$ and $\mathcal N_o(k)$.
\end{theorem}
Note that for all the results in this section we are using the results from \cite{debacker} with $r=0$.
We can similarly define $I^K$, $I_d^K$, $I_o^K$, $I_{o,d}^K$, $I^K(\mathbb{O})$, $I_d^K(\mathbb{O})$, $I_o^K(\mathbb{O})$, $I_{o,d}^K(\mathbb{O})$, $\sim_K$, $\mathcal C$, and $\mathcal L_c$ for $\bfG(K)$ and the results in this section hold verbatim for these objects too.
We must be careful however since $K$ is not complete and this is a necessary condition in \cite{debacker}.
The only time this property is used however is in \cite[Lemma 5.2.1]{debacker}.
We give a proof for this result for $r=0$ and base field $K$ which means that the results in this section do indeed hold verbatim for $\bfG(K)$.
\begin{lemma}
\label{lem:unramlift}
Let $c$ be a face of $\mathcal B(\bfG,K)$ and let $\ms X,\ms H,\ms Y$ be an $\lalg{sl}_2$-triple contained in $\bfp_c(\mf O)$.
Then
\begin{equation}
{\bfU_c(\mf O)}.(\ms X+\ms c_{\bfu_c(\mf O)}(\ms Y)) = \ms X + \bfu_c(\mf O)
\end{equation}
where $\ms c_{\bfu_c(\mf O)}(\ms Y)$ denotes the centraliser of $\ms Y$ in $\bfu_c(\mf O)$.
\end{lemma}
\begin{proof}
Since we are only looking at $\bfG(K) = \bfG_K(K)$, and $\bfG_K$ is split, we may as well assume that $\mb G$ is also split over $k$.
Since $\bfG(K)$ acts transitively on the apartments of $\mathcal B(\bfG,K)$ we may also assume that $c\subseteq\mathcal B(\bfG,k)$.
There is nothing to prove for the $\subseteq$ direction.
For the $\supseteq$ direction let $\ms Z\in \ms X+\bfu_c(\mf O)$.
Since $\ms Z,\ms X,\ms Y\in \mf g(K)$ there is a finite unramified extension $F$ of $k$ such that $\ms Z,\ms X,\ms Y\in \mf g(F)$.
Let $\mf o_F$ be the ring of integers for $F$.
Then since $\mf O\cap F = \mf o_F$ we have that $\ms Z\in \ms X+\bfu_c(\mf o_F)$.
Since $F$ is complete we can apply \cite[Lemma 5.2.1]{debacker} to $\bfG(F)$ and so
$${\bfU_c(\mf o_F)}.(\ms X+\ms c_{\bfu_c(\mf o_F)}(\ms Y)) = \ms X + \bfu_c(\mf o_F).$$
Thus
$$\ms Z\in {\bfU_c(\mf o_F)}.(\ms X+\ms c_{\bfu_c(\mf o_F)}(\ms Y)) \subset {\bfU_c(\mf O)}.(\ms X+\ms c_{\bfu_c(\mf O)}(\ms Y))$$
as required.
\end{proof}
\paragraph{Closure relations}
\label{sec:closurerels}
The set $I_o^K$ comes with additional structure that $I_o^k$ does not.
For $(c_1,\mathbb{O}_1),(c_2,\mathbb{O}_2)\in I_o^K$ define
$$(c_1,\mathbb{O}_1)\le (c_2,\mathbb{O}_2) \text{ if } c_1 = c_2 \text{ and } \mathbb{O}_1 \le \mathbb{O}_2.$$
The following result is implied by the proofs in \cite[Section 3.14]{barbaschmoy}.
\begin{proposition}
\label{prop:lifted_rel}
Let $(c_1,\mathbb{O}_1),(c_2,\mathbb{O}_2)\in I_o^K$ and suppose $(c_1,\mathbb{O}_1)\le (c_2,\mathbb{O}_2)$.
Then $\mathcal L_{c_1}(\mathbb{O}_1)\le \mathcal L_{c_2}(\mathbb{O}_2)$.
\end{proposition}
\nomenclature{$\mathcal L$}{}
In other words, the map $\mathcal L:I_o^K\to \mathcal N_o(K), (c,\mathbb{O})\mapsto \mathcal L_c(\mathbb{O})$ is non-decreasing.
In section \ref{sec:pseudo-levis} we prove that $\mathcal L$ is in fact strictly increasing.
Let $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(K)$ and suppose $\mathbb{O}_1\le \mathbb{O}_2$.
\begin{enumerate}[(1)]
\item We say $\mathbb{O}_1 \le \mathbb{O}_2$ is a \emph{lifted closure relation} if there exists a face $c$ of $\mathcal B(\bfG,K)$ and $(c,\mathbb{O}_i')\in I_o^K(\mathbb{O}_i)$ for $i=1,2$ such that $(c,\mathbb{O}_1')\le (c,\mathbb{O}_2')$ (cf. proposition \ref{prop:lifted_rel}).
\item We say $\mathbb{O}_1 \le \mathbb{O}_2$ is a \emph{degenerate closure relation} if there exists $(c,\ms x)\in I^K(\mathbb{O}_1)$ such that $\mathbb{O}_2 \cap \mathcal C(c,\ms x)\ne \emptyset$ (cf. property 2 of section \ref{sec:debacker_param}).
\end{enumerate}
The following theorem shows that every closure relation in $\mathcal N_o(K)$ can be broken down into a lifted closure relation and a degenerate closure relation.
\begin{theorem}
\label{thm:closurerels}
Let $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(K)$, and suppose $\mathbb{O}_1 \le \mathbb{O}_2$.
Then there exists a $\mathbb{O}_{1.5}\in \mathcal N_o(K)$ such that $\mathbb{O}_1\le \mathbb{O}_{1.5} \le \mathbb{O}_2$ where $\mathbb{O}_1\le \mathbb{O}_{1.5}$ is a lifted closure relation and $\mathbb{O}_{1.5}\le \mathbb{O}_2$ is a degenerate closure relation.
\end{theorem}
\begin{proof}
Let $(c,\mathbb{O}_1')\in I_o^K(\mathbb{O}_1)$.
Let $\ms x \in \mathbb{O}_1'$ and $\ms X\in \mathcal C(c,\ms x)\cap \mathbb{O}_1$.
Since $\bfp_c(\mf O)$ is open in $\mf g(K)$, $\bfp_c(\mf O)\cap \mathbb{O}_2 \ne \emptyset$.
Let $\mathbb{O}'$ be the image of $\mathbb{O}_2\cap \bfp_c(\mf O)$ in $\bfl_c(\overline{\mathbb F}_q)$.
We claim that $\mathbb{O}_1' \subseteq \overline{\mathbb{O}'}$.
Let $U$ be an open subset of $\bfl_c(\overline{\mathbb F}_q)$ containing $\ms x$.
Let $\widetilde U$ be the preimage in $\bfp_c(\mf O)$.
Since $\bfu_c(\mf O)$ is open, the projection map $\bfp_c(\mf O)\to \bfl_c(\overline{\mathbb F}_q)$ is continuous and so $\widetilde U$ is open and contains $\ms X$.
Thus $\widetilde U\cap \mathbb{O}_2 \ne \emptyset$ and so $U\cap \mathbb{O}' \ne \emptyset$.
This proves the claim.
Write $\mathbb{O}' = \cup_i \mathbb{O}'^{(i)}$ as a union of $\bfL_c(\overline{\mathbb F}_q)$ nilpotent orbits.
Then since $\mathbb{O}_1' \subseteq \overline{\mathbb{O}'}$ there exists an $i$ such that $\mathbb{O}_1' \le \mathbb{O}'^{(i)}$.
Let $\mathbb{O}_{1.5}' = \mathbb{O}'^{(i)}$.
By construction $\mathbb{O}_{1.5} := \mathcal L_c(\mathbb{O}_{1.5}')$ has the required properties.
\end{proof}
For $\mathbb{O}\in \mathcal N_o(k)$ we say $\ms X, \ms H, \ms Y$ is an $\lalg{sl}_2$-triple for $\mathbb{O}$ if they are an $\lalg{sl}_2$-triple and $\ms X\in \mathbb{O}$.
We now show that $\mathcal N_o(K/k):\mathcal N_o(k) \to \mathcal N_o(K)$ and $\mathcal N_o(\bar k/k):\mathcal N_o(k) \to \mathcal N_o(\bar k)$ are strictly increasing.
\begin{lemma}
\label{lem:slodowy}
Let $\mathbb{O} \in \mathcal N_o(k)$ and let $\ms X,\ms H,\ms Y$ be an $\lalg{sl}_2$-triple for $\mathbb{O}$.
Let $\ms s = \ms X + \ms c_{\mf g(k)}(\ms Y)$ (a Slodowy slice for $\mathbb{O}$).
Then
\begin{enumerate}[(1)]
\item $\mathbb{O} \cap \ms s = \set{\ms X}$,
\item if $\mathbb{O}' \in \mathcal N_o(k)$ and $\overline{\mathbb{O}'} \cap \ms s \ne \emptyset$ then $\mathbb{O}' \cap \ms s \ne \emptyset$.
\end{enumerate}
\end{lemma}
\begin{proof}
We have the decomposition
\begin{equation}
\label{eq:slodowy}
\mf g(k) = [\mf g(k),\ms X] \oplus \ms c_{\mf g(k)}(\ms Y)
\end{equation}
and $[\mf g(k), \ms X]$ is the tangent space of $\mathbb{O}$ at $\ms X$.
Thus
$$T_{\ms X}(\ms s \cap \mathbb{O}) \hookrightarrow T_{\ms X} \ms s \cap T_{\ms X} \mathbb{O} = 0$$
and so $\ms s \cap \mathbb{O}$ is discrete.
However, if $\ms X'$ is in $\ms s\cap \mathbb{O}$, let $\lambda_{\ms X'}:\mathbf G_m\to \mathbf G$ be a 1-parameter $k$-subgroup such that
$${\lambda_{\ms X'}(t)}.\ms X' = t^{-2}\ms X'$$
and write $\ms X' = \ms X + \ms Z$ where $\ms Z\in \ms c_{\mf g(k)}(\ms Y)$.
Let $\lambda$ be the 1-parameter $k$-subgroup of $\bfG(k)$ attached to $\ms H$.
Write $\mf g(k)(i)$ for the set of $\ms W\in \mf g(k)$ such that $\lambda(t).\ms W = t^i \ms W$ and let
$$\mf g(k)(\le 0) = \bigoplus_{i\le 0}\mf g(k)(i).$$
Since $\ms c_{\mf g(k)}(\ms Y) \subseteq \mf g(k)(\le 0)$, write $\ms Z = \sum_{i\le 0}\ms Z_i$ where $\ms Z_i\in \mf g(k)(i)$.
Then
\begin{equation}
{\lambda(t^{-1})\lambda_{\ms X'}(t^{-1})}.\ms X' = \ms X + \sum_{i\le 0}t^{2-i}\ms Z_i \in \mathbb{O} \cap \ms s
\end{equation}
for all $t\in k$ and $\to \ms X$ as $t\to 0$.
Since $\ms s \cap \mathbb{O}$ is discrete this means that $\ms X' = \ms X$.
This proves (1).
Let $\mathbb{A}d:\bfG(k) \times \ms s \to \mf g(k)$ be the restriction of the adjoint map.
$\mathbb{A}d$ is smooth with differential $T_{(1,\ms X)}$ which is onto by \eqref{eq:slodowy}.
All the varieties in question are smooth and so there exists a Zariski open (and hence open in the topology induced by $k$) subset $V$ in ${\bfG(k)}.\ms s$ containing $\ms X$.
We have that $\overline{\mathbb{O}'}\cap \ms s \ne \emptyset$ and so $\overline{\mathbb{O}'} \cap {\bfG(k)}.\ms s \ne \emptyset$.
Since $\ms s \cap \overline{\mathbb{O}'}$ is closed and non-empty the argument for part (1) shows that $\ms X \in \ms s \cap \overline{\mathbb{O}'}$.
It follows that $\ms X \in \overline{\mathbb{O}'} \cap V$.
But $V$ is open and so $\mathbb{O}' \cap V \ne \emptyset$.
It follows that $\mathbb{O}' \cap {\bfG(k)}.\ms s \ne \emptyset$ and so $\mathbb{O}' \cap \ms s \ne \emptyset$ as required.
\end{proof}
\begin{corollary}
\label{cor:slodowy}
Let $\mathbb{O}, \mathbb{O}'\in \mathcal N_o(k)$.
Let $\ms X,\ms H,\ms Y$ be an $\lalg{sl}_2$-triple for $\mathbb{O}$ and $\ms s = \ms X + \ms c_{\mf g(k)}(\ms Y)$.
Then
\begin{enumerate}
\item $\mathbb{O} = \mathbb{O}'$ iff $\mathbb{O}' \cap \ms s$ is a singleton,
\item $\mathbb{O} < \mathbb{O}'$ (i.e. $\mathbb{O}\le \mathbb{O}'$ and $\mathbb{O}\ne \mathbb{O}'$) iff $\mathbb{O}' \cap \ms s$ has more than one element,
\end{enumerate}
\end{corollary}
\begin{proof}
By Lemma \ref{lem:slodowy} (1), if $\mathbb{O} = \mathbb{O}'$ then $\mathbb{O}' \cap \ms s = \set{\ms X}$.
If $\mathbb{O}'\cap \ms s$ is a singleton then it is closed and the same argument as Lemma \ref{lem:slodowy} (1) gives that $\mathbb{O}' \cap \ms s = \set{\ms X}$ and so $\mathbb{O} = \mathbb{O}'$.
If $\mathbb{O} < \mathbb{O}'$ then $\ms X \in \overline{\mathbb{O}'} \cap \ms s$.
By Lemma \ref{lem:slodowy} (2), $\mathbb{O}' \cap \ms s\ne \emptyset$.
It cannot consist of a single element since by part 1 this would imply $\mathbb{O} = \mathbb{O}'$.
Thus $\mathbb{O}'\cap \ms s$ consists of more than one element.
If $\mathbb{O}' \cap \ms s$ consists of more than one element then $\overline{\mathbb{O}'} \cap \ms s \ne \emptyset$ and so contains $\ms X$.
Thus $\mathbb{O} \le \mathbb{O}'$.
But $\mathbb{O} \ne \mathbb{O}'$ since $\mathbb{O}' \cap \ms s$ is not a singleton and so $\mathbb{O} < \mathbb{O}'$.
\end{proof}
Analogous results to Lemma \ref{lem:slodowy} and Corollary \ref{cor:slodowy} hold for nilpotent orbits of $\mf g(\bar k)$, though different proof methods must be used (see \cite[Lemma 5.10]{ggg-np} for details).
\begin{theorem}
\label{thm:saturation-increasing}
Let $\mathbb{O},\mathbb{O}'\in \mathcal N_o(k)$.
\begin{enumerate}[(1)]
\item If $\mathbb{O} < \mathbb{O}'$ then $\mathcal N_o(K/k)(\mathbb{O}) < \mathcal N_o(K/k)(\mathbb{O}')$.
\item If $\mathbb{O} < \mathbb{O}'$ then $\mathcal N_o(\bar k/k)(\mathbb{O}) < \mathcal N_o(\bar k/k)(\mathbb{O}')$.
\end{enumerate}
\end{theorem}
\begin{proof}
Clearly 2 implies 1 so it suffices to show that $\mathbb{O} < \mathbb{O}' \implies \mathcal N_o(\bar k/k)(\mathbb{O}) < \mathcal N_o(\bar k/k)(\mathbb{O}')$.
Let $\ms X,\ms H, \ms Y$ be an $\lalg{sl}_2$-triple for $\mathbb{O}$ and let $\ms s = \ms X + \ms c_{\mf g(k)}(\ms Y)$.
If $\mathbb{O} < \mathbb{O}'$ then $\mathbb{O}' \cap \ms s$ has more than one element.
Let $\mf s = \ms X + \ms c_{\mf g(\bar k)}(\ms Y) \supseteq \ms s$.
Then $\mathcal N_o(\bar k/k)(\mathbb{O}') \cap \mf s$ contains $\mathbb{O}'\cap \ms s$ and so also has more than one element.
Thus $\mathcal N_o(\bar k/k)(\mathbb{O}) < \mathcal N_o(\bar k/k)(\mathbb{O}')$.
\end{proof}
\subsection{The Wavefront Set of Representations of Finite Groups of Lie Type}
\label{sec:ffltwf}
\paragraph{Generalised Gelfand--Graev Representations}
\label{sec:kwfs}
In section \ref{sec:ffltwf} only let $\mathbf G$ be a connected reductive group defined over $\mathbb F_q$.
Fix an algebraic closure $\overline{\mathbb F}_q$ of $\mathbb F_q$ and let $F:\bfG(\barF_q)\to \bfG(\barF_q)$ be the associated geometric Frobenius (so that $\bfG(\barF_q)^F = \bfG(\mathbb F_q)$).
Let $h$ be the Coxeter number of the absolute Weyl group for $\mathbf G$ and suppose $p>3(h-1)$.
Then the nilpotent cone $\mathcal N(\barF_q)$ of $\mf g(\barF_q)$ may be identified with the unipotent cone of $\mb G(\barF_q)$ via the exponential map $\exp:\mathcal N(\barF_q) \to \mathcal U(\barF_q)$ \cite[Section 2.1]{barbaschmoy}.
Let $\ms B:\mf g(\mathbb F_q)\times \mf g(\mathbb F_q)\to \mathbb F_q$ be a symmetric, non-degenerate $\mb G$-invariant bilinear form and $\chi:\mathbb F_q\to \mathbb{C}^\times$ be a non-trivial character.
Recall that for a function $f:\mf g(\mathbb F_q)\to \mathbb{C}$ \emph{the Fourier transform of $f$} is
$$\hat f(\ms x) = \sum_{\ms y\in \mf g(\mathbb F_q)}\chi(\ms B(\ms x,\ms y))f(\ms y).$$
\nomenclature{$\Gamma_{\ms n},\Gamma_{\mathbb{O}}$}{}
\nomenclature{$\gamma_{\ms n},\gamma_{\mathbb{O}}$}{}
For a nilpotent element $\ms n\in \mathcal N(\mathbb F_q)$ we may associate to it a representation $\Gamma_{\ms n}$ of $\bfG(\mathbb F_q)$ called \emph{the associated Generalised Gelfand--Graev Representation} or GGGR for short (see \cite[Section 2]{lusztig} for details on its construction).
Write $\gamma_{\ms n}$ for the character of $\Gamma_{\ms n}$.
$\gamma_{\ms n}$ has the following key properties (due to Kawanaka \cite{kawanaka}, \cite{kawanakab})
\begin{enumerate}[(1)]
\item $\gamma_{\ms n}$ only depends on the $\mathbb{A}d(\bfG(\mathbb F_q))$-orbit of $\ms n$. If $\mathbb{O} = \bfG(\mathbb F_q).\ms n$ write $\Gamma_\mathbb{O}$ (resp. $\gamma_\mathbb{O}$) for the resulting representation (resp. character),
\item the support of $\gamma_{\ms n}$ is contained in the closure of $\mb G(\barF_q).\exp(\ms n)$.
\end{enumerate}
Let $\ms n=\ms e,\ms h,\ms f$ be an $\lalg{sl}_2$-triple in $\mf g(\barF_q)$ and $\Sigma = -\ms f + \ms c_{\mf g(\barF_q)}(\ms e)$.
Let $r(\ms n) = \frac12 (\dim \mf g(\barF_q)-\dim \ms c_{\mf g(\barF_q)}(\ms n))$.
We have the following result due to Lusztig about the Fourier transform of $\gamma_{\ms n}\circ \exp$ (which we will also refer to as $\gamma_{\ms n}$).
\begin{proposition}
\label{prop:gggvals}
\cite[Proposition 2.5, Proposition 6.13]{lusztig}
Let $\ms n,\ms n'\in \mathcal N(\mathbb F_q)$.
\begin{enumerate}[(1)]
\item $\hat\gamma_{\ms n}(\ms y) = q^{r(\ms n)}\#\set{\ms g\in \bfG(\mathbb F_q):{\ms g}.\ms y\in \Sigma}$ for all $\ms y\in \mf g(\mathbb F_q)$,
\item if $\hat \gamma_{\ms n}(\ms n') \ne 0$, then $\ms n$ must lie in the closure of $\mb G(\barF_q).\ms n'$,
\item if $\ms n\in \mb G(\barF_q).\ms n'$ and $\hat\gamma_{\ms n}(\ms n') \ne 0$, then $\ms n'\in\bfG(\mathbb F_q).\ms n$,
\item $\hat \gamma_{\ms n}(\ms n) = q^{r(\ms n)}\#\ms C_{\bfG(\mathbb F_q)}(\ms n)$.
\end{enumerate}
\end{proposition}
\paragraph{The Kawanaka Wavefront Set}
\label{sec:kawanaka}
The Kawanaka wavefront set is the analogous notion of the $p$-adic wavefront set for complex representations of $\bfG(\barF_q)$.
In this setting the wavefront set was introduced by Kawanaka and so we refer to it as the \emph{Kawanaka} wavefront set.
Let $(\rho, V)$ be an irreducible representation of $\bfG(\mathbb F_q)$ and $\chi_V$ be the character afforded by $V$.
\nomenclature{$^{\barF_q}\mathrm{WF}(V)$}{}
The \emph{Kawanaka wavefront set $^{\barF_q}\mathrm{WF}(V)$ of $V$} is defined to be the nilpotent orbit $\mathbb{O}\in \mathcal N_o(\barF_q)$ satisfying
\begin{enumerate}[(1)]
\item there exists an $\mathbb{O}'\in \mathcal N_o(\mathbb F_q)$ such that $\langle \gamma_{\mathbb{O}'},\chi_V\rangle \ne 0$ and $\mathcal N_o(\barF_q/\mathbb F_q)(\mathbb{O}') = \mathbb{O}$;
\item if $\mathbb{O}'\in \mathcal N_o(\mathbb F_q)$ and $\langle \gamma_{\mathbb{O}'},\chi_V\rangle \ne 0$ then $\mathcal N_o(\barF_q/\mathbb F_q)(\mathbb{O}') \le \mathbb{O}$.
\end{enumerate}
It is not clear a priori that such an orbit exists, but if it does, then it is clear that it is unique.
The existence of the Kawanaka wavefront set has a somewhat long and complicated history.
Kawanaka originally conjectured that it always exists, and proved that this is indeed the case for adjoint groups of type $A_n$, or of exceptional type \cite{kawanaka}.
He also gave a conjectural description of $\hphantom{ }^{\barF_q}\mathrm{WF}(V)$ in terms of Lusztig's classification of the irreducible representations of $\bfG(\mathbb F_q)$.
In particular, let $\mb G^*$ be the dual group of $\mb G$ defined over $\mathbb F_q$ with corresponding Frobenius $F'$ and suppose $V$ corresponds to the $F'$-stable special $G^*(\barF_q)$-conjugacy class $C$ (in the sense of \cite[Section 13.2]{lusztig}).
Pick an element $g$ of $C^{F'}$ and let $g=su$ be its Jordan decomposition.
We can attach to the Weyl group $W(s)$ of $C_{G^*}(s)$ an irreducible (special) representation $E$ via the Springer correspondence applied to $u$ and the trivial local system.
The representation $E'=j_{W(s)}^WE$ obtained via truncated induction then corresponds to an $F$-stable nilpotent orbit $\mathbb{O}$ and the trivial local system with respect to the Springer correspondence on $\mf g(\barF_q)$.
The orbit $\mathbb{O}$ was Kawanaka's candidate for $\hphantom{ }^{\barF_q}\mathrm{WF}(V)$.
This conjecture was partially proved by Lusztig in his paper \cite{lusztig}.
In this paper Lusztig attached to $V$ an $F$-stable unipotent class $C$ of $\mb G(\barF_q)$ called the unipotent support of $V$ which is the unipotent class of $\mb G(\barF_q)$ of maximal dimension satisfying
\begin{equation}
\label{eq:unipsupp}
\sum_{g\in C^F}\chi_V(g) \ne 0.
\end{equation}
He then showed that the (log of the) unipotent support of the Alvis--Curtis dual of $V$ is the unique nilpotent orbit of maximal dimension satisfying condition 1. above.
This essentially settled the existence claim, modulo the slight weakening of condition 2. above.
This however was fixed in later work by Achar and Aubert in \cite{achar_aubert} (and Taylor \cite{wavefront} with weakened conditions on the characteristic)---finally settling the matter fully.
In section 3 we will need to know the Kawanaka wavefront set for principal series unipotent representations of $\bfG(\mathbb F_q)$ when $\bfG$ is split.
We record a precise formula for $^{\barF_q}\mathrm{WF}$ for this case.
First recall that for unipotent principal series representations we have a $q\to 1$ operation arising from Lusztig's isomorphism \cite[Theorem 3.1]{lusztigdeformation}
\begin{equation}
\mathbb{C}[W]\to \mathcal H(\mb B(\mathbb F_q)\backslash \bfG(\mathbb F_q)/\mb B(\mathbb F_q))
\end{equation}
that gives a bijection between the irreducible constituents of $\text{Ind}_{\mb B(\mathbb F_q)}^{\bfG(\mathbb F_q)}1$ and irreducible $W$-representations.
For $V$ a constituent of $\text{Ind}_{\mb B(\mathbb F_q)}^{\bfG(\mathbb F_q)}1$ we write $V_{q\to 1}$ for the corresponding irreducible representation of $W$.
Second, recall Lusztig's partition of $\mathrm{Irr}(W)$ into families so that each family contains a unique special representation.
Each special representation of $W$ corresponds via the Springer correspondence to a special nilpotent orbit $\mathbb{O}$ and the trivial local system.
\nomenclature{$\mathbb{O}^s$}{}
For an irreducible representation $E$ of $W$ we write $\mathbb{O}^{s}(E)$ for the special nilpotent orbit corresponding to the special representation in the same family as $E\otimes \sgn$.
Unravelling the above recipe for the Kawanaka wavefront set we get that for principal series unipotent representations of $\bfG(\mathbb F_q)$, the Kawanaka wavefront set is given by
\begin{equation}
\label{eq:kawanakawf}
^{\barF_q}\mathrm{WF}(V) = \mathbb{O}^s(V_{q\to 1}).
\end{equation}
We also record the following proposition.
\begin{proposition}
\label{prop:contra}
Let $(\rho,V)$ be an irreducible representation of $\bfG(\mathbb F_q)$ and $\rho^*$ denote its contragredient.
Then $\hphantom{ }^{\barF_q}\mathrm{WF}(V) = \hphantom{ }^{\barF_q}\mathrm{WF}(V^*)$.
\end{proposition}
\begin{proof}
The unipotent support of a representation and its contragredient are trivially the same (see Equation~\eqref{eq:unipsupp}).
The result then follows from the fact that Alvis--Curtis duality commutes with taking contragredients.
\end{proof}
Finally, we make the following convenient definitions.
When $\rho$ is not necessarily irreducible, \emph{the Kawanaka wavefront set of $\rho$} is the collection of maximal orbits among the wavefront sets of the irreducible constituents.
An element of the Kawanaka wavefront set is called a Kawanaka wavefront-set nilpotent.
\subsection{Relating the \texorpdfstring{$\mathbb Q_p$}{Qp} and \texorpdfstring{$\mathbb F_p$}{Fp} Wavefront Sets}
\paragraph{Inflated Generalised Gelfand--Graev Representations}
Let $(c,\mathbb{O})\in I_o^k$.
\nomenclature{$f_{c,\mathbb{O}}$}{}
Define the function $f_{c,\mathbb{O}} = \widetilde \gamma_{\mathbb{O}}$ where $\gamma_{\mathbb{O}}$ is the character of the GGGR of $\bfL_c(\mathbb F_q)$ attached to the orbit $\mathbb{O}$.
We will also write $f_{c,\mathbb{O}}$ for $f_{c,\mathbb{O}}\circ \exp$.
The following result is essentially due to Barbasch and Moy in \cite{barbaschmoy}, but is a sharper result than in loc. cit.
\begin{theorem}
\label{thm:bm}
Let $(c,\mathbb{O})\in I_o^k$.
Then
\begin{enumerate}[(1)]
\item $f_{c,\mathbb{O}}$ is supported on the topologically unipotent elements $\bfu$,
\item $\hat \mu_{\mathbb{O}'}(f_{c,\mathbb{O}}) = 0$ unless $\mathcal L_c(\mathbb{O}) \le \mathbb{O}'$.
\item Suppose $\mathbb{O}'\in \mathcal N_o(k)$ is such that $\mathcal N_o(\bar k/k)(\mathbb{O}') = \mathcal N_o(\bar k/k)(\mathcal L_c(\mathbb{O}))$. Then
\begin{enumerate}
\item If $\mathcal L_c(\mathbb{O})\ne \mathbb{O}'$, then $\hat \mu_{\mathbb{O}'}(f_{c,\mathbb{O}}) = 0$.
\item If $\mathcal L_c(\mathbb{O}) = \mathbb{O}'$, then $\hat \mu_{\mathbb{O}'}(f_{c,\mathbb{O}}) \ne 0$.
\end{enumerate}
\item For any irreducible smooth admissible representation $(\pi,X)$ of $\bfG(k)$, we have $$\Theta_X(f_{c,\mathbb{O}}) = \langle\Gamma_{c,\mathbb{O}},\check X^{\bfu_c(\mf o)}\rangle.$$
\end{enumerate}
\end{theorem}
\nomenclature{$\check X$}{}
Here $\check X$ denotes the contragredient (i.e. the smooth dual) of $X$.
The proof for (2) in \cite{barbaschmoy} however only shows that $\mathcal L_c(\mathbb{O})$ lies in the closure of $\mathcal N_o(K/k)(\mathbb{O}') \cap \mf g(k)$ in $\mf g(k)$.
We now give a complete proof of (2) using ideas from \cite{wast}.
\begin{proof}
Let $\ms x,\ms h,\ms y$ be an $\lalg{sl}_2$-triple for $\mathbb{O}$.
Let $\ms X,\ms H,\ms Y$ denote a lift of $\ms x,\ms h,\ms y$ to an $\lalg{sl}_2$-triple of $\mf g(k)$.
We proceed by first showing that
\begin{equation}
\label{eq:supp}
\text{supp}(\hat f_{c,\mathbb{O}}) = \set{{\ms h}.(-\ms Y + \ms Z):\ms h\in \bfP_c(\mf o),\ms Z\in \ms Z_{\bfp_c(\mf o)}(\ms X)}.
\end{equation}
We have that $\hat f_{c,\mathbb{O}}(\ms W) \ne 0$ iff $\ms W\in \bfp_c(\mf o)$ and $\hat\gamma_{\mathbb{O}}(\ms w) \ne 0$ where $\ms w$ is the image of $\ms W$ in $\bfl_c(\mathbb F_q)$.
By \cite[Equation 2.4 (a)]{lusztig}, $\hat\gamma_{\mathbb{O}}(\ms w)\ne 0$ iff $\ms w \in {\bfL_c(\mathbb F_q)}.(-\ms y + \ms Z_{\bfl_c(\mathbb F_q)}(\ms e))$.
By the proof of \cite[Lemma IX.3]{wast}, we know that the image of $\ms Z_{\bfp_c(\mf o)}(\ms X)$ in $\bfl_c(\mathbb F_q)$ is $\ms Z_{\bfl_c(\mathbb F_q)}(\ms x)$.
Thus the support consists of those $\ms W$ in ${\bfP_c(\mf o)}.(-\ms Y+\ms Z_{\bfp_c(\mf o)}(\ms X))+\bfu_c(\mf o) = {\bfP_c(\mf o)}.(-\ms Y+\ms Z_{\bfp_c(\mf o)}(\ms X)+\bfu_c(\mf o))$.
But from the same proof in \cite{wast} we also know that for $\ms Z\in \ms Z_{\bfp_c(\mf o)}(\ms X)$
\begin{equation}
-\ms Y + \ms Z + \bfu_c(\mf o) = \set{{\ms h}.(-\ms Y+\ms Z+\ms Z'):\ms Z'\in \ms Z_{\bfu_c(\mf o)},\ms h\in \bfU_c(\mf o)}.
\end{equation}
Thus $-\ms Y + \ms Z_{\bfp_c(\mf o)}(\ms X) + \bfu_c(\mf o) = {\bfU_c(\mf o)}.(-\ms Y+\ms Z_{\bfp_c(\mf o)}(\ms X))$ and so Equation~\eqref{eq:supp} holds.
Now let $\mathbb{O}'$ be a nilpotent orbit with $\hat \mu_{\mathbb{O}'}(f_{c,\mathbb{O}})\ne0$.
Then $\text{supp}(\hat f_{c,\mathbb{O}})\cap \mathbb{O}' \ne \emptyset$.
Thus there is a $\ms X' \in \mathbb{O}'$, $\ms h\in \bfP_c(\mf o)$ and $\ms Z\in \ms Z_{\bfp_c(\mf o)}(\ms X)$ such that $\ms X' = {\ms h}.(-\ms Y+\ms Z)$.
Since ${\ms h^{-1}}.\ms X'\in \mathbb{O}'$ we can assume $\ms h = 1$ and so $\ms X' = -\ms Y+\ms Z$.
Let $\lambda_{\ms X'}: \mathbf G_m\to \mathbf G$ be a 1-parameter $k$ subgroup so that $\lambda_{\ms X'}(t).\ms X' = t^2\ms X'$, and $\lambda$ be the 1-parameter subgroup determined by $\ms H$.
Since $\ms Z_{\mf g(k)}(\ms X) \subseteq \mf g(k)(\ge 0)$, write $\ms Z = \sum_{i\ge 0}\ms Z_i$ where $\ms Z_i\in \mf g(k)(i)$.
Then
\begin{equation}
{\lambda(t)\lambda_{\ms X'}(t)}.\ms X' = -\ms Y + \sum_{i\ge 0}t^{2+i}\ms Z_i \to -\ms Y
\end{equation}
as $t\to 0$.
Thus $\mathbb{O} = {\bfG(k)}.(-\ms Y) \le \mathbb{O}'$.
\end{proof}
Note that Theorem \ref{thm:saturation-increasing} together with (2) of this theorem immediately imply (3) (a) of this theorem.
\begin{proposition}
Suppose $(\pi,X)$ has depth-$0$.
\nomenclature{$\Xi(X),\Xi^{max}(X)$}{}
Let
$$\Xi(X) = \{\mathbb{O}\in \mathcal N_o(k):\text{there exists } (c,\mathbb{O}')\in I_o^k(\mathbb{O}) \text{ such that }\Theta_X(f_{c,\mathbb{O}'})\ne 0\}.$$
Write $\Xi^{max}(X)$ for the maximal orbits of $\Xi(X)$.
Then
\begin{enumerate}[(1)]
\item $\mathrm{WF}(X) = \Xi^{max}(X)$,
\item if $\mathbb{O} \in \Xi^{max}(X)$, then $\Theta_X(f_{c,\mathbb{O}'}) \ne 0$ for all $(c,\mathbb{O}')\in I_o^k(\mathbb{O})$.
\end{enumerate}
\end{proposition}
\begin{proof}
Let $\mathbb{O}\in \mathrm{WF}(X)$ and $(c,\mathbb{O}')\in I_o^k(\mathbb{O})$.
Since $X$ has depth-$0$ and the inflated GGGRs have support in $\bfU$ we have
\begin{equation}
\Theta_X(f_{c,\mathbb{O}'}) = \sum_{\mathbb{O}''\in \mathcal N_o(k)} c_{\mathbb{O}''}\hat\mu_{\mathbb{O}''}(f_{c,\mathbb{O}'}).
\end{equation}
Then by Theorem \ref{thm:bm} part 2 we have that
\begin{equation}
\Theta_X(f_{c,\mathbb{O}'}) = \sum_{\mathbb{O}\le \mathbb{O}''} c_{\mathbb{O}''}\hat\mu_{\mathbb{O}''}(f_{c,\mathbb{O}'}).
\end{equation}
But if $\mathbb{O} < \mathbb{O}''$ then $c_{\mathbb{O}''} = 0$ and so $\Theta_X(f_{c,\mathbb{O}'}) = c_{\mathbb{O}}(X)\hat \mu_{\mathbb{O}}(f_{c,\mathbb{O}'}) \ne 0$.
Thus $\mathrm{WF}(X)\subseteq \Xi(X)$.
Now suppose $\mathbb{O}\in \mathrm{WF}(X)$ and $\mathbb{O}_1$ is a nilpotent orbit with $\mathbb{O} < \mathbb{O}_1$.
Let $(c_1,\mathbb{O}_1')\in I_o^k(\mathbb{O}_1)$.
Then $\Theta_X(f_{c_1,\mathbb{O}_1'}) = \sum_{\mathbb{O}_1\le \mathbb{O}_2} c_{\mathbb{O}_2}\hat\mu_{\mathbb{O}_2}(f_{c_1,\mathbb{O}_1'})$.
But if $\mathbb{O}_1\le \mathbb{O}_2$ then $\mathbb{O}< \mathbb{O}_2$ and so $c_{\mathbb{O}_2}(X) = 0$ and so $\Theta_X(f_{c_1,\mathbb{O}_1'}) = 0$.
Thus we get that $\mathrm{WF}(X)\subseteq \Xi^{max}(X)$.
Finally, suppose $\mathbb{O}$ is in $\Xi^{max}(X)$ and $(c,\mathbb{O}')\in I_o^k(\mathbb{O})$.
$\mathbb{O}$ must be $\le \mathbb{O}_1$ for some $\mathbb{O}_1 \in \mathrm{WF}(X)$ (since otherwise $c_{\mathbb{O}_2}(X) = 0$ for all $\mathbb{O}\le \mathbb{O}_2$ and so $\Theta_X(f_{c,\mathbb{O}'}) = 0$).
But $\mathrm{WF}(X)\subseteq \Xi(X)$ and so by maximality we must have $\mathbb{O} = \mathbb{O}_1$ and so $\Xi^{max}(X) \subseteq \mathrm{WF}(X)$. This establishes 1.
To establish 2., note by the first part we have that $\Theta_X(f_{c,\mathbb{O}'}) \ne 0$ for all $\mathbb{O} \in \mathrm{WF}(X)$ and $(c,\mathbb{O}')\in I_o^k(\mathbb{O})$.
Then use the fact that $\Xi^{max}(X) = \mathrm{WF}(X)$.
\end{proof}
\begin{proposition}
\label{prop:wfs}
$\hphantom{ }$
\begin{enumerate}
\item $\hphantom{ }^K\widetilde{\mathrm{WF}}(X) = \max\set{\mathcal N_o(K/k)(\mathbb{O}):\mathbb{O}\in \mathrm{WF}(X)}$,
\item $^{\bar k}\mathrm{WF}(X) = \max\set{\mathcal N_o(\bar k/K)(\mathbb{O}):\mathbb{O}\in \hphantom{ }^K\widetilde{\mathrm{WF}}(X)}$.
\end{enumerate}
\end{proposition}
\begin{proof}
This follows from $\mathcal N_o(K/k)$ and $\mathcal N_o(\bar k/K)$ being non-decreasing.
\end{proof}
\begin{corollary}
\label{cor:maxl}
Suppose $(\pi,X)$ has depth-$0$.
Then $\hphantom{ }^K\widetilde{\mathrm{WF}}(X)$ is equal to the set of maximal orbits of $\mathcal N_o(K/k)(\Xi(X))$.
\end{corollary}
\paragraph{Local Wavefront Sets}
\label{sec:locwf}
\nomenclature{$\mathrm{WF}_c(X)$}{}
\nomenclature{$^K\mathrm{WF}_c(X)$}{}
We now define local wavefront sets $\mathrm{WF}_c$ and $\hphantom{ }^K\mathrm{WF}_c$ for $c$ a face of $\mathcal B(\bfG,k)$.
Let $\mathbb{O}_1,\dots,\mathbb{O}_k \in \mathcal N_o^{\bfL_c}(\barF_q)$ be the Kawanaka wavefront-set nilpotents of $W_c := \check V^{\bfU_c(\mf o)}$, a representation of $\bfL_c(\mathbb F_q)$.
Let $\mathbb{O}_1',\dots,\mathbb{O}_l' \in \mathcal N_o^{\bfL_c}(\mathbb F_q)$ be the nilpotent orbits in $\cup_i\mathbb{O}_i$ such that $\langle\Gamma_{\mathbb{O}_i'},W_c\rangle \ne 0$.
Define
$$\mathrm{WF}_c(X) =\set{\mathcal L_c(\mathbb{O}_i'):1\le i\le l} \text{ and } \hphantom{ }^K\mathrm{WF}_c(X) = \set{\mathcal L_c(\mathbb{O}_i):1\le i\le k}.$$
It is clear that
$$^K\mathrm{WF}_c(X) = \set{\mathcal N_o(K/k)(\mathbb{O}):\mathbb{O}\in \mathrm{WF}_c(X)}.$$
\nomenclature{$\mathscr C$}{}
Let $\mathscr C$ denote a collection of faces in $\mathcal B(\bfG,k)$ such that for all $\mathbb{O}\in \mathcal N_o(k)$ there exists a $(c,\mathbb{O}')\in I_o^k(\mathbb{O})$ such that $c\in \mathscr C$.
Examples of $\mathscr C$ that satisfy this property are:
\begin{enumerate}[(1)]
\item the faces of a fixed chamber $c_0$;
\item the vertices of a fixed chamber $c_0$;
\item a choice of vertex from each $\bfG(k)$ orbit of the vertices of a fixed chamber $c_0$.
\end{enumerate}
\begin{theorem}
\label{lem:liftwf}
Let $(\pi,X)$ be depth-$0$.
Then
$$^K\widetilde{\mathrm{WF}}(X) = \max_{c\in \mathscr C}\hphantom{ }^K\mathrm{WF}_c(X).$$
\end{theorem}
\begin{proof}
Let $^K\Xi(X) = \mathcal N_o(K/k)(\Xi(X))$.
We will show that $^K\mathrm{WF}_{c}(X)\subseteq \hphantom{ }^K\Xi(X)$ for all $c\in \mathscr C$, and that if $\mathbb{O}$ is a maximal element of $^K\Xi(X)$ then $\mathbb{O}\in \hphantom{ }^K\mathrm{WF}_c(X)$ for some $c\in \mathscr C$.
Corollary \ref{cor:maxl} then implies the result.
The first part is straightforward.
Let $c$ be any face of $\mathcal B(\bfG,k)$ and $\mathbb{O} \in \hphantom{ }^K\mathrm{WF}_c(X)$.
Write $\mathbb{O}$ as $\mathcal N_o(K/k)(\mathbb{O}_1)$ where $\mathbb{O}_1$ is in $\mathrm{WF}_c(X)$.
By definition of $\mathrm{WF}_c(X)$, $\mathbb{O}_1$ is in $\Xi(X)$.
Thus we have $\hphantom{ }^K\mathrm{WF}_c(X) \subseteq \hphantom{ }^K\Xi(X)$.
For the second part let $\mathbb{O}$ be a maximal element of $^K\Xi(X)$.
Write
$$\mathbb{O} = \mathcal N_o(K/k)(\mathbb{O}_1)$$
where $\mathbb{O}_1\in \Xi(X)$.
Let $(c,\mathbb{O}_1')\in I_o^k(\mathbb{O}_1)$ be such that $c\in\mathscr C$.
Since $\Theta_X(f_{c,\mathbb{O}_1'})\ne 0$, there is an irreducible constituent $W$ of $\check V^{\bfU_c(\mf o)}$ with
$$\langle \Gamma_{\mathbb{O}_1'},W\rangle \ne 0.$$
Let $\mathbb{O}_2'\in \mathcal N_o^{\bfL_c}(\barF_q)$ be a Kawanaka wavefront set nilpotent of $\check V^{\bfU_c(\mf o)}$ such that
$$\mathcal N_o(\barF_q/\mathbb F_q)(\mathbb{O}_1') \le \mathbb{O}_2'.$$
Let $\mathbb{O}_2 = \mathcal L_c(\mathbb{O}_2')$.
$\mathbb{O}_2$ is an element of $^K\Xi(X)$.
By Proposition \ref{prop:lifted_rel},
$$\mathcal N_o(\barF_q/\mathbb F_q)(\mathbb{O}_1') \le \mathbb{O}_2'$$
implies that
$$\mathbb{O} = \mathcal N_o(K/k)(\mathbb{O}_1) = \mathcal N_o(K/k)(\mathcal L_c(\mathbb{O}_1')) = \mathcal L_c(\mathcal N_o(\barF_q/\mathbb F_q)(\mathbb{O}_1'))\le \mathcal L_c(\mathbb{O}_2') = \mathbb{O}_2.$$
By maximality of $\mathbb{O}$ in $^K\Xi(X)$ we get that $\mathbb{O} = \mathbb{O}_2$.
It follows that $\mathbb{O} \in \hphantom{ }^K\mathrm{WF}_c(X)$.
\end{proof}
\begin{corollary}
Let $(\pi,X)$ be an admissible representation of $\bfG(k)$ and let $c$ be a face of $\mathcal B(\bfG,k)$.
Then
\begin{equation}
\label{eq:locwfexp}
^K\mathrm{WF}_c(X) = \set{\mathcal L_c(\mathbb{O}):\mathbb{O} \in \hphantom{ }^{\barF_q}\mathrm{WF}(X^{\bfU_c(\mf o)})}.
\end{equation}
\end{corollary}
\begin{proof}
We have that $\check X^{\bfU_c(\mf o)} = \left( X^{\bfU_c(\mf o)}\right)^*$.
Thus by Proposition \ref{prop:contra},
$$^{\barF_q}\mathrm{WF}(\check X^{\bfU_c(\mf o)}) = \hphantom{ }^{\barF_q}\mathrm{WF}(X^{\bfU_c(\mf o)}).$$
\end{proof}
We will always use the expression in Equation~\eqref{eq:locwfexp} to compute the local wavefront sets.
\begin{corollary}
Let $(\pi,X)$ be an admissible representation of $\bfG(k)$ and let $(\check \pi,\check X)$ be its contragredient.
Then
$$^K\widetilde{\mathrm{WF}}(X) = \hphantom{ }^K\widetilde{\mathrm{WF}}(\check X) \text{ and } \hphantom{ }^{\bar k}\mathrm{WF}(X) = \hphantom{ }^{\bar k}\mathrm{WF}(\check X).$$
\end{corollary}
\begin{proof}
By Proposition \ref{prop:wfs}, it suffices to show the first equality.
By Theorem \ref{lem:liftwf} it suffices to show that $^K\mathrm{WF}_c(X) = \hphantom{ }^K\mathrm{WF}_c(\check X)$ for any face $c$ of $\mathcal B(\bfG,k)$.
This last equality follows from the previous corollary.
\end{proof}
In the following example we use the local wavefront sets to show that the geometric wavefront set of an anti-spherical representation is the regular nilpotent orbit.
\begin{example}
Let $(\pi,X)$ be a spherical representation of $\bfG(k)$.
Let $\mathcal A$ be an apartment of $\mathcal B(\bfG,k)$ and $c$ be a hyperspecial face of $\mathcal A$.
By definition, $V^{\bfU_c(\mf o)}$ contains the trivial $\bfL_c(\mathbb F_q)$-representation.
\nomenclature{$\ms{AZ}(X)$}{}
Let $\ms {AZ}(X)$ denote the Aubert--Zelevinsky dual of $X$ \cite[Definition 1.5]{aubertdual}.
Then $\ms {AZ}(X)^{\bfU_c(\mf o)}$ contains the Steinberg representation.
By Equation \ref{eq:kawanakawf}, the Kawanaka wavefront set of the Steinberg is $\mathbb{O}_{reg}$ - the regular nilpotent orbit of $\bfL_c(\mathbb F_q)$.
This is the unique maximal nilpotent orbit of $\bfL_c(\mathbb F_q)$ and so the Kawanaka wavefront set of $V^{\bfU_c(\mf o)}$ is $\mathbb{O}_{reg}$.
Let $\mathbb{O} = \mathcal L_c(\mathbb{O}_{reg})$.
Then $^K\mathrm{WF}_c(\ms {AZ}(X)) = \mathbb{O}$.
By Theorem \ref{thm:lift}, $\mathcal N_o(\bar k/K)(\mathbb{O})$ is the regular nilpotent orbit of $\mf g$ and so $^{\bar k}\mathrm{WF}(\ms {AZ}(X))$ must also be the regular nilpotent orbit.
\end{example}
\section{Unramified Nilpotent Orbits}
\paragraph{Basic Notation}
\label{par:basicnotation2}
\nomenclature{$\mathcal N_o$}{}
\nomenclature{$T$}{}
\nomenclature{$X^*$}{}
\nomenclature{$X_*$}{}
\nomenclature{$\widetilde W$}{}
\nomenclature{$E$}{}
\nomenclature{$V$}{}
\nomenclature{$\mathbb T$}{}
\nomenclature{$X_S$}{}
Recall from section \ref{par:basicnotation1} the definitions of $\bfG_\mathbb{Z},\bfT_\mathbb{Z}$ and $\bfT_K$.
Let $G = \bfG_\mathbb{Z}(\mathbb{C})$, $\mathcal N_o := \mathcal N_o^{\bfG_\mathbb{Z}}(\mathbb{C})$, and $T = \bfT_\mathbb{Z}(\mathbb{C})$.
Write $X^*,X_*$ for the common character and co-character lattices of $\bfT_K$ and $\bfT_\mathbb{Z}$, and $\Phi$ for $\Phi(\bfT_K,\bar k)$.
Let
$$\widetilde W=W\ltimes X_*$$
be \emph{the extended affine Weyl group of $\bf G_K$} and let
$$\cdot:\widetilde W\to W, w\mapsto \dot w$$
be the projection map along $X_*$.
Let $E = X^*\otimes_{\mathbb{Z}} \mathbb{R}$, $V = X_*\otimes_{\mathbb{Z}} \mathbb{R}$ and define the compact torus ${\mathbb T} = V/X_*$.
Extend the natural pairing $\langle -,- \rangle$ between $X^*$ and $X_*$ to one between $E$ and $V$.
This allows us to view $X^*$ as linear functions on $V$ via the embedding $\chi \mapsto \langle \chi,-\rangle \in V^*$.
It is moreover clear that these maps descend to continuous maps $V/X_*\to \mathbb{R}/\mathbb{Z}$ and so we obtain a homomorphism
$$X^* \to \hat {\mathbb T} = \mathrm{Hom}_{cts}(\mathbb T,\mathbb{R}/\mathbb{Z}).$$
This map is in fact a $W$ equivariant bijection \cite[Section 9]{sommersmcninch}.
We now define the vanishing set for three different scenarios.
\begin{enumerate}[(1)]
\item For a subset $S\subseteq T$ define
$$X_S^* = \{\chi\in X^*:\chi(s) = 1, \ \forall s\in S\};$$
\item For a subset $S\subseteq V$, view $X^*$ as linear functions on $V$ and define
$$X_S^* = \{\chi\in X^*:\exists n\in \mathbb{Z} \text{ such that } \chi(s) = n, \ \forall s\in S\};$$
\item For a subset $S\subseteq \mathbb T$, view $X^*$ as continuous homomorphisms from $\mathbb T$ to $\mathbb{R}/\mathbb{Z}$ and define
$$X_S^* = \{\chi\in X^*:\chi(s) = 0 + \mathbb{Z}, \ \forall s\in S\}.$$
\end{enumerate}
\nomenclature{$\Phi_S$}{}
For $S\subseteq T, V$, or $\mathbb T$, define $\Phi_S := \Phi\cap X_S$.
\nomenclature{$\Psi$}{}
Define the set of \emph{affine roots}
$$\Psi := \{\alpha + n:\alpha\in \Phi\}$$
where $\alpha+n$ denotes the affine function $v\mapsto \alpha(v)+n$ on $V$.
For $a = \alpha+n\in \Psi$ write $\dot a := \alpha$ for the linear part.
The extended affine Weyl group $\widetilde W$ acts on $V$ by affine transformations.
This induces an action of $\widetilde W$ on the set $\Psi$ via $w:a \mapsto a\circ w^{-1}$.
\nomenclature{$\Psi_S$}{}
For a subset $S\subset V$ write $\Psi_S$ for the set of all affine roots that vanish on $S$.
Note that $\Phi_S = \dot \Psi_S$ and that $\Psi_{wS} = w.\Psi_S$ for all $w\in \widetilde W$.
The hyperplanes defined by the $a\in \Psi$ endow $V$ with a chamber complex structure and $\widetilde W$ acts by chamber complex automorphisms.
Let $\mathcal A := \mathcal A(\bfT_K,K)$ be the apartment of $\mathcal B(\bfG,K)$ associated to $\bfT_K$.
The apartment $\mathcal A$ is the underlying affine space of the vector space $V$ and so affords an action of $V$ by translations.
\nomenclature{$\kappa_{x_0}$}{}
For this section we will need to make a choice of identification between $V$ and $\mathcal A$ - in particular we must choose a hyperspecial point $x_0\in \mathcal A$ (see \cite[Section 1.10]{tits} for the definition) to send the origin of $V$ to.
Let us fix such a choice $x_0$ and write $\kappa_{x_0}:V\to \mathcal A$ for the resulting identification.
This induces a chamber complex isomorphism between $V$ and $\mathcal A$, identifies $\mathbb{P}si$ with the affine roots of $\mathcal A$, and fixes an action of $\widetilde W$ on $\mathcal A$.
\nomenclature{$\mathcal A$}{}
\nomenclature{$\bar\bfT_K$}{}
\nomenclature{$\Phi_c(\bar\bfT_K,\barF_q)$}{}
For $c$ a face of $\mathcal A$ (resp. $V$) write $\mathcal A(c)$ for $\mathcal A(c,\mathcal A)$ (resp. $\mathcal A(c,V)$).
For a $c\subseteq \mathcal A$ let $W_c$ denote the subgroup of $\widetilde W$ generated by the reflections through hyperplanes containing $c$.
The torus $\bfT_K$ is in fact defined over $\mf O$, is a subgroup of $\bfP_c$ for each $c\subseteq \mathcal A$, and the special fibre of $\bfT_K$, denoted $\bar\bfT_K$, is an $\barF_q$-split maximal torus of $\bfL_c(\barF_q)$.
Write $\Phi_c(\bar\bfT_K,\barF_q)$ for the root system of $\bfL_c(\barF_q)$ with respect to $\bar\bfT_K$.
Then $\Phi_c(\bar\bfT_K,\barF_q)$ naturally identifies with the set of $\psi\in\Psi(\bfT,k)$ that vanish on $\mathcal A(c,\mathcal A)$, and the Weyl group of $\bfL_c$ with respect to $\bar \bfT_K$ is naturally isomorphic to $W_c$.
\nomenclature{$\bfB_K$}{}
\nomenclature{$\Delta$}{}
\nomenclature{$\widetilde\Delta$}{}
\nomenclature{$\alpha_0$}{}
Let $\bfB_K$ be a Borel of $\bfG_K$ containing $\bfT_K$ and let $\Delta := \Delta(\bfT_K,\bfB_K)\subseteq \Phi$ be the simple roots determined by $\bfB_K$.
Let $\widetilde\Delta$ be the simple roots of $\Psi$ corresponding to $\Delta$.
In particular, when $\Phi(\bfT,K)$ is irreducible, $\widetilde\Delta = \Delta \cup \{1-\alpha_0\}$ where $\alpha_0$ is the highest root of $\Phi(\bfT,K)$ with respect to $\Delta$.
In general, for $\Phi(\bfT,K)=\coprod_{i=1}^l\Phi_i$ where each $\Phi_i$ is irreducible and $l\ge 1$, then $\Delta = \coprod_{i=1}^l\Delta_i$ where $\Delta_i = \Delta\cap \Phi_i$, and $\widetilde\Delta := \coprod_{i=1}^l \widetilde\Delta_i$.
\nomenclature{$c_0$}{}
Let $c_0$ be the chamber of $\mathcal A$ cut out by $\widetilde\Delta$.
\nomenclature{$\mathbf P(\widetilde\Delta)$}{}
\nomenclature{$\mathsf{Type}_{x_0,\Delta}$}{}
There is a unique labelling function $\mathsf{Type}_{x_0,\Delta}$ of the faces of $\mathcal B(\bfG,K)$ in terms of the set
$$\mathbf P(\widetilde \Delta):=\{J\subsetneq\widetilde \Delta: J\cap\Delta_i\subsetneq \Delta_i, 1\le i\le l\},$$
such that for $c\subseteq \overline{c_0}$, we have that $\mathcal A(c)$ is the vanishing set of $\mathsf{Type}_{x_0,\Delta}(c)$ (see \cite[Section 5.2]{garrett}).
\paragraph{Pseudo-Levis}
\nomenclature{$N$}{}
\nomenclature{$W$}{}
\nomenclature{$C_G(\bullet)$}{}
\nomenclature{$\Phi_L$}{}
Let $N$ denote the normaliser of $T$ in $G$ and $W$ be the Weyl group $N/T$.
A \emph{pseudo-Levi subgroup $L$} of $G$ is a connected centraliser $\mathbf C_{G}^\circ(s)$ of a semisimple element $s\in G$.
Pseudo-Levi subgroups have the following convenient characterisation.
\begin{proposition}
\cite[Lemma 14]{sommersmcninch}
Let $S\subseteq T$ be a subset.
Then $C_{G}^\circ (S)$ is a reductive subgroup of $G$ and is generated by $T$ together with the root subgroups $\mf X_\alpha$ for $\alpha(s) = 1$ for all $s\in S$.
\end{proposition}
For a pseudo-Levi $L\subset G$ containing $T$, write $\Phi_{L}$ for $\Phi_{Z}$ where ${Z}\subseteq T$ is the center of $L$.
\subsection{Parameterising Unramified Nilpotent Orbits}
\paragraph{Affine Bala--Carter Theory}
\label{par:abc}
\nomenclature{$I_{d,\mathcal A}^K$}{}
\nomenclature{$I_{o,d,\mathcal A}^K$}{}
\nomenclature{$\mathscr N$}{}
\nomenclature{$\sim_{\mathscr N}$}{}
Let
$$I_{d,\mathcal A}^K = \set{(c,\ms x)\in I_{d}^K:c\subseteq \mathcal A} \text{ and } I_{o,d,\mathcal A}^K = \set{(c,\mathbb{O})\in I_{o,d}^K:c\subseteq \mathcal A}.$$
Since $\bfG(K)$ acts transitively on ordered pairs of apartments and chambers the inclusion map $I_{o,d,\mathcal A}^K\to I_{o,d}^K$ descends to a bijection on $\sim_K$ equivalence classes.
Let $\mathscr N$ denote the normaliser of $\bfT_K(K)$ in $\bfG(K)$ (or equivalently the stabiliser of $\mathcal A$ in $\bfG(K)$).
For $(c_1,\mathbb{O}_1),(c_2,\mathbb{O}_2)\in I_{o,d,\mathcal A}^K$ declare $(c_1,\mathbb{O}_1)\sim_{\mathscr N} (c_2,\mathbb{O}_2)$ if there exists an $\ms n\in \mathscr N$ such that $\mathcal A(c_1) = \mathcal A(\ms nc_2)$ and $\mathbb{O}_1 = j_{c_1,nc_2}(\ms n.\mathbb{O}_2)$.
Note that $\ms n.\mathbb{O}_2$ is a single $\bfL_{\ms nc_2}(\barF_q)$ orbit.
Indeed, if $\ms x_2 \in \mathbb{O}_2$ then
$$\ms n.\mathbb{O}_2 = \ms n \bfL_{c_2}(\barF_q) . \ms x_2 = \ms n \bfL_{c_2}(\barF_q) \ms n^{-1} \ms n. \ms x_2 = \bfL_{\ms n c_2} \ms n. \ms x_2.$$
\begin{proposition}
The map
$$I_{d,\mathcal A}^K\to I_{o,d,\mathcal A}^K, \quad (c,\ms x) \mapsto (c,\bfL_c(\barF_q).\ms x)$$
descends to a bijection between $I_{d,\mathcal A}^K/\sim_K$ and $I_{o,d,\mathcal A}^K/\sim_{\mathscr N}$.
\end{proposition}
\begin{proof}
The map $I_{d,\mathcal A}^K\to I_{o,d,\mathcal A}^K$ is clearly surjective.
Suppose there is a $\ms h\in \bfG(K)$ such that $\mathcal A(c_1) = \mathcal A(\ms hc_2)$ and $\ms x_1 = j_{c_1,\ms hc_2}(\ms h.\ms x_2)$.
Then we can write $\ms h = \ms n\ms h_0$ where $\ms n\in \mathscr N,\ms h_0\in \bfP_{c_2}(\mf O)$.
Thus
\begin{align}
\bfL_{c_1}(\barF_q).\ms x_1 &= \bfL_{c_1}(\barF_q).j_{c_1,\ms hc_2}(\ms h.\ms x_2) = \bfL_{c_1}(\barF_q)j_{c_1,\ms nc_2}(\ms n\ms h_0.\ms x_2) \\
&= j_{c_1,\ms nc_2}(\bfL_{\ms nc_2}(\barF_q)\ms n\ms h_0.\ms x_2) = j_{c_1,\ms nc_2}(\ms n(\bfL_{c_2}(\barF_q).(\ms h_0.\ms x_2))).
\end{align}
But since $\ms h_0\in \bfP_{c_2}(\mf O)$, we have that $\bfL_{c_2}(\barF_q)(\ms h_0.\ms x_2) = \bfL_{c_2}(\barF_q)\ms x_2$ and so
$$(c_1,\bfL_{c_1}(\barF_q).\ms x_1)\sim_{\mathscr N} (c_2,\bfL_{c_2}(\barF_q).\ms x_2).$$
Thus $I_{d,\mathcal A}^K\to I_{o,d,\mathcal A}^K/\sim_{\mathscr N}$ descends to a well defined map $I_{d,\mathcal A}^K/\sim_K\to I_{o,d,\mathcal A}^K/\sim_{\mathscr N}$.
Now suppose there is an $\ms n\in \mathscr N$ such that $\mathcal A(c_1) = \mathcal A(\ms nc_2)$ and
$$\bfL_{c_1}(\barF_q)\ms x_1 = j_{c_1,\ms nc_2}(\ms n(\bfL_{c_2}(\barF_q)\ms x_2)).$$
Then there exists an $\ms h_0'\in \bfL_{c_2}(\barF_q)$ such that $\ms x_1 = j_{c_1,\ms nc_2}(^{\ms n\ms h_0'}\ms x_2)$.
Let $\ms h_0\in \bfP_{c_2}(\mf O)$ be a lift of $\ms h_0'$ and $\ms h = \ms n\ms h_0$.
Then $\mathcal A(c_1) = \mathcal A(\ms hc_2)$ and
\begin{equation}
\ms x_1 = j_{c_1,\ms nc_2}(\ms n\ms h_0.\ms x_2) = j_{c_1,\ms hc_2}(\ms h.\ms x_2).
\end{equation}
Thus $I_{d,\mathcal A}^K/\sim_K\to I_{o,d,\mathcal A}^K/\sim_{\mathscr N}$ is injective and hence a bijection as required.
\end{proof}
Note that the choice of base point $x_0$ induces a surjection $\pi:\mathscr N\to \widetilde W$ with kernel $\bfT_K(\mf O^\times)$.
Since additionally
$$\bfT_K(\mf O^\times)\subseteq \bigcap_{c\subseteq \mathcal A}\bfP_c(\mf O),$$
if $\ms n_1,\ms n_2\in \mathscr N$ and $\pi(\ms n_1) = \pi(\ms n_2)$, then for any facet $c$ of $\mathcal A$ and nilpotent orbit $\mathbb{O}\in \mathcal N_o^{\bfL_c}(\barF_q)$, we have $\ms n_1c = \ms n_2c$ and $\ms n_1.\mathbb{O} = \ms n_2.\mathbb{O}$.
Thus for $w\in \widetilde W$, $c\subseteq \mathcal A$ and $\mathbb{O}\in \mathcal N_o^{\bfL_c}(\barF_q)$, defining $w.\mathbb{O}$ to be $\ms n.\mathbb{O}$ where $\ms n$ is any element of $\mathscr N$ such that $\pi(\ms n) = w$, is well defined.
\nomenclature{$\sim_{\mathcal A}$}{}
Now define a relation $\sim_{\mathcal A}$ on $I_{o,d,\mathcal A}$ by declaring $(c_1,\mathbb{O}_1)\sim_{\mathcal A}(c_2,\mathbb{O}_2)$ if there exists $w\in \widetilde W$ such that $\mathcal A(c_1) = \mathcal A(wc_2)$ and $\mathbb{O}_1 = i_{c_1,wc_2}(w.\mathbb{O}_2)$.
Then clearly $\sim_{\mathscr N}$ and $\sim_{\mathcal A}$ are the same equivalence relation on $I_{o,d,\mathcal A}^K$.
\nomenclature{$\mb{ABC}_{\bfT_K}(\widetilde\Delta)$}{}
\nomenclature{$\mathsf{BC}_{x_0,\Delta}(c,\mathbb{O})$}{}
\nomenclature{$I_{o,d,c_0}^K$}{}
Let $\mb {ABC}_{\bfT_K}(\widetilde\Delta)$ be the set of pairs $(J,J')$ where $J\in \mathbf P(\widetilde\Delta)$ and $J' \subseteq J$ is a distinguished subset of $J$ in the sense of Bala--Carter \cite[Section 1]{balacarter}\cite[Section 8.2]{collingwoodmcgovern} (we can think of $J$ as the simple roots of a crystallographic root system).
Given $c\subseteq \mathcal B(\bfG,K)$, $\ms{Type}_{x_0,\Delta}(c)$ is a set of simple roots for $\Phi_c(\bar\bfT_K,\barF_q)$, and so given a distinguished $\mathbb{O}\in \mathcal N_o^{\bfL_c}(\mathbb F_q)$, let $\ms {BC}_{x_0,\Delta}(c,\mathbb{O})$ be the corresponding distinguished subset of $\ms{Type}_{x_0,\Delta}(c)$ prescribed by the Bala--Carter classification of distinguished nilpotent orbits \cite[Section 8]{collingwoodmcgovern}.
Recall that $c_0$ is the chamber of $\mathcal A$ cut out by $\widetilde \Delta$.
Let
$$I_{o,d,c_0}^K = \set{(c,\mathbb{O})\in I_{o,d,\mathcal A}^K:c\subseteq \overline{c_0}}.$$
Since $\widetilde W$ acts transitively on the chambers in $\mathcal A$, the inclusion map $I_{o,d,c_0}^K\to I_{o,d,\mathcal A}^K$ descends to a bijection.
\nomenclature{$\mb{ABC}_{x_0,\bfT_K}(c,\mathbb{O})$}{}
There is a bijection between $I_{o,d,c_0}^K$ and $\mb {ABC}_{\bfT_K}(\widetilde\mathbb{D}elta)$ given by
$$\mb {ABC}_{x_0,\bfT_K}:(c,\mathbb{O})\mapsto (\ms{Type}_{x_0,\mathbb{D}elta}(c),\ms {BC}_{x_0,\mathbb{D}elta}(c,\mathbb{O})).$$
Write $I_{o,d,c_0}^K:\mb {ABC}_{\bfT_K}(\widetilde\mathbb{D}elta)\to I_{o,d,c_0}^K$ for the inverse map.
\nomenclature{$\sim_{\widetilde{W}}$}{}
For $(J_1,J_1'),(J_2,J_2')\in \mb {ABC}_{\bfT_K}(\widetilde\Delta)$ define
$$(J_1,J_1')\sim_{\widetilde W}(J_2,J_2') \text{ if there exists } w\in \widetilde W: J_2 = w.J_1, J_2' = w.J_1'.$$
\begin{proposition}
Let $(c_1,\mathbb{O}_1),(c_2,\mathbb{O}_2)\in I_{o,d,c_0}^K$.
Then
$$(c_1,\mathbb{O}_1)\sim_{\mathcal A} (c_2,\mathbb{O}_2) \text{ if and only if } \mb {ABC}_{x_0,\bfT_K}(c_1,\mathbb{O}_1)\sim_{\widetilde W}\mb{ABC}_{x_0,\bfT_K}(c_2,\mathbb{O}_2).$$
\end{proposition}
\begin{proof}
Let $(J_i,J_i') = \mb {ABC}_{x_0,\bfT_K}(c_i,\mathbb{O}_i)$ for $i=1,2$.
$(\Rightarrow)$ Suppose there is a $w\in \widetilde W$ such that $\mathcal A(c_1) = \mathcal A(wc_2)$ and $\mathbb{O}_1 = i_{c_1,wc_2}(w.\mathbb{O}_2)$.
Note that $J_i$ is a root basis for $\Psi_{\mathcal A(c_i)}$.
Thus since $\mathcal A(c_1) = \mathcal A(wc_2) = w\mathcal A(c_2)$, we have that $J_1$ and $w J_2$ are both root bases for $\Psi_{\mathcal A(c_1)}$.
Thus there exists $w_0\in W_{c_1}$ such that $J_1 = w_0wJ_2$.
Now, the Bala--Carter data for $i_{c_1,wc_2}(w.\mathbb{O}_2)$ with respect to $wJ_2$ is $wJ_2'$.
With respect to the root basis $J_1$ it is thus $w_0wJ_2'$.
Thus $(J_1,J_1')\sim_{\widetilde W} (J_2,J_2')$.
$(\Leftarrow)$ Suppose there is some $w\in \widetilde W$ such that $(J_1,J_1') = w.(J_2,J_2')$.
Then $J_1 = wJ_2$ implies that $\mathcal A(c_1) = \mathcal A(wc_2)$.
Moreover, the Bala--Carter data of $i_{c_1,wc_2}(w.\mathbb{O}_2)$ is $wJ_2'$ with respect to $wJ_2 = J_1$.
But $w.J_2' = J_1'$ and so $\mathbb{O}_1$ and $i_{c_1,wc_2}({w}.\mathbb{O}_2)$ have the same Bala--Carter data and so must be equal.
\end{proof}
\begin{theorem}
\label{thm:affine_bala_carter}
\nomenclature{$\mathcal L_{x_0,\bfT_K}$}{}
The map
$$\mathcal L_{x_0,\bfT_K}:\mb {ABC}_{\bfT_K}(\widetilde\mathbb{D}elta) \to \mathcal N_o(K), \quad (J,J')\mapsto \mathcal L\circ I_{o,d,c_0}^K(J,J')$$
descends to a bijection $\mb {ABC}_{\bfT_K}(\widetilde\mathbb{D}elta)/\sim_{\widetilde W} \ \to \mathcal N_o(K)$.
\end{theorem}
\begin{proof}
This follows immediately from the previous Proposition.
\end{proof}
\nomenclature{$\mb{BC}_{\bfT_K}(\mathbb{D}elta)$}{}
\nomenclature{$\mb{BC}_{x_0,\bfT_K}(\mathbb{D}elta)$}{}
\nomenclature{$\sim_W$}{}
Recall that regular Bala--Carter theory states that there is a map from the set $\mb {BC}_{\bfT_K}(\mathbb{D}elta)$ of pairs $(J,J')$ where $J\subseteq \mathbb{D}elta$ and $J'$ is distinguished in $J$, to $\mathcal N_o({\bar k})$ that descends to a bijection $\mb{BC}_{x_0,\bfT_K}:\mb {BC}_{\bfT_K}(\mathbb{D}elta)/\sim_W\ \to \mathcal N_o(\bar k)$ (where $\sim_W$ is the obvious analogue of $\sim_{\widetilde W}$).
So Theorem \ref{thm:affine_bala_carter} is an affine version of the combinatorial Bala--Carter Theorem in a very literal sense ($\mb {ABC}$ stands for Affine Bala--Carter).
In Proposition \ref{prop:trivccl} we give a precise statement about how these two parameterisations relate.
\paragraph{Properties of Lifting of Nilpotent Orbits}
\nomenclature{$\bfL$}{}
\nomenclature{$\bfl$}{}
\nomenclature{$\bar{\mb t}_K$}{}
\nomenclature{$\bar{\mathbb{P}hi}$}{}
\nomenclature{$\bar{\alpha}$}{}
\nomenclature{$\bar{\omega}$}{}
\nomenclature{$i_{c,\bar\omega}$}{}
\nomenclature{$j_{c,\bar\omega}$}{}
\nomenclature{$j_{c,o}$}{}
\nomenclature{$\Lambda_{x_0}^{\barF_q}$}{}
\nomenclature{$\Lambda_{\bfT_K}^{\bar k}$}{}
Let $\bfL = \bfL_{x_0}$, $\bfl = \bfl_{x_0}$.
Note that $\bfL(\barF_q) = \bfG_\mathbb{Z}(\barF_q)$ since $\bfG_K$ is split.
Let $\bar{\mb t}_K$ be the Lie algebra of $\bar \bfT_K$.
There is a natural identification between the character lattices of $\bfT_K$ and $\bar \bfT_K$.
For $\alpha\in \mathbb{P}hi$ let $\bar \alpha$ denote the image under this identification.
Let $\bar \mathbb{P}hi = \set{\bar \alpha:\alpha\in \mathbb{P}hi}$.
Let $\mf P$ be the maximal ideal of $\mf O$.
Recall that a choice of uniformiser of $K$ induces an isomorphism of additive groups $\mf P^i/\mf P^{i+1}\to \mf P^j/\mf P^{j+1}$ for any $i,j\in \mathbb{Z}$.
For any facet $c$ of $\mathcal A$ this in turn induces an isomorphism from $\bfL_c(\barF_q)$ onto the pseudo-Levi of $\bfL(\barF_q)$ corresponding to $\bar\bfT_K(\barF_q)$ and $\bar \mathbb{P}hi_c$.
For a uniformiser $\bar \omega$ let $i_{c,\bar\omega}:\bfL_c\to \bfL$ denote the corresponding homomorphism and $j_{c,\bar\omega}:\bfl_c\to \bfl$ the associated morphism of Lie algebras.
One important property of this map is that the following diagram commutes
\begin{equation}
\begin{tikzcd}
& \bar{\mb t}_K \arrow[dl] \arrow[dr] & \\
\bfl_c \arrow[rr,"j_{c,\bar \omega}"] & & \bfl.
\end{tikzcd}
\end{equation}
Moreover, since $(\bfl_c)_{\bar \alpha}$ maps to $\bfl_{\bar \alpha}$ the resulting map of nilpotent orbits does not depend on the choice of uniformiser for $K$ (this follows from Bala--Carter theory).
Thus we obtain a canonical map $j_{c,o}:\mathcal N_o^{\bfL_c}(\barF_q) \to \mathcal N_o^{\bfL}(\barF_q)$.
Note since $\bfL$ and $\bfG_\mathbb{Z}$ have the same root data, by Lemma \ref{lem:pom} there is an order preserving isomorphism
$$\Lambda_{x_0}^{\barF_q}: \mathcal N_o^{\bfL}(\barF_q) \to \mathcal N_o.$$
Let $\Lambda_{\bfT_K}^{\bar k}:\mathcal N_o(\bar k) \to \mathcal N_o$ be the isomorphism from Lemma \ref{lem:pom} applied to $F = \bar k$.
\begin{theorem}
\label{thm:lift}
Let $(c,\mathbb{O})\in I_{o,\mathcal A}^K$.
Then
$$\Lambda_{\bfT_K}^{\bar k}\circ\mathcal N_o(\bar k/K)\circ \mathcal L_c(\mathbb{O}) = \Lambda_{x_0}^{\barF_q}\circ j_{c,o}(\mathbb{O}).$$
\end{theorem}
\begin{proof}
Let $\ms x,\ms h,\ms y$ be an $\lalg{sl}_2$-triple for $\mathbb{O}$ and let $\ms X,\ms H,\ms Y$ be a lift to an $\lalg{sl}_2$-triple for $\mathcal L_c(\mathbb{O})$.
We have that $\alpha(\ms H)\in \mathbb{Z}$ for all $\alpha \in \mathbb{P}hi$.
There exists a $w\in W$ such that $\alpha(w.\ms H)\ge 0$ for all $\alpha\in \mathbb{D}elta$.
In particular then $\alpha(w.\ms H)\in\set{0,1,2}$ for all $\alpha\in\mathbb{D}elta$.
We also have $j_{c,\bar \omega}(\mathbb{O}) = w.j_{c,\bar \omega}(\mathbb{O}) = j_{wc,\bar \omega}(w.\mathbb{O})$ and $w.\mathcal L_c(\mathbb{O}) = \mathcal L_c(\mathbb{O})$.
Thus by replacing $(c,\mathbb{O})$ with $(wc,w.\mathbb{O})$ we can assume that $\alpha(\ms H)\in\set{0,1,2}$ for all $\alpha\in \mathbb{D}elta$.
Then $j_{c,\bar \omega}(\ms x),j_{c,\bar \omega}(\ms h),j_{c,\bar \omega}(\ms y)$ is an $\lalg{sl}_2$-triple for $j_{c,o}(\mathbb{O})$.
But since $\bar \alpha(j_{c,\bar \omega}(\ms h)) = \bar\alpha(\ms h)$ equals the image of $\alpha(\ms H)$ in $\mathbb F_q$ for all $\alpha\in\mathbb{D}elta$, $j_{c,o}(\mathbb{O})$ and $\mathcal N_o(\bar k/K)(\mathcal L_c(\mathbb{O}))$ have the same weighted Dynkin diagram with respect to $\bar \mathbb{D}elta$ and $\mathbb{D}elta$ respectively and so $\Lambda_{\bfT_K}^{\bar k}\circ\mathcal N_o(\bar k/K)\circ\mathcal L_c(\mathbb{O}) = \Lambda_{x_0}^{\barF_q}\circ j_{c,o}(\mathbb{O})$.
\end{proof}
\begin{corollary}
\label{cor:alginc}
Let $(c,\mathbb{O}_1),(c,\mathbb{O}_2)\in I_o^K$ and suppose that $\mathbb{O}_1 < \mathbb{O}_2$.
Then
$$\mathcal N_o(\bar k/K)(\mathcal L_c(\mathbb{O}_1)) < \mathcal N_o(\bar k/K)(\mathcal L_c(\mathbb{O}_2)).$$
\end{corollary}
\begin{proof}
By \cite[Theorem 5.5]{ggg-np}, if $\mathbb{O}_1 < \mathbb{O}_2$, then $j_{c,o}(\mathbb{O}_1) < j_{c,o}(\mathbb{O}_2)$.
Thus
$$\mathcal N_o(\bar k/K)(\mathbb{O}_1) = \Lambda_{x_0}^{\barF_q}\circ j_{c,o}(\mathbb{O}_1) < \Lambda_{x_0}^{\barF_q}\circ j_{c,o}(\mathbb{O}_2) = \mathcal N_o(\bar k/K)(\mathbb{O}_2).$$
\end{proof}
\begin{corollary}
\label{cor:strictly_inc}
The map $\mathcal L:I_o^K\to \mathcal N_o(K)$ is strictly increasing.
\end{corollary}
\begin{proof}
Suppose that $(c,\mathbb{O}_1') < (c,\mathbb{O}_2')$.
Let $\mathbb{O}_i = \mathcal L_c(\mathbb{O}_i')$ for $i=1,2$.
Since $\mathcal L$ is non-decreasing we have that $\mathbb{O}_1\le \mathbb{O}_2$.
By corollary \ref{cor:alginc} we have that $\mathcal N_o(\bar k/K)(\mathbb{O}_1) \ne \mathcal N_o(\bar k/K)(\mathbb{O}_2)$.
Therefore $\mathbb{O}_1 \ne \mathbb{O}_2$ and so $\mathbb{O}_1< \mathbb{O}_2$ as required.
\end{proof}
\begin{corollary}
\label{cor:partialorder}
The closure ordering on $\mathcal N_o(K)$ is a partial order.
\end{corollary}
\begin{proof}
Let $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(K)$ and suppose $\mathbb{O}_1\le \mathbb{O}_2 \le \mathbb{O}_1$.
By Theorem \ref{thm:closurerels} there exists $\mathbb{O}_{1.5}\in \mathcal N_o(K)$ with $\mathbb{O}_1\le \mathbb{O}_{1.5}\le \mathbb{O}_2$ such that $\mathbb{O}_1\le\mathbb{O}_{1.5}$ is lifted and $\mathbb{O}_{1.5}\le \mathbb{O}_2$ is degenerate.
Since $\mathcal N_o(\bar k/K)(\mathbb{O}_1) = \mathcal N_o(\bar k/K)(\mathbb{O}_2)$, by corollary \ref{cor:strictly_inc}, we must have $\mathbb{O}_1 = \mathbb{O}_{1.5}$.
Let $(c,\mathbb{O}'_{1.5})\in I_o^K(\mathbb{O}_{1.5})$ be such that $\mathcal C(c,\mathbb{O}'_{1.5}) \cap \mathbb{O}_2 \ne \emptyset$ and let $\ms X,\ms H,\ms Y\in \bfP_c(\mf O)$ be an $\lalg{sl}_2$-triple for $\mathbb{O}_{1.5}$.
By Lemma \ref{lem:unramlift} we have that
$$\ms X+\bfu_c(\mf O) = \bfU_c(\mf O).(\ms X+\ms c_{\bfu_c(\mf O)}(\ms Y)).$$
It follows that
$$\left(\ms X+\ms c_{\bfu_c(\mf O)}(\ms Y)\right) \cap \mathbb{O}_2 \ne \emptyset.$$
However,
$$\ms X+\ms c_{\bfu_c(\mf O)}(\ms Y) \subseteq \ms X+\ms c_{\mf g(\bar k)}(\ms Y)$$
and since $\mathcal N_o(\bar k/K)(\mathbb{O}_1) = \mathcal N_o(\bar k/K)(\mathbb{O}_2)$ we have that
$$\left(\ms X+\ms c_{\mf g(\bar k)}(\ms Y)\right)\cap \mathcal N_o(\bar k/K)(\mathbb{O}_2) = \ms X.$$
Thus $\left(\ms X+\ms c_{\bfu_c(\mf O)}(\ms Y)\right)\cap \mathbb{O}_2 = \ms X$ and so $\mathbb{O}_1 = \mathbb{O}_2$.
\end{proof}
\paragraph{McNinch and Sommers' Parameterisation of \texorpdfstring{$\mathcal N_{o,c}$}{Noc}}
\label{sec:pseudo-levis}
\nomenclature{$\mathcal N_{o,c}$}{}
\nomenclature{$\mathcal C(G_0)$}{}
For a finite group $G_0$ write $\mathcal C(G_0)$ for the set of conjugacy classes of $G_0$.
For $\mathbb{O} \in \mathcal N_o$ let $A(\mathbb{O})$ denote the component group of the centraliser of an element of $\mathbb{O}$.
This group is well defined up to inner automorphism of $G$.
Let
$$\mathcal N_{o,c} = \{(\mathbb{O},C):\mathbb{O}\in \mathcal N_o,C\in \mathcal C(A(\mathbb{O}))\}.$$
Note that the $c$ here is short for `conjugacy class' and does not refer to a face of the building.
By \cite[Remark 2]{sommersmcninch} this set is in bijection with the set
$$\{(n,gC_G^\circ(n)):n\in \mathcal N^G,g\in C_G(n)\}/G.$$
This is perhaps a more canonical description of $\mathcal N_{o,c}$, but the first description has the benefit of being easier to understand.
We will pass interchangeably between the two.
\nomenclature{$\mathscr F$}{}
\nomenclature{$I_d^\mathbb{C}$}{}
Let $\mathscr F$ denote the set of pairs $(L,tZ^\circ)$ such that $L$ is a pseudo-Levi of $G$ and $tZ^\circ$ is an element of $Z/Z^\circ$ where $Z$ is the center of ${L}$ and $L = C_{G}^\circ(tZ^\circ)$.
Let $I_d^\mathbb{C}$ denote the set of all triples $({L},tZ^\circ,x)$ such that $(L,tZ^\circ)\in \mathscr F$, and $x$ is a distinguished nilpotent element of $\mf l$, the Lie algebra of $L$.
McNinch and Sommers prove the following result.
\begin{theorem}
\label{thm:somninch}
(\cite{sommersmcninch})
The map $\mathrm{MS}:({L},tZ^\circ,x)\mapsto (x,tC_{G}^\circ(x))$ yields a bijection between $I_d^\mathbb{C}/G$ and $\mathcal N_{o,c}$.
\end{theorem}
\nomenclature{$I_{d,T}^\mathbb{C}$}{}
\nomenclature{$I_{o,d,T}^\mathbb{C}$}{}
\nomenclature{$\mathscr F_T$}{}
Note that every semisimple element can be conjugated to lie in $T$.
Thus if we define $I_{d,T}^\mathbb{C}$ to be the subset of $I_d^\mathbb{C}$ consisting of triples $(L,tZ^\circ,x)$ such that $T\subseteq L$, then the map in Theorem \ref{thm:somninch} descends to a bijection between $I_{d,T}^\mathbb{C}/G$ and $\mathcal N_{o,c}$.
Define $\mathscr F_{T} = \set{(L,tZ^\circ)\in \mathscr F:T\subseteq L}$.
We will additionally find it convenient to work with orbits rather than elements.
Define $I_{o,d,T}^\mathbb{C}$ to be the set of all triples $({L},tZ^\circ,\mathbb{O})$ where $({L},tZ^\circ) \in \mathscr F_{T}$, and $\mathbb{O}$ is a distinguished nilpotent orbit of $\mf l$.
\begin{proposition}
\label{prop:unip}
The map $({L},tZ^\circ,x)\mapsto ({L},tZ^\circ,L.x)$ induces a bijection between $I_{d,T}^\mathbb{C}/G$ and $I_{o,d,T}^\mathbb{C}/N$.
\end{proposition}
\begin{proof}
The induced map $\phi:I_{d,T}^\mathbb{C}\to I_{o,d,T}^\mathbb{C}/N$ is clearly a surjection.
Suppose $g\in G$ is such that $g.(L_1,t_1Z_1^\circ,x_1) = (L_2,t_2Z_2^\circ,x_2)$.
Then $T,g.T$ are both maximal tori of $L_2$ and so there is an $l\in L_2$ so that $(lg).T = T$ and so $lg = n$ for some $n \in N$.
Clearly $n.L_1 = L_2$ and $n.(t_1Z_1^\circ) = t_2Z_2^\circ$.
Also $n.(L_1.x_1) = L_2.(n.x_1) = L_2.(g.x_1) = L_2.x_2$.
Thus $\phi$ factors through $I_{d,T}^\mathbb{C}/G$.
Now suppose there exists an $n\in N$ such that $n.(L_1,t_1Z_1^\circ,L_1.x_1) = (L_2,t_2Z_2^\circ,L_2.x_2)$.
Then $n.L_1 = L_2$ and $n.(L_1.x_1) = L_2.x_2$.
Thus $L_2.(n.x_1) = L_2.x_2$ and so there exists an $l\in L_2$ such that $(ln).x_1 = x_2$.
Clearly $(ln).(t_1Z_1^\circ) = t_2Z_2^\circ$ since $t_2Z_2^\circ$ lies in the center of $L_2$.
Thus $(ln).(L_1,t_1Z_1^\circ,x_1) = (L_2,t_2Z_2^\circ,x_2)$.
Thus $\phi$ descends to a bijection as required.
\end{proof}
Note that $N$ stabilises $I_{o,d,T}^\mathbb{C}$ and $T$ acts trivially on $I_{o,d,T}^\mathbb{C}$.
Thus $W$ acts on $I_{o,d,T}^\mathbb{C}$ and $I_{o,d,T}^\mathbb{C}/W = I_{o,d,T}^\mathbb{C}/N$.
We thus have bijections
\begin{equation}
\label{eq:Wequivtoccl}
\begin{tikzcd}
& I_d^\mathbb{C}/G \arrow[dl,swap,"\sim"] \arrow[dr,"\sim"] & \\
I_{o,d,T}^\mathbb{C}/W & & \mathcal N_{o,c}.
\end{tikzcd}
\end{equation}
\nomenclature{$\mathrm{MS}_{o,T}$}{}
Write $\mathrm{MS}_{o,T}$ for the composition $I_{o,d,T}^\mathbb{C}\to I_{o,d,T}^\mathbb{C}/W \xrightarrow{\sim} \mathcal N_{o,c}$.
\paragraph{From Faces of the Apartment to Pseudo-Levis}
Recall from section \ref{par:basicnotation2} the definitions of $T$ and $\mathbb T$.
We view $T$ as a complex algebraic group with the Zariski topology and $\mathbb T$ as a topological group with the topology induced from the classical topology on $V \cong \mathbb{R}^{\mathrm{rank}(X^*)}$.
Recall the following standard results about closed subgroups of $T$ and $\mathbb T$.
\begin{proposition}
\label{prop:bij1}
\cite[Section 16]{humphreys}
There is a $W$-equivariant bijective correspondence
\begin{align}
\set{\text{closed subgroups } H\le T} &\leftrightarrow \left\{\mathbb{Z} \text{-submodules } M\le X^*\right\} \\
H &\rightarrow X^*_{H} = \set{\chi\in X^*:\chi(H) = 1} \\
\set{t\in T: \chi(t) = 1 \ \forall \chi \in M} &\leftarrow M.
\end{align}
\end{proposition}
\begin{proposition}
\label{prop:bij2}
\cite[Chapter 4]{pontryaginduality}
There is a $W$-equivariant bijective correspondence
\begin{align}
\set{\text{closed subgroups } {\mathbb H} \le {\mathbb T}} &\leftrightarrow \left\{\mathbb{Z} \text{-submodules } M\le \hat{\mathbb T} \right\} \\
{\mathbb H} &\rightarrow X^*_{\mathbb H} = \set{\chi\in X^*:\chi(\mathbb H) = 0 + \mathbb{Z}} \\
\set{t\in \mathbb T: \chi(t) = 0 + \mathbb{Z} \ \forall \chi \in M} &\leftarrow M.
\end{align}
Moreover, for every closed subgroup ${\mathbb H} \le {\mathbb T}$ the connected component of ${\mathbb H}$ containing the identity, $\mathbb H^\circ$, coincides with the annihilator of the torsion elements of the Pontryagin dual of $\mathbb H$.
\end{proposition}
For an affine subspace $A\subseteq V$, call $A$ admissible if it is equal to the vanishing set of a subset of the affine roots $\mathbb{P}si$.
\begin{proposition}
\label{prop:pseudolevis}
For every admissible $A\subseteq V$, there is a pseudo-Levi $L$ with $\mathbb{P}hi_L = \mathbb{P}hi_A$.
\end{proposition}
\begin{proof}
This follows easily from \cite[Remark 5.2 (b)]{steinberg}.
\end{proof}
\nomenclature{$\mathscr A$}{}
Write $\mathscr A$ for the set of admissible affine subspaces of $V$.
Cocharacters pair integrally with the roots $\mathbb{P}hi$ and so ${X_*}$ acts on the collection of admissible affine subspaces by translation.
For $A\in \mathscr A$ write $[A]$ for the orbit of $A$ in $\mathscr A$ under the action of $X_*$.
For $A\in \mathscr A$ write $L_{A}$ for the pseudo-Levi containing $T$ with $\mathbb{P}hi_{L_A} = \mathbb{P}hi_A$.
For $v\in {X_*}$ and $w\in W$ we have $L_{A+v} = L_A$ and $L_{w.A} = w.L_A$ (where $w.L_A$ denotes $wL_Aw^{-1}$).
\begin{remark}
\label{rmk:roots}
For a closed subgroup $H\le T$, $X^*_{H^\circ} = \mathbb{R} X^*_{H}\cap X^*$.
Moreover, for $tH^\circ \in H/H^\circ$ we have $X^*_{H}\subseteq X^*_{tH^\circ} \subseteq X^*_{H^\circ}$ and $\mathbb{P}hi_{H}\subseteq \mathbb{P}hi_{tH^\circ} \subseteq \mathbb{P}hi_{H^\circ}$.
Identical statements hold for closed subgroups ${\mathbb H}\le {\mathbb T}$.
\end{remark}
\begin{lemma}
\label{lem:lift}
Let ${\mathbb H} \le {\mathbb T}$ be a closed subgroup, $\pi:V \to {\mathbb T}$ be the projection map and $z+{\mathbb H}^\circ \in {\mathbb H}/{\mathbb H}^\circ$.
Then
\begin{enumerate}
\item there is an affine subspace $A\subseteq V$ such that $\pi^{-1}(z+{\mathbb H}^\circ) = A+{X_*}$, $X^*_A = X^*_{z+{\mathbb H}^\circ}$ and $\dim A = \rk {\mathbb H}^\circ$.
\item if $B$ is any other affine subspace with $\pi^{-1}(z+{\mathbb H}^\circ) = B+{X_*}$ then $B\in [A]$, $X^*_B = X^*_{z+{\mathbb H}^\circ}$ and $\dim B = \rk {\mathbb H}^\circ$.
\end{enumerate}
\end{lemma}
\begin{proof}
Let $N$ be the annihilator of ${\mathbb H}^\circ$ in $X^*$.
By Proposition \ref{prop:bij2}, $X^*/N$ is a free $\mathbb{Z}$-module and so the short exact sequence
\begin{equation}
0\longrightarrow N \longrightarrow X^* \longrightarrow X^*/N \longrightarrow 0
\end{equation}
splits.
We can thus find a basis $\chi_1,\dots,\chi_n$ of $X^*$ such that $\chi_1,\dots,\chi_k$ is a basis for $N$ and $\chi_{k+1},\dots,\chi_n$ project onto a basis for $X^*/N$.
Let $\gamma_1,\dots,\gamma_n$ be the dual basis in ${X_*}$.
Then ${\mathbb H}^\circ$ is evidently equal to the image of $U = \sum_{i=k+1}^n\mathbb{R}\gamma_i = \cap_{i=1}^k\ker\chi_i$ under $\pi:V\to {\mathbb T}$.
Let $w\in V$ be such that $\pi(w) \in z + {\mathbb H}^\circ$.
Then $A = w+U$ maps onto $z+{\mathbb H}^\circ$ and so $\pi^{-1}(z+{\mathbb H}^\circ) = A+{X_*}$.
It is clear that $\chi\in X^*$ takes integer values on $A$ iff it does so on $A+{X_*}$ iff it does so on $z+{\mathbb H}^\circ$ and so $X^*_A = X^*_{z+{\mathbb H}^\circ}$.
Also $\dim A = \dim U = \rk X^*/N = \rk{\mathbb H}^\circ$.
This establishes (1).
To establish (2) note that $A+{X_*}$ is a disjoint union of translates of $A$ by elements of the lattice $\bigoplus_{i=1}^k\mathbb{Z}\gamma_i$ and that $\bigoplus_{i=1}^k\mathbb{R}\gamma_i \cap A$ is a single point.
Thus the connected components of $A+{X_*}$ are exactly $[A]$.
If $B$ is an affine subspace such that $\pi^{-1}(z+{\mathbb H}^\circ) = B + {X_*}$ then as $B$ is connected it must be contained in some $C\in [A]$.
The property that $A+{X_*} = B+{X_*}$ then forces $C = B + \bigoplus_{i=k+1}^n\mathbb{Z}\gamma_i$.
But this can clearly only happen if $B=C$.
Finally, it is obvious that if $B\in [A]$ then $\dim B = \dim A$ and $X^*_B = X^*_A$.
\end{proof}
\begin{proposition}
\label{prop:adm}
Let $L$ be a pseudo-Levi with center ${Z}$.
There is a canonical bijection
\begin{align}
\label{eq:Zbij}
{Z}/Z^\circ & \leftrightarrow \set{A\in \mathscr A: \mathbb{P}hi_{Z} \subseteq \mathbb{P}hi_A \subseteq \mathbb{P}hi_{Z^\circ}}/{X_*}
\end{align}
with the property that if $[A]$ is the image of $tZ^\circ$ then $\mathbb{P}hi_A = \mathbb{P}hi_{C_{G}^\circ(tZ^\circ)}$.
In particular, there is a bijection
\begin{align}
\set{tZ^\circ\in Z/Z^\circ:C_{G}^\circ(tZ^\circ) = L} &\leftrightarrow \set{A\in \mathscr A: \mathbb{P}hi_A = \mathbb{P}hi_{L}}/{X_*}.
\end{align}
\end{proposition}
\begin{proof}
Fix a pseudo-Levi $L$ and let $Z$ denote its center.
The annihilator of $Z$ in $X^*$ is $\mathbb{Z} \mathbb{P}hi_{L}$.
Write $N$ for the annihilator for $Z^\circ$ in $X^*$.
Recalling the identification between $X^*$ and $\hat{\mathbb T}$, let $\mathbb H$ denote the annihilator of $\mathbb{Z}\mathbb{P}hi_{L}$ in $\mathbb T$.
Then both the character group of $Z/Z^\circ$ and the Pontryagin dual of ${\mathbb H}/{\mathbb H}^\circ$ naturally identify with $N/\mathbb{Z}\mathbb{P}hi_{L} = \text{tor}(X^*/\mathbb{Z}\mathbb{P}hi_{L})$ - the torsion subgroup of $X^*/\mathbb{Z}\mathbb{P}hi_{L}$.
But $Z/Z^\circ$ and ${\mathbb H}/{\mathbb H}^\circ$ are both finite Abelian groups and since the Pontryagin dual and character group (which we denote by $X^*$) coincide for finite groups we obtain a canonical isomorphism $f_{L}$ given by the composition
\begin{equation}
\label{eq:canon}
Z/Z^\circ \to X^*(\text{tor}(X^*/\mathbb{Z}\mathbb{P}hi_{L})) \to (\text{tor}(X^*/\mathbb{Z}\mathbb{P}hi_{L}))^\wedge \to {\mathbb H}/{\mathbb H}^\circ.
\end{equation}
\begin{claim}
The isomorphism $f_{L}$ has the property that for all $tZ^\circ\in Z/Z^\circ$, $X^*_{tZ^\circ} = X^*_{f_{L}(tZ^\circ)}$.
\end{claim}
\begin{proof}
Suppose $\chi\in X^*$ vanishes on $tZ^\circ$.
Then since $\chi$ is multiplicative, it must vanish on $Z^\circ$ too and hence lie in $N$.
Its image in $N/\mathbb{Z}\mathbb{P}hi_{L} = \text{tor}(X^*/\mathbb{Z}\mathbb{P}hi_{L}) = X^*(Z/Z^\circ)$ - the character group of $Z/Z^\circ$ - must thus lie in $X^*(Z/Z^\circ)_{tZ^\circ}$.
Conversely any lift of an element $\bar \chi \in X^*(Z/Z^\circ)_{tZ^\circ}$ to $X^*$ clearly lies in $X^*_{tZ^\circ}$.
Thus $X^*_{tZ^\circ}$ is equal to all possible lifts of elements in $X^*(Z/Z^\circ)_{tZ^\circ}$.
We may similarly characterise $X^*_{f_{L}(tZ^\circ)}$ and so we obtain the desired result.
\end{proof}
Now fix a $tZ^\circ$, let $z+{\mathbb H}^\circ = f_{L}(tZ^\circ)$ and let $\pi:V\to {\mathbb T}$ denote the projection map.
By Lemma \ref{lem:lift} we may assign to $z+{\mathbb H}^\circ$ a well defined class $[A]$ with the property that for any $B\in [A]$ we have $\pi^{-1}(z+{\mathbb H}^\circ) = B+{X_*}$, $\dim B = \rk {\mathbb H}^\circ$ and $X^*_B = X^*_{z+{\mathbb H}^\circ}$.
Define $\lambda_{L}(tZ^\circ)$ to be $[A]$.
\begin{claim}
$[A]$ is an admissible class with $\mathbb{P}hi_{Z}\subseteq \mathbb{P}hi_A \subseteq \mathbb{P}hi_{Z^\circ}$.
\end{claim}
\begin{proof}
The conditions on $\mathbb{P}hi_A$ are evident from the fact that $\mathbb{P}hi_A = \mathbb{P}hi_{z+{\mathbb H}^\circ}$ and remark \ref{rmk:roots}.
It remains to show that $A$ is admissible.
We know that $\mathbb{P}hi_{L}\subseteq X^*_{z+{\mathbb H}^\circ} = X^*_A$ and so $A$ lies in the vanishing set of $\set{\alpha-\alpha(A):\alpha\in \mathbb{P}hi_{L}}\subseteq \mathbb{P}si$.
It thus suffices to check that $\dim A = \dim V - \dim \mathbb{R}\mathbb{P}hi_{L}$.
This follows from $\dim A = \rk{\mathbb H}^\circ = \rk X^* - \rk \mathbb{Z}\mathbb{P}hi_{L} = \dim V - \dim \mathbb{R}\mathbb{P}hi_{L}$.
\end{proof}
Thus $\lambda_{L}$ gives us a well defined forwards map for equation \ref{eq:Zbij}.
Moreover $\mathbb{P}hi_A = \mathbb{P}hi_{tZ^\circ} = \mathbb{P}hi_{C_{G}^\circ(tZ^\circ)}$ and so $\lambda_{L}$ has the advertised property.
It thus remains to show that the map has an inverse.
Let $[A]$ be an admissible class with $\mathbb{P}hi_{Z}\subseteq \mathbb{P}hi_A \subseteq \mathbb{P}hi_{Z^\circ}$.
Then $\pi(A)\subseteq {\mathbb H}$ and is connected so $\pi(A) \subseteq z + {\mathbb H}^\circ$ for some $z\in {\mathbb H}$.
But also
\begin{equation}
\mathbb{P}hi_{Z}\subseteq \mathbb{P}hi_A \subseteq \mathbb{P}hi_{Z^\circ} \subseteq X^*_{Z^\circ} = \mathbb{R} X^*_{Z} \cap X^* \subseteq \mathbb{R} X^*_{Z} = \mathbb{R} \mathbb{P}hi_{Z}
\end{equation}
and so $\mathbb{R}\mathbb{P}hi_A = \mathbb{R}\mathbb{P}hi_{Z}$.
Thus, since $A$ is admissible, $\dim A = \dim V - \dim \mathbb{R}\mathbb{P}hi_A = \dim V - \dim \mathbb{R}\mathbb{P}hi_{Z} = \rk{\mathbb H}^\circ$.
We must therefore have that $\pi(A) = z+{\mathbb H}^\circ$.
Applying $f_{L}^{-1}$ we obtain a backwards map which is clearly inverse to $\lambda_{L}$.
\end{proof}
\begin{remark}
\label{rmk:cs}
Let $L_1\subseteq L_2$ be pseudo-Levis containing $T$ with centers $Z_1,Z_2$ respectively.
Let $\mathbb H_1,\mathbb H_2$ be the corresponding subgroups of $\mathbb T$.
We have $Z_2\subseteq Z_1$ which induces a map $Z_2/Z_2^\circ \to Z_1/Z_1^\circ$.
Similarly we have a map $\mathbb H_2/\mathbb H_2^\circ \to \mathbb H_1/\mathbb H_1^\circ$.
By the naturality of the construction of $f_{L_1},f_{L_2}$ we have that
\begin{equation}
\begin{tikzcd}
Z_2/Z_2^\circ \arrow[r,"f_{L_2}"] \arrow[d] & \mathbb H_2/\mathbb H_2^\circ \arrow[d] \\
Z_1/Z_1^\circ \arrow[r,"f_{L_1}"] & \mathbb H_1/\mathbb H_1^\circ
\end{tikzcd}
\end{equation}
commutes.
\end{remark}
\begin{proposition}
\label{prop:admissiblesubset}
\nomenclature{$\lambda$}{}
There is a canonical $W$-equivariant bijection
\begin{align}
\mathscr F_{T} \leftrightarrow \mathscr A/{X_*}
\end{align}
where the forwards map is given by $\lambda:(L,tZ^\circ) \mapsto \lambda_{L}(tZ^\circ)$.
\end{proposition}
\begin{proof}
We show first that $\lambda$ is injective.
Let $(L_1,t_1Z_1^\circ),(L_2,t_2Z_2^\circ) \in \mathscr F_{T}$ and write $[A_i] = \lambda(L_i,t_iZ_i^\circ)$.
Suppose we have $[A_1] = [A_2]$.
Then
\begin{equation}
\mathbb{P}hi_{L_1} = \mathbb{P}hi_{t_1Z_1^\circ} = \mathbb{P}hi_{A_1} = \mathbb{P}hi_{A_2} = \mathbb{P}hi_{t_2Z_2^\circ} = \mathbb{P}hi_{L_2}
\end{equation}
and so $L_1 = L_2 =: L$.
We then get that $t_1Z_1^\circ = t_2Z_2^\circ$ from the fact that $\lambda_{L}$ is a bijection.
Now let $[A]$ be an admissible class.
By Proposition \ref{prop:pseudolevis} there exists a pseudo-Levi $L$ with $\mathbb{P}hi_{L} = \mathbb{P}hi_A$.
Let $Z$ denote the center of $L$.
Then by Proposition \ref{prop:adm} there exists a $tZ^\circ$ such that $\lambda(L,tZ^\circ) = [A]$ and so $\lambda$ is surjective.
It remains to show that $\lambda$ is $W$-equivariant.
Since the projection map $V\to {\mathbb T}$ is $W$-equivariant it suffices to check that for all $w\in W$, the outer square of the following diagram commutes
\begin{equation}
\begin{tikzcd}
Z/Z^\circ \arrow[rrr,"f_{L}"] \arrow{dr} \arrow[ddd,"\hphantom{x}w(-)"] & & & {\mathbb H}/{\mathbb H}^\circ \arrow[ddd,"\hphantom{x}w(-)"] \\
& X^*(N/\mathbb{Z}\mathbb{P}hi_{L}) \arrow[r,"="] \arrow[d,"\circ w^{-1}"] & (N/\mathbb{Z}\mathbb{P}hi_{L})^\wedge \arrow[ur] \arrow[d,"\circ w^{-1}"] & \\
& X^*(wN/w\mathbb{Z}\mathbb{P}hi_{L}) \arrow[r,"="] & (wN/w\mathbb{Z}\mathbb{P}hi_{L})^\wedge \arrow[dr] & \\
\hphantom{x}wZ/wZ^\circ \arrow[rrr,"f_{wL}"] \arrow[ur] & & & \hphantom{x}w{\mathbb H}/w{\mathbb H}^\circ.
\end{tikzcd}
\end{equation}
But it is clear that all the inner squares commute and so the outer square must do too.
\end{proof}
An important consequence of the proposition is the following corollary.
\begin{corollary}
\label{cor:mfL}
\nomenclature{$\mf L$}{}
Let
\begin{equation}
\mf L:\{c \text{ a face of } V\} \rightarrow \mathscr F_T, \quad c\mapsto \lambda^{-1}(\mathcal A(c)+X_*).
\end{equation}
Then $\mf L$ is a $W$-equivariant surjection and $c_1,c_2$ lie in the same fibre iff $\mathcal A(c_1)+X_*=\mathcal A(c_2)+X_*$.
Moreover, if $\mf L(c) =(L,tZ_L^\circ)$ then $L$ is a complex reductive group with the same root datum as $\bfL_{\kappa_{x_0}(c)}(\barF_q)$.
\end{corollary}
\nomenclature{$\tau_{y_0}$}{}
Let us investigate how the bijection from Proposition \ref{prop:admissiblesubset} behaves under shift by a hyperspecial point $y_0\in V$ (i.e. a point $y\in V$ such that $\mathbb{P}hi_{y} = \mathbb{P}hi$).
Let $\tau_{y_0}:\mathscr A/X_*\to \mathscr A/X_*$ be the map induced by translating by $y_0$.
Let $A_0\in\mathscr A$ be the vanishing set of $\mathbb{P}si_{y_0}$.
Then $y_0\in A_0$ and $\tau_y = \tau_{y_0}$ for any $y\in A_0$ (since $\phi(y-y_0) = 0$ for all $\phi\in \mathbb{P}hi$ whenever $y\in A_0$).
Since $\tau_y$ does not depend on the choice of $y\in A_0$ write $\tau_{A_0}$ for $\tau_y$ where $y\in A_0$.
Since $\mathbb{P}hi_{A_0} = \mathbb{P}hi$ we have that $\lambda^{-1}(A_0+X_*) = (G,t_0Z_G^\circ)$ for some $t_0\in Z_G$.
Since $t_0$ is central in $G$, for any $(L,tZ^\circ)\in \mathscr F_T$, we also have $(L,t_0tZ^\circ)\in \mathscr F_T$ (and this procedure is independent of the choice of coset representative of $t_0Z_G^\circ$ since $Z_G^\circ\subseteq Z^\circ$).
\begin{lemma}
\label{lem:translation}
Let $(L,tZ^\circ)\in \mathscr F_T$.
Then
$$\lambda(L,t_0tZ^\circ) = \tau_{A_0}(\lambda(L,tZ^\circ)).$$
\end{lemma}
\begin{proof}
Since $f_L$ is a group homomorphism we have that $f_L(t_0tZ^\circ) = f_L(t_0Z^\circ)+f_L(tZ^\circ)$.
Let $\pi:V\to \mathbb T$ be the projection map.
By definition of $t_0$ we have that $\pi(y_0) \in f_G(t_0Z_G^\circ)$.
By Remark \ref{rmk:cs} we have that $\pi(y_0)\in f_L(t_0Z^\circ)$.
Thus $f_L(t_0tZ^\circ) = \pi(y_0) + f_L(tZ^\circ)$ and so the lift of $f_L(t_0tZ^\circ)$ to $\mathscr A/X_*$ is $\tau_{A_0}(\lambda(L,tZ^\circ))$.
\end{proof}
\nomenclature{$\mf L_{x_0}$}{}
Now fix a hyperspecial point $x_0\in \mathcal A$ and let
$$\mf L_{x_0}:\{c\subseteq \mathcal A\} \to \mathscr F_T$$
be the composition $\mf L\circ \kappa_{x_0}^{-1}$.
\begin{lemma}
Let $x_0$, $x_0'$ be two hyperspecial points in $\mathcal A$ and $c\subseteq \mathcal A$.
Let $\mf L_{x_0}(c) = (L,tZ_{L}^\circ)$ and $\mf L_{x_0'}(c) = (L',t'Z_{L'}^\circ)$.
Then $L = L'$.
\end{lemma}
\begin{proof}
Let $v = x_0-x_0'$.
For any $y\in \mathcal A$ we have
$$\kappa_{x_0}^{-1}(y)+v = \kappa_{x_0'}^{-1}(y).$$
Therefore we have that
$$\mathcal A(\kappa_{x_0}^{-1}(c)) + v = \mathcal A(\kappa_{x_0'}^{-1}(c)).$$
But since $x_0,x_0'$ are both hyperspecial we have that $\alpha(v)\in \mathbb{Z}$ for all $\alpha\in \mathbb{P}hi$.
Therefore
$$\mathbb{P}hi_{\mathcal A(\kappa_{x_0}^{-1}(c))} = \mathbb{P}hi_{\mathcal A(\kappa_{x_0'}^{-1}(c))}$$
and so $L=L'$ as required.
\end{proof}
For $c\subseteq \mathcal A$ write $L_c$ for $\mathrm{pr}_1\circ\mf L_{x_0}(c)$.
By the above lemma $\mathrm{pr}_1\circ\mf L_{x_0}$ is independent of the choice of $x_0$ which is why we omit it from the notation for $L_c$.
\nomenclature{$\Lambda_c^{\barF_q}$}{}
Since $\bfL_c(\barF_q)$ and $L_c$ have the same root data, by Lemma \ref{lem:pom}, there is an isomorphism of partial orders
$$\Lambda_c^{\barF_q}:\mathcal N_o^{\bfL_c}(\barF_q)\to \mathcal N_o^{L_c}(\mathbb{C}).$$
Moreover for $c\subseteq \mathcal A, \mathbb{O}\in \mathcal N_o^{\bfL_c}(\barF_q)$
\begin{equation}
\label{eq:sat}
\Lambda_{x_0}^{\barF_q}\circ j_{c,o}(\mathbb{O}) = G. \Lambda_c^{\barF_q}(\mathbb{O})
\end{equation}
since saturation of nilpotent orbits can be computed purely in terms of the weighted Dynkin diagram.
\paragraph{A Parameterisation of Unramified Nilpotent Orbits}
\begin{theorem}
\label{thm:debackbij}
\nomenclature{$\Gamma_{x_0}$}{}
\nomenclature{$\widetilde\Gamma_{x_0}$}{}
Fix a hyperspecial point $x_0\in \mathcal A$.
The map
$$\Gamma_{x_0}:I_{o,d,\mathcal A}^K\to I_{o,d,T}^\mathbb{C}, \quad (c,\mathbb{O})\mapsto (\mf L_{x_0}(c),\Lambda_c^{\barF_q}(\mathbb{O}))$$
induces a bijection
\begin{equation}
\widetilde\Gamma_{x_0}:I_{o,d,\mathcal A}^K/\sim_{\mathcal A} \to I_{o,d,T}^\mathbb{C}/W.
\end{equation}
\end{theorem}
\begin{proof}
The map $I_{o,d,\mathcal A}^K\to I_{o,d,T}^\mathbb{C}$ is clearly a surjection.
Let
$$(c_1,\mathbb{O}_1),(c_2,\mathbb{O}_2)\in I_{o,d,\mathcal A}^K$$
and suppose that $(c_1,\mathbb{O}_1) \sim_{\mathcal A} (c_2,\mathbb{O}_2)$.
Then there exists a $w\in \widetilde W$ such that
$$\mathcal A(c_1) = \mathcal A(wc_2) \text{ and } \mathbb{O}_1 = j_{c_1,wc_2}(w.\mathbb{O}_2).$$
By corollary \ref{cor:mfL}, we have that $\mf L_{x_0}(c_1) = \dot w.\mf L_{x_0}(c_2)$.
Moreover
$$\Lambda_{c_1}^{\barF_q}(\mathbb{O}_1) = \Lambda_{c_1}^{\barF_q}(j_{c_1,wc_2}(w.\mathbb{O}_2)) = \Lambda_{wc_2}^{\barF_q}(w.\mathbb{O}_2) = \dot w\Lambda_{c_2}^{\barF_q}(\mathbb{O}_2).$$
Thus
$$(\mf L_{x_0}(c_1),\Lambda_c^{\barF_q}(\mathbb{O}_1)) = \dot w.(\mf L_{x_0}(c_2),\Lambda_c^{\barF_q}(\mathbb{O}_2))$$
and so $I_{o,d,\mathcal A}^K \to I_{o,d,T}^\mathbb{C}/W$ descends to a map
$$I_{o,d,\mathcal A}^K/\sim_{\mathcal A} \to I_{o,d,T}^\mathbb{C}/W.$$
To see that this map is injective suppose that $(c_1,\mathbb{O}_1),(c_2,\mathbb{O}_2)\in I_{o,d,\mathcal A}^K$ are such that there is a $w_0\in W$ with
$$(\mf L_{x_0}(c_1),\Lambda_c^{\barF_q}(\mathbb{O}_1)) = w_0.(\mf L_{x_0}(c_2),\Lambda_c^{\barF_q}(\mathbb{O}_2)).$$
Since $\mf L_{x_0}(c_1) = w_0.\mf L_{x_0}(c_2) = \mf L_{x_0}(w_0c_2)$ we have that
$$\mathcal A(c_1) + X_* = \mathcal A(w_0c_2)+X_*$$
and so there is a $w\in \widetilde W$ such that $\dot w = w_0$, and $\mathcal A(c_1) = \mathcal A(wc_2)$.
Moreover
$$\Lambda_{c_1}^{\barF_q}(\mathbb{O}_1) = \dot w\Lambda_{c_2}^{\barF_q}(\mathbb{O}_2) = \Lambda_{wc_2}^{\barF_q}(w.\mathbb{O}_2) = \Lambda_{c_1}^{\barF_q}(j_{c_1,wc_2}(w.\mathbb{O}_2))$$
and so $\mathbb{O}_1 = j_{c_1,wc_2}(w.\mathbb{O}_2)$.
Thus $(c_1,\mathbb{O}_1)\sim_{\mathcal A}(c_2,\mathbb{O}_2)$ as required.
\end{proof}
\nomenclature{$\mathrm{pr}_1$}{}
\nomenclature{$\theta_{x_0,\bfT_K}$}{}
Let $\mathrm{pr}_1:\mathcal N_{o,c}\to \mathcal N_o$ be the projection onto the first factor.
Let $\theta_{x_0,\bfT_K}:\mathcal N_o(K)\to \mathcal N_{o,c}$ be the composition
\begin{equation}
\begin{tikzcd}
\mathcal N_o(K) \arrow[r,"\sim"] & I_{o,d,\mathcal A}^K/\sim_{\mathcal A} \arrow[r,"\widetilde\Gamma_{x_0}"] & I_{o,d,T}^\mathbb{C}/W \arrow[r,"\sim"] & \mathcal N_{o,c}.
\end{tikzcd}
\end{equation}
where the first isomorphism is the inverse of $\mathcal L:I_{o,d,\mathcal A}^K/\sim_{\mathcal A}\to \mathcal N_o(K)$ (see Section \ref{par:abc}) and the third isomorphism is the composition of the two isomorphisms in Equation \ref{eq:Wequivtoccl}.
\begin{theorem}
\label{thm:unramifiedparam}
The map $\theta_{x_0,\bfT_K}:\mathcal N_o(K) \to \mathcal N_{o,c}$ is a bijection and for all $\mathbb{O}\in \mathcal N_o(K)$
$$\Lambda_{\bfT_K}^{\bar k}(\mathcal N_o(\bar k/K)(\mathbb{O})) = \mathrm{pr}_1(\theta_{x_0,\bfT_K}(\mathbb{O})).$$
\end{theorem}
\begin{proof}
Since all the maps involved are bijections, $\theta_{x_0,\bfT_K}$ is also a bijection.
Let $\mathbb{O}\in \mathcal N_o(K)$ and $(c,\mathbb{O}')\in I_{o,d,\mathcal A}^K(\mathbb{O})$.
By theorem \ref{thm:lift}
$$\Lambda_{\bfT_K}^{\bar k}\circ\mathcal N_o(\bar k/K)(\mathbb{O}) = \Lambda_{x_0}^{\barF_q}\circ j_{c,o}(\mathbb{O}').$$
By equation \ref{eq:sat}
$$\Lambda_{x_0}^{\barF_q}\circ j_{c,o}(\mathbb{O}') = G.\Lambda_c^{\barF_q}(\mathbb{O}').$$
But
$$ G.\Lambda_c^{\barF_q}(\mathbb{O}') = \mathrm{pr}_1(\theta_{x_0,\bfT_K}(\mathbb{O}))$$
as required.
\end{proof}
\nomenclature{$\mathscr H$}{}
\nomenclature{$c(x)$}{}
Let $\mathscr H$ denote the set of hyperspecial faces of $\mathcal B(\bfG,K)$ (these are the faces of $\mathcal B(\bfG,K)$ which contain hyperspecial points).
For a point $x\in \mathcal B(\bfG,K)$ write $c(x)$ for the face containing $x$.
\begin{lemma}
Suppose $x_0,x_0'$ are hyperspecial points of $\mathcal A$ such that $c(x_0) = c(x_0')$.
Then $\theta_{x_0,\bfT_K} = \theta_{x_0',\bfT_K}$.
\end{lemma}
\begin{proof}
It suffices to show that $\mf L_{x_0} = \mf L_{x_0'}$.
We have that $\mf L_{x_0} = \mf L\circ \kappa_{x_0}^{-1}$ and $\mf L_{x_0} = \mf L\circ \kappa_{x_0'}^{-1}$.
But if $c(x_0) = c(x_0')$ then the isomorphism of chamber complexes induced by $\kappa_{x_0}$ and $\kappa_{x_0'}$ are the same.
\end{proof}
In the proof of the above lemma we showed that $\mf L_{x_0}$ only depends on the face of $x_0$.
Thus for $h\in \mathscr H\cap \mathcal A$ we will write $\mf L_h$ for $\mf L_{x_0}$ where $x_0$ is any point in $h$.
Similarly we write $\widetilde\Gamma_h$ for $\widetilde\Gamma_{x_0}$ where $x_0$ is any point in $h$.
Note that when $\bfG_K$ is semisimple and $x_0$ is hyperspecial, $c(x_0) = x_0$ and so there is no content to the lemma.
\begin{lemma}
Let $c$ be a face of $\mathcal B(\bfG, K)$, $g\in \bfG(K)$ and suppose $c,gc \subseteq\mathcal A$.
Then there exists an $n\in \mathscr N$ (the normaliser of $\bfT_K$), such that $gc = nc$.
\end{lemma}
\begin{proof}
Let $c_1$ be a chamber of $\mathcal A$ with $c$ as a face.
Then $gc_1$ has $gc$ as a face.
Let $c_2$ be a chamber of $\mathcal A$ containing $gc$ as a face.
Then $gc_1$ and $c_2$ both share $gc$ as a face and so there exists a $g_1\in \bfP_{gc}(\mf O)$ such that $g_1gc_1 = c_2$ (and necessarily $g_1gc = gc$).
Let $g_2 = g_1g$.
Then $g_2\mathcal A$ and $\mathcal A$ contain $c_2$.
Since the subgroup of $\bfG(K)$ preserving types (in the sense of \cite[Section 5.5]{garrett}) acts transitively on pairs of apartments and chambers of $\mathcal B(\bfG,K)$ there exists a $g_3$ such that $g_3g_2\mathcal A = \mathcal A$ and $g_3c_2 = c_2$ where $g_3$ fixes $c_2$ point-wise.
In particular $g_3gc = gc$.
Since $g_3g_2 \mathcal A = \mathcal A$ we have that $g_3g_2 \in \mathscr N$.
Let $n = g_3g_2$.
Then $nc = g_3g_2c = g_3 g_1 g c = g c$ as required.
\end{proof}
\begin{lemma}
\label{lem:hyperspecialorbit}
Suppose $x_0,x_0'$ are hyperspecial points of $\mathcal A$ and there exists a $g\in \bfG(K)$ such that $c(x_0') = gc(x_0)$.
Then $\theta_{x_0,\bfT_K} = \theta_{x_0',\bfT_K}$.
\end{lemma}
\begin{proof}
By the previous lemma we may assume that $g\in \mathscr N$.
Let $h = c(x_0),h'=c(x_0')$.
We need to show that $\widetilde\Gamma_{h} = \widetilde\Gamma_{h'}$.
Let $(c,\mathbb{O})\in I^K_{o,d,\mathcal A}$.
It is sufficient to show that $\mf L_h(c) = \mf L_{h'}(c)$.
But $\mf L_{h'} = \mf L_{gx_0}$ since $c(gx_0) = gc(x_0) = h'$.
Finally, since $x_0$ is hyperspecial, $gx_0 = x_0+v$ for some $v\in X_*$ (to see this, note that the orbit of $\mathscr N$ on $x_0$ is $\kappa_{x_0}(\widetilde W.0) = \kappa_{x_0}(X_*)$) and by Corollary \ref{cor:mfL} we have $\mf L_{x_0+v} = \mf L_{x_0}$.
Thus $\mf L_{h} = \mf L_{h'}$ as required.
\end{proof}
\nomenclature{$\mathscr O$}{}
\nomenclature{$\theta_{\mathscr O,\bfT_K}$}{}
\nomenclature{$\beta_{\bfT_K,\bfT_K'}$}{}
Let $\mathscr O \in \bfG(K)\backslash \mathscr H$ be a $\bfG(K)$ orbit on $\mathscr H$.
By the previous lemma, the map $\theta_{x_0,\bfT_K}$ is independent of the choice of $x_0 \in h$ for $h\in \mathscr O\cap \mathcal A$.
Thus we write $\theta_{\mathscr O,\bfT_K}$ for $\theta_{x_0,\bfT_K}$ where $x_0\in h$ for some $h\in \mathscr O\cap \mathcal A$.
Now suppose $\bfT_K,\bfT_K'$ are two maximal $K$-split tori of $\bfG_K$.
Let $g\in \bfG(K)$ be such that $\bfT_K' = g\bfT_K g^{-1}$.
This induces an isomorphism of root data
\begin{equation}
\label{eq:rd}
\mathcal R(\bfG_K,\bfT_K)\xrightarrow{\sim}\mathcal R(\bfG_K,\bfT_K'),
\end{equation}
an isomorphism of complex groups
\begin{equation}
\label{eq:cg}
(G,T) \xrightarrow{\sim} (G',T'),
\end{equation}
and an isomorphism
\begin{equation}
\label{eq:noc}
\beta_{\bfT_K,\bfT_K'}:\mathcal N_{o,c}\xrightarrow{\sim }\mathcal N_{o,c}'.
\end{equation}
The isomorphisms in Equations \ref{eq:rd}, \ref{eq:cg} differ by conjugation by an element of $W$ for different choices of $g$ with $\bfT_K' = g\bfT_K g^{-1}$ and so the isomorphism in Equation \ref{eq:noc} is independent of choice of $g$.
\begin{theorem}
\label{thm:naturality}
Fix an orbit $\mathscr O\in \bfG(K)\backslash \mathscr H$.
Let $\bfT_K,\bfT_K'$ be two maximal $K$-split tori of $\bfG_K$.
Then the diagram
\begin{equation}
\begin{tikzcd}[row sep = tiny]
& \mathcal N_{o,c} \arrow[dd,"\beta_{\bfT_K,\bfT_K'}"] \\
\mathcal N_o(K) \arrow[ru,"\theta_{\mathscr O,\bfT_K}"] \arrow[rd,swap,"\theta_{\mathscr O,\bfT_K'}"] & \\
& \mathcal N_{o,c}'
\end{tikzcd}
\end{equation}
commutes.
\end{theorem}
\begin{proof}
Let $g\in \bfG(K)$ be such that $\bfT_K' = g \bfT_K g^{-1}$ and $\mathcal A = \mathcal A(\bfT_K,K),\mathcal A' = \mathcal A(\bfT_K',K)$.
Then $\mathcal A = g\mathcal A'$.
Let $h \in \mathscr O \cap \mathcal A$ and pick $x_0\in h$.
Then $gx_0\in gh \in \mathscr O\cap \mathcal A'$.
Let
$$\mathcal R(\bfG_K,\bfT_K) = (X^*,\mathbb{P}hi,X_*,\mathbb{P}hi^\vee), \quad \mathcal R(\bfG_K,\bfT_K') = (X'^*,\mathbb{P}hi',X'_*,\mathbb{P}hi'^\vee)$$
and $(\zeta^*,\zeta_*):(X^*,X_*)\to (X'^*,X'_*)$ be the isomorphisms induced by $\mathbb{A}d(g)$ (these isomorphisms restrict to isomorphisms $\mathbb{P}hi\to \mathbb{P}hi',\mathbb{P}hi^\vee\to \mathbb{P}hi'^\vee$).
Let $V=X_*\otimes_\mathbb{Z}\mathbb{R}, V'=X_*'\otimes_\mathbb{Z}\mathbb{R}$ and write $\zeta_{*,\mathbb{R}}:V\to V'$ for $\zeta_*\otimes_\mathbb{Z}\mathbb{R}$.
Write $\eta:(G,T)\to (G',T')$ for the isomorphism of complex algebraic groups induced from $(\zeta^*,\zeta_*)$.
We claim that
\begin{equation}
\begin{tikzcd}
\{c\text{ a face of } V\} \arrow[r,"\mf L"] \arrow[d,"\zeta_{*,\mathbb{R}}"] & \mathscr F_T \arrow[d,"\eta"] \\
\{c\text{ a face of } V'\} \arrow[r,"\mf L'"] & \mathscr F_{T'}.
\end{tikzcd}
\end{equation}
commutes.
Note that in this diagram, by $\zeta_{*,\mathbb{R}}$ we mean the map on chamber complexes induced by $\zeta_{*,\mathbb{R}}$, and by $\eta$ we mean the map $\mathscr F_T\to \mathscr F_{T'}$ induced by $\eta$.
We omit the details since this is a slightly tedious, but standard check.
We remark simply that the isomorphism in Equation \ref{eq:canon} is equivariant with respect to $\eta$ on the left, and $\zeta_{*,\mathbb{R}}$ (descended to the compact torus) on the right and the commutativity of the square follows essentially from this.
We have also that the first diagram below commutes and so the second one does too.
\begin{equation}
\begin{tikzcd}
V \arrow[d,"\zeta_{*,\mathbb{R}}"] \arrow[r,"\kappa_{x_0}"] & \mathcal A \arrow[d,"x\mapsto gx"] \\
V' \arrow[r,"\kappa_{gx_0}"] & \mathcal A',
\end{tikzcd}
\quad
\begin{tikzcd}
\mathcal A \arrow[r,"\mf L_{x_0}"] \arrow[d,"x\mapsto gx"] & \mathscr F_T \arrow[d,"\eta"] \\
\mathcal A' \arrow[r,"\mf L'_{gx_0}"] & \mathscr F_{T'}.
\end{tikzcd}
\end{equation}
Now for $c\subseteq \mathcal A'$, the root system of $\bfL_c(\barF_q)$ is $\bar \mathbb{P}hi_c$, while the root system for $L_c$ is $\mathbb{P}hi_c$.
Moreover $\mathbb{A}d(g)$ induces an isomorphism between $\bfL_c(\barF_q)$ and $\bfL_{gc}(\barF_q)$ which identifies $\bar \mathbb{P}hi_c$ with $\bar \mathbb{P}hi'_{gc}$ according to the identification induced by $\zeta^*$.
Finally $\zeta^*$ restricts to an isomorphism between $\mathbb{P}hi_c$, the root system of $L_c$, and $\mathbb{P}hi'_{gc}$, the root system of $L'_{gc}$.
In particular the first diagram below commutes and so the second one does too
\begin{equation}
\begin{tikzcd}
\bar \mathbb{P}hi_{c} \arrow[r] \arrow[d] & \bar \mathbb{P}hi'_{gc} \arrow[d] \\
\mathbb{P}hi_{c} \arrow[r] & \mathbb{P}hi'_{gc},
\end{tikzcd}
\quad
\begin{tikzcd}
\mathcal N_o^{\bfL_c}(\barF_q) \arrow[r,"\mathbb{A}d(g)"] \arrow[d,"\Lambda^{\barF_q}_c"] & \mathcal N_o^{\bfL_{gc}}(\barF_q) \arrow[d,"\Lambda^{\barF_q}_{gc}"] \\
\mathcal N_o^{L_c}(\mathbb{C}) \arrow[r,"\eta"] & \mathcal N_o^{L'_{gc}}(\mathbb{C}).
\end{tikzcd}
\end{equation}
It follows that
\begin{equation}
\label{eq:2nd}
\begin{tikzcd}
I_{o,d,\mathcal A}^K \arrow[r,"\Gamma_{x_0}"] \arrow[d,"\mathbb{A}d(g)"] & I_{o,d,T}^\mathbb{C} \arrow[d,"\eta"] \\
I_{o,d,\mathcal A'}^K \arrow[r,"\Gamma'_{gx_0}"] & I_{o,d,T'}^\mathbb{C}
\end{tikzcd}
\end{equation}
also commutes.
But if $(c',\mathbb{O}')\in I_{o,d,\mathcal A'}^K$ is equal to $g.(c,\mathbb{O})$ for some $(c,\mathbb{O})\in I_{o,d,\mathcal A}^K$ then $(c',\mathbb{O}')\sim_K (c,\mathbb{O})$.
Thus
\begin{equation}
\label{eq:1st}
\begin{tikzcd}[row sep=tiny]
& I_{o,d,\mathcal A}^K/\sim_{\mathcal A} \arrow[dd,"\mathbb{A}d(g)"]\\
\mathcal N_o(K) \arrow[<-,ur,"\sim"] \arrow[<-,dr,"\sim"] & \\
& I_{o,d,\mathcal A'}^K/\sim_{\mathcal A'}.
\end{tikzcd}
\end{equation}
commutes.
Finally
\begin{equation}
\label{eq:3rd}
\begin{tikzcd}
I_{o,d,T}^\mathbb{C} \arrow[r] \arrow[d,"\eta"] & \mathcal N_{o,c} \arrow[d,"\beta_{\bfT_K,\bfT_K'}"] \\
I_{o,d,T'}^\mathbb{C} \arrow[r] & \mathcal N'_{o,c}
\end{tikzcd}
\end{equation}
trivially commutes.
Stringing together the diagrams in Equations \ref{eq:1st}, \ref{eq:2nd} and \ref{eq:3rd} yields the desired commutative triangle.
\end{proof}
\paragraph{Equivariance of the parameterisation}
\label{par:equiv}
We now finally investigate how the parameterisations corresponding to different $\mathscr O\in \bfG(K)\backslash \mathscr H$ relate.
Fix a $\bfT_K$, let $\mathcal A = \mathcal A(\bfT_K,K)$ and set $\mathscr H_{\mathcal A} = \mathscr H\cap \mathcal A$.
Let us define an action of $Z_G/Z_G^\circ$ on $\bfG(K)\backslash \mathscr H$.
Let $tZ_G^\circ\in Z_G/Z_G^\circ$ and let $A_0+X_* = \lambda(G,tZ_G^\circ)$.
Let $c\in\mathscr H\cap \mathcal A$.
Note that hyperspecial faces are in fact admissible classes i.e. $\mathcal A(c,\mathcal A) = c$ for hyperspecial faces $c$ (since $\mathbb{P}hi_y$ is constant on $\mathcal A(c,\mathcal A)$).
Thus by the discussion preceding Lemma \ref{lem:translation}, for $y\in A_0+X_*$, the translation $\tau_y:\mathscr A/X_*\to \mathscr A/X_*$ restricts to a map $\mathscr H_{\mathcal A}/X_*\to \mathscr H_{\mathcal A}/X_*$ which does not depend on the choice of $y$.
\nomenclature{$\tau_{tZ_G^\circ}$}{}
\nomenclature{$\mathscr H_{\mathcal A}$}{}
Denote the resulting map
$$\tau_{tZ_G^\circ}:\mathscr H_{\mathcal A}/X_*\to \mathscr H_{\mathcal A}/X_*.$$
We now define the action of $tZ_G^\circ$ on $\bfG(K)\backslash\mathscr H$ as follows.
Let $\mathscr O\in \bfG(K)\backslash \mathscr H$.
Let $c\in \mathscr O\cap \mathcal A$.
We saw in the proof of Lemma \ref{lem:hyperspecialorbit} that $\mathscr O \cap \mathcal A = c+X_*$.
Let $c'+X_* = \tau_{tZ_G^\circ}(c+X_*)$ and $\mathscr O' = \bfG(K).(c')$.
This is well defined because $\bfG(K).c' = c'+X_*$.
Finally we set
$$(tZ_G^\circ). \mathscr O := \mathscr O'.$$
\begin{proposition}
\label{prop:staction}
The map $(tZ_G^\circ):\bfG(K)\backslash \mathscr H \to \bfG(K)\backslash \mathscr H$ defines a simply transitive action of $Z_G/Z_G^\circ$ on $\bfG(K)\backslash \mathscr H$.
\end{proposition}
\begin{proof}
The fact that this defines an action follows immediately from the fact that the map in Equation \ref{eq:canon} is a group homomorphism.
To see that the action is simply transitive, first note that the map
$$\mathscr H_{\mathcal A}/X_*\to \bfG(K)\backslash \mathscr H, \quad c+X_*\mapsto \bfG(K).c$$
is a well defined bijection (it is injective since $\bfG(K).c \cap \mathcal A = c+X_*$ and surjective since $\bfG(K)$ acts transitively on apartments).
Thus it suffices to show that $Z_G/Z_G^\circ$ acts simply transitively on $\mathscr H_{\mathcal A}/X_*$.
To see that the action is transitive let $c+X_*,c'+X_*\in \mathscr H_{\mathcal A}$.
Let $x_0\in c+X_*,x_0'\in c'+X_*$ and $y=x_0-x_0'$.
Let $A_0 = \mathbb{P}si_{y}$ and $(G,tZ_G^\circ) = \lambda^{-1}(A_0+X_*)$.
Then $\tau_y = \tau_{tZ_G^\circ}$ and hence $\tau_{tZ_G^\circ}(c+X_*) = c'+X_*$.
Finally let $c+X_*\in \mathscr H_{\mathcal A},tZ_G^\circ\in Z_G/Z_G^\circ$ and suppose $\tau_{tZ_G^\circ}(c+X_*) = c+X_*$.
Let $A_0+X_* = \lambda(G,tZ_G^\circ)$ and $y\in A_0+X_*$.
By definition $\tau_{tZ_G^\circ} = \tau_{y}$.
Let $x_0\in c$ and $A_0'$ be the vanishing set of $\mathbb{P}hi$ in $V$.
Then $c+X_* = \kappa_{x_0}(A_0'+X_*)$ and so since $\tau_y(c+X_*) = c+X_*$ it follows that $y\in A_0' + X_*$.
Thus $A_0+X_* = A_0'+X_*$.
But $\lambda^{-1}(A_0'+X_*) = (G,Z_G^\circ)$ and so $tZ_G^\circ = Z_G^\circ$.
Thus the action is simply transitive.
\end{proof}
There is also an action of $Z_G/Z_G^\circ$ on $\mathcal N_{o,c}$ given by
$$tZ_G^\circ:(n,sC_G^\circ(n))/G\mapsto (n,tsC_G^\circ(n))/G.$$
This is well defined since $t\in Z_G$ and $Z_G^\circ \subseteq C_G^\circ(n)$.
We will write this action as $(\mathbb{O},C)\mapsto (\mathbb{O},tC)$ when we view $\mathcal N_{o,c}$ as pairs $(\mathbb{O},C)$.
\begin{proposition}
\label{prop:equivariance}
Let $\mathscr O\in \bfG(K)\backslash \mathscr H$ and $tZ_G^\circ\in Z_G/Z_G^\circ$.
Then
$$\theta_{(tZ_G^\circ).\mathscr O,\bfT_K} = (tZ_G^\circ).\theta_{\mathscr O,\bfT_K}.$$
\end{proposition}
\begin{proof}
This is an immediate consequence of the definitions and Lemma \ref{lem:translation}.
\end{proof}
\paragraph{Compatibility with the Bala-Carter parameterisation}
\begin{proposition}
\label{prop:trivccl}
Let $\mathbb{O} \in \mathcal N_o$ and let $[(J,J')]_W\in \mb{BC}(\mathbb{D}elta)$ be the Bala--Carter parameter for $\mathbb{O}$ with respect to $\mathbb{D}elta$.
Then
$$\theta_{x_0,\bfT_K}\circ\mb{ABC}_{x_0,\bfT_K}([(J,J')]_{\widetilde W}) = (\mathbb{O},1).$$
\end{proposition}
\begin{proof}
For $I\in \mathbf P(\widetilde\mathbb{D}elta)$ let $c(I)$ denote the face of $c_0$ with $\ms{Type}$ equal to $I$.
Let $\mathbb{O}\in \mathcal N_o$ and let $[(J,J')]_W$ be the Bala--Carter parameter for $\mathbb{O}$.
Then $\mathbb{O}$ intersects non-trivially with $\mf l_{c(J)}$ and the distinguished orbit $\mathbb{O}_{J'}$ of $\mf l_{c(J)}$ parameterised by $J'$ lies in this intersection.
Since $L_{c(J)}$ is a Levi it is equal to $C_{G}^\circ(Z_J^\circ)$ where $Z_J$ is the center of $L_{c(J)}$.
Thus $(L_{c(J)},Z_J^\circ,\mathbb{O}_{J'}) \in I_{o,d,T}^\mathbb{C}$ and its image in $\mathcal N_{o,c}$ is $(\mathbb{O},1)$.
We also have that $\lambda(L_{c(J)},Z^\circ_J) = [A]$ where $A$ is the vanishing set of $J$, and $(\Lambda_{c(J)}^{\barF_q})^{-1}(\mathbb{O}_{J'})$ is the distinguished nilpotent orbit of $\bfl_{c(J)}$ corresponding to $J'$.
It follows that $\theta^{-1}(\mathbb{O},1)$ has affine Bala--Carter parameter $[(J,J')]_{\widetilde W}$ as required.
\end{proof}
This result implies that the diagram below commutes.
\begin{equation}
\begin{tikzcd}
\mb {BC}_{\bfT_K}(\mathbb{D}elta)/\sim_W \arrow[r,"i"] \arrow[d,"\sim"] & \mb {ABC}_{\bfT_K}(\widetilde\mathbb{D}elta)/\sim_{\widetilde W} \arrow[d,"\sim"] \arrow[r] & \mb {BC}_{\bfT_K}(\mathbb{D}elta)/\sim_W \arrow[d,"\sim"]\\
\mathcal N_o(\bar k) \arrow[r,"\mathbb{O}\mapsto \theta_{x_0,\bfT_K}^{-1}(\Lambda_{\bfT_K}^{\bar k}(\mathbb{O}){,}1)"] \arrow[rr,bend right=15,"\id",swap] & \mathcal N_o(K) \arrow[r,"\mathcal N_o(\bar k/K)"] & \mathcal N_o(\bar k).
\end{tikzcd}
\end{equation}
Here the map $i$ is the map induced by the inclusion map
\begin{equation}
\mb {BC}_{\bfT_K}(\mathbb{D}elta) \to \mb {ABC}_{\bfT_K}(\widetilde\mathbb{D}elta).
\end{equation}
We remark that this composition $\theta_{x_0,\bfT_K}\circ \mb{ABC}_{x_0,\bfT_K}$ is independent of $x_0$ as can be easily seen from the construction of the two maps (indeed the composition can be defined entirely by referring only to the vector space $V$ and not the affine space $\mathcal A$).
\paragraph{Example: \texorpdfstring{$G_2$}{G2}}
Consider the case when $\textbf G$ is $K$-split semisimple of type $G_2$ (there is only one isogeny type).
Let $\mathbb{D}elta=\set{\alpha_1,\alpha_2}$ denote the simple roots where $\alpha_2$ is the short root and let $\alpha_0$ be the affine simple root in $\widetilde\mathbb{D}elta$.
The extended Dynkin diagram of $G_2$ is $\dynkin[extended,labels*={\alpha_0,\alpha_1,\alpha_2}]G2$.
The set $\mb {ABC}_{\bfT_K}(\widetilde\mathbb{D}elta)$ consists of the pairs
\begin{align}
(\emptyset,\emptyset), (\set{\alpha_0},\emptyset), (\set{\alpha_1},\emptyset), (\set{\alpha_2},\emptyset), (\set{\alpha_1,\alpha_2},\emptyset), \nonumber \\
(\set{\alpha_1,\alpha_2},\set{\alpha_2}), (\set{\alpha_0,\alpha_2},\emptyset), (\set{\alpha_0,\alpha_1},\emptyset).
\end{align}
Under the equivalence relation $\sim_{\widetilde W}$, all equivalence classes are singletons except $(\set{\alpha_0},\emptyset)\sim_{\widetilde W} (\set{\alpha_1},\emptyset)$ so $\mathcal N_o(K)$ has size 7.
We also obtain 7 nilpotent orbits using the parameterisation in terms of $\mathcal N_{o,c}$.
The only nilpotent orbit with non-trivial $G_\mathbb{C}$-equivariant fundamental group is $G_2(a_1)$.
In this case $A(G_2(a_1)) = S_3$ which has 3 conjugacy classes which we denote by representatives $1,(12),(123)$.
Using theorem \ref{thm:unramifiedparam}, the unramified orbits can be parameterised by
\begin{align}
(0,1), (A_1,1), (\widetilde A_1,1), (G_2(a_1),1), (G_2(a_1),(12)), (G_2(a_1),(123)), (G_2,1)\in \mathcal N_{o,c}.
\end{align}
We thus have two parameterisations
$$\mb{ABC}_{x_0,\bfT_K}:\mb{ABC}_{\bfT_K}(\widetilde\mathbb{D}elta)\to \mathcal N_o(K), \theta_{x_0,\bfT_K}:\mathcal N_o(K)\to \mathcal N_{o,c}.$$
We now demonstrate how they match up i.e. we describe the composition
$$\theta_{x_0,\bfT_K}\circ \mb{ABC}_{x_0,\bfT_K}:\mb{ABC}_{\bfT_K}(\widetilde\mathbb{D}elta)\xrightarrow{\sim}\mathcal N_{o,c}.$$
We already have from Proposition \ref{prop:trivccl} the following matchings
\begin{align}
(\emptyset,\emptyset) &\leftrightarrow (0,1), \nonumber\\
(\set{\alpha_1},\emptyset) &\leftrightarrow (A_1,1) \nonumber\\
(\set{\alpha_2},\emptyset) &\leftrightarrow (\widetilde A_1,1) \nonumber\\
(\set{\alpha_1,\alpha_2},\set{\alpha_1}) &\leftrightarrow (G_2(a_1),1) \nonumber\\
(\set{\alpha_1,\alpha_2},\emptyset) &\leftrightarrow (G_2,1)
\end{align}
It remains to determine how to match up $(G_2(a_1),(12))$ and $(G_2(a_1),(123))$ together with $(\set{\alpha_0,\alpha_1},\emptyset)$ and $(\set{\alpha_0,\alpha_2},\emptyset)$.
For this we must look at the map $\lambda$.
Figure \ref{fig:apt} is a diagram of the chamber complex on $V$ together with the coroot lattice (in grey), the coroots of the simple roots and minus the highest root (in blue), and a fundamental domain for the topological torus $V/X_*$ (in red).
\begin{figure}
\caption{The chamber complex for $V$ is displayed alongside the coroot lattice (in grey), the coroots $\check \alpha_0$,$\check \alpha_1$,$\check \alpha_2$ (in blue), and the fundamental domain for the torus $\mathbb T$ (in red).}
\label{fig:apt}
\end{figure}
\begin{figure}
\caption{The colored disks (grey, blue, yellow) are all the lifts of the vanishing set of $\set{\alpha_0,\alpha_1}$.}
\label{fig:alpha01}
\end{figure}
\begin{figure}
\caption{The colored disks (grey, red, cyan, green) are all the lifts of the vanishing set of $\set{\alpha_0,\alpha_2}$.}
\label{fig:alpha02}
\end{figure}
Figures \ref{fig:alpha01} and \ref{fig:alpha02} respectively show the vanishing sets of $\set{\alpha_0,\alpha_1}$ and $\set{\alpha_0,\alpha_2}$ in $\mathbb T$ (recall the definition from section \ref{par:basicnotation2} (2)) lifted to $V$.
Having the same color indicates having the same image in $\mathbb T$.
Note that the yellow and blue lattices are $W$ conjugate, and the red, cyan and green lattices are also $W$ conjugate.
Thus respectively they give rise to the same element of $\mathcal N_{o,c}$.
It suffices thus to focus on the blue and green lattices respectively.
Clearly the blue lattice gives rise to a conjugacy class of order $3$, while the green one gives rise to a conjugacy class of order $2$.
The remaining matchings are thus
\begin{align}
(\set{\alpha_0,\alpha_1},\emptyset) &\leftrightarrow (G_2(a_1),(123)), \nonumber\\
(\set{\alpha_0,\alpha_2},\emptyset) &\leftrightarrow (G_2(a_1),(12)).
\end{align}
\subsection{Equivalence Classes of Unramified Nilpotent Orbits}
\paragraph{A Duality Map for Unramified Nilpotent Orbits}
\label{sec:duality}
\nomenclature{$d_S$}{}
\nomenclature{$d_{S,\mathscr O,\bfT_K}$}{}
\nomenclature{$d_{S,x_0,\bfT_K}$}{}
\nomenclature{$\mathcal N_o^\vee$}{}
\nomenclature{$\text{Ind}_{\bfl_c}^{\mf g^\vee}$}{}
\nomenclature{$d_{\bfl_c}$}{}
\nomenclature{$j_{\dot W_c}^WE$}{}
Fix a torus $\bfT_K$ and corresponding Langlands dual group $\bfG^\vee$ as in Section \ref{par:basicnotation1}.
Write $\mathcal N_o^\vee$ for $\mathcal N_o^{\bfG^\vee}(\mathbb{C})$ and $\mathcal A$ for $\mathcal A(\bfT_K,K)$.
In \cite[Section 6]{sommersduality}, Sommers defines a duality map
\begin{equation}
d_S:\mathcal N_{o,c} \to \mathcal N_o^\vee.
\end{equation}
Let $\mathscr O\in \bfG(K)\backslash \mathscr H$ and fix a hyperspecial point $x_0$ in $\mathscr O\cap \mathcal A$ to fix an isomorphism $\mathscr N/\bfT(\mf O^\times)\simeq \widetilde W$.
Define
\begin{equation}
d_{S,\mathscr O,\bfT_K} = d_S \circ \theta_{\mathscr O,\bfT_K}: \mathcal N_o(K) \to \mathcal N_o^\vee.
\end{equation}
The map $d_S$ is natural under isomorphisms of complex reductive algebraic groups and so $d_{S,\mathscr O,\bfT_K}$ is natural in $\bfT_K$.
We wish to give a recipe for computing $d_{S,\mathscr O,\bfT_K}$ in practice.
Let $c\subseteq \mathcal A$ and let $E$ be a special representation of $\dot W_c$.
Recall from \cite[Theorem 10.7]{lusztig} that the representation $j_{\dot W_c}^WE$ obtained through truncated induction corresponds under the Springer correspondence for $\mf g^\vee(\mathbb{C})$ to a nilpotent orbit $\mathbb{O}^\vee\in \mathcal N_o^\vee$ and the trivial local system.
This gives us a map
$$\text{Ind}_{\bfl_c}^{\mf g^\vee}:\mathcal N_{o,sp}^{\bfL_c}(\barF_q) \to \mathcal N_o^\vee$$
(recall from the end of Section \ref{par:basicnotation1} the definition of $\mathcal N_{o,sp}^{\bfL_c}(\barF_q)$).
Let $d_{\bfl_c}$ denote the Lusztig--Spaltenstein duality
$$d_{\bfl_c}:\mathcal N_o^{\bfL_c}(\barF_q)\to \mathcal N_{o,sp}^{\bfL_c}(\barF_q).$$
Define
\begin{equation}
d_{S,x_0,\bfT_K}:I_{o,\mathcal A}^K\to \mathcal N_o^\vee,\quad (c,\mathbb{O}) \mapsto \text{Ind}_{\bfl_c}^{\mf g^\vee}\circ d_{\bfl_c}(\mathbb{O}).
\end{equation}
\begin{proposition}
\label{prop:ds}
The following diagram commutes
\begin{equation}
\begin{tikzcd}[column sep = large]
I_{o,\mathcal A}^K \arrow[r,"d_{S,x_0,\bfT_K}"] \arrow[d,swap,"\mathcal L"] & \mathcal N_o^\vee \\
\mathcal N_o(K) \arrow[ur,swap,"d_{S,\mathscr O,\bfT_K}"]
\end{tikzcd}
\end{equation}
\end{proposition}
\begin{proof}
Let $c'\subseteq c$ be faces of $\mathcal A$.
Then $\bfl_c(\barF_q)$ is the reductive quotient of the parabolic $\bfp_c(\mf O)/\bfu_{c'}(\mf O)$ in $\bfl_{c'}(\barF_q)$.
For $\mathbb{O}\in \mathcal N_o^{\bfL_c}(\barF_q)$, write $\text{Ind}_c^{c'}\mathbb{O}$ for the Lusztig--Spaltenstein induction of $\mathbb{O}$ from $\bfl_{c}(\barF_q)$ to $\bfl_{c'}(\barF_q)$.
Let $\ms {Sat}_c^{c'}$ denote the map from $\mathcal N_o^{\bfL_c}(\barF_q)$ to $\mathcal N_o^{\bfL_{c'}}(\barF_q)$ obtained by $\bfL_{c'}(\barF_q)$-saturation (note that although $\bfl_c(\barF_q)$ is naturally only a subquotient of $\bfl_{c'}(\barF_q)$, it is standard to identify it with a Levi factor of $\bfp_c(\mf O)/\bfu_{c'}(\mf O)$ and the saturation map is independent of this choice of Levi factor).
Recall that
$$d_{\bfl_{c'}}\circ\ms{Sat}_c^{c'}\mathbb{O} = \text{Ind}_c^{c'}\circ d_{\bfl_{c}}\mathbb{O}.$$
Then since $\text{Ind}_{\bfl_{c'}}^{\mf g^\vee}\circ\text{Ind}_{\bfl_c}^{\bfl_{c'}} = \text{Ind}_{\bfl_c}^{\mf g^\vee}$ and $\mathcal L_c(\mathbb{O}) = \mathcal L_{c'}(\ms{Sat}_{c}^{c'}\mathbb{O})$ it suffices to show that the diagram commutes when $I_{o,\mathcal A}^K$ is replaced by $I_{o,d,\mathcal A}^{K}$.
Now consider the following diagram
\begin{equation}
\begin{tikzcd}
& I_{o,d,\mathcal A}^K \arrow[ddl,"\mathcal L"] \arrow[dr,red,"\color{black}\Gamma_{x_0}"] \arrow[dd] \arrow[ddrrr,red,bend left=20,"\color{black}d_{S,x_0,\bfT_K}"] \\
& & I_{o,d,T}^\mathbb{C} \arrow[dr,red,"\color{black}\mathrm{MS}_{o,T}"] \arrow[d] \\
\mathcal N_o(K) \arrow[rrr,bend right = 20,swap,"\theta_{\mathscr O,\bfT_K}"] & I_{o,d,\mathcal A}^K/\sim_{\mathcal A} \arrow{l}[swap]{\sim} \arrow{r}{\sim}[swap]{\widetilde \Gamma_{x_0}} & I_{o,d,T}^\mathbb{C}/W \arrow[r,"\sim"] & \mathcal N_{o,c} \arrow[r,red,"\color{black}d_S"] & \mathcal N_o^\vee.
\end{tikzcd}
\end{equation}
We wish to show that the outermost square commutes.
But we already know all the inner squares and triangles commute except for the square outlined by the red arrows.
The maps $\text{Ind}_{\bfl_c}^{\mf g^\vee}$ and $d_{\bfl_c}$ can also be defined on $\mathcal N_{o,sp}^{L}(\mathbb{C})\to \mathcal N_o^\vee$ and $\mathcal N_o^{L}(\mathbb{C})\to \mathcal N_{o,sp}^{L}(\mathbb{C})$ where $L$ is a pseudo-Levi of $G$, using the Springer correspondence over $\mathbb{C}$ instead of $\barF_q$.
Define
$$d_{S,\bfT_K}^\mathbb{C}:I_{o,d,T}^\mathbb{C}\to \mathcal N_o^\vee, \quad (L,tZ_L^\circ,\mathbb{O}) \mapsto \text{Ind}_{\mf l}^{\mf g^\vee}\circ d_{\mf l}(\mathbb{O})$$
where $\mf l$ is the Lie algebra of $L$.
Since for good characteristic the Springer correspondence does not depend on the base field, it is clear that $d_{S,x_0,\bfT_K} = d_{S,\bfT_K}^\mathbb{C} \circ \Gamma_{x_0}$.
It remains to show that $d_{S,\bfT_K}^\mathbb{C} = d_S\circ \mathrm{MS}_{o,T}$.
This follows from \cite[Proposition 7]{Sommers2001}.
\end{proof}
One remarkable property of $d_S$ is that it does not depend on the isogeny of $G$ (in fact isogeny of semisimple quotient of $G$).
\nomenclature{$\mathcal N_{o,\bar c}$}{}
\nomenclature{$\mf Q$}{}
Let
\begin{equation}
\mathcal N_{o,\bar c} = \set{(\mathbb{O},C):\mathbb{O}\in \mathcal N_o(\mathbb{C}),C \in \mathcal C(\bar A(\mathbb{O}))}
\end{equation}
where $\bar A(\mathbb{O})$ is Lusztig's canonical quotient of $A(\mathbb{O})$ as defined in \cite[Section 5]{sommersduality} and write $\mf Q:\mathcal N_{o,c}\to \mathcal N_{o,\bar c}$ for the map induced by $C\mapsto \bar C$ (where $\bar C$ denotes the image of $C$ in $\bar A(\mathbb{O})$).
Then \cite[Theorem 1]{achar} shows that $d_S$ factors through $\mf Q$.
We will call the resulting map from $\mathcal N_{o,\bar c}\to \mathcal N_o^\vee$ also $d_S$.
Let $Z_G/Z_G^\circ$ act trivially on $\mathcal N_{o,\bar c}$.
The argument in \cite[Section 5]{Sommers2001} shows that the map $\mf Q:\mathcal N_{o,c}\to \mathcal N_{o,\bar c}$ is $Z_G/Z_G^\circ$-equivariant.
\begin{proposition}
\label{prop:indpd}
Let $\mathscr O,\mathscr O'\in \bfG(K)\backslash \mathscr H$.
Then $d_{S,\mathscr O,\bfT_K} = d_{S,\mathscr O',\bfT_K}$.
\end{proposition}
\begin{proof}
By Proposition \ref{prop:staction} there exists a (unique) $tZ_G^\circ$ such that $(tZ_G^\circ).\mathscr O = \mathscr O'$.
Thus
\begin{align*}
d_{S,\mathscr O',\bfT_K} &= d_S \circ \mf Q \circ \theta_{(tZ_G^\circ).\mathscr O,\bfT_K} = d_S \circ \mf Q((tZ_G^\circ).\theta_{\mathscr O,\bfT_K}) \\
&= d_S\circ \mf Q\circ \theta_{\mathscr O,\bfT_K} = d_{S,\mathscr O,\bfT_K}
\end{align*}
where the third equality uses the $Z_G/Z_G^\circ$-equivariance of $\mf Q$.
\end{proof}
\nomenclature{$d_{S,\bfT_K}$}{}
\nomenclature{$\widetilde\theta_{\bfT_K}$}{}
Thus we can define $d_{S,\bfT_K}$ to be the map $d_{S,\mathscr O,\bfT_K}$ where $\mathscr O$ is any element of $\bfG(K)\backslash \mathscr H$ and this is well defined.
We will also define $\widetilde \theta_{\bfT_K} = \mf Q\circ \theta_{\mathscr O,\bfT_K}$ where $\mathscr O\in \bfG(K)\backslash \mathscr H$.
This is also independent of $\mathscr O$ by the $Z_G/Z_G^\circ$-equivariance of $\mf Q$.
\paragraph{An equivalence relation on $\mathcal N_o(K)$}
\nomenclature{$\le_A$}{}
Recall that in \cite{achar}, Achar defined a pre-order $\le_A$ on $\mathcal N_{o,c}$ by $(\mathbb{O}_1,C_1)\le_A (\mathbb{O}_2,C_2)$ if $\mathbb{O}_1\le \mathbb{O}_2$ and $d_S(\mathbb{O}_1,C_1)\ge d_S(\mathbb{O}_2,C_2)$.
We can transport it over $\mathcal N_o(K)$ via a $\theta_{\mathscr O,\bfT_K}$ for some $\mathscr O\in \bfG(K)\backslash \mathscr H$ to obtain a pre-order on $\mathcal N_o(K)$.
By Theorem \ref{thm:unramifiedparam}, this pre-order can be expressed as: for $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(K)$,
$$\mathbb{O}_1\le_A \mathbb{O}_2 \iff \mathcal N_o(\bar k/K)(\mathbb{O}_1) \le \mathcal N_o(\bar k/K)(\mathbb{O}_2) \text{ and } d_{S,\bfT_K}(\mathbb{O}_1)\ge d_{S,\bfT_K}(\mathbb{O}_2).$$
By Proposition \ref{prop:indpd} this pre-order is independent of the choice of $\mathscr O$.
By naturality of $d_{S,\mathscr O,\bfT_K}$ in $\bfT_K$, this pre-order also does not depend on the choice of $\bfT_K$.
Now define $\mathbb{O}_1\sim_A\mathbb{O}_2$ if $\mathbb{O}_1\le_A \mathbb{O}_2$ and $\mathbb{O}_2\le_A \mathbb{O}_1$.
\begin{theorem}
\label{thm:thetabar}
\nomenclature{$\bar\theta_{\bfT_K}$}{}
The fibres of the map $\widetilde\theta_{\bfT_K}$ are exactly the $\sim_A$-equivalence classes on $\mathcal N_o(K)$.
In particular $\widetilde\theta_{\bfT_K}$ descends to a natural (in $\bfT_K$) bijection
$$\bar\theta_{\bfT_K}:\mathcal N_o(K)/\sim_A\to \mathcal N_{o,\bar c}.$$
\end{theorem}
\begin{proof}
The fibres of $\mf Q$ are precisely the equivalence classes of $\le_A$ on $\mathcal N_{o,c}$ by \cite[Theorem 1]{achar}.
Naturality in $\bfT_K$ is inherited from naturality of $\theta_{\mathscr O,\bfT_K}$ and $\mf Q$.
\end{proof}
By construction $\le_A$ descends to a partial order on $\mathcal N_o(K)$.
\begin{lemma}
Let $\mathbb{O}_1,\mathbb{O}_2\in \mathcal N_o(K)$.
If $\mathbb{O}_1\sim_A\mathbb{O}_2$ then
$$\mathcal N_o(\bar k/K)(\mathbb{O}_1) = \mathcal N_o(\bar k/K)(\mathbb{O}_2).$$
\end{lemma}
\begin{proof}
Obvious.
\end{proof}
The saturation map $\mathcal N_o(\bar k/K)$ is constant on $\sim_A$ classes.
\begin{lemma}
\label{lem:orderrev}
Let $(c,\mathbb{O}_1'),(c,\mathbb{O}_2')\in I_{o,\mathcal A}^K$ and let $\mathbb{O}_i = \mathcal L_c(\mathbb{O}_i')$ for $i=1,2$.
If $\mathbb{O}_1'\le \mathbb{O}_2'$ then $d_{S,\bfT_K}(\mathbb{O}_2)\le d_{S,\bfT_K}(\mathbb{O}_1)$.
In particular $\mathbb{O}_1\le_A\mathbb{O}_2$.
\end{lemma}
\begin{proof}
If $\mathbb{O}_1'\le \mathbb{O}_2'$ then $d_{\bfl_c}(\mathbb{O}_2')\le d_{\bfl_c}(\mathbb{O}_1')$.
But $\text{Ind}_{\bfl_c}^{\mf g^\vee}$ is order preserving by \cite[Theorem 12.5]{spaltenstein} and so $d_{S,\bfT_K}(\mathbb{O}_2)\le d_{S,\bfT_K}(\mathbb{O}_1)$.
\end{proof}
\paragraph{The Canonical Unramified Wavefront Set}
\nomenclature{$^K\mathrm{WF}(X)$}{}
Let $(\pi,X)$ be a depth-$0$ representation of $\bfG(k)$.
Define the \emph{canonical unramified wavefront set} of $(\pi,X)$ to be
$$^K\mathrm{WF}(X):=\max_{c\subseteq \mathcal B(\bfG,k)}[\mathcal L_c(\hphantom{ }^{\mf F}\mathrm{WF}(X^{\ms U_c}))] \quad (\subseteq \mathcal N_o(K)/\sim_A).$$
The canonical unramified wavefront set has many of the nice properties that $^K\widetilde\mathrm{WF}(X)$ has.
\begin{lemma}
\label{lem:charfreewf}
Let $(\pi,X)$ be a depth-$0$ representation of $\bfG(k)$.
Then
$$^K\mathrm{WF}(X) = \max [\hphantom{ }^K\Xi(X)].$$
\end{lemma}
\begin{proof}
The proof is exactly the same as that of Lemma \ref{lem:liftwf}, except that we need to be careful with the second part, for which we now provide the details.
Let $\mathbb{O}$ be a maximal element of $^K\Xi(\pi)$ with respect to $\le_A$.
Write $\mathbb{O} = \mathcal N_o(K/k)(\mathbb{O}_1)$ where $\mathbb{O}_1\in \Xi(X)$.
Let $(c,\mathbb{O}_1')\in I_o^k(\mathbb{O}_1)$.
Let
$$\mathbb{O}_2' = \mathcal N_o^{\bfL_c}(\barF_q/\mathbb F_q)(\mathbb{O}_1').$$
Let $\mathbb{O}_3'$ be a wavefront set nilpotent for $V^{\bfU_c(\mf o)}$ such that $\mathbb{O}_2' \le \mathbb{O}_3'$.
Let $\mathbb{O}_3 = \mathcal L_c(\mathbb{O}_3')$.
$\mathbb{O}_3$ is an element of $^K\Xi(\pi)$.
By Lemma \ref{lem:orderrev}, $\mathbb{O}_2' \le \mathbb{O}_3'$ implies that $\mathbb{O} \le_A \mathbb{O}_3$.
By maximality of $\mathbb{O}$ in $^K\Xi(\pi)$ we get that
$$\mathbb{O}\sim_A\mathbb{O}_3.$$
In particular
$$\mathcal N_o(\bar k/K)(\mathbb{O}) = \mathcal N_o(\bar k/K)(\mathbb{O}_3).$$
Thus by Corollary \ref{cor:alginc} we have that $\mathbb{O}_2'= \mathbb{O}_3'$.
It follows that $\mathbb{O} \in \hphantom{ }^K\mathrm{WF}_c(\pi)$.
\end{proof}
\begin{theorem}
\label{thm:algwf}
Let $(\pi,X)$ be a depth $0$ representation of $\bfG(k)$.
Then
$$^{\bar k}\mathrm{WF}(X) = \max \mathcal N_o(\bar k /K)(\hphantom{ }^K\mathrm{WF}(X)).$$
\end{theorem}
\begin{proof}
By Proposition \ref{prop:wfs} and Corollary \ref{cor:maxl}, $^{\bar k}\mathrm{WF}(X)$ consists of the maximal elements of $\mathcal N_o(\bar k/K)(\hphantom{ }^K\Xi(X))$.
Since $\mathcal O_1\le_A \mathcal O_2$ implies that $^{\bar k}\mathcal O_1\le \hphantom{ }^{\bar k}\mathcal O_2$, the result follows by Lemma \ref{lem:charfreewf}.
\end{proof}
\paragraph{Achar's Duality Map}
\label{par:acharduality}
\nomenclature{$\mathcal N_{o,\bar c}^\vee$}{}
\nomenclature{$d_A$}{}
\nomenclature{$d$}{}
\nomenclature{$d_{LS}$}{}
Let $\mathcal N_{o,c}^\vee,\mathcal N_{o,\bar c}^\vee$ be the analogous objects of $\mathcal N_{o,c}$ and $\mathcal N_{o,\bar c}$ for $G^\vee$.
In \cite{achar}, Achar introduces duality maps
\begin{equation}
\begin{tikzcd}
\mathcal N_{o,\bar c} \arrow[r,shift left,"d_A"] & \arrow[l,shift left,"d_A"] \mathcal N^\vee_{o,\bar c}.
\end{tikzcd}
\end{equation}
These maps extend $d_{S}$ in the sense that the following diagram commutes:
\begin{equation}
\begin{tikzcd}
\mathcal N_{o,c} \arrow[r,"d_S"] \arrow[d,"\mf Q"] & \mathcal N_o^\vee \\
\mathcal N_{o,\bar c} \arrow[r,"d_A"] & \mathcal N_{o,\bar c}^\vee. \arrow[u,"\mathrm{pr}_1^\vee"]
\end{tikzcd}
\end{equation}
Here $\mathrm{pr}_1^\vee:\mathcal N_{o,\bar c}^\vee\to\mathcal N_o^\vee$ denotes the projection onto the first factor.
Let $d:\mathcal N_o^\vee\to \mathcal N_o$ denote the Barbasch--Vogan duality \cite{bv} and $d_{LS}:\mathcal N_o\to \mathcal N_o$ denote the Lusztig--Spaltenstein duality \cite{spaltenstein}.
\begin{theorem}
\label{thm:canoninv}
\begin{enumerate}
\item Let $\mathbb{O} \in \mathcal N_o(K)$. Then $\Lambda_{\bfT_K}^{\bar k}\circ \mathcal N_o(\bar k/K)(\mathbb{O}) \le d\circ d_{S,\bfT_K}(\mathbb{O})$.
\item Let $\mathbb{O}^\vee\in \mathcal N_o^\vee$. Then
\begin{equation}
\set{(\mathbb{O},C)\in \mathcal N_{o,c}:d_S(\mathbb{O},C) = \mathbb{O}^\vee, \mathbb{O} = d(\mathbb{O}^\vee)} = \mf Q^{-1}(d_A(\mathbb{O}^\vee,1)).
\end{equation}
In particular this set is non-empty.
\item Let $\mathbb{O}\in \mathcal N_o(K)$ and $\mathbb{O}^\vee\in \mathcal N_o^\vee$.
If $d_{S,\bfT_K}(\mathbb{O})\ge \mathbb{O}^\vee$ then $\bar\theta_{\bfT_K}([\mathbb{O}])\le_A d_A(\mathbb{O}^\vee,1)$.
\end{enumerate}
\end{theorem}
\begin{proof}
In \cite[Proposition 2.3]{achar} Achar proves that $d_S(\mathbb{O}',C)\le d_S(\mathbb{O}',1)$ for all $(\mathbb{O}',C)\in \mathcal N_{o,c}$.
But $d_S(\mathbb{O}',1) = d(\mathbb{O}')$.
Thus $d\circ d(\mathbb{O}')\le d\circ d_S(\mathbb{O}',C)$.
Since $\mathbb{O}'\le d_{LS}\circ d_{LS}(\mathbb{O}') = d\circ d(\mathbb{O}')$ the first part follows.
The second part is an immediate consequence of \cite[Remark 14]{sommersduality} and \cite[Proposition 2.8]{achar}.
For the third part note that by part 1 of this theorem we have that
$$\Lambda_{\bfT_K}^{\bar k}\circ\mathcal N_o(\bar k/K)(\mathbb{O})\le d(\mathbb{O}^\vee).$$
By assumption we also have
$$d_S(\mathbb{O})\ge d_S(d_A(\mathbb{O}^\vee,1)) = \mathbb{O}^\vee.$$
Since $\mathrm{pr}_1^\vee(d_A(\mathbb{O}^\vee,1)) = d(\mathbb{O}^\vee)$, it follows by definition that $\bar \theta_{\bfT_K}([\mathbb{O}])\le_A d_A(\mathbb{O}^\vee,1)$.
\end{proof}
When using the duality maps to compute wavefront sets we will often want to consider the following modifications.
Define
$$d_{A,\bfT_K}^K:\mathcal N_{o,\bar c}^\vee\to \mathcal N_{o}(K)/\sim_A, \quad d_A^K = (\bar\theta_{\bfT_K})^{-1} \circ d_A$$
and
$$d_{\bfT_K}^{\bar k}:\mathcal N_{o}^\vee\to \mathcal N_{o}(\bar k), \quad d^{\bar k} = (\Lambda_{\bfT_K}^{\bar k})^{-1} \circ d.$$
\section{The Wavefront Set for the Principal Block}
\paragraph{Basic Notation}
\nomenclature{$\mathrm{Rep}(\bfG(k))$}{}
\nomenclature{$\mathcal B^\vee,\mathcal B^\vee_x$}{}
\nomenclature{$H^i(\mathcal B^\vee_S)^\rho$}{}
\nomenclature{$\mathrm{Irr}(A(S))_0$}{}
Let $\mathrm{Rep}(\bfG(k))$ denote the category of smooth representations of $\bfG(k)$.
If $H$ is a complex reductive group and $x$ is an element of $H$ or its Lie algebra $\mf h$, we write $H(x)$ for the centraliser of $x$ in $H$, and $A_H(x)$ for the group of connected components of $H(x)$. If $S$ is a subset of $H$ or $\mf h$ (or indeed, of $H \cup \mf h$), we can similarly define $H(S)$ and $A_H(S)$. We will sometimes write $A(x)$, $A(S)$ when the group $H$ is implicit.
Write $\mathcal B^\vee$ for the flag variety of $G^\vee$, i.e. the variety of Borel subgroups $B^{\vee} \subset G^{\vee}$. Note that $\mathcal{B}^{\vee}$ has a natural left $G^{\vee}$-action.
For $g\in G^\vee$, write
$$\mathcal B^\vee_g = \{B^\vee\in \mathcal B^\vee \mid g\in B^\vee \}.$$
(this coincides with the subvariety of Borels fixed by $g$). Similarly, for $x\in \mathfrak g^\vee$, write
$$\mathcal B^\vee_x = \{B^\vee\in \mathcal B^\vee \mid x\in \mathfrak b^\vee \}.$$
If $S$ is a subset of $G^{\vee}$ or $\mf g^{\vee}$ (or indeed of $G^{\vee} \cup \mf g^{\vee}$), write
$$\mathcal B^\vee_S = \bigcap_{x\in S} \mathcal{B}^{\vee}_x.$$
The singular cohomology group $H^i(\mathcal B^\vee_S,\mathbb{C}) = H^i(\mathcal{B}^{\vee}_S)$ carries an action of $A(S)=A_{G^\vee}(S)$. For an irreducible representation $\rho\in\mathrm{Irr}(A(S))$, let
$$H^i(\mathcal B^\vee_S)^\rho := \operatorname{Hom}_{A(S)}(\rho,H^i(\mathcal{B}^{\vee}_S)).$$
We will often consider the subset
\begin{equation}\label{eq:defofIrr0}
\mathrm{Irr}(A(S))_0 := \{\rho \in \mathrm{Irr}(A(S)) \mid H^{\mathrm{top}}(\mathcal{B}_S^{\vee})^{\rho} \neq 0\}.
\end{equation}
\subsection{Local Wavefront Sets for the Principal Block}
\paragraph{Representations with Unipotent Cuspidal Support}
\label{s:unip-cusp}
\nomenclature{$\Pi^{Lus}(\bfG(k))$}{}
\begin{definition} Let $X$ be an irreducible smooth $\mathbf{G}({k})$-representation. We say that $X$ has \emph{unipotent cuspidal support} if there is a parahoric subgroup $\bfP_c \subset \bfG$ such that $X^{\mathbf U_c(\mf o)}$ contains an irreducible Deligne--Lusztig cuspidal unipotent representation of ${\mathbf L}_c(\mathbb F_q)$. Write $\Pi^{{Lus}}(\bfG( k))$ for the set of all such representations.
\end{definition}
\nomenclature{$\mathsf L(\bfG(k))$}{}
\nomenclature{$\mf g_q^\vee$}{}
\nomenclature{$s_0$}{}
Recall that an irreducible $\mathbf{G}({k})$-representation $V$ is \emph{Iwahori spherical} if $V^{\mathbf{I}(\mathfrak {o})} \neq 0$ for an Iwahori subgroup $\bfI(\mf o)$ of $\bfG(k)$. We note that all such representations have unipotent cuspidal support, corresponding to the case $\mathbf P_c=\mathbf I$ and the trivial representation of $\mathbf T(\mathbb F_q)$.
We will now recall the classification of irreducible representations of unipotent cuspidal support. Write $\ms L(\bfG( k))$ for the set of $G^\vee$-orbits (under conjugation) of triples $(s,n,\rho)$ such that
\begin{itemize}
\item $s\in G^\vee$ is semisimple,
\item $n\in \mathfrak g^\vee$ such that $\operatorname{Ad}(s) n=q n$,
\item $\rho\in \mathrm{Irr}(A_{G^{\vee}}(s,n))$ such that $\rho|_{Z(G^\vee)}$ acts as the identity.
\end{itemize}
Without loss of generality, we may assume that $s\in T^\vee$. Note that $n\in\mathfrak g^\vee$ is necessarily nilpotent. The group $G^\vee(s)$ acts with finitely many orbits on the $q$-eigenspace of $\operatorname{Ad}(s)$
$$\mathfrak g_q^\vee=\{x\in\mathfrak g^\vee\mid \operatorname{Ad}(s) x=qx\}.$$
In particular, there is a unique open $G^\vee(s)$-orbit in $\mathfrak g_q^\vee$.
Fix an $\mathfrak{sl}(2)$-triple $\{n^-,h,n\} \subset \mf g^{\vee}$ with $h\in \mathfrak t^\vee_{\mathbb R}$ and set
\begin{equation}
\label{eq:s0}
s_0:=sq^{-\frac{h}{2}}.
\end{equation}
Then $\operatorname{Ad}(s_0)n=n$.
Recall the definition of $T^\vee_c$ from section \ref{par:basicnotation1}.
\begin{theorem}[{Local Langlands correspondence, \cite[Theorem 7.12]{kl}\cite[Theorem 5.21]{Lu-unip1}}]\label{thm:Langlands} Suppose that $\bfG$ is adjoint and $ k$-split. There is a natural bijection
$$\ms L(\bfG( k))\xrightarrow{\sim} \Pi^{{Lus}}(\bfG( k)),\qquad (s,n,\rho)\mapsto X(s,n,\rho),$$
such that
\begin{enumerate}
\item $X(s,n,\rho)$ is tempered if and only if $s_0\in T_c^\vee$ and $\overline {G^\vee(s)n}=\mathfrak g_q^\vee$,
\item $X(s,n,\rho)$ is square integrable (modulo the center) if and only if it is tempered and $A_{G^{\vee}}(s,n)$ contains no nontrivial torus.
\item $X(s,n,\rho)^{\mathbf I(\mf o)}\neq 0$ if and only if $\rho\in \mathrm{Irr}(A(s,n))_0$, i.e., $\rho$ is such that
$$H^{\mathrm{top}}(\mathcal B^\vee_{s,n})^\rho\neq 0.$$
\end{enumerate}
\end{theorem}
For each parameter $(s,n,\rho) \in \ms L(\mathbf{G}({k}))$, there is an associated \emph{standard representation} $Y(s,n,\rho) \in \mathrm{Rep}(\mathbf{G}({k}))$.
For Iwahori spherical representations, the relevant results are \cite[Theorems 7.12, 8.2, 8.3]{kl}. For the more general setting of representations with unipotent cuspidal support, see \cite[\S10]{Lu-gradedII}.
The standard modules are of finite length and have the property that $X(s,n,\rho)$ is the unique simple quotient of $Y(s,n,\rho)$.
\paragraph{The Borel--Casselman Equivalence}
\nomenclature{$\mathcal H_{\bfI}$}{}
\nomenclature{$\mathrm{Rep}_\bfI(\bfG(k))$}{}
\nomenclature{$m_{\bfI}$}{}
\nomenclature{$R_\bfI(\bfG(k))$}{}
\nomenclature{$R(\mathcal H_\bfI)$}{}
In this section we assume that $\mb G$ is $k$-split.
We fix a maximal $K$-split torus $\bfT_K$, a root basis $\Delta$ for $\Phi$, and a hyperspecial point $x_0\in \mathcal A(\bfT_K,K)$.
Recall the definition of $c_0$ from the end of Section \ref{par:basicnotation2} and let $\bfI$ be the Iwahori subgroup of $\bfG$ corresponding to $c_0$.
Recall the \emph{Iwahori--Hecke algebra} associated to $\mathbf{G}(k)$ and $\bfI$,
$$\mathcal H_{\mathbf{I}}=\{f\in C^\infty_c(\bfG( k))\mid f(i_1gi_2)=f(g),\ i_1,i_2\in \mathbf I(\mf o)\}.$$
Multiplication in $\mathcal H_{\mathbf{I}}$ is given by convolution with respect to a fixed Haar measure of $\bfG( k)$. Let $\mathrm{Rep}_{\mathbf{I}}(\bfG( k))$ denote the Iwahori category, i.e. the full subcategory of $\mathrm{Rep}(\bfG( k))$ consisting of representations $X$ such that $X$ is generated by $X^{\mathbf I(\mf o)}$. The simple objects in this category are the (irreducible) Iwahori spherical representations.
By the Borel--Casselman Theorem \cite[Corollary 4.11]{Bo}, there is an exact equivalence of categories
\begin{equation}\label{eq:mI}
m_{\mathbf{I}}: \mathrm{Rep}_{\mathbf{I}}(\bfG(k))\to \mathrm{Mod}(\mathcal H_{\mathbf{I}}), \qquad m_{\mathbf{I}}(X) = X^{\mathbf I(\mf o)}.
\end{equation}
This equivalence induces a group isomorphism
\begin{equation}\label{eq:mIhom}
m_{\mathbf{I}}: R_{\mathbf{I}}(\mathbf{G}(k)) \xrightarrow{\sim} R(\mathcal{H}_{\mathbf{I}}),
\end{equation}
where $R_{\mathbf{I}}(\mathbf{G}(k))$ (resp. $R(\mathcal{H}_{\mathbf{I}})$) is the Grothendieck group of $\mathrm{Rep}_{\mathbf{I}}(\mathbf{G}(k))$ (resp. $\mathrm{Mod}(\mathcal{H}_{\mathbf{I}})$).
\paragraph{Deformations of Modules of the Iwahori-Hecke Algebra}
\nomenclature{$\mathcal H_c$}{}
\nomenclature{$\mf J_c$}{}
\nomenclature{$X|_{W_c}$}{}
Suppose $\mathbf{P}_c$ is a parahoric subgroup containing $\mathbf{I}$ with pro-unipotent radical $\mathbf{U}_c$ and reductive quotient $\mathbf{L}_c$.
The finite Hecke algebra $\mathcal H_{c}$ of $\bfL_c(\mathbb F_q)$ embeds as a subalgebra of $\mathcal H_{\bfI}$.
For $X\in \mathrm{Rep}_{\bfI}(\bfG(\sfk))$ admissible, the Moy--Prasad theory of unrefined minimal $K$-types \cite{moyprasad} implies that the finite dimensional $\bfL_c(\mathbb F_q)$-representation $X^{\bfU_{c}(\mf o)}$ is a sum of principal series unipotent representations and so corresponds to an $\mathcal H_{c}$-module with underlying vector space
$$(X^{\bfU_c(\mf o)})^{\bfI(\mf o)/\bfU_c(\mf o)} = X^{\bfI(\mf o)}.$$
The $\mathcal H_c$-module structure obtained in this manner coincides naturally with that of
$$\operatorname{Res}_{\mathcal H_c}^{\mathcal H_\bfI}m_\bfI(X).$$
Let
$$\mathfrak J_c:\mathcal H_c\to \mathbb{C}[W_c]$$
be the isomorphism introduced by Lusztig in \cite[Theorem 3.1]{lusztigdeformation}.
Given any $\mathcal H_c$-module $M$ we can use the isomorphism $\mathfrak J_c$ to obtain a $W_c$-representation which we denote by $M_{q\to1}$.
Define
\begin{equation}
\label{eq:Wcqto1}
X|_{W_c}:=(\operatorname{Res}_{\mathcal H_c}^{\mathcal H_\bfI}m_\bfI(X))_{q\to 1}.
\end{equation}
% NOTE(review): the cocharacter-lattice symbol was garbled as "\mathbb{C}X" in the source; restored here as \mathbb{X} -- confirm against the original notation.
We will need to recall some structural facts about the Iwahori--Hecke algebra. Let $\mathbb{X}:=X_*(\mathbf T,\bar{\mathsf k})=X^*(\mathbf{T}^\vee,\bar{\sfk})$ and consider the (extended) affine Weyl group $\widetilde{W} := W \ltimes \mathbb{X}$. Let
$$S := \{s_{\alpha} \mid \alpha \in \Delta\} \subset W$$
denote the set of simple reflections in $W$. For each $x \in \mathbb{X}$, write $t_x \in \widetilde{W}$ for the corresponding translation. If $W$ is irreducible, let $\alpha_h$ be the highest root and set $s_0=s_{\alpha_h} t_{-\alpha_h^\vee}$, $S^a=S\cup\{s_0\}$. If $W$ is a product, define $S^a$ by adjoining to $S$ the reflections $s_0$, one for each irreducible factor of $W$. Consider the length function $\ell: \widetilde{W} \to \mathbb{Z}_{\geq 0}$ extending the usual length function on the affine Weyl group $W^a=W\ltimes \mathbb Z \Phi^\vee$
\nomenclature{$l$}{}
\[\ell(w t_x)=\sum_{\substack{\alpha\in \Phi^+\\w(\alpha)\in \Phi^-}} |\langle x,\alpha\rangle+1|+\sum_{\substack{\alpha\in \Phi^+\\w(\alpha)\in \Phi^+}} |\langle x,\alpha\rangle|.
\]
For each $w \in \widetilde{W}$, choose a representative $\bar w$ in the normaliser $N_{\bfG(\mathsf k)}(\mathbf I(\mf o))$. Recall the affine Bruhat decomposition
\[\bfG(\mathsf k)=\bigsqcup_{w\in \widetilde W} \mathbf I(\mf o) \bar w \mathbf I(\mf o).
\]
For each $w \in \widetilde{W}$, write $T_w \in \mathcal{H}_{\mathbf{I}}$ for the characteristic function of $\mathbf I(\mf o) \bar w \mathbf I(\mf o) \subset \mathbf{G}(\sfk)$. Then $\{T_w\mid w\in \widetilde W\}$ forms a $\mathbb C$-basis for $\mathcal H_{\mathbf{I}}.$
The relations on the basis elements $\{T_w \mid w \in \widetilde{W}\}$ were computed in \cite[Section 3]{iwahorimatsumoto}:
\begin{equation}\label{eq:relations}
\begin{aligned}
&T_w\cdot T_{w'}=T_{ww'}, \qquad \text{if }\ell(ww')=\ell(w)+\ell(w'),\\
&T_s^2=(q-1) T_s+q,\qquad s\in S^a.
\end{aligned}
\end{equation}
\nomenclature{$R$}{}
\nomenclature{$\mathcal H_{\bfI,v}$}{}
Let $R$ be the ring $\mathbb{C}[v,v^{-1}]$ and for $a\in \mathbb{C}^*$ let $\mathbb{C}_a$ be the $R$-module $R/(v-a)$.
Let $\mathcal H_{\bfI,v}$ denote the Hecke algebra with base ring $R$ instead of $\mathbb{C}$ and where $q$ is replaced with $v^2$ in the relations (\ref{eq:relations}).
By specialising $v$ to $\sqrt{q}$, $1$, we obtain
\begin{equation}
\mathcal H_{\bfI,v}\otimes_R\mathbb{C}_{\sqrt q} \cong \mathcal H_{\bfI}, \qquad \mathcal H_{\bfI,v}\otimes_R\mathbb{C}_1 \cong \mathbb{C}[ \widetilde W].
\end{equation}
Suppose $Y=Y(s,n,\rho)$ is a standard Iwahori spherical representation, see Section \ref{s:unip-cusp}, and let $M=m_\bfI(Y)$.
By \cite[Section 5.12]{kl} there is a $\mathcal H_{\bfI,v}$-module $M_v$, free over $R$, such that
$$M_v\otimes_R\mathbb{C}_{\sqrt q} \cong M$$
as $\mathcal H_\bfI$-modules.
We can thus construct the $\widetilde W$-representation
$$Y_{q\to 1}:= M_v\otimes_R \mathbb{C}_{1}.$$
\nomenclature{$R(\widetilde{W})$}{}
\nomenclature{$(\bullet)_{q\to 1}$}{}
Let $R(\widetilde{W})$ be the Grothendieck group of $\mathrm{Rep}(\widetilde W)$. Since the standard modules form a $\mathbb{Z}$-basis for $R_\bfI(\bfG(\sfk))$, the Grothendieck group of $\mathrm{Rep}_\bfI(\bfG(\sfk))$, there is a unique homomorphism
\begin{equation}\label{eq:qto1hom}(\bullet)_{q\to 1}: R_\bfI(\bfG(\sfk)) \to R(\widetilde W)\end{equation}
extending $Y \mapsto Y_{q \to 1}$. Moreover, since
$$\operatorname{Res}_{W_c}^{\widetilde W} Y_{q\to 1} = Y|_{W_c}$$
for the Iwahori spherical standard modules we have that
\begin{equation}
\label{eq:heckerestriction}
\operatorname{Res}_{W_c}^{\widetilde W}X_{q\to 1} = X|_{W_c}
\end{equation}
for all $X\in R_\bfI(\bfG(\sfk))$.
\paragraph{Reduction to Weyl Groups}
Recall the definition of $\mathbb{O}^s$ in Section \ref{sec:kawanaka}.
\begin{theorem}
\label{thm:locwf}
Suppose $X\in \mathrm{Rep}_\bfI(\bfG(k))$.
Let $c\subset c_0$.
Then
\begin{equation}
^{\barF_q}\mathrm{WF}(X^{\bfU_c(\mf o)})= \max\set{\mathbb{O}^s(E):[\operatorname{Res}_{W_c}^{\widetilde W}(X^{\bfI(\mf o)}):E]>0}
\end{equation}
and $^K\mathrm{WF}_c(X)$ is the lift of these orbits.
\end{theorem}
\begin{proof}
This is just putting together Equation \ref{eq:heckerestriction} and the results in Section \ref{sec:kawanaka}.
\end{proof}
\subsection{The Wavefront Set of Spherical Arthur Representations}
Suppose $\bfG$ is adjoint and split over $k$.
Fix a maximal $K$-split torus $\bfT_K$, a root basis $\Delta$, and a hyperspecial point $x_0\in \mathcal A(\bfT_K,K)$.
Let $c_0$ be the chamber defined as in the end of Section \ref{par:basicnotation2} and $\bfI$ be the corresponding Iwahori-subgroup.
Recall from the end of Section \ref{par:acharduality} the definitions of $d_{A,\bfT_K}^K$ and $d_{\bfT_K}^{\bar k}$.
Let $(\pi,X)$ be the spherical Arthur representation with parameter $\mathbb{O}^\vee$.
With respect to Lusztig's parameterisation of representations with cuspidal unipotent support, this is the representation with parameter $[(q^{\frac12 h^\vee},0,triv)]$, where $h^\vee$ is a neutral element for an $\mathfrak{sl}_2$-triple for $\mathbb{O}^\vee$.
In this section we will prove the following theorem.
\begin{theorem}
\label{thm:arthurwf}
Let $(\pi,X)$ be the spherical Arthur representation with parameter $\mathbb{O}^\vee\in \mathcal N_o^\vee$.
Then
\begin{equation}
^{K}\mathrm{WF}(X) = d_{A,\bfT_K}^K(\mathbb{O}^\vee,1),\quad \hphantom{ }^{\bar k}\mathrm{WF}(X) = d_{\bfT_K}^{\bar k}(\mathbb{O}^\vee).
\end{equation}
\end{theorem}
\begin{remark}
By the discussion preceding Proposition 2.3.5 in \cite{cmo}, the above theorem in fact holds without the restriction that $\bfG$ is adjoint.
\end{remark}
Let $\ms n\in \mathbb{O}^\vee$.
Our strategy is to apply Theorem \ref{thm:locwf}.
The conditions apply since $X$ is Iwahori spherical (since it is spherical).
The first step is thus to get a grasp on the $\widetilde W$ structure of $(X^{\bfI(\mf o)})_{q\to 1}$.
With respect to Lusztig's parameterisation of representations with cuspidal unipotent support, $\ms{AZ}(X)$ corresponds to $[(q^{\frac12 h^\vee},\ms n,triv)]$, where $h^\vee$ is a neutral element for $\mathbb{O}^\vee$.
In particular $s_0 = 1$ (as defined in Equation \ref{eq:s0}) and so by Theorem \ref{thm:Langlands} (1), $\ms{AZ}(X)$ is tempered.
Let $Y'$ be the standard module of $\ms{AZ}(X)$ (so that $\ms{AZ}(X)$ is the unique simple quotient of $Y'$).
Then since $\ms{AZ}(X)$ is tempered, by \cite[Theorem 8.2]{kl} we have that $Y' = \ms{AZ}(X)$.
In particular, by \cite[Corollary 8.1]{reeder} we have that
$$(\ms {AZ}(X)^{\bfI(\mf o)})_{q\to 1} = (Y')_{q\to 1} = \sgn \otimes H^*(\mathcal B^\vee_{\ms n})^{triv}$$
where $X_*$ acts trivially on the total cohomology space.
It follows that
$$(X^{\bfI(\mf o)})_{q\to 1} = H^*(\mathcal B^\vee_{\ms n})^{triv}$$
and that the action of $\widetilde W$ factors through $\cdot: \widetilde W\to W$.
It thus suffices to understand $H^*(\mathcal B^\vee_{\ms n})^{triv}$ as a $W$-module and to do this we will use the theory of perverse sheaves.
We adopt the conventions of \cite{shoji}, except that we will work with perverse sheaves over $\mathbb{C}$ rather than $l$-adic sheaves, and we will ignore shifts since we are exclusively interested in the total cohomology.
For $J\in \mathbf P(\widetilde \Delta)$, we will write $c(J)$ for the face of $c_0$ with type equal to $J$, and we write $W_J$ in place of $W_{c(J)}$ for notational convenience.
Let $\pi:\widetilde {\mf g^\vee}\to \mf g^\vee$ be the Grothendieck resolution and $\underline \mathbb{C}$ be the constant sheaf on $\widetilde {\mf g^\vee}$.
Let $N^\vee$ be the nilpotent cone of $\mf g^\vee$.
Then $\pi_*\underline \mathbb{C} = \IC(\mf g^\vee,\mathcal L)$ where $\mathcal L$ is the local system on the regular semisimple elements $\mf g^\vee_{reg}$ obtained by pushing forward the constant sheaf along $\pi^{-1}(\mf g^\vee_{reg})\to \mf g^\vee_{reg}$.
This map is an \'etale covering with Galois group $W^\vee$ and $\mathcal L$ is a $\mb G^\vee$-equivariant sheaf of $W^\vee$-representations.
We can thus decompose it as $\mathcal L = \bigoplus_{E\in \mathrm{Irr}(W^\vee)}E\otimes \mathcal L_E$.
Thus $\pi_*\underline{\mathbb{C}} = \bigoplus_{E\in \mathrm{Irr}(W^\vee)}E\otimes \IC(\mf g^\vee,\mathcal L_E)$ and we have that $\IC(\mf g^\vee,\mathcal L_E)\mid_{N^\vee} = \IC(\overline {\mathbb{O}^\vee_E},\mathcal E_E)$ where the map $E\mapsto (\mathbb{O}^\vee_E,\mathcal E_E)$ is the Springer correspondence \cite{shoji}.
Thus $\pi_*\underline \mathbb{C}\mid_{N^\vee} = \bigoplus_{E\in \mathrm{Irr}(W^\vee)}\IC(\overline{\mathbb{O}^\vee_E},\mathcal E_E)$.
\begin{lemma}
Let $J\in \mathbf P(\widetilde\Delta)$ and $E\in \mathrm{Irr}(W_J)$.
View $W_J$ as a subgroup of $W^\vee$ via
\begin{equation}
\begin{tikzcd}[column sep=small]W_J\arrow[r,"\cdot"] & W \arrow[r,"\sim"] & W^\vee\end{tikzcd}.
\end{equation}
The first map is the projection map $\cdot:\widetilde W\to W$.
Define $K_{J,E}$ to be the perverse sheaf
\nomenclature{$K_{J,E}$}{}
\begin{equation}
K_{J,E} := \bigoplus_{E'\in \mathrm{Irr}(W^\vee)}[E':\text{Ind}_{W_J}^{W^\vee}E]\IC(\overline{\mathbb{O}^\vee_{E'}},\mathcal E_{E'}).
\end{equation}
Then the support of $K_{J,E}$ lies in the closure of $d_{S,\bfT_K}(c(J),\mathbb{O}^s(E))$.
\end{lemma}
\begin{proof}
By \cite[Proposition 4.3]{achar_aubert}, $[\text{Ind}_{W_J}^{W^\vee}E:E']>0$ implies that $\mathbb{O}^\vee_{E'} \le \text{Ind}_{\bfl_{c(J)}}^{\mf g^\vee} \mathbb{O}''$ where $\mathbb{O}''$ is the Springer support of the unique special representation in the same family as $E$.
But $\mathbb{O}'' = d_{LS}(\mathbb{O}'(E))$ and so in fact
$$\mathbb{O}^\vee_{E'} \le d_{S,x_0,\bfT_K}(c(J),\mathbb{O}^s(E)) = d_{S,\bfT_K}(c(J),\mathbb{O}^s(E))$$
where the last equality is Proposition \ref{prop:ds}.
Therefore $\text{supp} (K_{J,E})$ is contained in the closure of $d_{S,\bfT_K}(c(J),\mathbb{O}^s(E))$ as required.
\end{proof}
\begin{theorem}
\label{thm:wfspringer}
Let $\ms n\in N^\vee$ and $\mathbb{O}_{\ms n}^\vee = G^\vee.\ms n$.
\begin{enumerate}
\item For all $J\in \mathbf P(\widetilde\Delta)$ and $E\in \mathrm{Irr}(W_J)$ such that $E$ is an irreducible constituent of $\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})^{triv}$, we have that $\mathbb{O}_{\ms n}^\vee \le d_{S,\bfT_K}(c(J),\mathbb{O}^s(E))$.
\item Let $\widetilde \mathbb{O}\in \widetilde\theta_{\bfT_K}^{-1}(d_A(\mathbb{O}_{\ms n}^\vee,1))$ and $(c(J),\mathbb{O}')\in I_{o,d,c_0}^K(\widetilde \mathbb{O})$.
Let $E$ be the special representation of $W_J$ corresponding to $d_{\bfl_{c(J)}}(\mathbb{O}')$.
Then
\begin{equation}
[E:\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})^{triv}] = 1.
\end{equation}
\end{enumerate}
\end{theorem}
\begin{proof}
\begin{enumerate}
\item Let $J\in \mathbf P(\widetilde\Delta)$ and $E\in \mathrm{Irr}(W_J)$.
We have that
\begin{align}
[E:\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})] &= [E:\operatorname{Res}_{W_J}^{W^\vee}\bigoplus_{E'\in \mathrm{Irr}(W^\vee)}E'\otimes H^*_{\ms n}\IC(\overline{\mathbb{O}^\vee_{E'}},\mathcal E_{E'})] \\
&= \sum_{E'\in \mathrm{Irr}(W^\vee)}[E':\text{Ind}_{W_J}^{W^\vee}E]\dim H^*_{\ms n}\IC(\overline{\mathbb{O}^\vee_{E'}},\mathcal E_{E'}) \\
&= \dim H^*_{\ms n}K_{J,E}.
\end{align}
Thus if $[E:\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})]>0$ then
$$\ms n\in \text{supp} K_{J,E} \subseteq \overline{d_{S,\bfT_K}(c(J),\mathbb{O}^s(E))}.$$
But $[E:\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})^{triv}]>0$ implies that $[E:\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})]>0$ and so indeed $\mathbb{O}_{\ms n}^\vee\le d_{S,\bfT_K}(c(J),\mathbb{O}^s(E))$.
\item Let $\widetilde \mathbb{O},c,\mathbb{O}',E$ be as in the statement of the theorem.
Since $\mathbb{O}'$ is distinguished, it is special and so $\mathbb{O}'(E) = \mathbb{O}'$ and $\widetilde \mathbb{O} = \widetilde \mathbb{O}(c(J),\mathbb{O}^s(E))$.
Let $E' = j_{W_J}^{W^\vee}E$.
Then $[\text{Ind}_{W_J}^{W^\vee}E:E'] = 1$, $\mathbb{O}^\vee_{E'} = d_{S,x_0,\bfT_K}(c(J),\mathbb{O}^s(E)) = \mathbb{O}_{\ms n}^\vee$, and $\mathcal E_{E'}$ is the trivial local system.
Since $[E'':\text{Ind}_{W_J}^{W^\vee}E]>0$ implies that $\mathbb{O}^\vee_{E''} \le d_{S,x_0,\bfT_K}(c(J),\mathbb{O}^s(E)) = \mathbb{O}_{\ms n}^\vee$ \cite[Proposition 4.3]{achar_aubert}, we have that
\begin{equation}
H^*(K_{J,E})\mid_{\mathbb{O}_{\ms n}^\vee} = \bigoplus_{E''\in \mathrm{Irr}(W^\vee):\mathbb{O}^\vee_{E''} = \mathbb{O}_{\ms n}^\vee}[E'':\text{Ind}_{W_J}^{W^\vee}E]\mathcal E_{E''}.
\end{equation}
By a similar calculation as above we have that $[E:\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})^{triv}] = [triv:H^*(K_{J,E})\mid_{\mathbb{O}_{\ms n}^\vee}]$.
Thus $[E:\operatorname{Res}_{W_J}^{\widetilde W}H^*(\mathcal B^\vee_{\ms n})^{triv}] = [E':\text{Ind}_{W_J}^{W^\vee}E] = 1$ as required.
\end{enumerate}
\end{proof}
We can now give a proof of Theorem \ref{thm:arthurwf}.
\begin{proof}
Let $J\in \mathbf P(\widetilde\Delta)$.
By Theorem \ref{thm:locwf} we know that
\begin{equation}
^K\mathrm{WF}_{c(J)}(X)=\set{\mathcal L_{c(J)}(\mathbb{O}^s(E)):E\in \mathrm{Irr}(W_J),[E:\operatorname{Res}_{W_J}^{\widetilde W}X^{\bfI(\mf o)}]>0}.
\end{equation}
By part 1 of Theorem \ref{thm:wfspringer}, for any $\widetilde \mathbb{O}\in \hphantom{ }^K\mathrm{WF}_{c(J)}(X)$ we have $d_{S,\bfT_K}(\widetilde \mathbb{O})\ge \mathbb{O}^\vee$.
By Theorem \ref{thm:canoninv}, it follows that $\widetilde\theta_{\bfT_K}(\widetilde \mathbb{O})\le_A d_A(\mathbb{O}^\vee,1)$.
But by part 2 of Theorem \ref{thm:wfspringer} we have that for any $\widetilde \mathbb{O}\in \widetilde\theta_{\bfT_K}^{-1}(d_A(\mathbb{O}^\vee,1))$ and $(c(J),\mathbb{O}')\in I_{o,d,c_0}^K(\widetilde \mathbb{O})$ we have that $\widetilde \mathbb{O}\in \hphantom{ }^K\mathrm{WF}_{c(J)}(X)$.
It follows that
$$^K\mathrm{WF}(X) = \bar\theta_{\bfT_K}^{-1}(d_A(\mathbb{O}^\vee,1)) = d_A^K(\mathbb{O}^\vee,1).$$
Since $\mathcal N_o(\bar k/K)(\widetilde \mathbb{O}) = d^{\bar k}(\mathbb{O}^\vee)$ for any $\widetilde \mathbb{O}\in \widetilde\theta_{\bfT_K}^{-1}(d_A(\mathbb{O}^\vee,1))$ it follows that
$$^{\bar k}\mathrm{WF}(X) = d^{\bar k}(\mathbb{O}^\vee).$$
\end{proof}
\printnomenclature
\end{document}
|
\begin{document}
% NOTE(review): the arguments of the following two \renewcommand lines were garbled
% to their numeric values ("1.2"/"1.0") in the source; presumably \baselinestretch
% and \arraystretch -- confirm against the original source.
\renewcommand{\baselinestretch}{1.2}
\renewcommand{\arraystretch}{1.0}
\title{\bf A Note on Braided $T$-categories over Monoidal
Hom-Hopf Algebras}
\date{}
\author {{\bf Miman You \quad Shuanhong Wang \footnote {Corresponding author: Shuanhong Wang, [email protected]}}\\
{\small Department of Mathematics, Southeast University}\\
{\small Nanjing, Jiangsu 210096, P. R. of China}}
\maketitle
\begin{center}
\begin{minipage}{12.cm}
\noindent{\bf Abstract} Let ${\sl Aut}_{mHH}(H)$ denote the set
of all automorphisms of a monoidal Hom-Hopf algebra $H$
with bijective antipode in the sense of Caenepeel and Goyvaerts
\cite{CG2011}. The main aim of this paper is to provide new examples
of braided $T$-categories in the sense of Turaev \cite{T2008}.
For this, first we construct a monoidal Hom-Hopf
$T$-coalgebra $\mathcal{MHD}(H)$
and prove that the $T$-category $Rep(\mathcal{MHD}(H))$
of representations of $\mathcal{MHD}(H)$
is isomorphic to $\mathcal {MHYD}(H)$ as braided $T$-categories,
if $H$ is finite-dimensional.
Then we construct a new braided $T$-category
$\mathcal{ZMHYD}(H)$ over $\mathbb{Z},$ generalizing the main
construction by Staic \cite{S2007}.
\\
\vskip 0.3cm
\noindent{\bf Key words}: Monoidal Hom-Hopf algebra; Braided $T$-category;
Diagonal crossed Hom-product; Monoidal Hom-Hopf $T$-coalgebra.
\vskip 0.3cm
{\bf Mathematics Subject Classification:} 16W30.
\end{minipage}
\end{center}
\section*{0. INTRODUCTION}
Braided $T$-categories introduced by Turaev \cite{T2008} are of interest due to their applications in homotopy quantum field theories,
which are generalizations of ordinary topological quantum field theories. As such, they are interesting to different research communities
in mathematical physics (see \cite{FY1989, K2004, T1994, VA2001, VA2005}).
Although Yetter-Drinfeld modules over Hopf algebras provide examples of such braided
$T$-categories, these are rather trivial.
The wish to obtain more interesting homotopy quantum field theories provides a strong
motivation to find new examples of braided $T$-categories.
\\
The aim of this article is to construct new examples of braided $T$-categories isomorphic
to the $T$-category $\mathcal {MHYD}(H)$ in \cite{YW2014}.
For this purpose, we prove that, if $(H,A,H)$ is a Yetter-Drinfeld Hom-datum
(the second $H$ is regarded as an $H$-Hom-bimodule coalgebra) in \cite{YW2014},
with $H$ finite dimensional, then the category $ _{A}\mathcal{MHYD}^{H}(H)$
of Yetter-Drinfeld Hom-modules is isomorphic to the category of left modules
over the diagonal crossed Hom-product $H^{*}\bowtie A$. Then
when $H$ is finite-dimensional we construct a monoidal Hom-Hopf $T$-coalgebra
$\mathcal{MHD}(H)$, and prove that the $T$-category
$Rep(\mathcal{MHD}(H))$ of representations of $\mathcal{MHD}(H)$
is isomorphic to $\mathcal {MHYD}(H)$ as braided $T$-categories.
\\
The article is organized as follows.
\\
We will present the background material in Section 1.
This section contains the relevant definitions on braided
$T$-categories, monoidal Hom-Hopf algebras and monoidal Hom-Hopf
$T$-coalgebras necessary for the understanding of the construction.
In Section 2, we define the notion of a diagonal crossed
Hom-product algebra over a monoidal Hom-Hopf algebra.
And then when $H$ is finite dimensional,
we prove the category $ _{A}\mathcal{MHYD}^{H}(H)$
is isomorphic to the category of left $H^{*}\bowtie A$-modules,
$_{H^{*}\bowtie A}\mathcal{M}$.
\\
In Section 3, when $H$ is finite-dimensional we construct a monoidal Hom-Hopf $T$-coalgebra
$\mathcal{MHD}(H)$, and prove that the $T$-category
$Rep(\mathcal{MHD}(H))$ of representations of $\mathcal{MHD}(H)$
is isomorphic to $\mathcal {MHYD}(H)$ as braided $T$-categories.
In Section 4, we construct a new braided $T$-category
$\mathcal{ZMHYD}(H)$ over $\mathbb{Z},$ generalizing the main
construction by Staic \cite{S2007}.
\section*{1. PRELIMINARIES}
\def\theequation{1. \arabic{equation}}
\setcounter{equation} {0} \hskip\parindent
Throughout, let $k$ be a fixed field. Everything is over $k$ unless
otherwise specified. We
refer the readers to the books of Sweedler \cite{S1969}
for the relevant concepts on the general theory of Hopf
algebras. Let $(C, \Delta )$ be a coalgebra. We use the ``sigma'' notation for
$\Delta $ as follows:
$$
\Delta (c)=\sum c_1\otimes c_2, \,\,\forall c\in C.
$$
\vskip 0.5cm
{\bf 1.1. Braided $T$-categories.}
\vskip 0.5cm
A {\sl monoidal category} ${\cal C}=({\cal C},\mathbb{I},\otimes,a,l,r)$
is a category ${\cal C}$ endowed with a functor
$\otimes: {\cal C}\times{\cal C}\rightarrow{\cal C}$
(the {\sl tensor product}), an object $\mathbb{I}\in {\cal C}$
(the {\sl tensor unit}), and natural isomorphisms $a$
(the {\sl associativity constraint}), where
$a_{U,V,W}:(U\otimes V)\otimes W\rightarrow U\otimes (V\otimes W)$
for all $U,V,W\in {\cal C}$, and $l$ (the {\sl left unit constraint})
where $l_U: \mathbb{I}\otimes U\rightarrow U,\,r$
(the {\sl right unit constraint}) where
$r_{U}:U\otimes\mathbb{I}\rightarrow U$ for all $U\in {\cal C}$,
such that for all $U,V,W,X\in {\cal C},$
the {\sl associativity pentagon}
$a_{U,V,W\otimes X}\circ a_{U\otimes V,W,X}
=(U\otimes a_{V,W,X})\circ a_{U,V\otimes W,X}\circ
(a_{U,V,W}\otimes X)$ and
$(U\otimes l_V)\circ(r_U\otimes V)=a_{U,I,V}$ are satisfied.
A monoidal category ${\cal C}$ is {\sl strict} when all
the constraints are identities.
\\
Let $G$ be a group and let $Aut({\cal C})$ be the group of
invertible strict tensor functors from ${\cal C}$ to itself.
A category ${\cal C}$ over $G$
is called a {\sl crossed category} if it satisfies the following:
\begin{eqnarray*}
&\blacklozenge & {\cal C} \mbox{ is a monoidal category;}\\
&\blacklozenge & {\cal C} \mbox{ is a disjoint union of a family
of subcategories }\{{\cal C}_{\alpha}\}_{\alpha\in
G},\mbox{ and for any }U\in {\cal C}_{\alpha},\\
&&V\in {\cal C}_{\beta}, U\otimes V\in {\cal C}_{\alpha\beta}.
\mbox{ The subcategory }{\cal C}_{\alpha}
\mbox{ is called the }\alpha\mbox{th component of }{\cal C};\\
&\blacklozenge & \mbox{Consider a group homomorphism }
\varphi: G\rightarrow Aut({\cal C}), \beta\mapsto \varphi_{\beta}, \mbox{ and
assume that}\\
&& \varphi_{\beta}(\varphi_{\alpha})
=\varphi_{\beta\alpha\beta^{-1}},
\mbox{ for all }\alpha,\beta\in G.\mbox{ The functors }
\varphi_{\beta} \mbox{ are called conjugation}\\
&&\mbox{ isomorphisms.}
\end{eqnarray*}
Furthermore, ${\cal C}$ is called strict when it is
strict as a monoidal category.
\\
{\sl Left index notation}: Given $\alpha\in G$
and an object $V\in {\cal C}_{\alpha}$, the functor $\varphi_{\alpha}$
will be denoted by ${}^V(\cdot)$, as in Turaev \cite{T2008} or
Zunino \cite{Z2004}, or even ${}^{\alpha}(\cdot)$.
We use the notation ${}^{\overline{V}}(\cdot)$
for ${}^{\alpha^{-1}}(\cdot)$. Then we have
${}^V id_U=id_{{}^VU}$ and
${}^V(g\circ f)={}^Vg\circ {}^Vf$.
Since the conjugation $\varphi: G\rightarrow Aut({\cal C})$ is a
group homomorphism, for all $V, W\in {\cal C}$, we have ${}^{V\otimes W}(\cdot)
={}^V({}^W(\cdot))$ and ${}^{\mathbb{I}}(\cdot)={}^V({}^{\overline{V}}(\cdot))
={}^{\overline{V}}({}^V(\cdot))=id_{\cal C}$. Since, for all
$V\in {\cal C}$, the functor ${}^V(\cdot)$ is strict, we have
${}^V(f\otimes g)={}^Vf\otimes {}^Vg$, for any morphisms $f$ and $g$ in ${\cal C}$,
and ${}^V\mathbb{I}=\mathbb{I}$.
\\
A {\sl braiding} of a crossed category ${\cal C}$ is
a family of isomorphisms $(c=c_{U,V})_{U,V}\in {\cal C}$,
where $c_{U,V}: U\otimes V\rightarrow {}^UV\otimes U$,
satisfying the following conditions:
\begin{itemize}
\item [(1)] For any arrow $f\in {\cal C}_{\alpha}(U, U')$ and
$g\in {\cal C}(V, V')$,
$$
(({}^{\alpha}g)\otimes f)\circ c_{U, V}=c_{U', V'}\circ (f\otimes g).
$$
\item [(2)]
For all $U, V, W\in {\cal C},$ we have
\begin{eqnarray*}
&& c_{U\otimes V, W}=a_{{}^{U\otimes V}W, U, V}\circ (c_{U, {}^VW}\otimes
id_V)\circ a^{-1}_{U, {}^VW, V}\circ (\iota_U\otimes c_{V, W})
\circ a_{U, V, W},\\
&& c_{U, V\otimes W}=a^{-1}_{{}^UV, {}^UW, U}
\circ (\iota_{({}^UV)}\otimes c_{U, W})\circ a_{{}^UV, U, W}\circ
(c_{U, V}\otimes \iota_W)\circ a^{-1}_{U, V, W},
\end{eqnarray*}
where $a$ is the natural isomorphism in the tensor category
${\cal C}$.
\item [(3)] For all $U, V\in {\cal C}$ and $\beta\in G$,
$$ \varphi_{\beta}(c_{U, V})=c_{\varphi_{\beta}(U), \varphi_{\beta}(V)}. $$
\end{itemize}
A crossed category endowed with a braiding is called
a {\sl braided $T$-category}.
\\
{\bf 1.2. Monoidal Hom-Hopf algebras.}
\vskip 0.5cm
Let $\mathcal{M}_{k}=(\mathcal{M}_{k},\otimes,k,a,l,r)$
denote the usual monoidal category of $k$-vector spaces and linear maps between them.
Recall from \cite{CG2011}
that there is the {\it monoidal Hom-category} $\widetilde{\mathcal{H}}(\mathcal{M}_{k})=
(\mathcal{H}(\mathcal{M}_{k}),\,\otimes,\,(k,\,id),
\,\widetilde{a},\,\widetilde{l},\,\widetilde{r})$, a new monoidal category,
associated with $\mathcal{M}_{k}$ as follows:
\begin{itemize}
\item The objects of the monoidal category
$\mathcal{H}(\mathcal{M}_{k})$ are couples
$(M,\xi_{M})$, where $M\in \mathcal{M}_{k}$ and $\xi_{M}\in Aut_k(M)$, the set of
all $k$-linear automorphisms of $M$;
\item The morphism $f:(M,\xi_{M})\rightarrow (N,\xi_{N})$ in $\mathcal{H}(\mathcal{M}_{k})$
is the $k$-linear map $f: M\rightarrow N$ in $\mathcal{M}_{k}$
satisfying $\xi_{N}\circ f = f\circ \xi_{M}$, for any two objects
$(M,\xi_{M}),(N,\xi_{N})\in \mathcal{H}(\mathcal{M}_{k})$;
\item The tensor product is given by
$$
(M,\xi_{M})\otimes (N,\xi_{N})=(M\otimes N,\xi_{M}\otimes\xi_{N})
$$
for any $(M,\xi_{M}),(N,\xi_{N})\in \mathcal{H}(\mathcal{M}_{k})$;
\item The tensor unit is given by $(k, id)$;
\item The associativity constraint $\widetilde{a}$
is given by the formula
$$
\widetilde{a}_{M,N,L}=a_{M,N,L}\circ((\xi_{M}\otimes id)\otimes
\xi_{L}^{-1})=(\xi_{M}\otimes(id\otimes\xi_{L}^{-1}))\circ a_{M,N,L},
$$
for any objects
$(M,\xi_{M}),(N,\xi_{N}),(L,\xi_{L})\in \widetilde{\mathcal{H}}(\mathcal{M}_{k})$;
\item The left and right unit constraints
$\widetilde{l}$ and $\widetilde{r}$ are given by
$$
\widetilde{l}_M=\xi_{M}\circ l_M=l_M\circ(id\otimes\xi_{M}),\, \quad
\widetilde{r}_M =\xi_{M}\circ r_M=r_M\circ(\xi_{M}\otimes id)
$$
for all $(M,\xi_{M})\in \widetilde{\mathcal{H}}(\mathcal{M}_{k})$.
\end{itemize}
We now recall from \cite{CG2011} the following notions used later.
\\
{\it Definition 1.2.1.} Let $\widetilde{\mathcal{H}}(\mathcal{M}_{k})$
be a monoidal Hom-category. A {\it monoidal Hom-algebra} is an object
$(A,\xi_{A})$ in $\widetilde{\mathcal{H}}(\mathcal{M}_{k})$ together
with an element $1_A\in A$ and linear maps
$$m:A\otimes A\rightarrow A;\,\,a\otimes b\mapsto ab, \,\,\,\xi_{A}\in Aut_k(A)$$
such that
\begin{eqnarray}
\xi_{A}(ab)=\xi_{A}(a)\xi_{A}(b),&& \xi_{A}(1_A)=1_A,\label{eq1}\\
\xi_{A}(a)(bc)=(ab)\xi_{A}(c),&& a1_A=1_Aa=\xi_{A}(a),\label{eq2}
\end{eqnarray}
for all $a,b,c\in A.$
\\
{\it Definition 1.2.2.} A {\it monoidal Hom-coalgebra} is
an object $(C,\xi_{C})$ in the category
$\widetilde{\mathcal{H}}(\mathcal{M}_{k})$
together with linear maps
$\Delta:C\rightarrow C\otimes C,\,\Delta(c)=c_1\otimes c_2$ and
$\varepsilon:C\rightarrow k$ such that
\begin{eqnarray}
\Delta(\xi_{C}(c))=\xi_{C}(c_1)\otimes\xi_{C}(c_2),
&& \varepsilon(\xi_{C}(c))=\varepsilon(c),\label{eq3}\\
\xi_{C}^{-1}(c_1)\otimes\Delta(c_2)=\Delta(c_1)\otimes\xi_{C}^{-1}(c_2),
&& c_1\varepsilon(c_2)=\xi_{C}^{-1}(c)=\varepsilon(c_1)c_2,\label{eq4}
\end{eqnarray}
for all $c\in C.$
\\
{\bf Remark 1.2.3.} (1) Note that (1.4) is equivalent to
$c_1\otimes c_{21}\otimes \xi_{C}(c_{22})=\xi_{C}(c_{11})\otimes c_{12}\otimes c_2.$
Analogously to monoidal Hom-algebras, monoidal Hom-coalgebras
will be short for counital monoidal Hom-coassociative coalgebras
without any confusion.
(2) Let $(C,\xi_{C})$ and $(C',\xi_{C}')$ be two monoidal Hom-coalgebras.
A monoidal Hom-coalgebra map $f:(C,\xi_{C})\rightarrow(C',\xi_{C}')$
is a linear map such that $f\circ \xi_{C}=\xi_{C}'\circ f$, $\Delta\circ f=(f\otimes f)\circ\Delta$
and $\varepsilon'\circ f=\varepsilon.$
\\
{\it Definition 1.2.4.} A {\it monoidal Hom-Hopf algebra} $H=(H,\xi_{H},m,1_H,\Delta,\varepsilon,S)$
is a bialgebra with $S$ in $\widetilde{\mathcal{H}}(\mathcal{M}_{k}).$
This means that $(H,\xi_{H},m,1_H)$ is a monoidal Hom-algebra and
$(H,\xi_{H},\Delta,\varepsilon)$ is
a monoidal Hom-coalgebra such that $\Delta$ and $\varepsilon$
are morphisms of algebras,
that is, for all $h,g\in H,$
$$\Delta(hg)=\Delta(h)\Delta(g),\, \,\, \Delta(1_H)=1_H\otimes1_H,\,\,\,\,\,\,
\varepsilon(hg)=\varepsilon(h)\varepsilon(g), \,\,\,\,\,\varepsilon(1_H)=1.$$
$S$ is the convolution inverse of the identity morphism $id_H$
(i.e., $S*id=1_H\circ \varepsilon=id*S$). Explicitly, for all $h\in H$,
$$
S(h_1)h_2=\varepsilon(h)1_H=h_1S(h_2).
$$
{\bf Remark 1.2.5.} (1) Note that a monoidal Hom-Hopf algebra is
by definition a Hopf algebra in $\widetilde{\mathcal{H}}(\mathcal{M}_{k})$.
(2) Furthermore, the antipode of a monoidal Hom-Hopf algebra has
almost all the properties of the antipode of a Hopf algebra, such as
$$S(hg)=S(g)S(h),\,\,\,\, S(1_H)=1_H,\,\,\,\,
\Delta(S(h))=S(h_2)\otimes S(h_1),\,\,\,\,\,\,\varepsilon\circ S=\varepsilon.$$
That is, $S$ is a monoidal Hom-anti-(co)algebra homomorphism.
Since $\xi_{H}$ is bijective and commutes with $S$,
we also have that the inverse $\xi_{H}^{-1}$ commutes with $S$,
that is, $S\circ \xi_{H}^{-1}= \xi_{H}^{-1}\circ S.$
\\
In the following, we recall the notions of actions on monoidal
Hom-algebras and coactions on monoidal Hom-coalgebras.
\\
Let $(A,\xi_{A})$ be a monoidal Hom-algebra.
A {\it left $(A,\xi_{A})$-Hom-module} consists of
an object $(M,\xi_{M})$ in $\widetilde{\mathcal{H}}(\mathcal{M}_{k})$
together with a morphism
$\psi:A\otimes M\rightarrow M,\,\psi(a\otimes m)=a\cdot m$, such that
$$\xi_{A}(a)\cdot(b\cdot m)=(ab)\cdot\xi_{M}(m),\,\,
\,\,\xi_{M}(a\cdot m)=\xi_{A}(a)\cdot\xi_{M}(m),\,\,
\,\,1_A\cdot m=\xi_{M}(m),$$
for all $a,b\in A$ and $m\in M.$
\\
A monoidal Hom-algebra $(A,\xi_{A})$ can be
considered as a Hom-module over itself by the Hom-multiplication.
Let $(M,\xi_{M})$ and $(N,\xi_{N})$ be two left $(A,\xi_{A})$-Hom-modules.
A morphism $f:M\rightarrow N$ is called left
$(A,\xi_{A})$-linear if
$f(a\cdot m)=a\cdot f(m)$ and $f\circ \xi_{M}= \xi_{N}\circ f.$
We denote the category of left $(A,\xi_{A})$-Hom-modules by
$\widetilde{\mathcal{H}}(_{A}\mathcal{M}_{k})$.
\\
Similarly, let $(C,\xi_{C})$ be a monoidal Hom-coalgebra.
A {\it right $(C,\xi_{C})$-Hom-comodule} is an object
$(M,\xi_{M})$ in $\widetilde{\mathcal{H}}(\mathcal{M}_{k})$
together with a $k$-linear map
$\rho_M:M\rightarrow M\otimes C,\,\rho_M(m)=m_{(0)}\otimes m_{(1)}$, such that
\begin{equation}
\xi_{M}^{-1}(m_{(0)})\otimes \Delta_C(m_{(1)})
=(m_{(0)(0)}\otimes m_{(0)(1)})\otimes \xi_{C}^{-1}(m_{(1)}),\label{eq5}
\end{equation}
\begin{equation}\label{eq6}
\rho_M(\xi_{M}(m))=\xi_{M}(m_{(0)})\otimes\xi_{C}(m_{(1)}),
\ \ \
m_{(0)}\varepsilon(m_{(1)})=\xi_{M}^{-1}(m),
\end{equation}
for all $m\in M.$
\\
$(C,\xi_{C})$ is a Hom-comodule over itself via the Hom-comultiplication.
Let $(M,\xi_{M})$ and $(N,\xi_{N})$ be two right $(C,\xi_{C})$-Hom-comodules.
A morphism $g:M\rightarrow N$ is called right $(C,\xi_{C})$-colinear
if $g\circ \xi_{M}=\xi_{N}\circ g$ and
$g(m_{(0)})\otimes m_{(1)}=g(m)_{(0)}\otimes g(m)_{(1)}.$
The category of right
$(C,\xi_{C})$-Hom-comodules is denoted by
$\widetilde{\mathcal{H}}(\mathcal{M}^C)$.
\\
{\it Definition 1.2.6.} Let $(H,m,\Delta,S,\xi_{H})$ be a monoidal Hom-bialgebra
and $\alpha, \beta\in {\sl Aut}_{mHH}(H)$. Recall from \cite{YW2014}
that a {\it left-right $(\alpha, \beta)$-Yetter-Drinfeld Hom-module} over $(H,\xi_{H})$
is an object $(M,\cdot,\rho,\xi_{M})$ which
is both in $\widetilde{\mathcal{H}}(_{H}\mathcal{M})$ and $\widetilde{\mathcal{H}}(\mathcal{M}^{H})$,
obeying the compatibility condition:
\begin{equation}\label{eq7}
\rho(h\cdot m)=\xi_{H}(h_{21})\cdot m_{0}\otimes (\beta(h_{22})\xi_{H}^{-1}(m_{1}))\alpha(S^{-1}(h_{1})).
\end{equation}
{\bf Remark 1.2.7.} (1) The category of all left-right $(\alpha,\beta)$-Yetter-Drinfeld Hom-modules
is denoted by $_{H}\mathcal{MHYD}^{H}(\alpha, \beta)$, with the understood morphisms.
(2) If $(H,\xi_{H})$ is a monoidal Hom-Hopf algebra with a bijective
antipode $S$ and $S$ commutes with $\alpha,\beta$, then the above equality is equivalent to
\begin{equation}\label{eq8}
h_{1}\cdot m_{0}\otimes \beta(h_{2})m_{1}=\xi_{M}((h_{2}\cdot\xi_{M}^{-1}(m))_{0}) \otimes (h_{2}\cdot
\xi_{M}^{-1}(m))_{1}\alpha(h_{1}),
\end{equation}
for all $h\in H$ and $m\in M$.
(3) If $(M,\xi_{M})\in {_{H}}\mathcal{MHYD}^{H}(\alpha,\beta)$
and $(N,\xi_{N})\in {_{H}}\mathcal{MHYD}^{H}(\gamma,\delta)$, with
$\alpha,\beta,\gamma,\delta\in {\sl Aut}_{mHH}(H)$, then $(M\otimes N,\xi_{M}\otimes\xi_{N})
\in {_{H}}\mathcal{MHYD}^{H}(\alpha\gamma, \delta\gamma^{-1}\beta\gamma)$ with structures as follows:
\begin{eqnarray}
h\cdot (m\otimes n) &=& \gamma(h_{1})\cdot m\otimes \gamma^{-1}\beta\gamma(h_{2})\cdot n,\label{eq12}\\
m\otimes n &\mapsto & (m_{0}\otimes n_{0})\otimes n_{1}m_{1},\label{eq13}
\end{eqnarray}
for all $m\in M,n\in N$ and $h\in H.$
\\
{\it Definition 1.2.8.} Let $(H,\xi_{H})$ be a monoidal Hom-algebra. A monoidal Hom-algebra
$(A,\xi_{A})$ is called an {\it $(H,\xi_{H})$-Hom-bicomodule
algebra} in \cite{YW2014}, with Hom-comodule maps $\rho_l$ and $\rho_r$
obeying the following axioms:
\begin{itemize}
\item [(1)] $\rho_l: A\rightarrow H\otimes A,\ \rho_l(a) = a_{[-1]}\otimes a_{[0]}$, and
$\rho_r: A\rightarrow A\otimes H,\ \rho_r(a)= a_{<0>}\otimes a_{<1>},$
\item [(2)] $\rho_l$ and $\rho_r$ satisfy the following compatibility condition:
for all $a\in A,$
\begin{eqnarray}\label{eq9}
a_{<0>[-1]}\otimes a_{<0>[0]}\otimes \xi_{H}^{-1}(a_{<1>})
= \xi_{H}^{-1}(a_{[-1]})\otimes a_{[0]<0>}\otimes a_{[0]<1>}.
\end{eqnarray}
\end{itemize}
{\it Definition 1.2.9.} Let $(H,\xi_{H})$ be a monoidal Hom-Hopf algebra and
$(A,\xi_{A})$ be an $H$-Hom-bicomodule algebra.
We consider {\it the Yetter-Drinfeld Hom-datum $(H,A,H)$} as in \cite{YW2014}
(the second $H$ is regarded as an $H$-Hom-bimodule coalgebra),
and {\it the Yetter-Drinfeld Hom-module category $_{A}\mathcal{MHYD}^{H}(H)$,}
whose objects are $k$-modules $(M,\xi_{M})$ with the following additional structure:
\begin{itemize}
\item [(1)] $M$ is a left $A$-module;
\item [(2)] we have a $k$-linear map $\rho_{M}:M\rightarrow M\otimes H,\ \rho_{M}(m)=m_{0}\otimes m_{1},$
\item [(3)] the following compatibility conditions hold:
\begin{eqnarray}
(a\cdot m)_{0}\otimes(a\cdot m)_{1}
=\xi_{A}(a_{[0]<0>})\cdot m_{0}
\otimes( a_{[0]<1>}\xi_{H}^{-1}(m_{1}))S^{-1}(a_{[-1]}),&& \label{eq10}\\
a_{<0>}\cdot m_{0}\otimes a_{<1>} m_{1}
=\xi_{M}((a_{[0]}\cdot \xi_{M}^{-1}(m))_{0})
\otimes(a_{[0]}\cdot \xi_{M}^{-1}(m))_{1}a_{[-1]}, &&\label{eq11}\ \
\end{eqnarray}
\end{itemize}
for all $a\in A$ and $m\in M$. \\
{\it Definition 1.2.10.} Let $(A, \xi_A)$ be a monoidal Hom-algebra and
$(M,\xi_{M})$ be a monoidal Hom-algebra. Assume that $(M, \xi_M)$ is both a left and a right
$A$-module algebra (with actions denoted by $A\otimes M\rightarrow M$, $a\otimes m\mapsto a\cdot m$, and $M\otimes A
\rightarrow M$, $m\otimes a\mapsto m\cdot a$). We call $(M, \xi_M)$ an {\em $A$-bimodule} as in
\cite{FP2014} if the following condition is satisfied, for all $a, a'\in A$, $m\in M$:
\begin{eqnarray}\label{bimodule}
&&\xi_A(a)\cdot (m\cdot a')=(a\cdot m)\cdot \xi_A(a').
\label{hombimodule}
\end{eqnarray}
{\bf 1.3. Monoidal Hom-Hopf $T$-coalgebras.}
\vskip 0.5cm
{\it Definition 1.3.1.} Let $G$ be a group with unit $1$. Then we recall from Yang Tao \cite{Y2014}
that a {\it monoidal Hom $T$-coalgebra}
$(C, \xi_{C})$ over $G$ is a family of objects $\{(C_{p}, \xi_{C_{p}})\}_{p\in G}$
in $\widetilde{\mathcal{H}}(\mathcal{M}_{k})$ together with linear maps
$\Delta_{p, q}: C_{pq}\longrightarrow C_{p}\otimes C_{q},\ c_{pq}\mapsto c_{(1, p)}\otimes c_{(2, q)}$
and $\varepsilon: C_{e}\longrightarrow k$ such that
\begin{eqnarray*}
\xi_{C_{p}}^{-1}(c_{(1, p)}) \otimes \Delta_{q, r}(c_{(2, qr)}) = \Delta_{p, q}(c_{(1, pq)}) \otimes \xi_{C_{r}}^{-1}(c_{(2, r)}),
&& \forall c\in C_{pqr}, \\
c_{(1, p)}\varepsilon(c_{(2, e)})=\varepsilon(c_{(1, e)})c_{(2, p)}=\xi_{C_{p}}^{-1}(c), && \forall c\in C_{p}, \\
\Delta_{p, q}(\xi_{C_{pq}}^{-1}(c_{pq}))=\xi_{C_{p}}^{-1}(c_{(1, p)}) \otimes \xi_{C_{q}}^{-1}(c_{(2, q)}), && \forall c\in C_{pq}, \\
\varepsilon(\xi_{C_{e}}^{-1}(c))=\varepsilon(c), && \forall c\in C_{e}.
\end{eqnarray*}
Let $(C, \xi_{C})$ and $(C', \xi_{C}')$ be two monoidal Hom $T$-coalgebras over $G$.
A Hom-coalgebra map $f: (C, \xi_{C})\longrightarrow(C', \xi_{C}')$
is a family of linear maps $\{f_{p}\}_{p\in G}$, $f_{p}: (C_{p}, \xi_{C_{p}})\longrightarrow(C'_{p}, \xi_{C_{p}}')$
such that $f_{p}\circ \xi_{C_{p}} = \xi_{C_{p}}'\circ f_{p}$, $\Delta_{p, q}\circ f_{pq} = (f_{p}\otimes f_{q})\Delta_{p, q}$ and $\varepsilon\circ f_{e}= \varepsilon$.
\\
{\it Definition 1.3.2.} A {\it monoidal Hom-Hopf $T$-coalgebra} $(H=\bigoplus_{p\in G}H_{p},
\xi= \{\xi_{H_{p}}\}_{p\in G})$
is a monoidal Hom $T$-coalgebra
where each $(H_{p}, \xi_{H_{p}})$ is a monoidal Hom-algebra with multiplication $m_{p}$ and unit $1_{p}$,
endowed with an antipode $S=\{S_{p}\}_{p\in G}$,
$S_{p}: H_{p}\longrightarrow H_{p^{-1}}\in \widetilde{\mathcal{H}}(\mathcal{M}_{k})$, such that
\begin{eqnarray*}
\Delta_{p, q}(hg) = \Delta_{p, q}(h)\Delta_{p, q}(g), \quad \Delta_{p, q}(1_{pq}) = 1_{p}\otimes 1_{q}, && \forall h, g\in H_{pq},\\
\varepsilon(hg)=\varepsilon(h)\varepsilon(g), \qquad \varepsilon(1_{e})=1_{k}, && \forall h, g\in H_{e},\\
S_{p^{-1}}(h_{(1, p^{-1})})h_{(2, p)} = \varepsilon(h)1_{p} = h_{(1, p)}S_{p^{-1}}(h_{(2, p^{-1})}), && \forall h\in H_{e}.
\end{eqnarray*}
Note that $(H_{e}, \xi_{e}, m_{e}, 1_{e}, \Delta_{e, e}, \varepsilon, S_{e})$
is a monoidal Hom-Hopf algebra in the usual sense of the word.
We call it the neutral component of $H$.
\\
{\it Definition 1.3.3.} A monoidal Hom-Hopf $T$-coalgebra $(H=\bigoplus_{p\in G}H_{p},
\xi= \{\xi_{H_{p}}\}_{p\in G})$ is called a {\it crossed monoidal Hom-Hopf
$T$-coalgebra} if it is endowed with a family of
algebra isomorphisms $\varphi= \{\varphi_{\beta}^{\alpha}:
H_{\alpha}\rightarrow H_{\beta\alpha\beta^{-1}}\}_{\alpha,\beta\in G}$
such that
\begin{itemize}
\item each $\varphi_{\gamma}$ preserves the comultiplication and the counit, i.e.,
for any $\alpha,\beta,\gamma\in G,$
we have $\Delta_{\gamma\alpha\gamma^{-1},\gamma\beta\gamma^{-1}}
\circ\varphi_{\gamma}=(\varphi_{\gamma}\otimes\varphi_{\gamma})
\circ\Delta_{\alpha,\beta}$ and $\varepsilon\circ\varphi_{\gamma}=\varepsilon;$
\item $\varphi$ is multiplicative, i.e.,
$\varphi_{\beta}\circ\varphi_{\gamma}=\varphi_{\beta\gamma},$
for any $\beta,\gamma\in G.$
\end{itemize}
It is easy to get the following identities:
$\varphi_{1}|_{H_{\alpha}}=id_{H_{\alpha}}$ and $\varphi_{\alpha}^{-1}=\varphi_{\alpha^{-1}},$
for all $\alpha\in G.$
Moreover, $\varphi$ preserves the antipode, i.e.,
$\varphi_{\beta}\circ S_{\alpha}
=S_{\beta\alpha\beta^{-1}}\circ\varphi_{\beta}$
for all $\alpha,\beta\in G$.
\section*{2. THE DIAGONAL CROSSED HOM-PRODUCT}
\def\theequation{2.\arabic{equation}}
\setcounter{equation}{0} \hskip\parindent
In this section, we define the notion of the diagonal crossed
Hom-product over a monoidal Hom-Hopf algebra, which
is based on Hom-associative left and right coactions.
If $H$ is finite dimensional, we prove that the category $_{A}\mathcal{MHYD}^{H}(H)$
is isomorphic to the category of left $H^{*}\bowtie A$-modules,
$_{H^{*}\bowtie A}\mathcal{M}$, generalizing the results in \cite{DPV2006}.
\\
In what follows, let $(H,\xi_{H})$ be a monoidal Hom-Hopf algebra with
bijective antipode $S$ and let ${\sl Aut}_{mHH}(H)$ denote the set
of all automorphisms of a monoidal Hom-Hopf algebra $H$.
\\
{\bf Definition 2.1.} Let $(H,\xi_{H})$ be a finite dimensional monoidal Hom-Hopf algebra and
$(A,\xi_{A})$ be a monoidal Hom-bicomodule algebra. Then the diagonal crossed Hom-product
$H^{*}\bowtie A$ is defined as follows:
\begin{itemize}
\item [--] as $k$-spaces, $H^{*}\bowtie A=H^{*}\otimes A$;
\item [--] the multiplication is given by
\begin{eqnarray}\label{2.2}
(f\bowtie a)(g\bowtie b) = f(a_{[-1]}\rightharpoonup
(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(a_{[0]_{<1>}})))\bowtie \xi_{A}^{2}(a_{[0]_{<0>}})b,
\end{eqnarray}
where
\begin{eqnarray}\label{2.1}
h\rightharpoonup f = \langle f_{2},\xi_{H}^{-1}(h)\rangle\xi_{H}^{*-2}(f_{1})\ \
\mbox{ and } \ \ f\leftharpoonup h = \langle f_{1},\xi_{H}^{-1}(h)\rangle\xi_{H}^{*-2}(f_{2}),
\end{eqnarray}
\end{itemize}
for all $a,b\in (A,\xi_{A}),\ f,g\in (H^{*},\xi_{H}^{*-1}),\ h\in (H,\xi_{H}).$
\\
{\bf Proposition 2.2.} Let $(A,\xi_{A})$ be an $(H,\xi_{H})$-Hom-bicomodule algebra
and $(H^{*},\xi_{H}^{*-1})$ be an $(H,\xi_{H})$-Hom-bimodule algebra. Then the tensor space
$H^{*}\otimes A$ is a Hom-algebra with the multiplication given by formula (\ref{2.2})
and the unit $\varepsilon_{H}\bowtie 1_{A}.$
\\
{\bf Proof.} It is obvious that
$(\varepsilon_{H}\bowtie 1_{A})(f\bowtie a)=\xi_{H}^{*-1}(f)\bowtie \xi_{A}(a),$
so $\varepsilon_{H}\bowtie 1_{A}$ is the unit element. We have:
\begin{eqnarray*}
&&[(f\bowtie a)(g\bowtie b)]\xi_{H^*\bowtie A}(\phi\bowtie c)\\
&=&[f(a_{[-1]}\rightharpoonup(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(a_{[0]_{<1>}})))]
((\xi_{A}^{2}(a_{[0]_{<0>}})b)_{[-1]}\rightharpoonup(\xi_{H}^{*}(\phi)\\
&&\leftharpoonup S^{-1}((\xi_{A}^{2}(a_{[0]_{<0>}})b)_{[0]_{<1>}})))
\bowtie\xi_{A}^{2}((\xi_{A}^{2}(a_{[0]_{<0>}})b)_{[0]_{<0>}})\xi_{A}(c)\\
&=&\xi_{H}^{*-1}(f)[(a_{[-1]}\rightharpoonup(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(a_{[0]_{<1>}})))
\xi_{H}^{*}((\xi_{H}^{2}(a_{[0]_{<0>_{[-1]}}})b_{[-1]})\rightharpoonup(\xi_{H}^{*}(\phi)\\
&&\leftharpoonup S^{-1}(\xi_{H}^{2}(a_{[0]_{<0>_{[0]_{<1>}}}})b_{[0]_{<1>}})))]
\bowtie\xi_{A}^{5}(a_{[0]_{<0>_{[0]_{<0>}}}})(\xi_{A}^{2}(b_{[0]_{<0>}})c)\\
&=&\xi_{H}^{*-1}(f)[(a_{[-1]}\rightharpoonup(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(a_{[0]_{<1>}})))
\langle\xi_{H}^{*-1}(\phi_{221}),\xi_{H}(a_{[0]_{<0>_{[-1]}}})\rangle \\
&& \langle\xi_{H}^{*-1}(\phi_{222}),\xi_{H}^{-1}(b_{[-1]})\rangle
\langle\xi_{H}^{*}(\phi_{11}),S^{-1}\xi_{H}^{-1}(b_{[0]_{<1>}})\rangle
\langle\xi_{H}^{*}(\phi_{12}),S^{-1}\xi_{H}(a_{[0]_{<0>_{[0]_{<1>}}}})\rangle\\
&& \xi_{H}^{*-2}(\phi_{21})]\bowtie\xi_{A}^{5}(a_{[0]_{<0>_{[0]_{<0>}}}})(\xi_{A}^{2}(b_{[0]_{<0>}})c)\\
&=&\xi_{H}^{*-1}(f)[(a_{[-1]}\rightharpoonup(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(a_{[0]_{<1>}})))
\langle\xi_{H}^{*-2}(\phi_{2122}),\xi_{H}(a_{[0]_{<0>_{[-1]}}})\rangle\\
&& \langle\phi_{22},\xi_{H}^{-1}(b_{[-1]})\rangle\langle\xi_{H}^{*2}(\phi_{1}),S^{-1}\xi_{H}^{-1}(b_{[0]_{<1>}})\rangle
\langle\phi_{211},S^{-1}\xi_{H}(a_{[0]_{<0>_{[0]_{<1>}}}})\rangle\\
&& \xi_{H}^{*-4}(\phi_{2121})]\bowtie\xi_{A}^{5}(a_{[0]_{<0>_{[0]_{<0>}}}})
(\xi_{A}^{2}(b_{[0]_{<0>}})c)\\
&=&\xi_{H}^{*-1}(f)[(a_{[-1]}\rightharpoonup(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(a_{[0]_{<1>}})))
(\xi_{H}^{2}(a_{[0]_{<0>_{[-1]}}})\rightharpoonup(\xi_{H}^{*2}(b_{[-1]}\rightharpoonup(\xi_{H}^{*2}(\phi)\\
&&\leftharpoonup S^{-1}(b_{[0]_{<1>}})))\leftharpoonup S^{-1}\xi_{H}^{2}(a_{[0]_{<0>_{[0]_{<1>}}}})))]
\bowtie\xi_{A}^{5}(a_{[0]_{<0>_{[0]_{<0>}}}})(\xi_{A}^{2}(b_{[0]_{<0>}})c)\\
&=&\xi_{H}^{*-1}(f)[(\xi_{H}(a_{[-1]_{1}})\rightharpoonup(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}\xi_{H}(a_{[0]_{<1>2}})))
(\xi_{H}(a_{[-1]2})\rightharpoonup(\xi_{H}^{*2}(b_{[-1]}\rightharpoonup(\xi_{H}^{*2}(\phi)\\
&&\leftharpoonup S^{-1}(b_{[0]_{<1>}})))\leftharpoonup S^{-1}\xi_{H}(a_{[0]_{<1>1}})))]
\bowtie\xi_{A}^{3}(a_{[0]_{<0>}})(\xi_{A}^{2}(b_{[0]_{<0>}})c)\\
&=&\xi_{H^*\bowtie A}(f\bowtie a)[(g\bowtie b)(\phi\bowtie c)].
\end{eqnarray*}
Thus the multiplication is Hom-associative. This completes the proof.
$\blacksquare$
\\
{\bf Example 2.3.} (1) If $(A,\xi_{A})=(H,\xi_{H})$ and $\rho_l=\rho_r=\Delta$,
the formula (\ref{2.2}) coincides with the multiplication in the Drinfeld double $(D(H),\xi_{H}^{*-1}\otimes\xi_{H})=(H^{*cop}\bowtie H,\xi_{H}^{*-1}\otimes\xi_{H})$, i.e.,
\begin{eqnarray}\label{2.3}
(f\bowtie h)(g\bowtie l) = f(h_{1}\rightharpoonup
(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(h_{22})))\bowtie \xi_{H}^{2}(h_{21})l,
\end{eqnarray}
for all $f,g\in H^{*}$ and $h,l\in H.$
\\
(2) Recall from Example 2.5 in \cite{YW2014}
that $\alpha,\beta\in Aut_{mHH}(H)$ and,
as $k$-vector spaces, $(H(\alpha,\beta),\xi_{H})=(H,\xi_{H})$,
and $(H(\alpha,\beta),\xi_{H})\in\,\, _{H}\mathcal{MHYD}^{H}(\alpha, \beta)$,
with right $H$-Hom-comodule structure via the Hom-comultiplication
and left $H$-Hom-module structure given by:
$$
h\cdot x=(\beta(h_2)\xi_{H}^{-1}(x))\alpha(S^{-1}(\xi_{H}(h_1))),
$$
for all $h,\,x\in H.$
The diagonal crossed product
$(A(\alpha,\beta),\xi_{H}^{*-1}\otimes \xi_{H})=(H^{*}\bowtie H(\alpha,\beta),\xi_{H}^{*-1}\otimes \xi_{H})$
has multiplication
\begin{eqnarray}\label{2.10}
(f\bowtie h)(g\bowtie l) = f(\alpha(h_{1})\rightharpoonup
(\xi_{H}^{*2}(g)\leftharpoonup S^{-1}(\beta(h_{22}))))\bowtie \xi_{H}^{2}(h_{21})l,
\end{eqnarray}
for all $f,g\in H^{*}$ and $h,l\in H$.
\\
The Drinfeld double $D(H)$ is a Hom-Hopf algebra with coproduct
$\Delta_{D(H)}$ given by
\begin{eqnarray}\label{2.4}
\Delta_{D(H)}(f\bowtie h) = (f_{2}\bowtie h_{1})\otimes(f_{1}\bowtie h_{2}),
\end{eqnarray}
for all $f\in H^{*}$ and $h\in H.$
\\
{\betaf Proposition 2.4.} Let $(A,\xi_{A})$ be an $(H,\xi_{H})$-Hom-bicomodule
algebra. Then $H^{*}\betaowtie A$ is a $D(H)$-Hom-bicomodule algebra
with two coactions $\rightharpoonup oho_{r_{D(H)}}: H^*\betaowtie A\rightharpoonup oightarrow (H^*\betaowtie A)\otimes D(H)$
and $\rightharpoonup oho_{l_{D(H)}}:H^*\betaowtie A\rightharpoonup oightarrow D(H)\otimes(H^*\betaowtie A)$ given by
\betaegin{eqnarray*}
\rightharpoonup oho_{r_{D(H)}}(f\betaowtie a)&=& (f_{2}\betaowtie a_{<0>})\otimes (f_{1}\otimes a_{<1>}),\\
\rightharpoonup oho_{l_{D(H)}}(f\betaowtie a)&=& (f_{2}\betaowtie a_{[-1]})\otimes (f_{1}\otimes a_{[0]}),
\etand{eqnarray*}
where elements in $D(H)$ are written as $(f\otimes h),h\iotan H, f\iotan H^{*},a\iotan A$.
\\
{\betaf Proof.} In view of (\rightharpoonup oef{2.4}) the comodule axioms
and the Hom-coassociative (\rightharpoonup oef{eq9}) are obvious.
We are left to prove that $\rightharpoonup oho_{r_D(H)}$ and $\rightharpoonup oho_{l_D(H)}$
are Hom-algebra maps. To this end we use the following identities obviously
holding for all $f\iotan H^{*},h,l\iotan H$
\betaegin{eqnarray}\lambdaabel{2.5}
\rightharpoonup oho(h\rightharpoonup oightharpoonup(f\lambdaeftharpoonup l))
= (\xi_{H}^{*-1}(f_{1})\lambdaeftharpoonup l)\otimes
(\xi_{H}^{-1}(h)\rightharpoonup oightharpoonup\xi_{H}^{*-1}(f_{2}) ),
\etand{eqnarray}
With this we now compute
\begin{eqnarray*}
&&\rho_{r_{D(H)}}(f\bowtie a)\rho_{r_{D(H)}}(g\bowtie b)\\
&=&[(f_{2}\bowtie a_{<0>})\otimes(f_{1}\otimes a_{<1>})]
[(g_{2}\bowtie b_{<0>})\otimes(g_{1}\otimes b_{<1>})]\\
&=&(f_{2}(a_{<0>_{[-1]}}\rightharpoonup(\xi_{H}^{*2}(g_{2})\leftharpoonup
S^{-1}(a_{<0>_{[0]_{<1>}}})))\bowtie\xi_{A}^{2}(a_{<0>_{[0]_{<0>}}})b_{<0>})\\
&&\otimes (f_{1}(a_{<1>1}\rightharpoonup(\xi_{H}^{*2}(g_{1})\leftharpoonup
S^{-1}(a_{<1>22})))\otimes\xi_{H}^{2}(a_{<1>21})b_{<1>})\\
&=&(f_{2}\langle g_{21},S^{-1}\xi_{H}(a_{<0>_{[0]_{<1>}}})\rangle
\langle g_{222},\xi_{H}^{-1}(a_{<0>_{[-1]}})\rangle\xi_{H}^{*-2}(g_{221})
\bowtie\xi_{A}^{2}(a_{<0>_{[0]_{<0>}}})b_{<0>})\\
&& \otimes (f_{1}\langle g_{11},S^{-1}\xi_{H}(a_{<1>22})\rangle\langle g_{122},\xi_{H}^{-1}(a_{<1>1})\rangle
\xi_{H}^{*-2}(g_{121})\otimes\xi_{H}^{2}(a_{<1>21})b_{<1>})\\
&=&(f_{2}\langle g_{22},a_{<0>_{[-1]}}\rangle\xi_{H}^{*-1}(g_{21})\bowtie\xi_{A}(a_{<0>_{[0]}})b_{<0>})
\otimes (f_{1}\langle g_{11},S^{-1}(a_{<1>2})\rangle \xi_{H}^{*-1}(g_{121})\\
&&\otimes\xi_{H}(a_{<1>1})b_{<1>})\\
&=&(f_{2}\langle g_{22},\xi_{H}^{-1}(a_{[-1]})\rangle\xi_{H}^{*-1}(g_{21})
\bowtie\xi_{A}^{2}(a_{[0]_{<0>_{<0>}}})b_{<0>})
\otimes (f_{1}\langle g_{11},S^{-1}(a_{[0]_{<1>}})\rangle \xi_{H}^{*-1}(g_{121})\\
&&\otimes\xi_{H}^{2}(a_{[0]_{<0>_{<1>}}})b_{<1>})\\
&=&(f_{2}(\xi_{H}^{-1}(a_{[-1]})\rightharpoonup \xi_{H}^{*}(g_{2}))
\bowtie\xi_{A}^{2}(a_{[0]_{<0>_{<0>}}})b_{<0>})
\otimes (f_{1}(\xi_{H}^{*}(g_{1})\leftharpoonup S^{-1}(a_{[0]_{<1>}}))\\
&&\otimes\xi_{H}^{2}(a_{[0]_{<0>_{<1>}}})b_{<1>})\\
&\stackrel{(\ref{2.5})}{=}&(f_{2}(a_{[-1]}\rightharpoonup (\xi_{H}^{*2}(g)
\leftharpoonup S^{-1}(a_{[0]_{<1>}})))_{2}
\bowtie\xi_{A}^{2}(a_{[0]_{<0>_{<0>}}})b_{<0>})\\
&& \otimes (f_{1}(a_{[-1]}\rightharpoonup (\xi_{H}^{*2}(g)
\leftharpoonup S^{-1}(a_{[0]_{<1>}})))_{1}
\otimes\xi_{H}^{2}(a_{[0]_{<0>_{<1>}}})b_{<1>})\\
&=&\rho_{r_{D(H)}}(f(a_{[-1]}\rightharpoonup (\xi_{H}^{*2}(g)
\leftharpoonup S^{-1}(a_{[0]_{<1>}})))
\bowtie\xi_{A}^{2}(a_{[0]_{<0>}})b)\\
&=& \rho_{r_{D(H)}}((f\bowtie a)(g\bowtie b)).
\end{eqnarray*}
Hence $\rho_{r_{D(H)}}$ is a Hom-algebra map. The argument for
$\rho_{l_{D(H)}}$ is analogous.
$\blacksquare$
\\
{\bf Example 2.4.} Let $(H,\xi_{H})$ be finite dimensional.
Then $(A(\alpha,\beta),\xi_{H}^{*-1}\otimes\xi_{H})$ becomes a $D(H)$-bicomodule
algebra, with structures
\begin{eqnarray*}
H^{*}\bowtie H(\alpha,\beta)\rightarrow (H^*\bowtie H(\alpha,\beta))\otimes D(H),
&& f\bowtie h\mapsto (f_{2}\bowtie h_{1})\otimes(f_{1}\bowtie \beta(h_{2})),\\
H^*\bowtie H(\alpha,\beta)\rightarrow D(H)\otimes(H^*\bowtie H(\alpha,\beta)),
&&f\bowtie h\mapsto (f_{2}\bowtie \alpha(h_{1}))\otimes(f_{1}\bowtie h_{2}),
\end{eqnarray*}
for all $f\in H^{*},h\in H.$
\\
In the rest of this section we establish that if $(H,\xi_{H})$
is a finite dimensional monoidal Hom-Hopf algebra,
then the category $ _{A}\mathcal{MHYD}^{H}(H)$ is isomorphic
to the category of left $H^{*}\bowtie A$-modules,
$_{H^{*}\bowtie A}\mathcal{M}.$
\\
{\bf Lemma 2.5.} Let $(H,\xi_{H})$ be a monoidal Hom-Hopf algebra
and $(H,A,H)$ a Yetter-Drinfeld Hom-datum. We have a functor
$F: {}_{A}\mathcal{MHYD}^{H}(H)\rightarrow {}_{H^{*}\bowtie A}\mathcal{M},$
given by $F(M)=M$ as a $k$-module, with the $H^{*}\bowtie A$-module structure
defined by
\begin{eqnarray}\label{2.6}
(f\bowtie u)\triangleright m=\langle f,(u\cdot\xi_{M}^{-1}(m))_{1}\rangle
\xi_{M}^{2}((u\cdot\xi_{M}^{-1}(m))_{0}),
\end{eqnarray}
for all $f\in (H^{*},\xi_{H}^{*-1}),u\in (A,\xi_{A})$ and $m\in (M,\xi_{M})$.
$F$ transforms a morphism to itself.
\\
{\bf Proof.} For all $f,g\in H^{*},a,b\in A$ and $m\in M$, we compute:
\begin{eqnarray*}
&&[(f\bowtie a)(g\bowtie b)]\triangleright \xi_{M}(m)\\
&=&[f(a_{[-1]}\rightharpoonup(\xi_{H}^{*2}(g)
\leftharpoonup S^{-1}(a_{[0]_{<1>}})))\bowtie \xi_{A}^{2}(a_{[0]_{<0>}})b]
\triangleright\xi_{M}(m)\\
&=&\langle g_{1},S^{-1}\xi_{H}(a_{[0]_{<1>}})\rangle\langle g_{22},\xi_{H}^{-1}(a_{[-1]})\rangle
[f\xi_{H}^{*2}(g_{21})\bowtie \xi_{A}^{2}(a_{[0]_{<0>}})b]\triangleright\xi_{M}(m)\\
&=&\langle g_{1},S^{-1}\xi_{H}(a_{[0]_{<1>}})\rangle\langle g_{22},\xi_{H}^{-1}(a_{[-1]})\rangle
\langle f,((\xi_{A}^{2}(a_{[0]_{<0>}})b)\cdot m)_{11}\rangle\\
&& \langle\xi_{H}^{*-2}(g_{21}),((\xi_{A}^{2}(a_{[0]_{<0>}})b)\cdot m)_{12}\rangle
\xi_{M}^{2}(((\xi_{A}^{2}(a_{[0]_{<0>}})b)\cdot m)_{0})\\
&\stackrel{( \ref{eq10})}{=}&
\langle g_{1},S^{-1}\xi_{H}(a_{[0]_{<1>}})\rangle\langle g_{22},\xi_{H}^{-1}(a_{[-1]})\rangle
\langle f,((\xi_{H}^{2}(a_{[0]_{<0>_{[0]_{<1>}}}})b_{[0]_{<1>}})\xi_{H}^{-1}(m_{1}))_{1}\\
&&S^{-1}(\xi_{H}^{2}(a_{[0]_{<0>_{[-1]}}})b_{[-1]})_{1}\rangle
\langle g_{21},\xi_{H}^{-2}(((\xi_{H}^{2}(a_{[0]_{<0>_{[0]_{<1>}}}})b_{[0]_{<1>}})\xi_{H}^{-1}(m_{1}))_{2}\\
&& S^{-1}(\xi_{H}^{2}(a_{[0]_{<0>_{[-1]}}})b_{[-1]})_{2})\rangle
\xi_{M}^{2}(\xi_{A}(\xi_{A}^{2}(a_{[0]_{<0>_{[0]_{<0>}}}})b_{[0]_{<0>}})\cdot m_{0})\\
&=&\langle g,S^{-1}\xi_{H}(a_{[0]_{<1>}})
((((a_{[0]_{<0>_{[0]_{<1>2}}}}\xi_{H}^{-2}(b_{[0]_{<1>2}}))\xi_{H}^{-3}(m_{12}))\\
&&S^{-1}(a_{[0]_{<0>_{[-1]1}}}\xi_{H}^{-2}(b_{[-1]1})))\xi_{H}^{-1}(a_{[-1]}))\rangle
\langle f,((\xi_{H}^{2}(a_{[0]_{<0>_{[0]_{<1>1}}}})b_{[0]_{<1>1}})\xi_{H}^{-1}(m_{11}))\\
&&S^{-1}(\xi_{H}^{2}(a_{[0]_{<0>_{[-1]2}}})b_{[-1]2})\rangle
\xi_{M}^{2}((\xi_{A}^{3}(a_{[0]_{<0>_{[0]_{<0>}}}})\xi_{A}(b_{[0]_{<0>}}))\cdot m_{0})\\
&=&\langle g,S^{-1}\xi_{H}^{3}(a_{[0]_{<1>22}})
(((a_{[0]_{<1>21}}\xi_{H}^{-1}(b_{[0]_{<1>2}}))\xi_{H}^{-2}(m_{12}))(S^{-1}\xi_{H}^{-1}(b_{[-1]1})\\
&&(S^{-1}\xi_{H}^{-1}(a_{[-1]12})\xi_{H}^{-1}(a_{[-1]11}))))\rangle
\langle f,((a_{[0]_{<1>1}})b_{[0]_{<1>1}})\xi_{H}^{-1}(m_{11}))\\
&&S^{-1}(a_{[-1]2}b_{[-1]2})\rangle
\xi_{M}^{2}(\xi_{A}(a_{[0]_{<0>}}b_{[0]_{<0>}}))\cdot m_{0})\\
&=&\langle g,\xi_{H}^{2}(b_{[0]_{<1>2}})(\xi_{H}^{-1}(m_{12})(S^{-1}(b_{[-1]1}))\rangle
\langle f,((\xi_{H}(a_{[0]_{<1>}})b_{[0]_{<1>1}})\xi_{H}^{-1}(m_{01}))\\
&&S^{-1}(\xi_{H}^{-1}(a_{[-1]2})b_{[-1]2})\rangle
\xi_{M}^{3}((a_{[0]_{<0>}}b_{[0]_{<0>}})\cdot m_{00})\\
&=&\langle g,(b_{[0]_{<1>}}\xi_{H}^{-2}(m_{1}))S^{-1}(b_{[-1]})\rangle
\langle f,((\xi_{H}^{-1}(a_{[0]_{<1>}})\xi_{H}(b_{[0]_{<0>_{[0]_{<1>}}}}))\xi_{H}^{-1}(m_{01}))\\
&&S^{-1}(\xi_{H}^{-1}(a_{[-1]2})\xi_{H}(b_{[0]_{<0>_{[-1]}}}))\rangle
\xi_{M}^{3}((a_{[0]_{<0>}}\xi_{A}(b_{[0]_{<0>_{[-1]}}}))\cdot m_{00})\\
&\stackrel{(\ref{eq10})}{=}&
\langle g,(b_{[0]_{<1>}}\xi_{H}^{-2}(m_{1}))S^{-1}(b_{[-1]})\rangle
\langle f,(a\cdot(\xi_{A}(b_{[0]_{<0>}})\cdot\xi_{M}^{-1}(m_{0})))_{1}\rangle\\
&&\xi_{M}^{2}((\xi_{A}(a)\cdot\xi_{M}(\xi_{A}(b_{[0]_{<0>}})\cdot\xi_{M}^{-1}(m_{0})))_{0})\\
&\stackrel{(\ref{eq10})}{=}&
\langle g,(b\cdot\xi_{M}^{-1}(m))_{1}\rangle
\langle\xi_{H}^{*-1}(f),(\xi_{A}(a)\cdot\xi_{M}((b\cdot\xi_{M}^{-1}(m))_{0}))_{1}\rangle\\
&& \xi_{M}^{2}((\xi_{A}(a)\cdot\xi_{M}((b\cdot\xi_{M}^{-1}(m))_{0}))_{0})\\
&=&(\xi_{H}^{*-1}(f)\bowtie \xi_{A}(a))\triangleright\xi_{M}^{2}((b\cdot\xi_{M}^{-1}(m))_{0})
\langle g,(b\cdot\xi_{M}^{-1}(m))_{1}\rangle\\
&=&(\xi_{H}^{*-1}(f)\bowtie \xi_{A}(a))\triangleright[(g\bowtie b)\triangleright m],
\end{eqnarray*}
as needed. It is not hard to see that
$(\varepsilon_{H}\bowtie 1_{A})\triangleright m=\xi_{M}(m)$,
for all $m\in M,$ so $M$ is a left $H^{*}\bowtie A$-module.
The fact that a morphism in $ _{A}\mathcal{MHYD}^{H}(H)$
becomes a morphism in $_{H^{*}\bowtie A}\mathcal{M}$
can be proved more easily; we leave the details to the reader.
$\blacksquare$
\\
{\bf Lemma 2.6.} Let $(H,\xi_{H})$ be a monoidal Hom-Hopf algebra
and $(H,A,H)$ a Yetter-Drinfeld Hom-datum, and assume $H$ is finite dimensional.
We have a functor $G: {}_{H^{*}\bowtie A}\mathcal{M}\rightarrow {}_{A}\mathcal{MHYD}^{H}(H),$
given by $G(M)=M$ as a $k$-module, with the structure maps
defined by
\begin{eqnarray}\label{2.7}
u\cdot m=(\varepsilon_{H}\bowtie\xi_{A}^{-1}(u))\triangleright m,
\end{eqnarray}
\begin{eqnarray}\label{2.8}
\rho_{M}:M\rightarrow M\otimes H,\ \ \rho_{M}(m)= m_{0}\otimes m_{1}
=\sum_{i=1}^{n}(\xi_{H}^{*2}(e^{i})\bowtie 1_{A})\triangleright \xi_{M}^{-2}(m)\otimes e_{i},
\end{eqnarray}
for all $u\in (A,\xi_{A})$ and $m\in (M,\xi_{M})$. Here $\{e_{i}\}_{i=1,...,n}$
is a basis of $H$ and $\{e^{i}\}_{i=1,...,n}$ is the corresponding dual basis
of $H^{*}$. $G$ transforms a morphism to itself.
\\
{\bf Proof.} The most difficult part of the proof is to show that $G(M)$
satisfies the relations (\ref{eq10}) or (\ref{eq11}). It is then straightforward to show that
a map in $ _{H^{*}\bowtie A}\mathcal{M}$ is also a map in $_{A}\mathcal{MHYD}^{H}(H),$
and that $G$ is a functor.
We compute:
\begin{eqnarray*}
&&u_{<0>}\cdot m_{0}\otimes u_{<1>}m_{1}\\
&=& \sum_{i=1}^{n}(\varepsilon_{H}\bowtie \xi_{A}^{-1}(u_{<0>}))
\triangleright((\xi_{H}^{*2}(e^{i})\bowtie 1_{A})\triangleright
\xi_{M}^{-2}(m))\otimes u_{<1>}e_{i}\\
&=&\sum_{i=1}^{n}[\varepsilon_{H}(\xi_{A}^{-2}(u_{<0>_{[-1]}})
\rightharpoonup(\xi_{H}^{*4}(e^{i})\leftharpoonup S^{-1}\xi_{H}^{-2}(u_{<0>_{[0]_{<1>}}})))
\bowtie \xi_{A}(u_{<0>_{[0]_{<0>}}})]\triangleright\xi_{M}^{-1}(m)\\
&&\otimes u_{<1>}e_{i}\\
&=&\sum_{i=1}^{n}\langle e_{1}^{i},S^{-1}\xi_{H}(u_{<0>_{[0]_{<1>}}})\rangle\langle e_{22}^{i},\xi_{H}^{-1}(u_{<0>_{[-1]}})\rangle
\langle e_{21}^{i},\xi_{H}^{-1}((\xi_{A}(u_{<0>_{[0]_{<0>}}})\cdot\xi_{M}^{-2}(m))_{1})\rangle\\
&&\xi_{M}^{2}((\xi_{A}(u_{<0>_{[0]_{<0>}}})\cdot\xi_{M}^{-2}(m))_{0})
\otimes u_{<1>}e_{i}\\
&=&\sum_{i=1}^{n}\langle e^{i},S^{-1}\xi_{H}(u_{<0>_{[0]_{<1>}}})((u_{<0>_{[0]_{<0>}}}\cdot\xi_{M}^{-3}(m))_{1}
\xi_{H}^{-1}(u_{<0>_{[-1]}}))\rangle\\
&&\xi_{M}^{2}((\xi_{A}(u_{<0>_{[0]_{<0>}}})\cdot\xi_{M}^{-2}(m))_{0})
\otimes u_{<1>}e_{i}\\
&=&\xi_{M}^{2}((\xi_{A}(u_{<0>_{[0]_{<0>}}})\cdot\xi_{M}^{-2}(m))_{0})
\otimes (\xi_{H}^{-1}(u_{<1>})S^{-1}\xi_{H}(u_{<0>_{[0]_{<1>}}}))\\
&& \xi_{H}((u_{<0>_{[0]_{<0>}}}\cdot\xi_{M}^{-3}(m))_{1}u_{<0>_{[-1]}})\\
&=&\xi_{M}^{2}((u_{<0>_{[0]}}\cdot\xi_{M}^{-2}(m))_{0})
\otimes (u_{<1>2}S^{-1}(u_{<1>1}))
((u_{<0>_{[0]}}\cdot\xi_{M}^{-2}(m))_{1}\xi_{H}(u_{<0>_{[-1]}}))\\
&=&\xi_{M}((u_{[0]}\cdot\xi_{M}^{-1}(m))_{0})\otimes (u_{[0]}\cdot\xi_{M}^{-1}(m))_{1}u_{[-1]},
\end{eqnarray*}
for all $u\in (A,\xi_{A})$ and $m\in(M,\xi_{M})$, and this finishes the proof.
$\blacksquare$\\
The next result generalizes (\cite{CZ2014}, Prop. 4.3),
which is recovered by taking $H = A$.
\\
{\bf Theorem 2.7.} Let $(H,\xi_{H})$ be a monoidal Hom-Hopf algebra
and $(H,A,H)$ a Yetter-Drinfeld Hom-datum, and assume that $H$ is finite dimensional.
Then the categories $_{A}\mathcal{MHYD}^{H}(H)$ and $ _{H^{*}\bowtie A}\mathcal{M}$
are isomorphic. \\
{\bf Proof.} We have to verify that the functors $F$ and $G$ defined in Lemmas 2.5 and 2.6
are inverse to each other. Let $M\in {}_{A}\mathcal{MHYD}^{H}(H)$. The structures on
$G(F(M))$ are denoted by $\cdot^{'}$
and $\rho_{M}^{'}$. For any $u\in (A,\xi_{A})$ and $m\in (M,\xi_{M})$ we have that
$$u\cdot^{'}m = (\varepsilon\bowtie \xi_{A}^{-1}(u))\triangleright m
= \langle\varepsilon,(\xi_{A}^{-1}(u)\cdot \xi_{M}^{-1}(m))_{1}\rangle
\xi_{M}^{2}((\xi_{A}^{-1}(u)\cdot \xi_{M}^{-1}(m))_{0}) = u\cdot m.$$
We now compute for $m\in (M,\xi_{M})$ that
\begin{eqnarray*}
\rho_{M}^{'}(m)&=&\sum_{i=1}^{n}(\xi_{H}^{*2}(e^{i})\bowtie1_{A})
\triangleright \xi_{M}^{-2}(m)\otimes e_{i}\\
&\stackrel{(\ref{2.6})}{=}&\sum_{i=1}^{n}\langle\xi_{H}^{*2}(e^{i}),(1_{A}\cdot\xi_{M}^{-3}(m))_{1}\rangle
\xi_{M}^{2}((1_{A}\cdot\xi_{M}^{-3}(m))_{0})\otimes e_{i}\\
&=&\sum_{i=1}^{n}\langle e^{i},m_{1}\rangle m_{0}\otimes e_{i} = \rho_{M}(m).
\end{eqnarray*}
Conversely, take $M\in {}_{H^{*}\bowtie A}\mathcal{M}$. We want to show that
$F(G(M))=M$. If we denote the left $H^{*}\bowtie A$-action on $F(G(M))$ by
$\mapsto$, then using Lemmas 2.5 and 2.6 we find,
for all $f\in (H^{*},\xi_{H}^{*-1}),u\in (A,\xi_{A})$ and $m\in (M,\xi_{M})$:
\begin{eqnarray*}
(f\bowtie u)\mapsto m &=&\langle f,(u\cdot\xi_{M}^{-1}(m))_{1}\rangle\xi_{M}^{2}((u\cdot\xi_{M}^{-1}(m))_{0})\\
&=&\sum_{i=1}^{n}\langle f,e_{i}\rangle\xi_{M}^{2}((\xi_{H}^{*2}(e^{i})\bowtie 1_{A})
\triangleright \xi_{M}^{-2}(u\cdot\xi_{M}^{-1}(m)))\\
&=&\langle\xi_{H}^{*2}(f),\xi_{M}^{-2}(u\cdot\xi_{M}^{-1}(m))\rangle\xi_{M}^{2}(u\cdot\xi_{M}^{-1}(m))\\
&=&(f\bowtie u)\triangleright m,
\end{eqnarray*}
and this finishes our proof.
$\blacksquare$\\
{\bf Proposition 2.8.} Let $(H,\xi_{H})$ be finite dimensional
and let $H(\alpha,\beta)$ be an $H$-Hom-bicomodule algebra, with the
$H$-Hom-comodule structures shown in Example 2.9 (in \cite{YW2014}).
Then $_{H(\alpha,\beta)}\mathcal{MHYD}^{H}(H)\simeq {}_{H^{*}\bowtie H(\alpha,\beta)}\mathcal{M}$.
\\
The proof is left to the reader.
\\
Recall from Prop. 2.12 in \cite{YW2014} that
$_{H}\mathcal{MHYD}^{H}(\alpha,\beta)={}_{H(\alpha,\beta)}\mathcal{MHYD}^{H}(H)$.
\\
{\bf Proposition 2.9.} $_{H}\mathcal{MHYD}^{H}(\alpha,\beta)
\simeq {}_{H^{*}\bowtie H(\alpha,\beta)}\mathcal{M}$.
\\
We just give the correspondence, as follows. If $M\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)$,
then $M\in {}_{H^{*}\bowtie H(\alpha,\beta)}\mathcal{M}$ with structure
$$ (f\bowtie h)\triangleright m=f((h\cdot\xi_{M}^{-1}(m))_{1})
\xi_{M}^{2}((h\cdot\xi_{M}^{-1}(m))_{0}).$$
Conversely, if $M\in {}_{H^{*}\bowtie H(\alpha,\beta)}\mathcal{M}$, then
$M\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)$ with structures
\begin{eqnarray*}
h\cdot m &=& (\varepsilon_{H}\bowtie\xi_{H}^{-1}(h))\triangleright m,\\
\rho_{M}(m)&=& m_{0}\otimes m_{1}
=\sum_{i=1}^{n}(\xi_{H}^{*2}(e^{i})\bowtie 1_{A})\triangleright \xi_{M}^{-2}(m)\otimes e_{i},
\end{eqnarray*}
for all $f\in H^{*},h\in H,m\in M,$
where $\{e_{i}\}_{i=1,...,n}$ and $\{e^{i}\}_{i=1,...,n}$ are dual bases of $H$ and $H^{*}$.
The proof is left to the reader.
\section*{3. A BRAIDED $T$-CATEGORY $Rep(\mathcal{MHD}(H))$ }
\def\theequation{3.\arabic{equation}}
\setcounter{equation} {0} \hskip\parindent
Denote by $G={\sl Aut}_{mHH}(H) \times {\sl Aut}_{mHH}(H)$
the group with multiplication given as follows:
for all $\alpha,\beta,\gamma,\delta\in {\sl Aut}_{mHH}(H)$,
$$
(\alpha,\beta)\ast (\gamma,\delta)=(\alpha\gamma, \delta\gamma^{-1}\beta\gamma).
$$
The unit of this group is $(id,id)$ and $(\alpha,\beta)^{-1}=(\alpha^{-1}, \alpha\beta^{-1}\alpha^{-1})$.
\\
In this section we will construct a monoidal Hom-Hopf $T$-coalgebra
over $G$, denoted by $\mathcal{MHD}(H)$, and prove that the $T$-category
$Rep(\mathcal{MHD}(H))$ of representations of $\mathcal{MHD}(H)$
is isomorphic to $\mathcal {MHYD}(H)$ in \cite{YW2014} as braided $T$-categories.
\\
{\bf Proposition 3.1.} Let $(M,\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)$
and assume that $(M,\xi_{M})$ is finite dimensional.
Then $(M^{*},\xi_{M}^{*-1})=Hom(M,k)$ becomes an object in
$_{H}\mathcal{MHYD}^{H}(\alpha^{-1},\alpha\beta^{-1}\alpha^{-1})$, with module structure
$$ (h\cdot p)(m)=p(\beta^{-1}\alpha^{-1}S\xi_{H}^{-1}(h)\cdot\xi_{M}^{-2}(m)),$$
and comodule structure
$$\rho(p)(m)= p_{0}(\xi_{M}^{-1}(m))\otimes\xi_{H}(p_{1})
= p(\xi_{M}(m_{0}))\otimes S^{-1}\xi_{H}^{2}(m_{1}),$$
for all $h\in H,p\in M^{*}$ and $m\in M$. Moreover, the maps
$b_{M}:k\rightarrow M\otimes M^{*}, ~b_{M}(1)=\sum_{i}c_{i}\otimes c^{i}$
(where $\{c_{i}\}$ and $\{c^{i}\}$ are dual bases in $M$ and $M^{*}$)
and $d_{M}:M^{*}\otimes M\rightarrow k, ~d_{M}(p\otimes m)=p(m),$ are left
$H$-module maps and right $H$-comodule maps, and we have
$$(\xi_{M}\otimes d_{M})(b_{M}\otimes \xi_{M}^{-1})=id_{M},\ \
( d_{M}\otimes \xi_{M}^{*-1})(\xi_{M}^{*}\otimes b_{M})=id_{M^{*}}.$$
\\
\\
{\betaf Proof.} Following the idea of the proof of Panaite and Staic
(\cdotircte{PS2007}, Prop. 3.6), we first prove that $(M^{*},\xi_{M}^{*-1})$
is indeed an object in $_{H}\psiathcal{MHYD}^{H}(\alpha^{-1},\alpha\beta^{-1}\alpha^{-1})$.
We compute:
\betaegin{eqnarray*}
&& (\xi_{H}(h_{21})\cdotdot p_{0})(m)\otimes (\alpha\beta^{-1}\alpha^{-1}(h_{22})
\xi_{H}^{-1}(p_{1})) \alpha^{-1}S^{-1}(h_{1})\\
&=& p_{0}(\beta^{-1}\alpha^{-1}S(h_{21})\cdotdot\xi_{M}^{-2}(m)
\otimes (\alpha\beta^{-1}\alpha^{-1}(h_{22})\xi_{H}^{-1}(p_{1}))\alpha^{-1}S^{-1}(h_{1})\\
&=& p(\xi_{M}^{2}((\beta^{-1}\alpha^{-1}S(h_{21})\cdotdot\xi_{M}^{-2}(m))_{0}))
\otimes (\alpha\beta^{-1}\alpha^{-1}(h_{22})S^{-1}((\beta^{-1}\alpha^{-1}S(h_{21})\cdotdot\xi_{M}^{-2}(m))_{1}))\\
&& \alpha^{-1}S^{-1}(h_{1})\\
&=& p(\xi_{M}^{2}(\beta^{-1}\alpha^{-1}S\xi_{H}(h_{2112})\cdotdot\xi_{M}^{-2}(m_{0})))
\otimes (\alpha\beta^{-1}\alpha^{-1}(h_{22})S^{-1}((\alpha^{-1}S(h_{2111})\xi_{H}^{-3}(m_{1}))\\
&& \alpha\beta^{-1}\alpha^{-1}(h_{212})))\alpha^{-1}S^{-1}(h_{1})\\
&=& p(\beta^{-1}\alpha^{-1}S\xi_{H}^{3}(h_{2112})\cdotdot m_{0})
\otimes (\alpha\beta^{-1}\alpha^{-1}(h_{22})(\alpha\beta^{-1}\alpha^{-1}S^{-1}(h_{212})\\
&& (S^{-1}\xi_{H}^{-3}(m_{1})\alpha^{-1}(h_{2111}))))\alpha^{-1}S^{-1}(h_{1})\\
&=& p(\beta^{-1}\alpha^{-1}S\xi_{H}^{3}(h_{2112})\cdotdot m_{0})
\otimes ((\alpha\beta^{-1}\alpha^{-1}\xi_{H}^{-1}(h_{22})\alpha\beta^{-1}\alpha^{-1}S^{-1}(h_{212}))\\
&& (S^{-1}\xi_{H}^{-2}(m_{1})\alpha^{-1}\xi_{H}(h_{2111})))\alpha^{-1}S^{-1}(h_{1})\\
&=& p(\beta^{-1}\alpha^{-1}S\xi_{H}^{3}(h_{2112})\cdotdot m_{0})
\otimes (\alpha\beta^{-1}\alpha^{-1}(h_{22})\alpha\beta^{-1}\alpha^{-1}S^{-1}\xi_{H}(h_{212}))\\
&& ((S^{-1}\xi_{H}^{-2}(m_{1})\alpha^{-1}\xi_{H}(h_{2111}))\alpha^{-1}S^{-1}\xi_{H}^{-1}(h_{1}))\\
&=& p(\beta^{-1}\alpha^{-1}S\xi_{H}(h_{21})\cdotdot m_{0})
\otimes (\alpha\beta^{-1}\alpha^{-1}\xi_{H}(h_{222})\alpha\beta^{-1}\alpha^{-1}S^{-1}\xi_{H}(h_{221}))\\
&& (S^{-1}\xi_{H}^{-1}(m_{1})(\alpha^{-1}\xi_{H}^{-1}(h_{12})\alpha^{-1}S^{-1}\xi_{H}^{-1}(h_{11})))\\
&=& p(\beta^{-1}\alpha^{-1}S\xi_{H}^{-1}(h)\cdotdot m_{0}) \otimes S^{-1}\xi_{H}(m_{1})\\
&=& (h\cdotdot p)(\xi_{M}^{2}(m_{0}))\otimes S^{-1}\xi_{H}(m_{1})\\
&=& (h\cdotdot p)_{0}(m)\otimes (h\cdotdot p)_{1},
\etand{eqnarray*}
which means that
\betaegin{eqnarray*}
(h\cdotdot p)_{0}\otimes (h\cdotdot p)_{1}
= (\xi_{H}(h_{21})\cdotdot p_{0})\otimes (\alpha\beta^{-1}\alpha^{-1}(h_{22})
\xi_{H}^{-1}(p_{1})) \alpha^{-1}S^{-1}(h_{1}).
\etand{eqnarray*}
On $k$ we have the trivial Hom-module and Hom-comodule structure,
and with these $k\in {}_{H}\mathcal{MHYD}^{H}$. We want to prove that $b_{M}$
and $d_{M}$ are $H$-Hom-module maps. We compute:
\begin{eqnarray*}
(h\cdot b_{M}(1))(m)&=&(h\cdot(\sum_{i}c_{i}\otimes c^{i}))(m)\\
&\stackrel{(\ref{eq12})}{=}& \sum_{i}\alpha^{-1}(h_{1})\cdot c_{i}\otimes (\alpha\beta\alpha^{-1}(h_{2})
\cdot c^{i})(m)\\
&=& \sum_{i}\alpha^{-1}(h_{1})\cdot c_{i}\otimes c^{i}(\beta^{-1}\alpha^{-1}S\alpha\beta\alpha^{-1}\xi_{H}^{-1}(h_{2})
\cdot \xi_{M}^{-2}(m))\\
&=& \sum_{i}\alpha^{-1}(h_{1})\cdot c_{i}\otimes c^{i}(S\alpha^{-1}\xi_{H}^{-1}(h_{2})\cdot \xi_{M}^{-2}(m))\\
&=& \alpha^{-1}(h_{1})\cdot (S\alpha^{-1}\xi_{H}^{-1}(h_{2})\cdot \xi_{M}^{-2}(m))\\
&=& \alpha^{-1}(\xi_{H}^{-1}(h_{1})S\xi_{H}^{-1}(h_{2}))\cdot \xi_{M}^{-1}(m)\\
&=& \varepsilon(h)\sum_{i}c_{i}\otimes c^{i}(m)\\
&=& (\varepsilon(h)b_{M}(1))(m),
\end{eqnarray*}
\begin{eqnarray*}
d_{M}(h\cdot(p\otimes m))&\stackrel{(\ref{eq12})}{=}&
d_{M}(\alpha(h_{1})\cdot p\otimes \beta^{-1}(h_{2})\cdot m)\\
&=& (\alpha(h_{1})\cdot p)(\beta^{-1}(h_{2})\cdot m)\\
&=& p(\beta^{-1}\alpha^{-1}S\alpha\xi_{H}^{-1}(h_{1})\cdot\xi_{M}^{-2}(\beta^{-1}(h_{2})\cdot m))\\
&=& p(\beta^{-1}(S\xi_{H}^{-2}(h_{1})\xi_{H}^{-2}(h_{2}))\cdot\xi_{M}^{-1}( m))\\
&=& \varepsilon(h)d_{M}(p\otimes m).
\end{eqnarray*}
They are also $H$-Hom-comodule maps:
\begin{eqnarray*}
((b_{M}(1))_{0}\otimes (b_{M}(1))_{1})(m)
&=& \sum_{i}(c_{i})_{0}\otimes (c^{i})_{0}(m)\otimes(c^{i})_{1}(c_{i})_{1}\\
&=& \sum_{i}(c_{i})_{0}\otimes c^{i}(\xi_{M}^{2}(m_{0}))\otimes S^{-1}\xi_{H}(m_{1})(c_{i})_{1}\\
&=& \xi_{M}^{2}(m_{00})\otimes S^{-1}\xi_{H}(m_{1})\xi_{H}^{2}(m_{01})\\
&=& \xi_{M}(m_{0})\otimes S^{-1}\xi_{H}^{2}(m_{12})\xi_{H}^{2}(m_{11})\\
&=& (b_{M}(1)\otimes 1)(m),
\end{eqnarray*}
\begin{eqnarray*}
d_{M}((p\otimes m)_{0})\otimes (p\otimes m)_{1}&=& p_{0}( m_{0})\otimes m_{1}p_{1}\\
&=& p(\xi_{M}^{2}(m_{00}))\otimes m_{1}S^{-1}\xi_{H}(m_{01})\\
&=& p(\xi_{M}(m_{0}))\otimes \xi_{H}(m_{12})S^{-1}\xi_{H}(m_{11})\\
&=& d_{M}(p\otimes m)\otimes 1.
\end{eqnarray*}
Finally, we compute:
\begin{eqnarray*}
(\xi_{M}\otimes d_{M})(b_{M}\otimes \xi_{M}^{-1})(m)
&=& (\xi_{M}\otimes d_{M})(b_{M}(1)\otimes \xi_{M}^{-1}(m))\\
&=& (\xi_{M}\otimes d_{M})(\sum_{i}(c_{i}\otimes c^{i})\otimes \xi_{M}^{-1}(m))\\
&=& \sum_{i}\xi_{M}^{2}(c_{i})\otimes c^{i}(\xi_{M}^{-2}(m)) = m.
\end{eqnarray*}
The argument for $( d_{M}\otimes \xi_{M}^{*-1})(\xi_{M}^{*}\otimes b_{M})=id_{M^{*}}$
is analogous.
$\blacksquare$\\
Similarly, one can obtain:
\\
{\bf Proposition 3.2.} Let $(M,\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)$
and assume that $(M,\xi_{M})$ is finite dimensional.
Then $(^{*}M,{}^{*}\xi_{M}^{-1})=Hom(M,k)$ becomes an object in
$_{H}\mathcal{MHYD}^{H}(\alpha^{-1},\alpha\beta^{-1}\alpha^{-1})$, with module structure
$$ (h\cdot p)(m)=p(\beta^{-1}\alpha^{-1}S\xi_{H}^{-1}(h)\cdot\xi_{M}^{-2}(m)),$$
and comodule structure
$$\rho(p)(m)= p_{0}(\xi_{M}^{-1}(m))\otimes\xi_{H}(p_{1})
= p(\xi_{M}(m_{0}))\otimes S^{-1}\xi_{H}^{2}(m_{1}),$$
for all $h\in H, ~p\in\ ^{*}M$ and $m\in M$. Moreover, the maps
$b_{M}:k\rightarrow M\otimes\ ^{*}M, ~b_{M}(1)=\sum_{i}c_{i}\otimes c^{i}$
(where $\{c_{i}\}$ and $\{c^{i}\}$ are dual bases in $M$ and $^{*}M$)
and $d_{M}:\ ^{*}M\otimes M\rightarrow k, ~d_{M}(p\otimes m)=p(m),$ are left
$H$-module maps and right $H$-comodule maps, and we have
$$(\xi_{M}\otimes d_{M})(b_{M}\otimes \xi_{M}^{-1})=id_{M},\ \
( d_{M}\otimes {}^{*}\xi_{M}^{-1})({}^{*}\xi_{M}\otimes b_{M})=id_{^{*}M}.$$
\\
Now, if we consider $\mathcal {MHYD}(H)_{fd}$, the subcategory
of $\mathcal {MHYD}(H)$ consisting of finite dimensional objects,
then by Propositions 3.1 and 3.2 we obtain:\\
{\bf Corollary 3.3.} $\mathcal {MHYD}(H)_{fd}$ is a braided $T$-category
with left and right dualities over $G$.
\\
Assume now that $(H,\xi_{H})$ is finite dimensional. We will
construct a monoidal Hom-Hopf $T$-coalgebra over $G$, denoted by
$ \mathcal{MHD}(H)$, with the property that the $T$-category
$Rep(\mathcal{MHD}(H))$ of representations of $\mathcal{MHD}(H)$ is isomorphic
to $\mathcal{MHYD}(H)$ as braided $T$-categories.
\\
{\bf Theorem 3.4.} $\mathcal{MHD}(H)=\{\mathcal{MHD}(H)_{(\alpha,\beta)}\}_{(\alpha,\beta)\in G}$
is a monoidal Hom-Hopf $T$-coalgebra with the following structures:
\begin{itemize}
\item For any $(\alpha,\beta)\in G,$ the $(\alpha,\beta)$-component $\mathcal{MHD}(H)_{(\alpha,\beta)}$
is the diagonal crossed Hom-product algebra $H^{*}\bowtie H(\alpha,\beta)$
of Eq. (\ref{2.10}),
\item The comultiplication on $ \mathcal{MHD}(H)$ is given by
\begin{eqnarray*}
\Delta_{(\alpha,\beta),(\gamma,\delta)}:\mathcal{MHD}(H)_{(\alpha,\beta)\ast(\gamma,\delta)}
&\rightarrow& \mathcal{MHD}(H)_{(\alpha,\beta)}\otimes\mathcal{MHD}(H)_{(\gamma,\delta)},\\
\Delta_{(\alpha,\beta),(\gamma,\delta)}(f\bowtie h)&= &(f_{1}\bowtie\gamma(h_{1}))\otimes(f_{2}\bowtie\gamma^{-1}\beta\gamma(h_{2})),
\end{eqnarray*}
\item The counit $\varepsilon$ is obtained by setting
$$\varepsilon(f\bowtie h)=\varepsilon(h)f(1_{H}),$$
\item For any $(\alpha,\beta)\in G$, the $(\alpha,\beta)^{th}$ component of the antipode of
$\mathcal{MHD}(H)$ is given by
\begin{eqnarray*}
S_{(\alpha,\beta)}:\mathcal{MHD}(H)_{(\alpha,\beta)}&\rightarrow& \mathcal{MHD}(H)_{(\alpha,\beta)^{-1}}= \mathcal{MHD}(H)_{(\alpha^{-1},\alpha\beta^{-1}\alpha^{-1})},\\
S_{(\alpha,\beta)}(f\bowtie h)&=&(\varepsilon\bowtie\alpha\beta S\xi_{H}^{-1}(h))
(S^{*-1}\xi_{H}^{*}(f)\bowtie1_{H}),
\end{eqnarray*}
\item For $(\alpha,\beta),(\gamma,\delta)\in G$, the conjugation isomorphism is given by
\begin{eqnarray*}
\varphi_{(\gamma,\delta)}^{(\alpha,\beta)}: \mathcal{MHD}(H)_{(\gamma,\delta)}
&\rightarrow& \mathcal{MHD}(H)_{(\alpha,\beta)\ast(\gamma,\delta)\ast(\alpha,\beta)^{-1}},\\
\varphi_{(\gamma,\delta)}^{(\alpha,\beta)}(f\bowtie h)&=&(f\circ\beta\alpha^{-1}
\bowtie\alpha\gamma^{-1}\beta^{-1}\gamma(h)),
\end{eqnarray*}
\end{itemize}
for all $f\in H^{*},h\in H.$
\\
{\bf Proof.} We have to check the axioms of a monoidal Hom-Hopf $T$-coalgebra.
Hom-coassociativity and multiplicativity of $\Delta$ are satisfied.
We compute
\begin{eqnarray*}
&& m_{(\alpha,\beta)}(id\otimes S_{(\alpha,\beta)^{-1}})\Delta_{(\alpha,\beta),(\alpha,\beta)^{-1}}(f\bowtie h)\\
&=& (f_{1}\bowtie\alpha^{-1}(h_{1}))[(\varepsilon\bowtie S\alpha^{-1}\xi_{H}^{-1}(h_{2}))
(S^{*-1}\xi_{H}^{*}(f_{2})\bowtie 1_{H})]\\
&=& [(\xi_{H}^{*}(f_{1})\bowtie\alpha^{-1}\xi_{H}^{-1}(h_{1}))
(\varepsilon\bowtie S\alpha^{-1}\xi_{H}^{-1}(h_{2}))]
(S^{*-1}(f_{2})\bowtie 1_{H})\\
&=& [(\xi_{H}^{*}(f_{1})(\alpha((\alpha^{-1}\xi_{H}^{-1}(h_{1}))_{1})\rightharpoonup
(\varepsilon\leftharpoonup S^{-1}\beta((\alpha^{-1}\xi_{H}^{-1}(h_{1}))_{22})))\\
&& \bowtie\xi_{H}^{2}((\alpha^{-1}\xi_{H}^{-1}(h_{1}))_{21})
S\alpha^{-1}\xi_{H}^{-1}(h_{2}))](S^{*-1}(f_{2})\bowtie 1_{H})\\
&=& [f_{1} \varepsilon( S^{-1}\beta\alpha^{-1}\xi_{H}^{-2}(h_{122}))
\varepsilon(\xi_{H}^{-2}(h_{11}))\bowtie\alpha^{-1}\xi_{H}(h_{121})
S\alpha^{-1}\xi_{H}^{-1}(h_{2})]\\
&&(S^{*-1}(f_{2})\bowtie 1_{H})\\
&=& (f_{1} \bowtie\alpha^{-1}\xi_{H}^{-1}(h_{1}S(h_{2})))(S^{*-1}(f_{2})\bowtie 1_{H})\\
&=& \varepsilon(h)(f_{1} \bowtie 1_{H})(S^{*-1}(f_{2})\bowtie 1_{H})\\
&=& \varepsilon(h)(f_{1}S^{*-1}(f_{2})\bowtie 1_{H})\\
&=& \varepsilon(f\bowtie h)(\varepsilon\bowtie 1_{H}),
\end{eqnarray*}
and similarly
$ m_{(\alpha,\beta)}( S_{(\alpha,\beta)^{-1}}\otimes id)\Delta_{(\alpha,\beta)^{-1},(\alpha,\beta)}(f\bowtie h)
= \varepsilon(f\bowtie h)(\varepsilon\bowtie 1_{H}).$
This completes the proof.
$\blacksquare$\\
Moreover, via the isomorphism $_{H}\mathcal{MHYD}^{H}(\alpha,\beta)
\simeq {}_{H^{*}\bowtie H(\alpha,\beta)}\mathcal{M}$ of Proposition 2.9, we obtain:
\\
{\bf Theorem 3.5.} $Rep(\mathcal{MHD}(H))\simeq \mathcal{MHYD}(H)$
as braided $T$-categories over $G$.
\section*{4. A BRAIDED $T$-CATEGORY $\mathcal{ZMHYD}(H)$}
\def\theequation{4.\arabic{equation}}
\setcounter{equation} {0} \hskip\parindent
In this section, we will construct a new braided $T$-category
$\mathcal{ZMHYD}(H)$ over $\mathbb{Z}.$
\\
{\bf Definition 4.1.} Let $(C,\xi_{C})$ be a monoidal Hom-coalgebra.
An element $g\in C$ is called a group-like element if
$$\xi_{C}(g) = g,\ \ \Delta(g)= g \otimes g,\ \ \varepsilon(g)=1. $$
\\
{\bf Example 4.2.} Recall from Example 3.5 in \cite{CWZ2013}
that $( H_{4} =k\{ 1,\ g,\ x, \ y = gx\,\},\xi_{H_{4}} , \Delta , \varepsilon , S )$
is a monoidal Hom-Hopf algebra, where the algebraic structures are given as follows:\\
$\bullet$ The multiplication
$``\circ"$ is given by
$$\begin{array}{|c|c|c|c|c|}
\hline \circ & 1_{H_4} & g & x & y\\
\hline 1_{H_4} & 1_{H_4} & g & cx & cy \\
g & g & 1_{H_4} & cy & cx \\
x& cx & -cy& 0 & 0 \\
y& cy & -cx & 0 & 0 \\
\hline
\end{array}$$
$\bullet$ The automorphism $\xi_{H_{4}}$ is given by
$ \xi_{H_{4}}(1)=1,\ \xi_{H_{4}}(g)=g,\ \xi_{H_{4}}(x)=cx,$\\
$\xi_{H_{4}}(gx)=cgx,$
for all $0\neq c\in k$;\\
$\bullet$ The comultiplication $\Delta$ is defined by
\begin{eqnarray*}
\Delta(1)=1\otimes 1,&&
\Delta(g)=g\otimes g,\\
\Delta(x)=c^{-1}(x\otimes 1)+ c^{-1}(g\otimes x),
&&\Delta(gx)=c^{-1}(gx\otimes g)+c^{-1}(1\otimes gx);
\end{eqnarray*}
$\bullet$ The counit $\varepsilon$ is defined by
$ \varepsilon(1)=1,\ \
\varepsilon(g)=1,\ \
\varepsilon(x)=0,\ \ \varepsilon(gx)=0; $\\
$\bullet$ The antipode $S$ is given by
$ S(1)=1,\ \ S(g)=g,\ \ S(x)=-gx,\ \ S(gx)=-x.$\\
Then $1_{H_{4}}$ and $g$ are group-like elements of $H_{4}$.
\\
In \cite{YW2014}, the authors introduced the notion of a
left-right $(\alpha, \beta)$-Yetter-Drinfeld Hom-module ({\it Definition 1.2.6}).
In this section we will give some of its special cases.
\\
{\bf Example 4.3.} For $(M,\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}(S^{2},id)$,
we obtain the {\it left-right anti-Yetter-Drinfeld Hom-module category},
i.e., the compatibility condition is
\begin{eqnarray*}
(h\cdot m)_{0}\otimes (h\cdot m)_{1} =
\xi_{H}(h_{21})\cdot m_{0}\otimes (h_{22}\xi_{H}^{-1}(m_{1}))S(h_{1}),
\end{eqnarray*}
for $h\in H,m\in M.$
\\
{\bf Example 4.4.} An object in $_{H}\mathcal{MHYD}^{H}(S^{2n},id)$ is called
a {\it left-right $n$-Yetter-Drinfeld Hom-module}, i.e., an {\it $n$-$\mathcal{MHYD}$-module};
for $(M,\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}(S^{2n},id)$,
the compatibility condition is
\begin{eqnarray*}
(h\cdot m)_{0}\otimes (h\cdot m)_{1} =
\xi_{H}(h_{21})\cdot m_{0}\otimes (h_{22}\xi_{H}^{-1}(m_{1}))S^{2n-1}(h_{1}),
\end{eqnarray*}
for $h\in H,m\in M.$
\\
{\bf Example 4.5.} Similarly to Panaite and Staic (\cite{PS2007}, Example 2.7),
for $\alpha,\beta\in {\sl Aut}_{mHH}(H)$, assume that there is an algebra map
$\theta: H\rightarrow k$ and a group-like element $\omega\in (H,\xi_{H})$ such that
\begin{eqnarray*}
\alpha(h) = \omega^{-1}(\theta(h_{11})\beta(h_{12})\theta(S(h_{2}))\omega), \ \ \ \forall h\in H.
\end{eqnarray*}
Then one can check that $k\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)$
with the structures $h\cdot 1=\theta(h)$ and $\rho(1)= 1\otimes \omega.$
More generally, if $V$ is any vector space,
then $(V,\xi_{V})\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta),$
with the structures $h\cdot v=\theta(h)\xi_{V}(v)$ and
$\rho(v)= v_{0}\otimes v_{1}= \xi_{V}^{-1}(v)\otimes \omega,$ for all
$h\in H$ and $v\in V.$
\\
If $\alpha,\beta\in {\sl Aut}_{mHH}(H)$ are such that there exists a pair $(\theta,\omega)$
as in Example 4.5, we will say that $(\theta,\omega)$ is a pair
in involution corresponding to $(\alpha,\beta)$, and the left-right
$(\alpha,\beta)$-Yetter-Drinfeld Hom-modules $k$ and $(V,\xi_{V})$ will be denoted
by $_{\theta}k^{\omega}$ and $_{\theta}V^{\omega}$, respectively.
\\
In the following, we will show that, in the presence of a pair
in involution, there exists an isomorphism of categories
$_{H}\mathcal{MHYD}^{H}(\alpha,\beta)\simeq {}_{H}\mathcal{MHYD}^{H}.$
\\
{\bf Proposition 4.6.} Let $\alpha,\beta\in {\sl Aut}_{mHH}(H)$ and assume
that there exists a pair in involution $(\theta,\omega)$ corresponding to $(\alpha,\beta)$.
Then the categories $_{H}\mathcal{MHYD}^{H}(\alpha,\beta)$
and $_{H}\mathcal{MHYD}^{H}$ are isomorphic.
\\
{\bf Proof.} In order to prove the isomorphism between the
two categories, we only need to give a pair of mutually inverse functors.
The pair of functors $(F,G)$ is given as follows.
If $(M,\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta),$
then $(F(M),\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}$, where $F(M)= M$
as a vector space, with structures
\begin{eqnarray*}
&h\rightarrow m = &\theta(\beta^{-1}S(h_{1}))\beta^{-1}(h_{2})\cdot m, \\
&\rho(m)=:& m_{<0>}\otimes m_{<1>}=m_{0}\otimes m_{1}\omega^{-1}.
\end{eqnarray*}
If $(N,\xi_{N})\in {}_{H}\mathcal{MHYD}^{H},$
then $(G(N),\xi_{N})\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)$, where $G(N)= N$
as a vector space, with module and comodule structures
\begin{eqnarray*}
&h\rightharpoondown n =& \theta(h_{1})\beta(h_{2})\cdot n, \\
&\rho(n)=:&n^{(0)}\otimes n^{(1)}= n_{0}\otimes n_{1}\omega.
\end{eqnarray*}
Both $F$ and $G$ act as identities on morphisms.
One checks that $F$ and $G$ are functors and inverse to each other.
$\blacksquare$\\
{\bf Proposition 4.7.} Let $\alpha,\beta,\gamma\in {\sl Aut}_{mHH}(H)$.
Then the categories ${}_{H}\mathcal{MHYD}^{H}(\alpha\beta,\gamma\beta)$
and ${}_{H}\mathcal{MHYD}^{H}(\alpha,\gamma)$ are isomorphic.
\\
{\bf Proof.} A pair of mutually inverse functors $(F,G)$ is given as follows.
If $(M,\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}(\alpha\beta,\gamma\beta),$
then $(F(M),\xi_{M})\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\gamma)$, where $F(M)= M$
as a vector space, with structures
\begin{eqnarray*}
&h\rightarrow m =& \beta^{-1}(h)\cdot m, \\
&\rho(m)=:&m_{<0>}\otimes m_{<1>}=m_{0}\otimes m_{1}.
\end{eqnarray*}
If $(N,\xi_{N})\in {}_{H}\mathcal{MHYD}^{H}(\alpha,\gamma),$
then $(G(N),\xi_{N})\in {}_{H}\mathcal{MHYD}^{H}(\alpha\beta,\gamma\beta)$, where $G(N)= N$
as a vector space, with module and comodule structures
\begin{eqnarray*}
&h\rightharpoondown n =& \beta(h)\cdot n, \\
&\rho(n)=:&n^{(0)}\otimes n^{(1)}= n_{0}\otimes n_{1}.
\end{eqnarray*}
Both $F$ and $G$ act on morphisms as identities.
We can check that $F$ and $G$ are functors and inverse to each other.
This completes the proof.
$\blacksquare$
\\
{\bf Corollary 4.8.} For all $\alpha,\beta\in {\sl Aut}_{mHH}(H)$, we have
isomorphisms of categories:
\begin{eqnarray*}
&&{}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)\simeq {}_{H}\mathcal{MHYD}^{H}(\alpha\beta^{-1},id),
\ \ {}_{H}\mathcal{MHYD}^{H}(\alpha,\alpha)\simeq {}_{H}\mathcal{MHYD}^{H},\\
&& {}_{H}\mathcal{MHYD}^{H}(\alpha,id)\simeq {}_{H}\mathcal{MHYD}^{H}(id,\alpha^{-1}),
\ \ {}_{H}\mathcal{MHYD}^{H}(id,\beta)\simeq {}_{H}\mathcal{MHYD}^{H}(\beta^{-1},id).
\end{eqnarray*}
Let again $\alpha,\beta\in {\sl Aut}_{mHH}(H)$ be such that there exists
a pair in involution $(\theta,\omega)$ corresponding to $(\alpha,\beta)$, and assume
that $(H,\xi_{H})$ is finite dimensional. Then we know that ${}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)
\simeq {}_{H^{*}\bowtie H(\alpha,\beta)}\mathcal{M}$,
${}_{H}\mathcal{MHYD}^{H}\simeq {}_{D(H)}\mathcal{M}$
(\cite{CZ2014}, Proposition 4.3), and the isomorphism ${}_{H}\mathcal{MHYD}^{H}(\alpha,\beta)\simeq {}_{H}\mathcal{MHYD}^{H}$ constructed in Proposition 4.6 is induced by
a monoidal Hom-algebra isomorphism as follows.
\\
{\bf Corollary 4.9.} $(H^{*}\bowtie H(\alpha,\beta),\xi_{H}^{*-1}\otimes \xi_{H})
\simeq (D(H), \xi_{H}^{*-1}\otimes \xi_{H})$ as monoidal Hom-algebras,
given by
\begin{eqnarray*}
D(H)\rightarrow H^{*}\bowtie H(\alpha,\beta),&&
f\otimes h\mapsto \omega^{-1}\rightharpoonup f\bowtie \theta(\beta^{-1}(S(h_{1})))\beta^{-1}(h_{2}),\\
H^{*}\bowtie H(\alpha,\beta)\rightarrow D(H),&&
f\bowtie h\mapsto \omega\rightharpoonup f\otimes \theta(h_{1})\beta(h_{2}),
\end{eqnarray*}
for all $h\in H, f\in H^{*},$ and the group-like element $\omega\in H$.
\\
Finally, we consider some special cases, which are shown in Example 4.3.
Similar to the cases in Staic \cite{S2007}, we give the following two propositions.
We define a modular pair $(\omega,\theta)$ in a monoidal Hom-Hopf algebra $(H,\xi_{H})$
as follows: $\theta$ is an algebra map $H\rightarrow k$ and
$\omega\in(H,\xi_{H})$ is a group-like element
satisfying $\theta(\omega)=1.$ Defining an endomorphism $\widetilde{S}$ of $(H,\xi_{H})$
by $\widetilde{S}(h)=S(h_{1})\theta(h_{2})$ for all $h\in H$, then $(\omega, \theta)$ is called
a modular pair in involution if $\widetilde{S}^{2}(h)=\omega^{-1}(h\omega).$
\\
{\bf Proposition 4.10.} Let $(H,\xi_{H})$ be a monoidal Hom-Hopf algebra,
$(\omega,\theta)$ a modular pair in involution and $(M,\xi_{M})$ a left-right
anti-Yetter-Drinfeld Hom-module. If we define a new action of $H$ on $M$ by
$$h\rightharpoondown m = \theta(S(h_{1}))h_{2}\cdot m,$$
and a new coaction as follows:
$$\rho(m)=m_{<0>}\otimes m_{<1>}= m_{0}\otimes m_{1}\omega^{-1},$$
then $(M,\rightharpoondown, \rho)$ is a left-right Yetter-Drinfeld Hom-module.
\\
{\bf Proof.} First, since $\theta: H\rightarrow k$ is an algebra morphism
and $\omega$ is a group-like element, the module and comodule
structures are well defined by the above formulas.
We denote the convolution inverse of $\theta$ by $\theta^{-1}$.
From $\widetilde{S}^{2}(h)=\omega^{-1}(h\omega)$, we can get
$\theta (S(h_{11}))S^{2}(h_{12})\theta(h_{2}) = \omega^{-1}(h\omega)$
and $\theta^{-1}(h_{1})S(h_{21})\theta(h_{22}) = \omega^{-1}(S^{-1}(h)\omega):$
\begin{eqnarray*}
&&(h\rightharpoondown m)_{<0>}\otimes (h\rightharpoondown m)_{<1>}\\
&=& \theta^{-1}(h_{1})(h_{2}\cdot m)_{0}\otimes (h_{2}\cdot m)_{1}\omega^{-1} \\
&=& \theta^{-1}(h_{1})\xi_{H}(h_{221})\cdot m_{0}
\otimes ((h_{222} \xi_{H}^{-1}(m_{1}))S(h_{21}))\omega^{-1} \\
&=& h_{21}\cdot m_{0}\otimes ((\xi_{H}^{-1}(h_{22}) \xi_{H}^{-1}(m_{1}))
\theta^{-1}(\xi_{H}(h_{11}))S(h_{12}))\omega^{-1} \\
&=& h_{21}\cdot m_{0}\otimes ((\xi_{H}^{-1}(h_{22}) \xi_{H}^{-1}(m_{1}))
((\omega^{-1}\omega)\theta^{-1}(\xi_{H}(h_{11}))S(h_{121})\theta(h_{1221})\theta^{-1}(h_{1222})))\omega^{-1} \\
&=& h_{21}\cdot m_{<0>}\otimes ((h_{22}\xi_{H}^{-1}(m_{<1>}))
((\omega^{-1}\theta^{-1}(\xi_{H}^{2}(h_{111}))S\xi_{H}(h_{1121})\theta(h_{1122}))\omega^{-1}))\\
&& \theta^{-1}(\xi_{H}^{-2}(h_{12})) \\
&=& h_{21}\cdot m_{<0>}\otimes (h_{22}\xi_{H}^{-1}(m_{<1>}))
S^{-1}\xi_{H}(h_{11})\theta^{-1}(\xi_{H}^{-2}(h_{12})) \\
&=& \theta^{-1}(\xi_{H}(h_{211}))\xi_{H}(h_{212})\cdot m_{<0>}
\otimes (h_{22}\xi_{H}^{-1}(m_{<1>}))S^{-1}(h_{1}) \\
&=& \xi_{H}(h_{21})\rightharpoondown m_{<0>}\otimes (h_{22}\xi_{H}^{-1}(m_{<1>}))S^{-1}(h_{1}).
\end{eqnarray*}
This means that $(M,\rightharpoondown, \rho)$ is a left-right Yetter-Drinfeld Hom-module.
$\blacksquare$\\
By Example 4.4 and Remark 1.2.7 (3), we have the following proposition.
\\
{\bf Proposition 4.11.} For any integers $m$ and $n$, if $(M,\xi_{M})$
is a left-right $m$-Yetter-Drinfeld Hom-module and $(N,\xi_{N})$ is an
$n$-Yetter-Drinfeld Hom-module, then $(M\otimes N,\xi_{M}\otimes\xi_{N})$ is
a left-right $(m+n)$-Yetter-Drinfeld Hom-module with
module and comodule structures as follows:
\begin{eqnarray*}
h\cdot (m \otimes n) &=& S^{2n} (h_{1})\cdot m \otimes h_{2}\cdot n,\\
m\otimes n &\mapsto & (m_{0}\otimes n_{0})\otimes n_{1}m_{1},
\end{eqnarray*}
for all $m\in M, n\in N$ and $h\in H.$
\\
Let $\mathcal{ZMHYD}(H)$ be the disjoint union of all the categories
${}_{H}\mathcal{MHYD}^{H}(S^{2n},id)$ of left-right $n$-Yetter-Drinfeld
Hom-modules with $n\in \mathbb{Z}$, the set of integers.
Then by Theorem 3.7 in \cite{YW2014} and Proposition 4.11,
the following corollary is a generalization of the main result
in Staic \cite{S2007}.
\\
{\bf Corollary 4.12.} $\mathcal{ZMHYD}(H)$ is a braided $T$-category
over $\mathbb{Z}$.
\\
{\bf Example 4.13.} Let $A =\langle a\rangle$ be a cyclic group of order $n$,
and $Aut(A)=\{\sigma_{t}: \sigma_{t}(a)=a^{t}, 0<t<n, (t,n)=1, t\in \mathbb{Z}\}$.
Then $(k[A],\xi_{k[A]})$ is a monoidal Hom-Hopf algebra with structure
given by
\begin{eqnarray*}
a^{i}\circ a^{j}=\xi_{k[A]}^{-1}(a^{i}a^{j}), &&
\Delta(a^{i})=\xi_{k[A]}^{-1}(a^{i})\otimes \xi_{k[A]}^{-1}(a^{i}),\\
\varepsilon(a^{i})=1_{k}, && S(a^{i})= a^{-i},
\end{eqnarray*}
for all $i,j\in \mathbb{Z}$.
First, $Aut_{mHH}(k[A])=Aut(A)$.
Let $(H,\xi_{H})=(k[A], \xi_{k[A]}=\sigma_{2})$
be the monoidal Hom-Hopf algebra given by
\begin{eqnarray*}
a^{i}\circ a^{j}= \sigma_{2}^{-1}(a^{i}a^{j})= a^{i+j-2}, &&
\Delta(a^{i})=\sigma_{2}^{-1}(a^{i})\otimes \sigma_{2}^{-1}(a^{i})=a^{i-2}\otimes a^{i-2},\\
\varepsilon(a^{i})=1_{k}, && S(a^{i})= a^{-i},
\end{eqnarray*}
for all $i,j\in \mathbb{Z}$. It is easy to check that
$$ S^{2n}(a^{i})=a^{i},$$
for all $n\in \mathbb{Z}.$
\\
Let $\mathcal{ZMHYD}(k[A])$ be the disjoint union of all the categories
${}_{k[A]}\mathcal{MHYD}^{k[A]}(S^{2n},id)$ of left-right $n$-$\mathcal{MHYD}$-modules
with $n\in \mathbb{Z}$.
\\
Let $(M,\xi_{M})$ be an $m$-$\mathcal{MHYD}$-module
and $(N,\xi_{N})$ an $n$-$\mathcal{MHYD}$-module, for $m,n\in \mathbb{Z}$.
Then $(M\otimes N,\xi_{M}\otimes\xi_{N})$ is an $(m+n)$-$\mathcal{MHYD}$-module with structures
as follows:
\begin{eqnarray*}
a^{i}\cdot (x\otimes y) &= &S^{2n}(a^{i-2})\cdot x\otimes a^{i-2}\cdot y,\\
(x\otimes y)&\mapsto &(x_{0}\otimes y_{0})\otimes y_{1}x_{1},
\end{eqnarray*}
for all $x\in M, y\in N, a^{i}\in k[A], n\in \mathbb{Z}.$
On $^{(S^{2m},id)}N = N$, there is an action $\unrhd$ given by
$$ a^{i}\unrhd y = S^{-2m}(a^{i})\cdot y,$$
and a coaction $\rho_{r}$ defined by
$$y\mapsto y_{0}\otimes S^{2m}(y_{1}),$$
for $y\in N, a^{i}\in k[A], m\in \mathbb{Z}.$
Let $(M,\xi_{M})$ be an $m$-$\mathcal{MHYD}$-module
and $(N,\xi_{N})$ an $n$-$\mathcal{MHYD}$-module, for $m,n\in \mathbb{Z}$.
Then the braiding
$$c_{M,N}: M\otimes N \rightarrow {}^{(S^{2m},id)}N \otimes M$$
is given by
$$ c_{M,N}(x\otimes y)= \xi_{N}(y_{0})\otimes y_{1}\cdot \xi_{M}^{-1}(x),$$
for all $x\in M, y\in N, m\in \mathbb{Z}.$
Then by Corollary 4.12, $\mathcal{ZMHYD}(k[A])$ is
a new braided $T$-category over $\mathbb{Z}.$
\section*{ACKNOWLEDGEMENTS}
This work was supported by the NSF of China (No. 11371088) and the NSF of Jiangsu Province (No. BK2012736).
\begin{thebibliography}{aa}
\bibitem{DPV2006} Bulacu D., Panaite F. and Van Oystaeyen F. (2006). Generalized
diagonal crossed products and smash products for quasi-Hopf algebras.
Applications. {\sl Communications in Mathematical Physics.} 266, 355-399.
\bibitem{CG2011} Caenepeel S., Goyvaerts I. (2011).
Monoidal Hom-Hopf algebras.
{\sl Commun. Algebra.} 39, 2216-2240.
\bibitem{CWZ2013} Chen Y. Y., Wang Z. W., and Zhang L. Y. (2013).
Integrals for monoidal Hom-Hopf algebras
and their applications. {\sl J. Math. Phys.} 54, 073515.
\bibitem{CZ2014} Chen Y. Y., Zhang L. Y. (2014).
The category of Yetter-Drinfel'd
Hom-modules and the quantum Hom-Yang-Baxter equation.
{\sl J. Math. Phys.} 55, 031702.
\bibitem{FY1989} Freyd P. J., Yetter D. N. (1989). Braided
compact closed categories with applications to low-dimensional topology.
{\sl Adv. in Math.} 77(2), 156-182.
\bibitem{K2004} Kirillov A. J. (2004). On $G$-equivariant
modular categories. {\sl arXiv: math.} QA/0401119.
\bibitem{LS2014} Liu L., Shen B. L. (2014). Radford's biproducts and
Yetter-Drinfeld modules for monoidal Hom-Hopf algebras.
{\sl J. Math. Phys.} 55, 031701.
\bibitem{LW2010} Liu L., Wang, S. H. (2010). Constructing new
braided $T$-categories over weak Hopf algebras.
{\sl Appl. Categ. Struct.} 18, 431-459.
\bibitem{FP2014} Makhlouf A., Panaite F. Hom-L-R-smash products,
Hom-diagonal crossed products and the Drinfeld double of a Hom-Hopf algebra.
{\sl arXiv: math. RA}/1403.7077.
\bibitem{PS2007} Panaite F., Staic M. D. (2007). Generalized (anti)
Yetter-Drinfel'd modules as components of a braided T-category.
{\sl Israel J. Math.} 158, 349-366.
\bibitem{S2007} Staic, M. D. (2007). A note on anti-Yetter-Drinfeld
modules. {\sl Contemp. Math.} 441, 149-153.
\bibitem{S1969} Sweedler, M. E. (1969). Hopf Algebras, Benjamin, New York.
\bibitem{T1994} Turaev V. G. (1994). Quantum Invariants of Knots and
$3$-Manifolds. {\sl de Gruyter Stud. Math.} 18, de Gruyter, Berlin.
\bibitem{T2008} Turaev, V. G. (2008). Crossed group-categories.
{\it Arab. J. Sci. Eng. Sect. C Theme Issues} 33(2C), 483-503.
\bibitem{VA2001} Virelizier, A. (2001). Alg\`{e}bres de Hopf
gradu\'{e}es et fibr\'{e}s plats
sur les 3-vari\'{e}t\'{e}s.
{\sl Ph. D. thesis, Universit\'{e} Louis Pasteur, Strasbourg.}
\bibitem{VA2005} Virelizier, A. (2005). Involutory Hopf group-coalgebras
and flat bundles over 3-manifolds.
{\sl Fundam. Math.} 188, 241-270.
\bibitem{Y2014} Yang, T. (2014). Another construction of the braided
$T$-category. {\sl arXiv: math. RA}/1409.6936v2.
\bibitem{YW2014} You, M. M., Wang, S. H. (2014). Constructing new braided
$T$-categories over monoidal Hom-Hopf algebras. {\sl J. Math. Phys.} 55, 111701.
\bibitem{Z2004} Zunino M. (2004). Yetter-Drinfeld modules for crossed structures.
{\sl J. Pure Appl. Algebra.} 193, 313-343.
\end{thebibliography}
\end{document}
|
\begin{document}
\title{Ergodic decomposition of group actions on rooted trees}
\author{Rostislav Grigorchuk\footnote{The first author was partially supported by the NSF grant DMS-1207699}\\
Department of Mathematics,\\
Texas A\&M University,\\
College Station, TX, 77843\\
\href{mailto:[email protected]}{[email protected]}\\
\and
Dmytro Savchuk\footnote{The second author was partially supported by the New Researcher Grant and the Proposal Enhancement Grant from USF Internal Awards Program.}\\
Department of Mathematics and Statistics\\
University of South Florida\\
4202 E Fowler Ave\\
Tampa, FL 33620-5700\\
\href{mailto:[email protected]}{[email protected]}
}
\maketitle
\abstract{We prove a general result about the decomposition on ergodic
components of group actions on boundaries of spherically homogeneous
rooted trees. Namely, we identify the space of ergodic components with
the boundary of the orbit tree associated with the action, and show that
the canonical system of ergodic invariant probability measures coincides
with the system of uniform measures on the boundaries of minimal
invariant subtrees of the tree.
A special attention is given to the case of groups generated by finite
automata. Few examples, including the lamplighter group, Sushchansky
group, and the, so called, Universal group are considered in order to
demonstrate applications of the theorem.}
\section*{Introduction}
The ergodic decomposition theorem is one of the most important and frequently used theorems in dynamical systems and ergodic theory. It was initiated by von Neumann, Bogolyubov and Krylov but, perhaps, its first precise form was given by Rokhlin~\cite{rokhlin:ergodic_decomp49}, where he introduced the class of measure spaces now called the Lebesgue spaces.
At first, the ergodic theorem was proved for the case of one automorphism of a Lebesgue space or a one parameter family of such automorphisms, which corresponds to the actions of groups $\mathbb Z$ or $\mathbb R$ respectively. Later, the theorem was extended to the case of countable groups and locally compact groups (and further generalizations were made including passing from finite to infinite measures, from invariant to quasi-invariant measures, and from locally compact groups to some classes of non-locally compact groups~\cite{bufetov:ergodic_decomposition14}).
In 1961 Varadarajan~\cite{varadarajan:groups_of_automorphisms63} (see also Farrell~\cite{farrell:representations_invar_measures62}) proved the ergodic decomposition theorem in the topological setting, namely when a group $G$ acts on a Polish space by homeomorphisms. Varadarajan's theorem (Theorem~\ref{thm:erg_decomp_general} in Section~\ref{sec:prelim}) describes ergodic decomposition for each $G$-invariant probability measure.
The main goal of this article is to show how Varadarajan's theorem works in the situation when a group $G$ acts by automorphisms on a spherically homogeneous rooted tree $T$ and, consequently, by homeomorphisms on its boundary $\partial T$ (which is homeomorphic to the Cantor set as soon as the tree has infinitely many ends). For any such action of $G$ each level of the tree $T$ is an invariant subset. The uniform probability measure $\mu_T$ on $\partial T$ is invariant with respect to the whole group $\mathop{\rm Aut}\nolimits(T)$ of automorphisms of the tree and the ergodicity of the system $(G,\partial T,\mu_T)$ is equivalent to level transitivity of the action $(G,T)$~\cite[Proposition~6.5]{gns00:automata} (and also is equivalent to unique ergodicity).
This situation has also a direct connection to the theory of profinite groups. Namely, if
a group $G$ acts transitively on the levels of a tree, then its closure $\overline{G}$ in $\mathop{\rm Aut}\nolimits(T)$, which is a profinite group, acts transitively on the boundary $\partial T$, and the uniform measure $\mu_T$ becomes the image of the Haar measure on $G$. In this case, the dynamical system $(G,\partial T,\mu_T)$ is isomorphic to the system $(G, \overline{G}/P,\mu_T)$, where $P =\mathrm{stab}_{\overline{G}}(\xi)$ is the stabilizer of point $\xi\in\partial T$ under the action of $\overline{G}$. The converse is also true in the following sense. By the result of Mackey~\cite{mackey:ergodic64}, any action $(G,X,\mu)$ with pure point spectrum, where $G$ is a countable group acting faithfully on $X$ by transformations preserving the probability measure $\mu$, is isomorphic to the action of type $(G',K/P,\lambda)$, where $K$ is a profinite group, $G'$ is a subgroup of $K$ isomorphic to $G$, and $P$ is a closed subgroup of $K$.
In turn, as shown in Theorem 2.9 in~\cite{grigorch:dynamics11eng}, the latter action is isomorphic to the action $(G,\partial T,\nu)$, where a spherically homogeneous rooted tree $T$ is constructed as the coset tree of a family of open subgroups of $K$ whose intersection is $P$, and $\nu$ is the uniform measure on $\partial T$. Therefore, the profinite case in Mackey's theorem corresponds precisely to the action on rooted trees.
In the case when the action of $G$ on $T$ is not level transitive the situation is more complicated. In order to decompose $\mu_T$ into ergodic components and describe all $G$-invariant ergodic probability measures on $\partial T$ one needs to know the structure of the \emph{orbit tree} $T_G$ whose vertices are orbits of $G$ on the set of vertices $V(T)$ of $T$ and the adjacency relation is induced by the adjacency in $T$ (i.e., $T_G$ is simply the quotient graph of $T$ under the action of $G$). This tree was used in~\cite{gawron_ns:conjugation} in order to give a criterion for establishing when two elements are conjugate in $\mathop{\rm Aut}\nolimits(T)$, as well as recently in~\cite{klimann:finiteness} to deal with the finiteness problem in automaton groups generated by invertible-reversible automata. In Theorem~\ref{thm:EI_homeo_bndry} we show that the boundary $\partial T_G$ of the orbit tree can be naturally identified with the space of ergodic components of the action of $G$ on $\partial T$: there is a bijection between $\partial T_G$ and the set of the minimal invariant subtrees of $T$, and uniform probability measures on boundaries of these trees are exactly all ergodic invariant probability measures for the system $(G,\partial T)$. In Section~\ref{sec:examples} we apply the obtained results to get the ergodic decompositions for actions of some groups generated by finite automata.
The class of automaton groups possesses a number of interesting and unusual algebraic and dynamical properties. There are many examples showing that even simple automata (with a small number of states and an alphabet consisting of just two symbols) demonstrate very complicated algebraic, combinatorial, and dynamic behavior~\cite{grigorch:burnside,gns00:automata,grigorch_z:basilica,bondarenko_gkmnss:full_clas32_short,grigorch_s:hanoi_spectrum}.
After considering simple examples of ergodic decompositions of actions of finite groups and level transitive actions, we deal with Sushchansky infinite $p$-groups~\cite{sushch:burnside} in Subsection~\ref{ssec:sushch} and the universal group~\cite{grigorch:solved} for the family of groups $G_{\omega}$ from~\cite{grigorch:degrees} in Subsection~\ref{ssec:univ_grigorch}.
The most complicated example is studied in Subsection~\ref{ssec:lamp} and deals with the 2-state automaton over 2-letter alphabet generating the lamplighter group $\mathcal L$. The automaton presentation of $\mathcal L$ was found in~\cite{gns00:automata} and was used in~\cite{grigorch_z:lamplighter} to compute the spectrum of the discrete Laplacian, which happened to be purely discrete. This automaton presentation of $\mathcal L$ is given on a binary tree, which by Lemma~3 in~\cite{bondarenko_gkmnss:full_clas32_short} implies that its action on this tree is spherically transitive. Therefore by Proposition 6.5 in~\cite{gns00:automata} there is only one (ergodic) invariant probability measure on $\partial T$. However, it is more interesting in this case to consider the ergodic decomposition of actions of subgroups of $\mathcal L$ that do not act level transitively. In particular, we give a complete description of such decompositions for cyclic subgroups $\langle a\rangle$ and $\langle b\rangle$, where $a$ and $b$ are the automorphisms of the tree corresponding to the states of the generating automaton. In order to get the structure of the orbit trees in these cases, we explicitly describe how each orbit looks like using the representation of the lamplighter group by functions that act on formal power series.
We hope that the considerations initiated in this article will be useful for further investigations of group actions on trees and for solving the classification problems started in~\cite{gns00:automata,bondarenko_gkmnss:full_clas32_short}.
The structure of the paper is the following. In Section~\ref{sec:prelim} we recall basic definitions and set up the notation. The main theorem is proved in Section~\ref{sec:theorem}. We conclude the paper with several particular examples in Section~\ref{sec:examples}.
\noindent \textbf{Acknowledgement.} The authors would like to thank the referee for the very detailed careful review with numerous suggestions that greatly improved the exposition of the paper.
\section{Preliminaries}
\label{sec:prelim}
In this paper we will deal only with rooted trees, i.e. the trees with a distinguished vertex called the \emph{root}. For each such tree $T$ and $n\geq 0$ the set $[T]_n$ of vertices of $T$ at combinatorial distance $n$ from the root is called the $n$-th \emph{level} of $T$. For each vertex $v$ of $T$ of the $n$-th level the vertices of the $(n+1)$-st level adjacent to $v$ are called the \emph{children} of $v$. We will visualize the rooted trees as growing down with the root on top. In this visualization the children of a vertex are the vertices that are right below it.
For each rooted tree $T$ the boundary $\partial T$ of $T$ is defined as the set of all infinite paths in $T$ starting from the root that do not have backtracking. A tree is called \emph{spherically homogeneous} if the degrees of all vertices of each level coincide (but this common degree may depend on the level). A special very important class of spherically homogeneous rooted trees is the class of regular rooted trees. A rooted tree is called \emph{regular} if each vertex of the tree has the same number of children. If each vertex has $d$ children, the tree is called $d$-regular rooted tree (or $d$-ary rooted tree) and is denoted by $T_d$. The tree $T_2$ is called \emph{binary} and is depicted in Figure~\ref{fig:binary_tree}.
\begin{figure}
\centering
\caption{Binary tree $T_2$}
\label{fig:binary_tree}
\end{figure}
The class of regular rooted trees naturally arises in symbolic dynamics. Indeed, let $\Sigma$ be a finite alphabet of cardinality $d$. We will denote by $\Sigma^*$ and $\Sigma^{\omega}$ the sets of all finite and infinite words over $\Sigma$, respectively. The set $\Sigma^*$ can be naturally identified with the set of vertices of the $d$-ary rooted tree $T_d$, where the empty word $\emptyset\in\Sigma^*$ corresponds to the root of the tree and words $v$ and $vx$ for $v\in\Sigma^*$ and $x\in\Sigma$ are declared to be adjacent. With this identification between $\Sigma^*$ and $T_d$ the set $\Sigma^\omega$ is naturally identified with the boundary $\partial T_d$. The set $\Sigma^n$ of words over $\Sigma$ of length $n$ constitutes the $n$-th level of $\Sigma^*$. For a word $v\in\Sigma^*\cup\Sigma^\omega$ we will denote by $|v|\in\mathbb N\cup\{0,\infty\}$ the length of $v$.
For a rooted tree $T$ and point $\xi\in\partial T$ we denote by $[\xi]_n$ the vertex of $T$ located on a path $\xi$ at the distance $n$ from the root. In the particular case of the regular rooted tree $T=\Sigma^*$ and $\xi=x_1x_2x_3\ldots$ for $x_i\in\Sigma$ we have $[\xi]_n=x_1x_2\ldots x_n$.
Let $G$ be a group acting on a rooted tree $T$ by automorphisms preserving the root. Then, this action preserves the levels of the tree. We say that $(G,T)$ is \emph{spherically transitive} if it is transitive on each level of $T$. A necessary condition for the action to be spherically transitive is that the tree $T$ has to be spherically homogeneous.
In our study the central role is played by the following notion.
\begin{definition}
The \emph{orbit tree} $T_{G}$ for the action of $G$ on a rooted tree $T$ (i.e. on the set of vertices $V(T)$ of $T$) is the graph whose vertices correspond to the orbits of $G$ on the levels of $T$, in which two orbits are adjacent if and only if they contain vertices that are adjacent in $T$.
\end{definition}
It follows directly from the definition that the orbit tree $T_G$ is again a rooted tree with the root corresponding to the 1-element orbit consisting of the root of $T$. Indeed, suppose vertices $v$ and $w$ belong to the $n$-th level $[T]_n$ of $T$ and let $v'$ and $w'$ be vertices of $[T]_{n-1}$ adjacent to $v$ and $w$, respectively. If $v$ and $w$ belong to the same orbit of $G$, then there is $g\in G$ that moves $v$ to $w$. In this case the same $g$ necessarily moves $v'$ to $w'$. Thus, each vertex of the $n$-th level of $T_G$ is adjacent to exactly one vertex of the previous level. However, $T_G$ may be not spherically homogeneous even if $T$ is spherically homogeneous. Orbit trees in various forms have been studied earlier (see, for example,~\cite{gawron_ns:conjugation,bondarenko_s:sushch,klimann:finiteness,klimann_ps:3state}). They describe the partition of the set of vertices of a rooted tree into transitive components under the action of a group.
There is a natural map $\psi\colon V(T)\to V(T_{G})$ that sends a vertex of $T$ to its orbit viewed as a vertex of $T_{G}$. This map naturally extends to a continuous map $\psi\colon \partial T\to\partial T_{G}$ with respect to the topologies that we define below.
The boundary $\partial T$ of a rooted tree may be viewed as an (ultra)metric space as follows: fix a monotonically decreasing sequence $\{\lambda_n\}_{n\geq0}$ converging to 0 and define the distance of two points in $\partial T$ to be equal to $\lambda_k$, where $k$ denotes the length of the longest common part of the two (geodesic) paths connecting the root to each of them.
This metric defines a topology on $\partial T$ that in the case of a spherically homogeneous rooted tree can be constructed in the following way. The set of vertices of a spherically homogeneous rooted tree $T_{\{\Sigma_n, n\geq0\}}$ can be identified with
\[\bigcup_{n\geq 0}\prod_{i=0}^n\Sigma_i,\]
where $\Sigma_n$ is an alphabet of cardinality that is equal to the number of children of each vertex of level $n$. The boundary of $\partial T_{\{\Sigma_n, n\geq0\}}$ of this tree is naturally identified with $\prod_{n\geq 0}\Sigma_n$ that is endowed with a Tychonoff product topology (when using the discrete topologies on $\Sigma_n$, $n\geq 0$). The topological structure induces the Borel structure on $\partial T$. In the case of spherically homogeneous tree $T$ one can construct the uniform probability measure $\mu_T$ on $\partial T$ by defining
\[\mu_T(C_v)=\frac1{|[T]_{|v|}|},\]
where for a vertex $v\in V(T)$ the cylindrical set $C_v$ consists of all infinite paths in $\partial T$ that go through $v$. This is the measure whose existence and uniqueness is proved in the Kolmogorov consistency (also called extension, or existence) theorem~\cite{kolmogoroff:grundbegriffe,parthasarathy:prob_measures67}. In the case of a regular tree the uniform probability measure on its boundary coincides with the Bernoulli measure.
\begin{lemma}
The map $\psi\colon \partial T\to\partial T_{G}$ is a continuous surjective map.
\end{lemma}
\begin{proof}
A basis of the topology in $\partial T_{G}$ consists of cylindrical sets $C_{\mathcal O}, \mathcal O\in V(T_{G})$ consisting of all infinite paths in $T_G$ that go through a vertex $\mathcal O$ of $T_{G}$ (i.e., $\mathcal O$ represents an orbit of $G$ on some level of $T$). Therefore,
\[\psi^{-1}(C_{\mathcal O})=\bigcup_{v\in\mathcal O}C_v\]
is open in $\partial T$ (in fact, it is clopen) and, hence, $\psi$ is continuous.
\end{proof}
Similarly to the boundary of a rooted tree, the whole group $\mathop{\rm Aut}\nolimits(T)$ of all automorphisms of a rooted tree $T$ can be naturally endowed with a topology, induced by the metric $\lambda(\alpha,\gamma)=\lambda_k$, where $\{\lambda_n\}$ is again any monotonically decreasing sequence converging to 0, and $k$ is the largest number of the level of $T$ on which the actions of the automorphisms $\alpha$ and $\gamma$ coincide. Note, that the topology defined by such metric does not depend on the choice of $\{\lambda_n\}$.
By a \emph{measure} on a standard Borel space $X$ we will mean a non-zero Borel measure on $X$. A measure $\mu$ on $X$ is called \emph{probability measure} if $\mu(X)=1$. With the above described topology $\mathop{\rm Aut}\nolimits(T)$ is a compact totally disconnected group (hence, a \emph{profinite group}, i.e. a group isomorphic to the inverse limit of an inverse system of discrete finite groups) acting on $\partial T$ and, in the case when $T$ is spherically homogeneous, preserving the uniform probability measure $\mu_T$. Moreover, the converse is true in the following sense.
\begin{proposition}[\mbox{see~\cite[Proposition 2]{grigorch:jibranch}}]
Every countably based profinite group is isomorphic to a closed subgroup of $\mathop{\rm Aut}\nolimits(T)$ for some spherically homogeneous rooted tree $T$.
\end{proposition}
\begin{proof}
Let $G$ be a countably based profinite group. By definition it has a countable descending sequence $G=V_0>V_1>V_2>\cdots$ of finite index open subgroups with trivial intersection. Then $G$ acts faithfully by automorphisms on the, so-called, \emph{coset tree} $T$ of the sequence $\{V_n\}_{n\geq 0}$ constructed as follows. The vertices of $T$ correspond to the cosets of $V_n$ in $G$ for all $n\geq 0$. Two vertices corresponding to cosets $V_{n}g$ and $V_{n+1}h$ are adjacent if and only if $V_{n}g\supset V_{n+1}h$. Then $G$ acts on $T$ by automorphisms simply by right multiplication: an element $g\in G$ sends $V_nh$ to $V_nhg$. This action is clearly faithful since the kernel is equal to the trivial $\cap_{n\geq 0}V_n$.
\end{proof}
Let $G$ be a locally compact group acting on a standard Borel space $X$ by transformations preserving a probability measure $\mu$. Measure $\mu$ is called \emph{ergodic} if the measure of each $G$-invariant Borel set in $X$ is either 0 or 1.
We denote by $\mathcal M_G$ the space of all invariant probability measures on $X$ and by $\mathcal{M}^e_G$ the set of all ergodic invariant probability measures on $X$. Both $\mathcal M_G$ and $\mathcal{M}^e_G$ are Borel subsets of the standard Borel space $P(X)$ of all probability measures on $X$. Recall that $P(X)$ is endowed with the \emph{weak topology} (sometimes called \textit{weak$^*$ topology}): a sequence of measures $\mu_n\in P(X)$ \emph{weakly converges} to a measure $\mu\in P(X)$ if for each bounded continuous function $f\colon X\to\mathbb R$ we have
\[\int f\,d\mu_n\to\int f\,d\mu,\quad n\to\infty.\]
In the case when $G$ is a countable discrete group, an invariant measure in $\mathcal M_G$ is ergodic if and only if it is an extreme point in the (Choquet) simplex $\mathcal{M}_G$, i.e. it cannot be written as a convex combination of other invariant measures from $\mathcal{M}_G$ with non-zero coefficients. However, this is not true for general locally compact groups as Kolmogorov's example shows~\cite{grigorch_h:amenability_ergodic_top_groups,bufetov:ergodic_decomposition14,fomin:measures1950}.
The ergodic decomposition theorem due to Varadarajan~\cite{varadarajan:groups_of_automorphisms63}, stated here as in Kechris and Miller~\cite{kechris_m:topics_in_orbit_equivalence04} (see also~\cite{farrell:representations_invar_measures62}), reads as follows.
\begin{theorem}
\label{thm:erg_decomp_general}
For a locally compact second countable group $G$ let $X$ be a standard Borel $G$-space and let $\mathcal M_G$ and $\mathcal{M}^e_G$ be the spaces of all invariant probability measures on $X$ and ergodic invariant probability measures on $X$, respectively. Suppose $\mathcal M_G\neq\emptyset$. Then $\mathcal{M}^e_G\neq\emptyset$ and there is a Borel surjection $\pi\colon X\to \mathcal{M}^e_G$ such that
\begin{itemize}
\item[1)] $\pi$ is $G$-invariant (i.e., $\pi$ is constant on each orbit of $G$),
\item[2)] For $\nu\in\mathcal{M}^e_G$ the set $X_{\nu}=\{x\in X\colon \pi(x)=\nu\}$ satisfies $\nu(X_\nu)=1$ and the action $G\curvearrowright X_{\nu}$ has a unique invariant measure, namely $\nu$, and
\item[3)] if $\mu\in\mathcal M_G$, then $\mu=\int\pi(\xi)\,d\mu(\xi)$.
\end{itemize}
Moreover, $\pi$ is uniquely determined in the sense that, if $\pi'$ is another such map, then the set $\{x\in X\colon\pi(x)\neq\pi'(x)\}$ has measure zero with respect to all measures in $\mathcal M_G$.
\end{theorem}
Throughout the paper we will use the above theorem in two cases: when a group $G$ is countable with the discrete topology, and when $G$ is a profinite group.
\section{Ergodic decomposition for groups acting on rooted trees}
\label{sec:theorem}
Let $G$ be a group acting on a rooted tree $T$ by automorphisms and, hence, on its boundary $\partial T$ by homeomorphisms. Throughout this section we will write $\mathcal X=\partial T$ and $\mathcal Y=\partial T_{G}$ where $T_{G}$ is the orbit tree associated with the action of $G$ on $T$.
\begin{definition}
A \emph{leaf} of a rooted tree is a vertex of degree one which is different from the root of the tree.
\end{definition}
All rooted trees that we consider in this paper are rooted trees with no leaves (i.e., each vertex lies on some path(s) in the boundary of the tree).
\begin{definition}
Let $T$ be an infinite rooted tree with no leaves. A subtree of $T$ with no leaves is called \emph{rooted} if it contains the root of $T$.
\end{definition}
\begin{definition}
Let $G$ be a group acting on a rooted tree $T$ with no leaves. A nonempty rooted subtree $T'$ of $T$ with no leaves is called \emph{minimal} (denoted $T'\prec T$) if it is a minimal (with respect to inclusion) invariant subtree with no leaves.
\end{definition}
\begin{proposition}
For a group $G$ acting on a rooted tree $T$, the boundary $\partial T$ can be decomposed as
\begin{equation}
\label{eq:boundary}
\partial T=\bigsqcup_{T'\prec T}\partial T'.
\end{equation}
Moreover, there is a bijection between the set of minimal subtrees of $T$ and the boundary $\partial T_{G}$ of the orbit tree $T_{G}$ associated with the action of $G$ on $T$.
\end{proposition}
\begin{proof}
First we show that if $T'$ and $T''$ are two different minimal subtrees, then $\partial T'\cap\partial T''=\emptyset$. Indeed, since $T'$ and $T''$ are minimal subtrees of $T$, on each level of $T$ their sets of vertices must either coincide or be disjoint: if $\xi\in\partial T'\cap\partial T''$, then $[\xi]_n$ is a common vertex of the $n$-th levels of $T'$ and $T''$ and therefore, since there is such a vertex for each $n$, minimality ensures that $T'=T''$.
Now, for each $\xi\in\partial T$ we will build a minimal subtree $T_{\xi}$ of $T$ with $\xi\in\partial T_{\xi}$. Define $V(T_{\xi})$ to be the preimage under $\psi$ of the set of vertices $\{[\psi(\xi)]_n\colon n\geq0\}$ of the orbit tree $T_{G}$. In other words, $T_{\xi}$ is a union of orbits of $[\xi]_n$ under the action of $G$. Then by construction $T_{\xi}$ is a minimal subtree of $T$ containing $\xi$. Moreover, if $T'$ is a minimal subtree of $T$, its boundary must contain some point $\eta\in\partial T$ that is also contained in $T_{\eta}$, yielding $T'=T_{\eta}$. Finally, the fact that $T_{\xi}=T_{\xi'}$ if and only if $\psi(\xi)=\psi(\xi')$ proves that the map from the boundary $\partial T_{G}$ to the set of minimal subtrees of $T$ sending $\eta\in\partial T_{G}$ to $T_{\eta}=\psi^{-1}\left(\{[\eta]_n\colon n\geq0\}\right)$ is a bijection.
\end{proof}
The last proof motivates the following notation: for $\xi\in\partial T$, and $\eta\in\partial T_{G}$ we associate minimal subtrees $T_{\xi}$ and $T_{\eta}$ of $T$ with $\xi\in\partial T_{\xi}$, $\psi(\partial T_{\eta})=\eta$ and $T_{\xi}=T_{\psi(\xi)}$.
Observe that the decomposition~\eqref{eq:boundary} can now be rewritten as
\begin{equation}
\label{eq:boundary_orbit}
\partial T=\bigsqcup_{\eta\in\partial T_G}\partial T_\eta
\end{equation}
and for each $\eta\in\partial T_G$ the boundary $\partial T_{\eta}$ is a closed subset of $\partial T$.
\begin{theorem}
\label{thm:EI_homeo_bndry}
Let $G$ be a countable discrete or profinite group acting faithfully by automorphisms on a spherically homogeneous rooted tree $T$ and by homeomorphisms on its boundary $\mathcal X=\partial T$, and let $\mathcal Y=\partial T_G$ be the boundary of the corresponding orbit tree $T_G$.
\begin{itemize}
\item[(a)] The map $\beta\colon \mathcal Y\to\mathcal{M}^e_G$ sending a point $\eta\in\mathcal Y$ to the uniform probability measure $\mu_{\eta}$ with support $\partial T_{\eta}$ in the space $\mathcal{M}^e_G$ of invariant ergodic measures on $\mathcal X$ is a homeomorphism.
\item[(b)] The map $\pi=\beta\circ\psi\colon \mathcal X\to\mathcal{M}^e_G$ sending a point $\xi\in\mathcal X$ to the uniform probability measure $\mu_{\psi(\xi)}$ with support $\partial T_{\xi}$ satisfies conditions 1)-3) of Theorem~\ref{thm:erg_decomp_general} and thus defines the ergodic decomposition of the action of $G$ on $\mathcal X$.
\end{itemize}
\end{theorem}
\begin{proof}
We will first prove the theorem for the case when $G$ is a countable discrete group.
We start from the proof of part (a). The action of $G$ on $T_\eta$ is level transitive since each level $n$ of $T_\eta$ corresponds to exactly one orbit of $G$ on the same level $n$ of $T$. Hence, by~\cite[Proposition~6.5]{gns00:automata}, the action of $G$ on $\partial T_\eta$ (and thus on $\partial T$) is ergodic with respect to $\mu_\eta$, so $\mu_\eta\in\mathcal{M}^e_G$ and $\beta$ is well-defined.
To show that $\beta$ is surjective, assume $\mu\in\mathcal{M}^e_G$. Let $[\mu]_n$ be the measure induced by $\mu$ on the $n$-th level of $T$, i.e., for $A\subset [T]_n$
\[[\mu]_n(A)=\sum_{v\in A}\mu(C_v).\]
In other words, $[\mu]_n$ is the projection of $\mu$ induced by the natural projection $p_n\colon\partial T\to [T]_n$. As $\mu$ is an ergodic invariant probability measure, so is $[\mu]_n$. Therefore, $[\mu]_n$ is supported on exactly one orbit $\mathcal O_{n,\mu}$ of $G$ on $[T]_n$ and its value on each vertex of $\mathcal O_{n,\mu}$ is equal to $|\mathcal O_{n,\mu}|^{-1}$. The sequence of orbits $\{\mathcal O_{n,\mu}\}_{n\geq0}$ defines a unique point $\eta\in\mathcal Y$ since $\mathcal O_{n,\mu}$ is always adjacent to $\mathcal O_{n+1,\mu}$ in $T_{G}$. By construction we get that $\mu$ coincides with $\mu_\eta$ on each cylindrical set in $\partial T$. Thus, $\mu=\mu_\eta$ and the map $\beta$ is onto.
Finally, we will prove that the topological structure on $\mathcal Y$ is isomorphic to the one on $\mathcal{M}^e_G$. Since both $\mathcal Y$ and $\mathcal{M}^e_G$ are metrizable, their topologies are completely determined by the convergent sequences.
Thus, it is enough to show that the following conditions are equivalent: (i) $\eta_n\to\eta,\ n\to\infty$ in $\mathcal Y$ and (ii) $\mu_{\eta_n}\to\mu_{\eta}$ weakly as $n\to\infty$, i.e. formula
\begin{equation}
\label{eqn:conv}
\int \mathds 1_{C_v}\,d\mu_{\eta_n}\to\int \mathds 1_{C_v}\,d\mu_{\eta},\quad n\to\infty,
\end{equation}
holds for all $v\in V(T)$, where $\mathds 1_{C_v}$ denotes the characteristic function of a cylindrical set $C_v$.
Suppose that $v$ is on the $l$-th level of $T$ and $\eta_n\to\eta,\ n\to\infty$. Then there is $N>0$ such that for all $n\geq N$ we have $[\eta_n]_l=[\eta]_l$. But in this case for $n\geq N$
\begin{multline*}\int \mathds 1_{C_v}\,d\mu_{\eta_n}=
\left\{\begin{array}{ll}
0,&\text{if $v\notin\psi^{-1}([\eta_n]_l)$}\\
|\psi^{-1}([\eta_n]_l)|^{-1},&\text{if $v\in\psi^{-1}([\eta_n]_l)$}
\end{array}\right.\\
=
\left\{\begin{array}{ll}
0,&\text{if $v\notin\psi^{-1}([\eta]_l)$}\\
|\psi^{-1}([\eta]_l)|^{-1},&\text{if $v\in\psi^{-1}([\eta]_l)$}
\end{array}\right.=\int \mathds 1_{C_v}\,d\mu_{\eta}.
\end{multline*}
Therefore convergence~\eqref{eqn:conv} takes place.
Conversely, assume that $\mu_{\eta_n}\to\mu_{\eta},\ n\to\infty$ and suppose that $\eta_n\not\to\eta,\ n\to\infty$. Then there is some level $l$ and a subsequence $\{\eta_{n_i}\}_{i\geq 1}$ such that $[\eta_{n_i}]_l\neq[\eta]_l$. Consider the set
\[A=\bigsqcup_{v\in\psi^{-1}([\eta]_l)}C_v\]
corresponding to all points in $\mathcal X$ that pass through vertices in $\psi^{-1}([\eta]_l)$. The characteristic function $\mathds 1_{A}\colon\mathcal X\to\mathbb R$ satisfies
\[\int \mathds 1_{A}\,d\mu_{\eta_{n_i}}=0\neq 1=\int \mathds 1_{A}\,d\mu_{\eta}\]
contradicting our assumptions. This finishes the proof of part (a).
To prove part (b) of the theorem, we first note that $\pi$ is a Borel surjection since it is a composition of a continuous projection $\psi$ and a homeomorphism $\beta$. We will now check conditions 1)-3) one by one. Condition 1) is trivially satisfied by definition of $\pi$.
Condition 2) is satisfied by Proposition 6.5 in~\cite{gns00:automata}, as for each $e\in\mathcal{M}^e_G$ the set $X_e=\pi^{-1}(e)$ simply coincides with $\partial T_\eta$ for $\eta\in\mathcal Y$ such that $e=\mu_\eta$.
Finally, condition 3) is proved as follows. Consider $\mu\in \mathcal M_G$ and $A=C_v$, $v\in V(T)$. Let $\mathcal O_v$ denote the orbit of $v$ under $G$. For each $w\in\mathcal O_v$ \begin{equation}
\label{eqn:invar}
\mu(C_w)=\mu(C_v)=\mu(A).
\end{equation}
Also, for $\xi\in\mathcal X$ we have
\[
\mu_{\psi(\xi)}(A)=\left\{\begin{array}{ll}
0,&\text{if $\xi$ does not pass through a vertex in $\mathcal O_v$},\\
\frac1{|\mathcal O_v|},&\text{otherwise}.
\end{array}
\right.
\]
Therefore, the right-hand side of the equality in condition 3) applied to the set $A$ can be decomposed as:
\begin{multline*}
\int_{\mathcal X}\bigl(\pi(\xi)\bigr)(A)\,d\mu(\xi)=\sum_{w\in\mathcal O_v}\int_{C_w}\mu_{\psi(\xi)}(A)\,d\mu(\xi)=\sum_{w\in\mathcal O_v}\int_{C_w}\frac1{|\mathcal O_v|}\,d\mu(\xi)\\
=\frac1{|\mathcal O_v|}\sum_{w\in\mathcal O_v}\int_{C_w}\,d\mu(\xi)=\frac1{|\mathcal O_v|}\sum_{w\in\mathcal O_v}\mu(C_w)=\frac1{|\mathcal O_v|}\sum_{w\in\mathcal O_v}\mu(A)=\mu(A),
\end{multline*}
where we applied equality~\eqref{eqn:invar} in the next to the last transition.
Finally, we note that the profinite case follows from the previous case because for a countably based profinite group $G$ one can always find a countable discrete dense subgroup in $G$. Note that $G$ must be countably based since it acts faithfully on $T$ and, hence, it is a subgroup of the countably based group $\mathop{\rm Aut}\nolimits(T)$.
\end{proof}
\section{Examples of Ergodic Decompositions}
\label{sec:examples}
\subsection{Groups generated by automata}
Most of the interesting examples of groups acting on rooted trees come from the class of groups generated by automata (not to be confused with automatic groups in the sense of~\cite{epstein_chpt:word_processing_in_groups92}). We start by recalling some basic definitions that we shall need later.
\begin{definition}
A \emph{Mealy automaton} (or simply \emph{automaton}) is a tuple
$(Q,\Sigma,\pi,\lambda)$, where $Q$ is a set (the set of states), $\Sigma$ is a
finite alphabet, $\pi\colon Q\times \Sigma\to Q$ is the transition function
and $\lambda\colon Q\times \Sigma\to \Sigma$ is the output function. If the set
of states $Q$ is finite the automaton is called \emph{finite}. If
for every state $q\in Q$ the output function $\lambda(q,x)$ induces
a permutation of $\Sigma$, the automaton $\mathcal A$ is called invertible.
Selecting a state $q\in Q$ produces an \emph{initial automaton}
$\mathcal A_q$.
\end{definition}
Automata are often represented by their associated \emph{Moore diagrams}. The
Moore diagram of an automaton $\mathcal A=(Q,\Sigma,\pi,\lambda)$ is the directed
labelled graph in which the vertices are the states from $Q$ and the labelled edges
have the form $q\stackrel{x|\lambda(q,x)}{\longrightarrow}\pi(q,x)$ for
$q\in Q$ and $x\in \Sigma$. If the automaton is invertible, then it is
common to label vertices of the Moore diagram by the permutation
$\lambda(q,\cdot)\in\mathop{\rm Sym}\nolimits(\Sigma)$ and leave just first components from the labels
of the edges, see for example Figure~\ref{fig:lamplighter_aut}. An example of a Moore diagram (for the Sushchansky automaton) is shown in
Figure~\ref{aut_general}.
Any initial automaton $\mathcal A_q$ induces an endomorphism of the rooted tree $\Sigma^*$ (in this situation we consider $\Sigma^*$ specifically as a tree and not as free monoid). Given a word
$v=x_1x_2x_3\ldots x_n\in \Sigma^*$, it scans its first letter $x_1$ and
outputs $\lambda(q,x_1)$. The rest of the word is handled in a similar
fashion by the initial automaton $\mathcal A_{\pi(q,x_1)}$. Formally speaking,
the functions $\pi$ and $\lambda$ can be extended to $\pi\colon
Q\times \Sigma^*\to Q$ and $\lambda\colon Q\times \Sigma^*\to \Sigma^*$ via
\[\begin{array}{l}
\pi(q,x_1x_2\ldots x_n)=\pi(\pi(q,x_1),x_2x_3\ldots x_n),\\
\lambda(q,x_1x_2\ldots x_n)=\lambda(q,x_1)\lambda(\pi(q,x_1),x_2x_3\ldots x_n).\\
\end{array}
\]
By construction any initial automaton acts on the rooted tree $\Sigma^*$ as an
endomorphism. In the case of an invertible automaton it acts as an
automorphism of this rooted tree. Below we will sometimes identify a state $q$ of an automaton $\mathcal A$ with the corresponding initial automaton $\mathcal A_q$. Thus each state of an automaton defines an endomorphism (or automorphism in the invertible case) of the tree $\Sigma^*$.
\begin{definition}
Let $\mathcal A$ be an (invertible) automaton over an alphabet $\Sigma$. The semigroup $\mathds S(\mathcal A)$ (group $\mathds G(\mathcal A)$) generated by all states of $\mathcal A$ viewed as endomorphisms (automorphisms) of the rooted tree $\Sigma^*$ under the operation of composition is called an \emph{automaton semigroup} (\emph{automaton group}).
\end{definition}
Another popular name for automaton groups (resp. semigroups) is
self-similar groups (resp. semigroups)
(see~\cite{nekrash:self-similar}).
We will also consider subgroups of automaton groups. These groups are generated by one or more initial invertible automata.
Conversely, any endomorphism of $\Sigma^*$ can be encoded by the action
of a suitable initial automaton. In order to show this we need the notion of a
\emph{section} of an endomorphism at a vertex of the tree. Let $g$ be
an endomorphism of the tree $\Sigma^*$ and $x\in \Sigma$. Then for any $v\in
\Sigma^*$ we have
\[g(xv)=g(x)v'\]
for some $v'\in \Sigma^*$. Then the map $g|_x\colon \Sigma^*\to \Sigma^*$ given by
\[g|_x(v)=v'\]
defines an endomorphism of $\Sigma^*$ and is called the \emph{section} of
$g$ at vertex $x$. Furthermore, for any nontrivial word $x_1x_2\ldots x_n\in \Sigma^*$
we define \[g|_{x_1x_2\ldots x_n}=(\ldots((g|_{x_1})|_{x_2})|_{x_3}\ldots)|_{x_n}.\]
Finally, for empty word $\emptyset$ corresponding to the root of the tree we define $g|_{\emptyset}=g$.
Given an endomorphism $g$ of $\Sigma^*$ we construct an initial automaton
$\mathcal A(g)$ whose action on $\Sigma^*$ coincides with that of $g$ as follows.
The set of states of $\mathcal A(g)$ is the set $\{g|_v\colon v\in \Sigma^*\}$
of different sections of $g$ at the vertices of the tree. The
transition and output functions are defined by
\[\begin{array}{l}
\pi(g|_v,x)=g|_{vx},\\
\lambda(g|_v,x)=g|_v(x).
\end{array}\]
Thus, the semigroup of all endomorphisms of the tree $\Sigma^*$ is isomorphic to the semigroup generated by all initial automata over $\Sigma$. Respectively, the group of all automorphisms of the tree $\Sigma^*$ is isomorphic to the group generated by all initial invertible automata over $\Sigma$.
For any automaton group $G$ there is a natural embedding of $G$ into the permutational wreath product of $G$ with $\mathop{\rm Sym}\nolimits(\Sigma)$
\[G\hookrightarrow G \wr_{\Sigma} \mathop{\rm Sym}\nolimits(\Sigma)\]
defined by
\[G\ni g\mapsto (g_1,g_2,\ldots,g_d)\lambda(g)\in G\wr_{\Sigma} \mathop{\rm Sym}\nolimits(\Sigma),\]
where $g_1,g_2,\ldots,g_d$ are the sections of $g$ at the vertices
of the first level, and $\lambda(g)$ is the permutation of $\Sigma$ induced by the action of $g$ on the first level of the tree.
The above embedding is convenient in computations involving the
sections of automorphisms, as well as for defining automaton groups.
When $G$ is a finitely generated automaton group, the restriction of the above embedding to a (finite) generating set of $G$ is sometimes called the \emph{wreath recursion} defining the group. For example, the wreath recursion of the Lamplighter group generated by the automaton shown in Figure~\ref{fig:lamplighter_aut} is given in~\eqref{eq:lamplighter_wr}.
\subsection{Some simple cases}
\label{ssec:triv}
We start with two easy examples.
\begin{proposition}
Let $G$ be a finite group acting on a rooted spherically homogeneous tree $T$ with infinite boundary. The space $\mathcal{M}^e_G$ of ergodic invariant probability measures on $\partial T$ is homeomorphic to the Cantor set.
\end{proposition}
\begin{proof}
Since $G$ is finite, the size of each orbit is bounded by $|G|$. Therefore, there is only a finite number of times when the size of the orbit grows while passing from a vertex to its child in the orbit tree. Thus, there is an integer $N$ such that for each vertex of $T_G$ of level at least $N$ corresponding to an orbit $\mathcal O$, and any child of this vertex corresponding to an orbit $\mathcal O'$, we have $|\mathcal O|=|\mathcal O'|$. Hence, the structure of the subtrees of the orbit tree $T_{G}$ hanging down from the vertices of level $N$ will coincide with the structure of corresponding subtrees of $T$. Equivalently, for each $n\geq N$ the degrees of all vertices of level $n$ in $T_{G}$ coincide with the degree of vertices of level $n$ in $T$. Since $T$ has infinite boundary, it has an infinite number of levels where branching is happening. This implies that the same is true for $T_G$ as well, so $\partial T_G$ is homeomorphic to the Cantor set.
\end{proof}
In the opposite case when a group $G$ acts spherically transitively on an infinite spherically homogeneous rooted tree, the orbit tree is just a 1-ary rooted tree in which every vertex has exactly one child, its boundary consists of one point that corresponds to the unique (ergodic) invariant probability measure on $\partial T$. This particular case is considered in~\cite[Proposition~6.5]{gns00:automata}. Note, that in the case of a regular rooted tree $T_d$, according to~\cite{gawron_ns:conjugation}, an automorphism acts spherically transitively on $T_d$ if and only if it is conjugate to the, so-called, adding machine. A more general approach to adding machines acting on Cantor sets is developed in~\cite{buescu_s:liapunov_stability95,buescu_s:liapunov_stability06}, where their classification is given in terms of their spectral properties.
In the case when a spherically homogeneous tree $T$ is constructive, i.e. the sequence $\{d_n\}_{n\geq 0}$, where $d_n$ is the degree of vertices on the $n$-th level, is recursive, we formulate the following algorithmic questions.
\begin{question}
Let $G$ be an automaton group (or, more generally, a group acting on a constructive spherically homogeneous tree $T$ with infinite boundary).
\begin{itemize}
\item Is there a way to algorithmically describe the structure of the orbit tree $T_G$?
\item Is there an algorithm that checks whether $\partial T_G$ is finite, or even consists of one point (equivalently, whether $G$ acts level-transitively on the tree), or is homeomorphic to the Cantor set?
\end{itemize}
\end{question}
\subsection{The Universal Grigorchuk group}
\label{ssec:univ_grigorch}
Another example that we study here is the universal Grigorchuk group\footnote{The second author insists on the use of this terminology}. This group is defined as a universal group for the family of Grigorchuk groups $G_{\omega}$ constructed in~\cite{grigorch:degrees}. Namely, it is the quotient $F_4/N$ of the free group $F_4$ of rank 4 by the normal subgroup $N=\cap_{\omega\in\{0,1,2\}^{\mathbb N}}N_{\omega}$, where $G_{\omega}=F_4/N_{\omega}$. For detailed information about this group we refer the reader to~\cite{grigorch:solved,benli_gn:universal}. The main open question related to this group is whether it is amenable. For our purposes we shall only need the realization of this group as an automaton group.
\begin{proposition}[\cite{benli_gn:universal}]
The universal Grigorchuk group $U$ can be defined as a group generated by the 4-state automaton over the 6-letter alphabet $\Sigma=\{1,2,3,4,5,6\}$ given by the following wreath recursion:
\[\begin{array}{lcl}
a &=& (1, 1, 1, 1, 1, 1)(14)(25)(36),\\
b &=& (a, a, 1, b, b, b),\\
c &=& (a, 1, a, c, c, c),\\
d &=& (1, a, a, d, d, d).
\end{array}
\]
\end{proposition}
Note that $\Sigma$ is partitioned into three disjoint alphabets $\Sigma_1=\{1,4\}$, $\Sigma_2=\{2,5\}$ and $\Sigma_3=\{3,6\}$. It follows immediately from the wreath recursion that if for $g\in U$ and $x_1x_2\ldots x_n\in\Sigma^n$ we have $g(x_1x_2\ldots x_n)=y_1y_2\ldots y_n$ for some $y_1y_2\ldots y_n\in\Sigma^n$, then for each $j$ the letters $x_j$ and $y_j$ must belong to the same alphabet $\Sigma_i$. The next proposition shows that this is the only obstruction to transitivity.
\begin{proposition}
The orbits of the action of $U$ on level $n$ of $\Sigma^*$ are Cartesian products of the alphabets $\Sigma_{i_1}\Sigma_{i_2}\ldots\Sigma_{i_n}$, where $i_j\in\{1,2,3\}$.
\end{proposition}
\begin{proof}
The proof follows by induction on levels and uses the fact that $U$ is \emph{self-replicating}. In other words, for each $v\in\Sigma^*$ the natural endomorphism $\phi_v$ from the stabilizer $\mathop{\rm Stab}\nolimits_U(v)=\{g\in U\mid g(v)=v\}$ of $v$ in $U$ to $U$, given by $\phi_v(g) = g|_v$, is surjective. In particular, for each $v\in\Sigma^*$ there is $g\in U$ such that $g(v)=v$ and $g|_v=a$. The existence of such element proves the induction step as $a$ permutes the letters in each $\Sigma_i$.
\end{proof}
We directly obtain the following corollary related to the ergodic decomposition.
\begin{corollary}
The orbit tree $T_U$ of the action of $U$ is isomorphic to the 3-ary regular rooted tree, and therefore the space $\mathcal{M}^e_U$ of ergodic invariant probability measures is homeomorphic to the Cantor set.
\end{corollary}
\subsection{Sushchansky groups}
\label{ssec:sushch}
Sushchansky introduced a class of infinite $p$-groups generated by pairs of initial automata acting on the $p$-ary rooted tree (using the language of Kaloujnine tableaux~\cite{kalou:la_structure}) in~\cite{sushch:burnside}. These groups were later studied in~\cite{bondarenko_s:sushch}, where, in particular, it was proved that they have intermediate growth and the structure of the orbit trees was computed.
Let $\sigma=(0,1,\ldots,p-1)$ be a cyclic
permutation of the alphabet $\Sigma=\{0,1,\ldots,p-1\}$. With a slight abuse of notation, depending on
the context, $\sigma$ will also denote the automorphism of $\Sigma^*$ of
the form $(1,1,\ldots,1)\sigma$.
Given an arbitrary linear order $\lambda=\{(\alpha_i,\beta_i)\}$ on $\Sigma^2$ we define the Sushchansky group $G_{\lambda}$ generated by the two automorphisms $A$ and $B_{\lambda}$ of $T_p$ with the set of vertices $\Sigma^*$. We first define words
$u,v\in \Sigma^{p^2}$ in the following way:
\[
u_i=\left\{
\begin{array}{ll}
0, & \hbox{ if } \beta_i=0; \\
1, & \hbox{ if } \beta_i\neq 0. \\
\end{array}
\right. \qquad \qquad
v_i=\left\{
\begin{array}{ll}
1, & \hbox{ if } \beta_i=0; \\
-\frac{\alpha_i}{\beta_i}, & \hbox{ if } \beta_i\neq 0. \\
\end{array}
\right.
\]
The words $u$ and $v$ encode the actions of $B_{\lambda}$ on the
words $00\ldots 01*$ and $10\ldots 01*$, respectively. Using the
words $u$ and $v$ we can construct automorphisms
$q_1,\ldots,q_{p^2}, r_1,\ldots, r_{p^2}$ of the tree $\Sigma^{*}$ by the
following recurrent formulas:
\begin{equation}\label{eqn def aut q r}
q_i=(q_{i+1},\sigma^{u_i},1,\ldots,1), \qquad
r_i=(r_{i+1},\sigma^{v_i},1,\ldots ,1),
\end{equation}
for $i=1,\ldots,p^2$, where the indices are considered modulo $p^2$,
i.e. $i=i+np^2$ for any $n$.
These automorphisms $q_i$ and $r_i$ are
precisely the restrictions of $B_{\lambda}$ on the words
$00(0)^{i-1+np^2}$ and $10(0)^{i-1+np^2}$, respectively, for any
$n\geq 0$.
The action of the tableau $A$ is given by:
\[
A=(1,\sigma,\sigma^2,\ldots,\sigma^{p-1})\sigma;
\]
while $B_{\lambda}$ acts trivially on the second level and the
action on the rest is given by the sections:
\[
B_{\lambda}|_{00}=q_1, \quad B_{\lambda}|_{10}=r_1, \quad
B_{\lambda}|_{21}=\sigma
\]
and all the other sections are trivial. In particular, the
automorphisms $A$ and $B_{\lambda}$ are finite-state and Sushchansky
group $G_{\lambda}$ is generated by two finite initial automata, whose structure
is shown in Figure~\ref{aut_general} (where the double circled nodes correspond to generators $A$ and $B_{\lambda}$).
\begin{figure}
\caption{The structure of the Sushchansky automaton}
\label{aut_general}
\end{figure}
The following proposition describes the orbit tree.
\begin{proposition}[\cite{bondarenko_s:sushch}]\label{prop orbit tree}
The structure of the orbit tree $T_{G_\lambda}$ does not depend on
the type $\lambda$ and is shown in Figure~\ref{fig orbit tree}. Namely, there is only one vertex on the first level of the tree that has $p$ children, one of which is the root of a line, and the others are the roots of regular $p$-ary trees.
\begin{figure}
\caption{The orbit tree $T_{G_\lambda}$}
\label{fig orbit tree}
\end{figure}
\end{proposition}
\subsection{Lamplighter group}
\label{ssec:lamp}
Recall that the lamplighter group $\mathcal L$, the permutational wreath product $(\mathbb Z/2\mathbb Z)\wr\mathbb Z\cong\bigl(\oplus_{\mathbb Z}(\mathbb Z/2\mathbb Z)\bigr)\rtimes\mathbb Z$, can be realized as an automaton group generated by the automaton shown in Figure~\ref{fig:lamplighter_aut} with the following wreath recursion:
\begin{equation}
\label{eq:lamplighter_wr}
\begin{array}{lll}
a&=&(b,a)\sigma,\\
b&=&(b,a).\\
\end{array}
\end{equation}
\begin{figure}
\caption{Automaton generating the lamplighter group $\mathcal L$}
\label{fig:lamplighter_aut}
\end{figure}
In this subsection we give a complete description of the ergodic decompositions for the cyclic subgroups $\langle a\rangle$ and $\langle b\rangle$ of $\mathcal L$. Let $T_{\langle a\rangle}$ and $T_{\langle b\rangle}$ be the corresponding orbit trees of the actions of these subgroups on the binary tree $T$.
\begin{theorem}
\label{thm:lamplighter}
\begin{itemize}
\item[(a)] In the orbit tree $T_{\langle a\rangle}$ all vertices on levels $2^n-1, n\geq0$ have one child and all vertices on other levels have two children (see Figure~\ref{fig:orbit_tree_a}). The space of ergodic components of the action of $\langle a\rangle$ on $X^\omega$ is homeomorphic to the Cantor set.
\item[(b)] The orbit tree $T_{\langle b\rangle}$ is recursively obtained by declaring that the root of the tree has two children that are roots of trees $T_{\langle b\rangle}$ and $T_{\langle a\rangle}$ (see Figure~\ref{fig:orbit_tree_b}). The space of ergodic components of the action of $\langle b\rangle$ on $X^\omega$ is again homeomorphic to the Cantor set.
\end{itemize}
\end{theorem}
A useful observation about the lamplighter group made in~\cite{gns00:automata} is that the action of generators $a$ and $b$ can be defined in terms of functions acting on formal power series. We are going to describe this action. The boundary $\partial T$, consisting of infinite sequences over $X$, can be identified with the ring of formal power series $(\mathbb Z/2\mathbb Z)[[t]]$ via the map
\[a_0a_1a_2a_3\ldots\mapsto a_0+a_1t+a_2t^2+a_3t^3+\cdots\]
We will use this identification. Also, we will associate finite sequences over $X$ with corresponding polynomials in $(\mathbb Z/2\mathbb Z)[t]$ , which can be viewed as power series with finite number of nonzero terms. For example, $10^k$ and $11$ in $X^*$ will correspond to $1$ and $1+t$ in $(\mathbb Z/2\mathbb Z)[t]$, respectively. As was observed in~\cite{gns00:automata}, under this identification the actions of $a$ and $b$ on $f(t)\in(\mathbb Z/2\mathbb Z)[[t]]$ are defined as
\[\begin{array}{lll}
(a(f))(t)&=&(1+t)f(t)+1,\\
(b(f))(t)&=&(1+t)f(t).\\
\end{array}
\]
It will be convenient in the proof to operate with orbits of group actions using the following notion.
\begin{definition}
For an automorphism $g\in\mathop{\rm Aut}\nolimits(X^*)$ and for $v\in X^*\cup X^\omega$ whose orbit under the action of $\langle g\rangle$ has size $m\in\mathbb N\cup\{\infty\}$, the \emph{orbit matrix} of $v$ with respect to $g$ is the $m\times |v|$ matrix $M(v,g)$ whose $ij$-th entry contains the $j$-th symbol of $g^{i-1}(v)$ (so that the first row corresponds to $v$ itself).
\end{definition}
\begin{lemma}
\label{lem:orb10}
The size of the orbit $\mathop{\rm Orb}\nolimits_b(10^k)$ of the vertex $10^k$ under the action of $b$ is $\displaystyle{2^{[\log_2k]+1}}$.
\end{lemma}
\begin{proof}
The orbit $\mathop{\rm Orb}\nolimits_b(10^k)$ corresponds to the orbit of $1$ under multiplication by $(1+t)$ in $(\mathbb Z/2\mathbb Z)[[t]]/(t^{k+1})$. This orbit will consist of polynomials
\[(1+t)^n=\sum_{i=0}^n \overline{\binom{n}{i}}\,t^i \mod t^{k+1},\]
where by $\overline{x}$ we denote $x \mod 2$. It is well-known (see, for example,~\cite{fine:binomial47}) that the coefficients of these polynomials, plotted as a rectangular array in which the $n$-th row contains the values $\overline{\binom{n}{i}}$, have a fractal shape such as that of a Sierpinski triangle as shown in Figure~\ref{fig:sierpinski}.
\begin{figure}
\caption{Initial part of the orbit of $10^\infty$ under the action of powers of $b$, where 1's are replaced with ``X'' and 0's by empty spaces.}
\label{fig:sierpinski}
\end{figure}
For the purpose of completeness and to explain the further steps we will include the proof of this fact here. An important observation behind the structure of the Sierpinski triangle is that the orbit matrix $M(10^{2^{n+1}-1},b)$ is a square $2^{n+1}\times2^{n+1}$ matrix that has the following block decomposition:
\renewcommand{\arraystretch}{1.5}
\begin{equation}
\label{eq:decomposition}
M(10^{2^{n+1}-1},b)=\left[\begin{array}{c|c}
M(10^{2^{n}-1},b)&0\\
\hline
M(10^{2^{n}-1},b)&M(10^{2^{n}-1},b)
\end{array}
\right].
\end{equation}
\def\arraystretch{1}
We prove the above decomposition by induction on $n$. The base of induction is satisfied since $M(1,b)=[1]$ and
\[M(10,b)=\left[\begin{array}{c|c}
1&0\\
\hline
1&1
\end{array}
\right]\]
The induction step is proved as follows. Assume that $|\mathop{\rm Orb}\nolimits_b(10^{2^{n}-1})|=2^n$, and hence $M(10^{2^{n}-1},b)$ is a square $2^n\times2^n$ matrix.
First of all, since when $i\leq 2^n$ the expansion of $(1+t)^i$ does not have terms of degree greater than $2^n$, we immediately conclude that the upper right corner of the matrix~\eqref{eq:decomposition} is a $2^n\times2^n$ zero matrix. And by definition of the orbit matrix we will see exactly $M(10^{2^{n}-1},b)$ in the left top corner.
Further, since $(1+t)^{2^n}=1+t^{2^n}$ in $(\mathbb Z/2\mathbb Z)[[t]]$, we have
\[(1+t)^{2^n+i}=(1+t^{2^n})(1+t)^i=(1+t)^i+t^{2^n}(1+t)^i.\]
When $0\leq i< 2^{n}$ the term $(1+t)^i$ will reproduce the orbit of $10^{2^{n}-1}$ in the bottom left corner of the orbit matrix in~\eqref{eq:decomposition}, while the term $t^{2^n}(1+t)^i$ will reproduce the same orbit shifted to the right by $2^n$ positions, thus filling the bottom right corner of $M(10^{2^{n+1}-1},b)$. Furthermore, $b^{2^{n+1}}(10^{2^{n+1}-1})=10^{2^{n+1}-1}$ since
\[(1+t)^{2^{n+1}}=1+t^{2^{n+1}}\equiv 1\mod t^{2^{n+1}}.\]
Therefore, by the induction assumption, the size of $\mathop{\rm Orb}\nolimits_b(10^{2^{n+1}-1})$ is equal to $2^{n+1}$, which implies that $M(10^{2^{n+1}-1},b)$ is a square $2^{n+1}\times2^{n+1}$ matrix. In particular, this size agrees with the statement of the lemma.
Finally, for $2^{n}-1<k\leq 2^{n+1}-1$ the orbit $\mathop{\rm Orb}\nolimits_b(10^k)$ has the same size as the orbit $\mathop{\rm Orb}\nolimits_b(10^{2^{n+1}-1})$ since by the decomposition~\eqref{eq:decomposition} the only line beginning with $10^k$ in the orbit matrix $M(10^{2^{n+1}-1},b)$ is the first one.
\end{proof}
\begin{lemma}
\label{lem:orb1w}
The size of the orbit $\mathop{\rm Orb}\nolimits_b(0^i1w)$ of the vertex $0^i1w$ under the action of $b$ is $\displaystyle{2^{[\log_2|w|]+1}}$.
\end{lemma}
\begin{proof}
First of all, since $b^n=(b^n,a^n)$, we have $b^n(0^i1w)=0^ib^n(1w)$. Therefore
\[|\mathop{\rm Orb}\nolimits_b(0^i1w)|=|\mathop{\rm Orb}\nolimits_b(1w)|\]
and we can assume that $i=0$.
The vertex $1w=1a_1a_2\ldots a_{|w|}$ corresponds to the power series (which is, in fact, a polynomial) $f(t)=1+tg(t)=1+a_1t+a_2t^2+\cdots+a_{|w|}t^{|w|}\in(\mathbb Z/2\mathbb Z)[[t]]$ for some polynomial $g(t)$. Therefore, the series corresponding to $b^n(1w)$ has the form $(1+t)^n(1+tg(t))$. The size of the orbit of $1w$ then is equal to $N-1$, where $N>1$ is the smallest number such that
\begin{equation}
\label{eq:series}
(1+t)^N(1+tg(t))\equiv(1+tg(t)) \mod t^{|w|+2}
\end{equation}
which is equivalent to
\[(1+tg(t))(1+(1+t)^N)\equiv 0\mod t^{|w|+2}.\]
The last equality holds true if and only if $(1+(1+t)^N)\equiv 0\mod t^{|w|+2}$ as otherwise the smallest degree non-zero term in $(1+(1+t)^N)\mod t^{|w|+2}$ would produce a non-zero term in the lefthand side of~\eqref{eq:series}. Therefore, the smallest $N$ satisfying~\eqref{eq:series} is equal to the smallest $N$ for which
\[(1+t)^N\equiv 1\mod t^{|w|+2},\]
which, by the above argument, is equal to the size of the orbit $\mathop{\rm Orb}\nolimits_b(10^{|w|})$. Application of Lemma~\ref{lem:orb10} finishes the proof.
\end{proof}
\begin{corollary}
\label{cor:orb_a}
The size of the orbit $\mathop{\rm Orb}\nolimits_a(w)$ of the vertex $w$ under the action of $a$ is $\displaystyle{2^{[\log_2|w|]+1}}$.
\end{corollary}
\begin{proof}
Follows immediately from the identity $b(1w)=1a(w)$ and Lemma~\ref{lem:orb1w}.
\end{proof}
\begin{remark} It follows immediately from the equality $(b(f))(t)=(1+t)f(t)$ that
\[b(a_0a_1a_2\ldots)=a_0(a_1+a_0)(a_2+a_1)(a_3+a_2)\ldots,\]
where the addition is performed $\mod 2$. This implies that the orbit of $w=a_0a_1a_2\ldots$ under the action of $b$, viewed as an infinite matrix with $ij$-th entry containing the $j$-th symbol of $b^i(w)$, can be obtained as the sum of the corresponding matrices for the orbits of the vertices of the form $0^{l-1}10^\infty$, where the sum is taken over all $l$ for which $a_l=1$. In other words, we sum up together $\mod 2$ Sierpinski triangles that grow from positions in which $a_l=1$ (see Figure~\ref{fig:sierpinski2}).
\end{remark}
\begin{figure}
\caption{Initial part of the orbit of a random vertex under the action of powers of $b$.}
\label{fig:sierpinski2}
\end{figure}
\begin{proof}[Proof of Theorem~\ref{thm:lamplighter}]
Item (a) immediately follows from Corollary~\ref{cor:orb_a} and item (b) is an obvious consequence of the wreath recursion decomposition $b=(b,a)$.
\end{proof}
\begin{figure}
\caption{Orbit tree $T_{\langle a\rangle}$.}
\label{fig:orbit_tree_a}
\end{figure}
\begin{figure}
\caption{Orbit tree $T_{\langle b\rangle}$.}
\label{fig:orbit_tree_b}
\end{figure}
At the end of the paper we would like to draw attention to the fractal nature of orbit matrices for elements of automaton groups, observed in Figure~\ref{fig:sierpinski}, which has not been studied before. We conclude the paper with the following example.
\begin{example}
\label{ex:ll2}
Consider a group $G$ generated by a 4-state automaton with the following wreath recursion:
\[\begin{array}{lcl}
a&=&(d,d)\sigma,\\
b&=&(c,c),\\
c&=&(a,b),\\
d&=&(b,a).
\end{array}\]
This group has been studied in~\cite{klimann_ps:orbit_automata} where it was proved, in particular, that the element $ac$ has infinite order. Very recently it was shown by Sidki and the second author that the whole group is isomorphic to the extension of index 2 of a rank 2 lamplighter group $(\mathbb Z/2\mathbb Z)^2\wr\mathbb Z$. A part of the orbit matrix of $0^\infty$ with respect to the element $ac$ is shown in Figure~\ref{fig:lamp_orbit} and also clearly has a self-similar pattern.
\end{example}
\begin{figure}
\caption{Initial part of the orbit of $0^\infty$ under the action of powers of $ac\in G$ from Example~\ref{ex:ll2}.}
\label{fig:lamp_orbit}
\end{figure}
\newcommand{\etalchar}[1]{$^{#1}$}
\def$'${$'$} \def$'${$'$} \def$'${$'$} \def$'${$'$}
\def$'${$'$} \def$'${$'$} \def$'${$'$} \def$'${$'$}
\def$'${$'$} \def$'${$'$} \def$'${$'$} \def$'${$'$}
\def$'${$'$}
\end{document}
|
\begin{document}
\address{School of Mathematics, University of Bristol, Fry Building, Woodland Road, Bristol, BS8 1UG, United Kingdom.}
\email{[email protected]}
\subjclass[2010]{20F05, 57M07 }
\thanks{This work was supported by the Additional Funding Programme for Mathematical Sciences, delivered by EPSRC (EP/V521917/1) and the Heilbronn Institute for Mathematical Research}
\maketitle
\begin{abstract} This very short correction notes a gap in an argument of an earlier paper, and also provides a theorem of similar flavor to the main result of that paper.
\end{abstract}
I am indebted to Dawid Kielak \cite{Kie} for pointing out a gap in the proof of Lemma 2.4 in \cite{Co}, which was used to prove Theorems A and B of that paper. There are no known counterexamples to those results, so they may be regarded as open problems. The problem with that original argument is that if $L$ and $K$ are two Magnus subgroups of a one-relator group, then the expression of an element in the intersection $L \cap K$ as a word in the free generating set of $L$ can be different from that in the free generating set for $K$. I'll state and prove a result similar to Theorem B with stronger conclusion but an extra assumption (that the one-relator group includes no Baumslag-Solitar subgroups). The proof similarly utilizes root extraction.
The notation will be identical with that of the paper \cite{Co}, with $\textbf{HEG}$, $\textbf{HEG}^m$, etc., as therein. We'll recall the following definitions from \cite{ConCor}.
\begin{definition*} A group $G$ is cm-slender (respectively lcH-slender) if every abstract group homomorphism $\phi: H \rightarrow G$, where $H$ is a completely metrizable (resp. locally compact Hausdorff) topological group, has open kernel.
\end{definition*}
\begin{theorem*} Let $G$ be a (possibly uncountable) one-relator group which has no Baumslag-Solitar subgroup. The following hold.
\begin{enumerate}
\item If $\phi: \textbf{HEG} \rightarrow G$ is an abstract homomorphism then for some $m \in \mathbb{N}$ the image $\phi(\textbf{HEG}^m)$ is finite.
\item If $\phi: H \rightarrow G$ is an abstract homomorphism, with $H$ either a completely metrizable or locally compact Hausdorff topological group, then there is a normal open subgroup $V \leq H$ with $\phi(V)$ finite.
\end{enumerate}
In particular, a torsion-free one-relator group without Baumslag-Solitar subgroups is n-slender, cm-slender, and lcH-slender.
\end{theorem*}
\begin{proof} We will first prove claim (1) and then give the quite analogous argument for (2). Assume that $\phi: \textbf{HEG} \rightarrow G$ is an abstract group homomorphism, where $G$ is a one-relator group without Baumslag-Solitar subgroups. If $G = \langle X \mid r\rangle$ we let $Y \subseteq X$ be the set of generators used in the word $r$. The group $G$ is isomorphic in the natural way to the free product $\langle Y \mid r\rangle * F(X \setminus Y)$ where $F(X \setminus Y)$ is the free group on generators $X \setminus Y$. Then $\phi: \textbf{HEG} \rightarrow \langle Y \mid r\rangle * F(X \setminus Y)$, so by \cite[Theorem 1.3]{Ed} we know that for some $N \in \mathbb{N}$ the image $\phi(\textbf{HEG}^N)$ is included into a conjugate of either $\langle Y \mid r \rangle$ or $F(X \setminus Y)$. Thus without loss of generality we compose $\phi$ with conjugation by an appropriate element in $G$ so that $\phi(\textbf{HEG}^N) \leq \langle Y \mid r\rangle$ or $\phi(\textbf{HEG}^N) \leq F(X \setminus Y)$. In case $\phi(\textbf{HEG}^N) \leq F(X \setminus Y)$, since free groups are n-slender \cite[Corollary 3.7]{Edearly}, we can select $m > N$ such that $\textbf{HEG}^m \leq \ker(\phi \upharpoonright \textbf{HEG}^N) \leq \ker(\phi)$, so $\phi(\textbf{HEG}^m)$ is trivial, hence finite.
Suppose now that $\phi(\textbf{HEG}^N) \leq \langle Y \mid r\rangle$. In case $J = \langle Y \mid r\rangle$ has torsion we know it is hyperbolic (by the Spelling Theorem of Newman \cite{Ne}). Then there exists some $m > N$ such that $\phi(\textbf{HEG}^m)$ is finite \cite[Theorem B]{BC}. Therefore we may assume that $J$ is torsion-free. As $G$ does not have Baumslag-Solitar subgroups, we know that $J$ is commutative transitive \cite[Theorem 1.3]{FMRR} and so has unique root extraction (i.e. if $s_0^t = s_1^t$, $t > 0$, then $s_0 = s_1$). Letting $p$ be a prime greater than the length of the relator $r$, we have that for each nontrivial $s \in J$ there is some $n_s \in \mathbb{N}$ such that the equation $x^{p^{n_s}} = s$ has no solution in $J$ \cite[Theorem 1]{Ne}. Then by unique root extraction we have that for nontrivial $s \in \langle Y \mid r\rangle$, the set $\{x \in J : (\exists k \in \mathbb{N}) x^{p^k} = s\}$ has cardinality at most $n_s$. Then in the terminology of \cite{ConCor} the group $J$ has finite $p$-antecedents, and as $J$ is countable and torsion-free, we know that $J$ is n-slender \cite[Theorems A, B(c)]{ConCor}. Then there exists some $m > N$ such that $\phi \upharpoonright \textbf{HEG}^m$ is trivial and we have considered the last case for (1).
Now we'll prove (2). Letting $\phi: H \rightarrow G$ with $H$ completely metrizable (respectively locally compact Hausdorff) and $G = \langle Y \mid r \rangle * F(X \setminus Y)$, with $Y$ finite, we have that either $\ker(\phi)$ is open or $\phi(H)$ lies entirely in a conjugate of $\langle Y \mid r \rangle$ or of $F(X \setminus Y)$ by \cite{Sl} (resp.\ \cite{MN}). In case $\ker(\phi)$ is open we are already done. Else we conjugate appropriately so that without loss of generality either $\phi(H) \leq \langle Y \mid r \rangle$ or $\phi(H) \leq F(X \setminus Y)$. If $\phi(H) \leq F(X \setminus Y)$ then since free groups are cm-slender \cite{Du} (resp.\ lcH-slender, also \cite{Du}) we see again that $\ker(\phi)$ is open. We are left with the case where $\phi(H) \leq \langle Y \mid r \rangle$. If the group $J = \langle Y \mid r \rangle$ has torsion then it is hyperbolic and by \cite[Theorem A]{BC} there is an open normal subgroup $V \leq H$ such that $\phi(V)$ is finite. If $J$ is torsion-free, then as in (1) $J$ has finite $p$-antecedents and we have $J$ is cm-slender and lcH-slender \cite[Theorems A, B(c)]{ConCor}. Then $\ker(\phi)$ is open and the last case for (2) is complete.
\end{proof}
It should be noted that Baumslag-Solitar groups are themselves known to be n-slender, cm-slender, and lcH-slender \cite[Theorems A, B(i)]{ConCor}. Finally, we point out that a positive answer to the following question allows one to remove the requirement that the group has no Baumslag-Solitar subgroups.
\begin{question} If $G$ is a torsion-free one-relator group and $p$ is a prime number greater than the length of the relator of $G$, then does $G$ have finite $p$-antecedents?
\end{question}
\end{document}
|
\begin{document}
\thispagestyle{empty}
\baselineskip=28pt
\begin{center}
{\LARGE\bf Bayesian Semiparametric Multivariate Density Deconvolution}
\end{center}
\baselineskip=12pt
\vskip 2mm
\begin{center}
Abhra Sarkar\\
Department of Statistical Science, Duke University, Durham,\\ NC 27708-0251, USA\\
[email protected] \\
\hskip 3mm \\
Debdeep Pati\\
Department of Statistics, Florida State University, Tallahassee,\\ FL 32306-4330, USA\\
[email protected]\\
\hskip 3mm \\
Bani K. Mallick\\% and Raymond J. Carroll\\
Department of Statistics, Texas A\&M University, 3143 TAMU, College Station,\\ TX 77843-3143, USA\\
[email protected]\\% and [email protected]\\
\hskip 3mm \\
Raymond J. Carroll\\
Department of Statistics, Texas A\&M University, 3143 TAMU, College Station,\\ TX 77843-3143, USA\\
and School of Mathematical and Physical Sciences, University of Technology Sydney, Broadway NSW 2007, Australia\\
[email protected]\\
\end{center}
\vskip 4mm
\begin{center}
{\Large\bf Abstract}
\end{center}
\baselineskip=12pt
We consider the problem of multivariate density deconvolution when interest lies in estimating the distribution of a vector valued random variable ${\mathbf X}$
but precise measurements on ${\mathbf X}$ are not available, observations being contaminated by measurement errors ${\mathbf U}$.
The existing sparse literature on the problem assumes the density of the measurement errors to be completely known.
We propose robust Bayesian semiparametric multivariate deconvolution approaches
when the measurement error density of ${\mathbf U}$ is not known but replicated proxies are available for at least some individuals.
Additionally, we allow the variability of ${\mathbf U}$ to depend on the associated unobserved values of ${\mathbf X}$ through unknown relationships,
which also automatically includes the case of multivariate multiplicative measurement errors.
Basic properties of finite mixture models, multivariate normal kernels and exchangeable priors are exploited in novel ways
to meet modeling and computational challenges.
Theoretical results showing the flexibility of the proposed methods in capturing a wide variety of data generating processes are provided.
We illustrate the efficiency of the proposed methods in recovering the density of ${\mathbf X}$ through simulation experiments.
The methodology is applied to estimate the joint consumption pattern of different dietary components from contaminated 24 hour recalls.
Supplementary materials present substantive additional details.
\baselineskip=12pt
\par
\noindent
\underline{\bf Some Key Words}: B-splines, Conditional heteroscedasticity, Latent factor analyzers, Measurement errors, Mixture models, Multivariate density deconvolution,
Regularization, Shrinkage.
\par
\noindent
\underline{\bf Short Title}: Multivariate Density Deconvolution
\par
\noindent
\pagebreak
\pagenumbering{arabic}
\newlength{\gnat}
\setlength{\gnat}{17pt}
\baselineskip=\gnat
\section{Introduction}
Many problems of practical importance require estimation of the density $f_{{\mathbf X}}$ of a vector valued random variable ${\mathbf X}$.
Precise measurements on ${\mathbf X}$ may not, however, be available, observations being contaminated by measurement errors ${\mathbf U}$.
Under the assumption of additive measurement errors,
the observations are generated from a convolution of the density $f_{{\mathbf X}}$ of ${\mathbf X}$ and the density $f_{{\mathbf U}}$ of the measurement errors ${\mathbf U}$.
The problem of estimating the density $f_{{\mathbf X}}$ from available contaminated measurements then becomes a problem of multivariate density deconvolution.
This article proposes novel Bayesian semiparametric density deconvolution approaches
based on finite mixtures of latent factor analyzers
for robust estimation of the density $f_{{\mathbf X}}$
when the measurement error density $f_{{\mathbf U}}$ is not known,
but replicated proxies contaminated with measurement errors ${\mathbf U}$ are available for at least some individuals.
The proposed deconvolution approaches are highly robust, not having to impose restrictive parametric assumptions on $f_{{\mathbf X}}$ or $f_{{\mathbf U}}$.
Additionally, the variability of ${\mathbf U}$ is allowed to depend on the associated unobserved values of ${\mathbf X}$ through unknown relationships.
While the focus of the article will primarily be on additive measurement errors,
importantly, the methodology for additive conditionally heteroscedastic measurement errors developed here also automatically encompasses the case of multivariate multiplicative measurement errors.
To the best of our knowledge, all existing multivariate deconvolution approaches assume that ${\mathbf U}$ is independent of ${\mathbf X}$ and that the error density $f_{{\mathbf U}}$ is completely known.
Ours is thus the first paper that allows the density of the measurement errors to be unknown and free from parametric laws
and additionally also accommodates conditional heteroscedasticity in the measurement errors.
The literature on the problem of univariate density deconvolution, in which context we denote the variable of interest by $X$ and the measurement errors by $U$, is vast.
Most of the early literature considered scenarios when the measurement error density $f_{U}$ is completely known.
Fourier inversion based deconvoluting kernel density estimators have been studied by
Carroll and Hall (1988), Liu and Taylor (1989), Devroye (1989), Fan (1991a, 1991b, 1992) and Hesse (1999) among many others.
For a review of these methods, the reader may be referred to Section 12.1 in Carroll, et al. (2006) and Section 10.2.3 in Buonaccorsi (2010).
In reality $f_{U}$ is rarely known.
The problem of deconvolution when the errors are homoscedastic with an unknown density and replicated proxies are available for each subject has been addressed by Li and Vuong (1998).
See also Diggle and Hall (1993), Neumann (1997), Carroll and Hall (2004) and the references therein.
The assumptions of homoscedasticity of $U$ and their independence from $X$ are also often unrealistic.
Flexible Bayesian density deconvolution approaches that allow $U$ to be conditionally heteroscedastic
have recently been developed in Staudenmayer, et al. (2008) and Sarkar, et al. (2014).
Staudenmayer, et al. (2008) assumed the measurement errors to be normally distributed
and used finite mixtures of B-splines to estimate $f_{X}$ and a variance function that captured the conditional heteroscedasticity.
Sarkar, et al. (2014) further relaxed the assumption of normality of $U$
employing flexible infinite mixtures of normal kernels induced by Dirichlet processes to estimate both $f_{X}$ and $f_{U}$.
Sieve based methods developed in Schennach (2004) and Hu and Schennach (2008) can also handle conditional heteroscedasticity.
In sharp contrast to the univariate case, the literature on multivariate density deconvolution is quite sparse.
We can only mention Masry (1991), Youndj\'e and Wells (2008), Comte and Lacour (2013), Hazelton and Turlach (2009, 2010) and Bovy, et al. (2011).
The first three considered deconvoluting kernel based approaches assuming the measurement errors ${\mathbf U}$ to be distributed independently from ${\mathbf X}$
according to a known probability law.
Hazelton and Turlach (2009, 2010), working with the same assumptions on ${\mathbf U}$, proposed weighted kernel based methods.
Bovy, et al. (2011) modeled the density $f_{{\mathbf X}}$ using flexible mixtures of multivariate normal kernels,
but they assumed $f_{{\mathbf U}}$ to be multivariate normal with known covariance matrices,
independent from ${\mathbf X}$.
As in the case of univariate problems, the assumptions of a fully specified $f_{{\mathbf U}}$,
known covariance matrices, and independence from ${\mathbf X}$ are highly restrictive for most practical applications.
The focus of this article is on multivariate density deconvolution when $f_{{\mathbf U}}$ is not known but replicated proxies are available for at least some individuals.
The proposed deconvolution approaches can additionally accommodate conditional heteroscedasticity in ${\mathbf U}$.
The problem is important, for instance, in nutritional epidemiology,
where nutritionists are typically interested not just in the consumption behaviors of individual dietary components but also in their joint consumption patterns.
The data are often available in the form of dietary recalls and are contaminated by measurement errors that show strong patterns of conditional heteroscedasticity.
As in Sarkar, et al. (2014), we use mixture models to estimate both $f_{{\mathbf X}}$ and $f_{{\mathbf U}}$
but the multivariate nature of the problem brings in new modeling challenges and computational obstacles
that preclude straightforward extension of their univariate deconvolution approaches.
Instead of using infinite mixtures induced by Dirichlet processes, we use finite mixtures of multivariate normal kernels with exchangeable Dirichlet priors on the mixture probabilities.
The use of finite mixtures and exchangeable priors greatly reduces computational complexity while retaining essentially the same flexibility.
Carefully constructed priors also allow automatic model selection and model averaging.
To save space, detailed discussions on these important issues are moved to Section \ref{sec: mvt finite vs infinite mixture models} in the Supplementary Materials.
We also exploit symmetric Dirichlet priors and properties of multivariate normal distributions and finite mixture models
to develop a novel strategy that enables us to enforce a required zero mean restriction on the measurement errors.
Our proposed technique, as opposed to the one adopted by Sarkar, et al. (2014),
is particularly suitable for high dimensional applications and can be easily generalized to enforce moment restrictions on other types of finite mixture models.
It is well known that inverse Wishart priors, due to their dense parametrization, are not suitable for modeling covariance matrices in high dimensional applications.
In deconvolution problems the issue is further complicated since ${\mathbf X}$ and ${\mathbf U}$ are both latent.
This results in numerically unstable estimates even for small and moderate dimensions,
particularly when the true covariance matrices are sparse and the likelihood function is of complicated form.
To reduce the effective number of parameters required to be estimated,
we consider factor-analytic representation of the component specific covariance matrices with sparsity inducing shrinkage priors on the factor loading matrices.
Models for multivariate regression errors that assume normality but allow the covariance matrix to vary flexibly
with associated precisely measured and possibly multivariate predictors have recently been developed in the literature (Hoff and Niu, 2012; Fox and Dunson, 2016, etc.).
Unlike regression settings, exclusive relationships exist between different components of multivariate measurement errors ${\mathbf U}$
and different components of the associated multivariate latent `predictor' ${\mathbf X}$
- the $\ell^{th}$ component $U_{\ell}$ of ${\mathbf U}$
contaminates only the $\ell^{th}$ component $X_{\ell}$ of ${\mathbf X}$ but not others.
We thus deem covariance regression models that allow $\hbox{cov}({\mathbf U}\vert{\mathbf X})$ to vary arbitrarily with all components of ${\mathbf X}$
to be inappropriate in multivariate measurement error settings.
As discussed above, the assumption of multivariate normality is also particularly restrictive in measurement error problems.
In this article, we develop a semiparametric approach that appropriately highlights the exclusive associations between $U_{\ell}$ and $X_{\ell}$
while allowing the distribution of $({\mathbf U} \vert {\mathbf X})$ to depart from normality.
Importantly, the model also arises naturally from multivariate multiplicative measurement error settings, automatically encompassing such cases.
Diagnostic tools for checking model adequacy are also discussed.
The likelihood function for the conditional heteroscedastic model poses significant computational challenges.
We overcome these obstacles by designing a novel two-stage procedure that exploits the unique properties of conditionally heteroscedastic multivariate measurement errors to our advantage.
The procedure first estimates the variance functions characterizing $\hbox{var}(U_{\ell} \vert X_{\ell})$
using reparametrized versions of the corresponding univariate submodels.
The estimates obtained in the first stage are then plugged-in to estimate the remaining parameters in the second stage.
Having two estimation stages, our deconvolution method for conditionally heteroscedastic measurement errors is not purely Bayesian.
But they show good empirical performance and, with no other solution available in the existing literature,
they provide at least workable starting points towards more sophisticated methodology.
The article is organized as follows.
Section \ref{sec: mvt density deconvolution models} details the models.
Model identifiability issues and implementation details, including the choice of hyper-parameters and Markov chain Monte Carlo (MCMC) algorithms to sample from the posterior, are discussed in the Supplementary Materials.
Section \ref{sec: mvt model identifiability} discusses model identifiability issues.
Section \ref{sec: mvt model flexibility} presents theoretical results showing flexibility of the proposed models.
Simulation studies comparing the proposed deconvolution methods to a naive method that ignores measurement errors are presented in Section \ref{sec: mvt simulation studies}.
Section \ref{sec: mvt data analysis} presents an application of the proposed methodology in estimation of the joint consumption pattern of dietary intakes
from contaminated 24 hour recalls in a nutritional epidemiologic study.
Section \ref{sec: mvt discussion} includes a discussion.
An unnumbered section concludes the article with a description of the Supplementary Materials.
\section{Deconvolution Models} \label{sec: mvt density deconvolution models}
The goal is to estimate the unknown joint density of a $p$-dimensional multivariate random variable ${\mathbf X}$.
There are $i=1,\dots,n$ subjects.
Precise measurements of ${\mathbf X}$ are not available.
Instead, for $j=1,\dots,m_{i}$, replicated proxies ${\mathbf W}_{ij}$
contaminated with measurement errors ${\mathbf U}_{ij}$ are available for each subject $i$.
The replicates are assumed to be generated by the model
\begin{eqnarray}
{\mathbf W}_{ij} &=& {\mathbf X}_{i} + {\mathbf U}_{ij}. \label{eq: additive error}
\end{eqnarray}
Given ${\mathbf X}_{i}$, ${\mathbf U}_{ij}$ are independently distributed with $E({\mathbf U}_{ij}\vert{\mathbf X}_{i}) = {\mathbf 0}$.
The marginal density of ${\mathbf W}_{ij}$ is denoted by $f_{{\mathbf W}}$.
The implied conditional distributions of ${\mathbf W}_{ij}$ and ${\mathbf U}_{ij}$, given ${\mathbf X}_{i}$,
are denoted by $f_{{\mathbf W}\vert {\mathbf X}}$ and $f_{{\mathbf U}\vert {\mathbf X}}$, respectively.
\subsection{Modeling the Density $f_{{\mathbf X}}$} \label{sec: mvt density of interest}
In this article $f_{{\mathbf X}}$ is specified as a mixture of multivariate normal kernels
\begin{eqnarray}
f_{{\mathbf X}}({\mathbf X}) = \textstyle\sum_{k=1}^{K_{{\mathbf X}}} \pi_{{\mathbf X},k} ~ \hbox{MVN}_{p}({\mathbf X} \vert \mbox{\boldmath $\mu$}_{{\mathbf X},k},\mbox{\boldmath $\Sigma$}_{{\mathbf X},k}), \label{eq: mixture model for f_X}
\end{eqnarray}
where $\hbox{MVN}_{p}(\cdot \vert \mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$ denotes a $p$-dimensional multivariate normal density with mean $\mbox{\boldmath $\mu$}$ and covariance matrix $\mbox{\boldmath $\Sigma$}$.
For the rest of this subsection, the subscript ${\mathbf X}$ is kept implicit to keep the notation clean.
We assign a finite Dirichlet prior to the mixture probability vector $\mbox{\boldmath $\pi$} = (\pi_{1},\dots,\pi_{K})^{\rm T}$ as
\begin{eqnarray}
\mbox{\boldmath $\pi$} &\sim& \hbox{Dir}(\alpha/K,\dots,\alpha/K). \label{eq: symmetric Dirichlet prior}
\end{eqnarray}
Here $\hbox{Dir}(\alpha_{1},\dots,\alpha_{K})$ denotes a finite dimensional Dirichlet distribution on the $K$-dimensional unit simplex with concentration parameter $(\alpha_{1},\dots,\alpha_{K})$.
Given $K$ and the latent cluster membership indices, the prior is conjugate.
The symmetry of the assumed Dirichlet prior helps in additional reduction of computational complexity by simplifying MCMC mixing issues.
Provided $K$ is sufficiently large, a carefully chosen $\alpha$ can impart the posterior with certain properties
that simplify model selection and model averaging issues by influencing the posterior to concentrate in regions that favor empty redundant components,
see Section \ref{sec: mvt choice of hyper-parameters} and Section \ref{sec: mvt finite vs infinite mixture models} of the Supplementary Materials.
We assign conjugate multivariate normal priors to the component specific mean vectors $\mbox{\boldmath $\mu$}_{k}$, so that
\begin{eqnarray}
\mbox{\boldmath $\mu$}_{k} &\sim& \hbox{MVN}_{p}(\mbox{\boldmath $\mu$}_{0},\mbox{\boldmath $\Sigma$}_{0}).
\end{eqnarray}
The conjugacy again helps in simplifying posterior calculations.
Later on, we will employ similar mixture models for the density of the measurement errors,
and this conjugacy, along with some basic properties of multivariate normal kernels,
will also help us enforce the mean zero restriction on the measurement errors.
For the component specific covariance matrices $\mbox{\boldmath $\Sigma$}_{k}$, we first consider conjugate inverse Wishart priors
\begin{eqnarray}
\mbox{\boldmath $\Sigma$}_{k} \sim \hbox{IW}_{p}(\nu_{0},\mbox{\boldmath $\Psi$}_{0}).
\end{eqnarray}
Here $\hbox{IW}_{p}(\nu,\mbox{\boldmath $\Psi$})$ denotes an inverse Wishart density on the space of $p\times p$ positive definite matrices with mean $\mbox{\boldmath $\Psi$}/(\nu-p-1)$.
While the conjugacy of the inverse Wishart priors helps in simplifying posterior calculations,
in complex high dimensional problems its dense parameterization may result in numerically unstable estimates,
particularly when the covariance matrices are sparse.
In a deconvolution problem the issue is compounded further by the nonavailability of the true ${\mathbf X}_{i}$'s.
To reduce the effective number of parameters to be estimated, we consider a parsimonious factor-analytic representation of the component specific covariance matrices:
\begin{eqnarray}
\mbox{\boldmath $\Sigma$}_{k} = \mbox{\boldmath $\Lambda$}_{k}\mbox{\boldmath $\Lambda$}_{k}^{\rm T} +\mbox{\boldmath $\Omega$}, \label{eq: density of X latent factor characterization}
\end{eqnarray}
where $\mbox{\boldmath $\Lambda$}_{k}$ are $p\times q_{k}$ factor loading matrices
and $\mbox{\boldmath $\Omega$}$ is a diagonal matrix with non-negative entries.
In practical applications $q_{k}$ will typically be much smaller than $p$, inducing parsimonious characterizations of the unknown covariance matrices $\mbox{\boldmath $\Sigma$}_{k}$.
Model (\ref{eq: mixture model for f_X}) can be equivalently represented as
\begin{eqnarray}
&&\Pr(C_{i} = k) = \pi_{k}, \\
&&({\mathbf X}_{i}\vert C_{i} = k) = \mbox{\boldmath $\mu$}_{k} + \mbox{\boldmath $\Lambda$}_{k}\mbox{\boldmath $\eta$}_{i} + \mbox{\boldmath $\Delta$}_{i}, \\
&& \mbox{\boldmath $\eta$}_{i} \sim \hbox{MVN}_{p}({\mathbf 0},{\mathbf I}_{p}),
~~~~~\mbox{\boldmath $\Delta$}_{i} \sim \hbox{MVN}_{p}({\mathbf 0},\mbox{\boldmath $\Omega$}),
\end{eqnarray}
where $C_{i}$ are the mixture labels associated with ${\mathbf X}_{i}$,
$\mbox{\boldmath $\eta$}_{i}$ are latent factors, and $\mbox{\boldmath $\Delta$}_{i}$ are errors with covariance $\mbox{\boldmath $\Omega$} = \hbox{diag}(\sigma_{1}^{2},\dots,\sigma_{p}^{2})$.
The above characterization of $\mbox{\boldmath $\Sigma$}_{k}$ is not unique,
since for any semi-orthogonal matrix ${\mathbf P}$ the loading matrix $\mbox{\boldmath $\Lambda$}_{k}^{1} = \mbox{\boldmath $\Lambda$}_{k}{\mathbf P}$ also satisfies (\ref{eq: density of X latent factor characterization}).
Since interest lies primarily in estimating the density $f_{{\mathbf X}}$, identifiability of the latent factors is, however, not required.
This also allows the loading matrices to have a-priori a potentially infinite number of columns.
Sparsity inducing priors, that favor more shrinkage as the column index increases, can then be used to shrink the redundant columns towards zero.
In this article, we do this by adapting the shrinkage priors proposed in Bhattacharya and Dunson (2011) that allow easy posterior computation.
Let $\mbox{\boldmath $\Lambda$}_{k} = ((\lambda_{k,jh}))_{j=1,h=1}^{p,\infty}$, where $j$ and $h$ denote the row and the column indices, respectively.
For $h=1,\dots,\infty$, we assign priors as follows
\begin{eqnarray}
\lambda_{k,jh} &\sim& \hbox{Normal}(0,\phi_{k,jh}^{-1}\tau_{k,h}^{-1}),
~~~~~\phi_{k,jh} \sim \hbox{Ga}(\nu/2,\nu/2), \\
\tau_{k,h} &=& \textstyle\prod_{\ell=1}^{h} \delta_{k,\ell},
~~~~~\delta_{k,\ell} \sim \hbox{Ga}(a_{\ell},1),
~~~~~\sigma_{j}^{2} \sim \hbox{Inv-Ga}(a_{\sigma},b_{\sigma}).
\end{eqnarray}
Here $\hbox{Ga}(\alpha,\beta)$ denotes a Gamma distribution with shape parameter $\alpha$ and rate parameter $\beta$ and $\hbox{Inv-Ga}(a,b)$ denotes an inverse-Gamma distribution with shape parameter $a$ and scale parameter $b$.
In the $k^{th}$ component factor loading matrix $\mbox{\boldmath ${\cal L}ambda$}_{k}$, the parameters $\{\phi_{k,jh}\}_{j=1}^{p}$ control the local shrinkage of the elements in the $h^{th}$ column,
whereas $\tau_{k,h}$ controls the global shrinkage.
When $a_{h} > 1$ for $h=2,\dots,\infty$, the sequence $\{\tau_{k,h}\}_{h=1}^{\infty}$ becomes stochastically increasing
and thus favors more shrinkage as the column index $h$ increases.
In addition to inducing adaptive sparsity and hence numerical stability,
by favoring more shrinkage as the column index increases, the shrinkage priors
play another important role in making the proposed factor analytic model highly robust to misspecification of the number of latent factors,
allowing us to adopt simple strategies to determine the number of latent factors to be included in the model in practice.
Details are deferred to Section \ref{sec: mvt choice of hyper-parameters}.
Throughout the rest of the paper, mixtures with inverse Wishart prior on the covariance matrices will be referred to as MIW models
and mixtures of latent factor analyzers will be referred to as MLFA models.
For a review of finite mixture models and mixtures of latent factor analyzers, without moment restrictions or sparsity inducing priors and with applications in measurement error free scenarios,
see Fokou\'{e} and Titterington (2003), Fr\"{u}hwirth-Schnatter (2006), Mengersen, et al. (2011) and the references therein.
For other types of shrinkage priors, see Brown and Griffin (2010), Carvalho, et al. (2010), Bhattacharya, et al. (2014) etc.
\subsection{Modeling the Density of the Measurement Errors} \label{sec: mvt density of errors}
\subsubsection{Independently Distributed Measurement Errors} \label{sec: mvt density of homoscedastic errors}
In this section, we develop models for the measurement errors ${\mathbf U}$ assuming them to be independent from ${\mathbf X}$.
That is, we assume $f_{{\mathbf U}\vert {\mathbf X}} = f_{{\mathbf U}}$ for all ${\mathbf X}$.
This remains the most extensively researched deconvolution problem for both univariate and multivariate cases.
The techniques developed in this section will also provide crucial building blocks for
more realistic models in Section \ref{sec: mvt density of heteroscedastic errors}.
The measurement errors and their density are now denoted by $\mbox{\boldmath $\epsilon$}_{ij}$ and $f_{\mbox{\boldmath $\epsilon$}}$, respectively, for reasons to become obvious shortly in Section \ref{sec: mvt density of heteroscedastic errors}.
As in Section \ref{sec: mvt density of interest}, a mixture of multivariate normals can be used to model the density $f_{\mbox{\boldmath $\epsilon$}}$
but the model now has to satisfy a mean zero constraint.
That is
\begin{eqnarray}
f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$}) = \textstyle\sum_{k=1}^{K_{\mbox{\boldmath $\epsilon$}}}\pi_{\mbox{\boldmath $\epsilon$},k} ~ \hbox{MVN}_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},k},\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},k}), \\
\mbox{subject to}~ \textstyle\sum_{k=1}^{K_{\mbox{\boldmath $\epsilon$}}}\pi_{\mbox{\boldmath $\epsilon$},k} \mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},k} = {\mathbf 0}.
\end{eqnarray}
\\
To get numerically stable estimates of the density of the errors, latent factor characterization of the covariance matrices with sparsity inducing shrinkage priors as in Section \ref{sec: mvt density of interest} may again be used.
Details are curtailed to avoid unnecessary repetition and we only present the mechanism to enforce the zero mean restriction on the model.
The subscript $\mbox{\boldmath $\epsilon$}$ is again dropped in favor of cleaner notation.
In later sections, the subscripts ${\mathbf X}$ and $\mbox{\boldmath $\epsilon$}$ reappear to distinguish between the parameters associated with $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$, when necessary.
Without the mean restriction and under conjugate multivariate normal priors $\mbox{\boldmath $\mu$}_{k}\sim \hbox{MVN}_{p}(\mbox{\boldmath $\mu$}_{0},\mbox{\boldmath $\Sigma$}_{0})$,
the posterior full conditional of $\mbox{\boldmath $\mu$}^{Kp \times 1} = (\mbox{\boldmath $\mu$}_{1}^{\rm T},\dots,\mbox{\boldmath $\mu$}_{K}^{\rm T})^{\rm T}$ is given by
\\
\begin{eqnarray}
\hbox{MVN}_{K p} \left\{\left(\begin{array}{c}
\mbox{\boldmath $\mu$}_{1}^{0} \\
\mbox{\boldmath $\mu$}_{2}^{0}\\
\vdots\\
\mbox{\boldmath $\mu$}_{K}^{0}
\end{array} \right),
\left(\begin{array}{c c c c}
\mbox{\boldmath $\Sigma$}_{1}^{0} & {\mathbf 0} & \dots & {\mathbf 0}\\
{\mathbf 0} & \mbox{\boldmath $\Sigma$}_{2}^{0} & \dots & {\mathbf 0}\\
\vdots & \vdots & & \vdots\\
{\mathbf 0} & {\mathbf 0} & \dots & \mbox{\boldmath $\Sigma$}_{K}^{0}
\end{array} \right)\right\} \equiv \hbox{MVN}_{K p} (\mbox{\boldmath $\mu$}^{0},\mbox{\boldmath $\Sigma$}^{0}), \label{eq: joint mvt normal posterior of the mean vector}
\end{eqnarray}
\\
where $\mbox{\boldmath $\epsilon$}_{ij}$ and other conditioning variables are implicitly understood.
Explicit expressions of $\mbox{\boldmath $\mu$}^{0}$ and $\mbox{\boldmath ${\cal S}igma$}^{0}$ in terms of the conditioning variables can be found in Section \ref{sec: mvt choice of hyper-parameters}.
The posterior full conditional of $\mbox{\boldmath $\mu$}$ under the mean restriction can then be obtained easily by further conditioning the distribution in (\ref{eq: joint mvt normal posterior of the mean vector}) by $\mbox{\boldmath $\mu$}_{R} = \sum_{k=1}^{K}\pi_{k} \mbox{\boldmath $\mu$}_{k} = 0$
and is given by
\\
\begin{eqnarray}
(\mbox{\boldmath $\mu$} \vert \mbox{\boldmath $\mu$}_R = {\mathbf 0}) \sim \hbox{MVN}_{Kp}\{\mbox{\boldmath $\mu$}^{0} - \mbox{\boldmath $\Sigma$}_{1,R}^{0}(\mbox{\boldmath $\Sigma$}_{R,R}^{0})^{-1}\mbox{\boldmath $\mu$}_R^{0},\mbox{\boldmath $\Sigma$}^{0} - \mbox{\boldmath $\Sigma$}_{1,R}^{0}(\mbox{\boldmath $\Sigma$}_{R,R}^{0})^{-1}\mbox{\boldmath $\Sigma$}_{R,1}^{0}\}, \label{eq: conditional mvt normal posterior of the mean vector}
\end{eqnarray}
\\
where $\mbox{\boldmath $\mu$}_{R}^{0} = \sum_{k=1}^{K}\pi_{k} \mbox{\boldmath $\mu$}_{k}^{0} = E(\mbox{\boldmath $\mu$}_{R})$,
$\mbox{\boldmath $\Sigma$}_{k,K} = \pi_{k}\mbox{\boldmath $\Sigma$}_{k}^{0} = \hbox{cov}(\mbox{\boldmath $\mu$}_{k},\mbox{\boldmath $\mu$}_{R})$,
$\mbox{\boldmath $\Sigma$}_{R,R}^{0} = \mbox{\boldmath $\Sigma$}_{K+1,K+1} = \sum_{k=1}^{K}\pi_{k}^{2}\mbox{\boldmath $\Sigma$}_{k}^{0} = \hbox{cov}(\mbox{\boldmath $\mu$}_{R})$,
and $\mbox{\boldmath $\Sigma$}_{R,1}^{0} = ( \mbox{\boldmath $\Sigma$}_{1,K+1}, \mbox{\boldmath $\Sigma$}_{2,K+1}, \dots, \mbox{\boldmath $\Sigma$}_{K,K+1} )$.
To sample from this singular density,
we can first sample from the non-singular distribution of $\{(\mbox{\boldmath $\mu$}_{1}^{\rm T},\mbox{\boldmath $\mu$}_{2}^{\rm T},\dots,\mbox{\boldmath $\mu$}_{K-1}^{\rm T})^{\rm T} \vert \mbox{\boldmath $\mu$}_{R} = {\mathbf 0}\}$,
which can also be trivially obtained from (\ref{eq: conditional mvt normal posterior of the mean vector}), and then set $\mbox{\boldmath $\mu$}_{K} = - \sum_{k=1}^{K-1}\pi_{k}\mbox{\boldmath $\mu$}_{k}/\pi_{K}$.
\iffalse
Two remarks are in order.
First, the symmetric Dirichlet prior on the mixture probabilities plays an additional implicit but important role here.
Although we have used the $K^{th}$ component to enforce the mean restriction,
under exchangeable Dirichlet priors, the posterior is also invariant to permutations of the mixture labels, making all the components equally deserving candidates for this fix and the specific choice of any particular component irrelevant.
Second, the method depends primarily on the properties of the priors on the mixture probabilities and the mean vectors and not on the model for the covariance matrices.
The mechanism can therefore be applied quite generally in conjunction with any model for the component specific covariance matrices that does not disturb the label invariance property of the posterior, such as the MIW and the MLFA models described in this article.
\fi
\vskip 0pt
\begin{figure}[!ht]
\centering
\includegraphics[height=5.5cm, width=16cm, trim=2cm 1cm 1cm 1cm, clip=true]{Graphical_Model1.pdf}
\vskip -10pt
\caption{
Dependency structures in trivariate deconvolution problems with (a) independently distributed and (b) conditionally varying measurement errors.
(c) Dependency structure in a trivariate regression problem with response ${\mathbf Y}$, regression errors ${\mathbf U}$ and bivariate predictor ${\mathbf X}$.
The filled rectangular regions focus on the relationship between the (potentially conditionally varying) errors ${\mathbf U}$
and the (corresponding conditioning) variable ${\mathbf X}$.
The unfilled and the shaded nodes signify latent and observable variables, respectively.
The directed and the undirected edges represent one and two-way relationships, respectively.
The solid black and the dashed gray edges in panel (b)
signify strong and weak dependencies, respectively.
}
\label{fig: graphical models}
\end{figure}
\subsubsection{Conditionally Heteroscedastic Measurement Errors} \label{sec: mvt density of heteroscedastic errors}
We now consider the case when the variances of the measurement errors depend on the associated unknown values of ${\mathbf X}$ through unknown relationships.
Interpreting the conditioning variables ${\mathbf X}$ broadly as predictors, one can loosely connect our problem of modeling conditionally heteroscedastic ${\mathbf U}$ to the problem of covariance regression (Hoff and Niu, 2012; Fox and Dunson, 2016, etc.),
where the covariance of the multivariate regression errors are allowed to vary flexibly with precisely measured and possibly multivariate predictors.
In such problems, the dimension of the regression errors is unrelated to the dimension of the predictors
and different components of the regression errors are assumed to be equally influenced by different components of the predictors.
In multivariate deconvolution problems, in contrast, the dimension of ${\mathbf U}_{ij}$ is exactly the same as the dimension of ${\mathbf X}_{i}$,
the $\ell^{th}$ component $U_{ij\ell}$ being the measurement error associated exclusively with $X_{i\ell}$.
See Figure \ref{fig: graphical models}.
While different components of ${\mathbf U}_{ij}$ may be correlated,
this exclusive association between $U_{ij\ell}$ and $X_{i\ell}$ implies that
the dependence of $U_{ij\ell}$ on ${\mathbf X}_{i}$ should be explained primarily through $X_{i\ell}$.
Figure \ref{fig: mvt EATS data results VFn}, for instance, suggests strong conditional heteroscedasticity patterns
and it is plausible to assume that this conditional variability in $U_{ij\ell}$ can be explained mostly through $X_{i\ell}$ only.
It is interesting to note these contrasts between conditionally varying regression and measurement errors
become particularly prominent in the multivariate set up.
Additionally, the aforementioned covariance regression approaches all assume multivariate normality of the regression errors.
As discussed in the introduction, such strong parametric assumptions on the error distribution are particularly restrictive in measurement error problems.
Additional detailed discussions of these important issues and resulting modeling implications
can be found in Section \ref{sec: mvt comments on the model for U given X} of the Supplementary Materials.
They preclude direct application of existing covariance regression approaches to multivariate deconvolution problems
but warrant models that can highlight the aforementioned unique dependence relationships,
accommodate distributional flexibility while enforcing the mean zero restriction, and
produce computationally stable estimates even in the absence of precise information on the conditioning variable ${\mathbf X}$.
The semiparametric approach that we adopt in this article achieves distributional flexibility, enforces the mean zero restriction,
accommodates the exclusive relationships between $U_{ij\ell}$ and $X_{i\ell}$
but ignores the weak dependencies of $U_{ij\ell}$ on $\{X_{im}\}_{m\neq \ell}$ depicted in Figure \ref{fig: graphical models}(b).
Specifically, we let
\\
\begin{eqnarray}
({\mathbf U}_{ij}\vert {\mathbf X}_{i}) = {\mathbf S}({\mathbf X}_{i})\mbox{\boldmath $\epsilon$}_{ij}, \label{eq: mvt multiplicative structure}
\end{eqnarray}
\\
where ${\mathbf S}({\mathbf X}_{i}) = \hbox{diag}\{s_{1}(X_{i1}),s_{2}(X_{i2}),\dots,s_{p}(X_{ip})\}$ and $\mbox{\boldmath $\epsilon$}_{ij}$, henceforth referred to as the `scaled errors', are distributed independently of ${\mathbf X}_{i}$.
Model (\ref{eq: mvt multiplicative structure}) implies that $\hbox{cov}({\mathbf U}_{ij}\vert {\mathbf X}_{i}) = {\mathbf S}({\mathbf X}_{i})~ \hbox{cov}(\mbox{\boldmath $\epsilon$}_{ij}) ~ {\mathbf S}({\mathbf X}_{i})$ and marginally $\hbox{var}(U_{ij\ell}\vert {\mathbf X}_{i}) = s_{\ell}^{2}(X_{i\ell})\hbox{var}(\epsilon_{ij\ell})$, a function of $X_{i\ell}$ only.
The techniques developed in Section \ref{sec: mvt density of homoscedastic errors} can now be employed to model the density of $\mbox{\boldmath $\epsilon$}_{ij}$, allowing different components of ${\mathbf U}_{ij}$ to be correlated and their joint density to deviate from multivariate normality.
We model the variance functions $s_{\ell}^{2}$, denoted also by $v_{\ell}$, using positive mixtures of B-spline basis functions
with smoothness inducing priors on the coefficients as in Staudenmayer, et al. (2008).
For the $\ell^{th}$ component, partition an interval $[A_{\ell},B_{\ell}]$ of interest into $L_{\ell}$ subintervals using knot points
$A_{\ell} = t_{\ell,1}=\dots=t_{\ell,q+1} < t_{\ell,q+2} < t_{\ell,q+3} < \dots < t_{\ell,q+L_{\ell}} < t_{\ell,q+L_{\ell}+1} = \dots = t_{\ell,2q+L_{\ell}+1}=B_{\ell}$.
A flexible model for the variance functions is given by
\\
\begin{eqnarray}
&& v_{\ell}(X_{i\ell}) = s_{\ell}^{2}(X_{i\ell}) = \textstyle\sum_{j=1}^{J_{\ell}} b_{q,j,\ell}(X_{i\ell}) \exp(\xi_{j\ell})= {\mathbf B}_{q,J_{\ell},\ell}(X_{i\ell}) \exp(\mbox{\boldmath $\xi$}_{\ell}), \label{eq: mvt models for variance functions} \\
&& (\mbox{\boldmath $\xi$}_{\ell}\vert J_{\ell}, \sigma_{\xi,\ell}^{2}) \propto (2\pi\sigma_{\xi,\ell}^{2})^{-J_{\ell}/2} \exp\{-\mbox{\boldmath $\xi$}_{\ell}^{\rm T} P_{\ell}\mbox{\boldmath $\xi$}_{\ell}/(2\sigma_{\xi,\ell}^{2})\},~~~~ \sigma_{\xi,\ell}^{2} \sim \hbox{Inv-Ga}(a_{\xi},b_{\xi}).
\end{eqnarray}
\\
Here $\{b_{q,j,\ell}\}_{j=1}^{J_{\ell}}$ denote $J_{\ell}= (q+L_{\ell})$ B-spline bases of degree $q$ as defined in de Boor (2000), $\mbox{\boldmath $\xi$}_{\ell} = \{\xi_{1\ell},\xi_{2\ell},\dots,\xi_{J_{\ell}\ell}\}^{\rm T}$;
$\exp(\mbox{\boldmath $\xi$}_{\ell}) = \{\exp(\xi_{1\ell}), \exp(\xi_{2\ell}),\dots,\exp(\xi_{J_{\ell}\ell})\}^{\rm T}$;
and $P_{\ell}=D_{\ell}^{\rm T} D_{\ell}$, where $D_{\ell}$ is a $(J_{\ell}-2)\times J_{\ell}$ matrix such that $D_{\ell}\mbox{\boldmath $\xi$}_{\ell}$ computes the second differences in $\mbox{\boldmath $\xi$}_{\ell}$.
The prior $P_{0}(\mbox{\boldmath $\xi$}_{\ell}\vert\sigma_{\xi,\ell}^{2})$ induces smoothness in the coefficients because it penalizes $\sum_{j=1}^{J_{\ell}}(\Delta^2 \xi_{j\ell})^2 = \mbox{\boldmath $\xi$}_{\ell}^{\rm T} P_{\ell}\mbox{\boldmath $\xi$}_{\ell}$, the sum of
squares of the second order differences in $\mbox{\boldmath $\xi$}_{\ell}$ (Eilers and Marx, 1996).
The parameters $\sigma_{\xi,\ell}^{2}$ play the role of smoothing parameters ---
the smaller the value of $\sigma_{\xi,\ell}^{2}$, the stronger the penalty and the smoother the variance function.
The inverse-Gamma hyper-priors on $\sigma_{\xi,\ell}^{2}$ allow the data to have influence on the posterior smoothness and make the approach data adaptive.
Since $s_{\ell}^{2}(X_{i\ell}) \hbox{var}(\epsilon_{ij\ell})=\{s_{\ell}^{2}(X_{i\ell})c\} \{\hbox{var}(\epsilon_{ij\ell})/c\}$ for any $c>0$,
the variance functions $s_{\ell}^{2}$'s can not be uniquely determined
without additional restrictions on $\hbox{var}(\epsilon_{ij\ell})$.
Separate identifiability of ${\mathbf S}$ and $f_{\mbox{\boldmath $\epsilon$}}$ is, however, not required for inference on $f_{{\mathbf X}}$
or to assess the conditional variability in $U_{ij\ell}$.
The latter, for instance, may simply be obtained as $\hbox{var}(U_{ij\ell} \vert X_{i}) = s_{\ell}^{2}(X_{i\ell}) \hbox{var}(\epsilon_{ij\ell})$.
We thus avoid additional identifiability restrictions that would further compound modeling challenges.
Adjustments made to the estimates of $s_{\ell}^{2}$ and $f_{\mbox{\boldmath $\epsilon$}}$
to enable comparisons with the corresponding true values in simulation experiments are discussed in
Section \ref{sec: mvt estimation of variance functions} in the Supplementary Materials.
\subsubsection{Multiplicative Measurement Errors} \label{sec: mvt multiplicative errors}
In this section we consider the case of multivariate multiplicative measurement errors.
The replicates are now assumed to be generated by the model
\begin{eqnarray}
{\mathbf W}_{ij} &=& {\mathbf X}_{i} \circ \widetilde{\mathbf U}_{ij}, \label{eq: multiplicative error}
\end{eqnarray}
\\
where $\circ$ denotes element wise product
and the errors $\widetilde{\mathbf U}_{ij}$ are distributed independently of ${\mathbf X}_{i}$ with $E(\widetilde{\mathbf U}_{ij}) = {\mathbf 1}$.
Importantly, model (\ref{eq: multiplicative error}) can be reformulated to arrive at model (\ref{eq: mvt multiplicative structure}) as
\begin{eqnarray}
{\mathbf W}_{ij} &=& {\mathbf X}_{i} \circ \widetilde{\mathbf U}_{ij} = {\mathbf X}_{i} + {\mathbf U}_{ij}, ~~~\mbox{with}~~~{\mathbf U}_{ij}={\mathbf X}_{i}\circ(\widetilde{\mathbf U}_{ij}-{\mathbf 1}) = {\mathbf S}({\mathbf X}_{i})\mbox{\boldmath $\epsilon$}_{ij}, \label{eq: multiplicative error2}
\end{eqnarray}
with $E({\mathbf U}_{ij} \vert {\mathbf X}_{i})={\mathbf X}_{i} \circ E(\widetilde{\mathbf U}_{ij}-{\mathbf 1})={\mathbf 0}$,
${\mathbf S}({\mathbf X}_{i})=\hbox{diag}\{s_{1}(X_{i1}),\dots,s_{p}(X_{ip})\}$ with $s_{\ell}(X_{i\ell})=X_{i\ell}$
and $\mbox{\boldmath $\epsilon$}_{ij}=(\widetilde{\mathbf U}_{ij}-{\mathbf 1})$ are independent of ${\mathbf X}_{i}$ with $E(\mbox{\boldmath $\epsilon$}_{ij})={\mathbf 0}$.
This observation precludes the need for separate methodology to be developed
for the problem of multivariate density deconvolution in the presence of multiplicative measurement errors
and further emphasizes the importance of the additive conditionally heteroscedastic measurement error model (\ref{eq: mvt multiplicative structure}) developed in Section \ref{sec: mvt density of heteroscedastic errors}.
\section{Posterior Inference} \label{sec: mvt posterior inference}
Inference is based on samples drawn from the posterior using MCMC algorithms.
A Gibbs sampler for the independent error case discussed in Section \ref{sec: mvt density of homoscedastic errors} is presented in
Section \ref{sec: mvt posterior computation} of the Supplementary Materials.
For the conditionally heteroscedastic case discussed in Section \ref{sec: mvt density of heteroscedastic errors},
the full conditionals of the parameters characterizing the variance functions do not have closed form expressions.
MCMC algorithms where we tried to integrate Metropolis-Hastings (MH) steps within the Gibbs sampler to generate samples from the full posterior
were numerically unstable and failed to converge sufficiently quickly.
To address this challenge, we designed a novel two-stage procedure.
For each $\ell=1,\dots,p$, we first estimate the functions $s_{\ell}(X_{i\ell})$ by fitting the univariate deconvolution models $W_{ij\ell} = X_{i\ell} + s_{\ell}(X_{i\ell})\epsilon_{ij\ell}$.
High precision estimates of the variance functions $s_{\ell}^{2}(X_{i\ell})$ can be obtained using the univariate deconvolution models.
See Figure \ref{fig: mvt simulation results VFn d4 n1000 m3 MLFA X1 E1 Ind} in the main article
and Figure \ref{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 E1 AR} in the Supplementary Materials for illustrations.
Parameters characterizing other components of the full model are then sampled using a Gibbs sampler keeping the estimates of the variance functions fixed.
Additional details are deferred to Sections \ref{sec: mvt estimation of variance functions} and \ref{sec: mvt two-stage sampler} of the Supplementary Materials.
\section{Model Identifiability} \label{sec: mvt model identifiability}
This section presents a discussion of model identifiability issues.
The density of interest $f_{{\mathbf X}}$ is identifiable under mild technical assumptions.
In the case of independently distributed measurement errors considered in Section \ref{sec: mvt density of homoscedastic errors} of the main paper,
appealing to Li and Vuong (1998), the densities $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ are identifiable provided $m_{i}\geq 2$ replicates are available for some individuals,
and the characteristic functions $\phi_{{\mathbf X}}({\mathbf t})=E \{\exp(\iota {\mathbf t}^{\rm T}{\mathbf X})\}$ and $\phi_{\mbox{\boldmath $\epsilon$}}({\mathbf t})=E \{\exp(\iota {\mathbf t}^{\rm T}\mbox{\boldmath $\epsilon$})\}$ are non-vanishing everywhere.
In the case of conditionally heteroscedastic measurement errors considered in Section \ref{sec: mvt density of heteroscedastic errors} of the main paper,
appealing to Hu and Schennach (2004), the densities $f_{{\mathbf X}}$ and $f_{{\mathbf U} \vert {\mathbf X}}$ are identifiable provided $m_{i}\geq 3$ replicates are available for some individuals,
the joint, conditional and marginal densities of ${\mathbf W}_{1},{\mathbf W}_{2},{\mathbf W}_{3},{\mathbf X}$ are all bounded, and the density $f_{{\mathbf X}\vert{\mathbf W}}$ is bounded complete
in the sense that the unique solution to $\int f_{{\mathbf X}\vert{\mathbf W}}({\mathbf X})g({\mathbf X})d{\mathbf X}=0$ for all ${\mathbf W}$ and for all bounded $g({\mathbf X})$ is $g({\mathbf X})=0$ for all ${\mathbf X}$.
The following lemma provides a sufficient condition for the density $f_{{\mathbf X}\vert{\mathbf W}}$ to be bounded complete.
\begin{Lem} \label{lem: identifiability}
$f_{{\mathbf X}\vert{\mathbf W}}$ is bounded complete if $E\{\exp(\iota {\mathbf t}^{\rm T}{\mathbf X})\vert {\mathbf W}\}$ is non-vanishing everywhere for all ${\mathbf W}$.
\end{Lem}
\begin{proof}
By Theorem 10C of Goldberg (1961), since $E\{\exp(\iota {\mathbf t}^{\rm T}{\mathbf X})\vert {\mathbf W}\}$ is non-vanishing everywhere for all ${\mathbf W}$, the closed linear span of $f_{{\mathbf X}\vert{\mathbf W}}(\cdot)$ is $L_1(\mathbb{R})$. By the Hahn-Banach Theorem, the dual space of $L_1(\mathbb{R})$ is $L_{\infty}(\mathbb{R})$ and there is an isometric isomorphism from $L_\infty(\mathbb{R})$ to the dual of $L_{1}(\mathbb{R})$ given by $g \mapsto \Phi_g$, where
$\Phi_g(f_{{\mathbf X}\vert{\mathbf W}}) = \int f_{{\mathbf X}\vert{\mathbf W}}({\mathbf X})g({\mathbf X})d{\mathbf X}$ for all ${\mathbf W}$.
Since the closed linear span of $f_{{\mathbf X}\vert{\mathbf W}}(\cdot)$ for all ${\mathbf W}$ is $L_1(\mathbb{R})$,
$\int f_{{\mathbf X}\vert{\mathbf W}}({\mathbf X})g({\mathbf X})d{\mathbf X} = 0$ for all ${\mathbf W}$ implies that the mapping $\Phi_g$ is identically $0$. By the isometric isomorphism above, it follows that $g$ must be identically $0$.
\end{proof}
Different types of completeness of densities are often used as key identifying conditions in measurement error problems.
See, for example, d'Haultfoeuille (2011) and Carroll, et al. (2010).
Here, we have provided a general sufficient condition for bounded completeness to hold true and a novel proof using functional analysis techniques.
Loosely speaking, if the density $f_{{\mathbf X}\vert{\mathbf W}}({\mathbf X})$ varies with ${\mathbf X}$, its characteristic function does not vanish.
Without sufficient variability of the density of ${\mathbf X}\vert{\mathbf W}$, observations on ${\mathbf W}$ do not have enough information to recover the density of ${\mathbf X}$.
Model parameters specifying the components $f_{{\mathbf X}}$, $f_{\mbox{\boldmath $\epsilon$}}$, $s_{\ell}$ etc. are not separately identifiable.
For inference on identifiable functional model components, identifiability of individual parameters is, however, not required.
Indeed, the mixture models and the associated priors were so chosen that the mixture components remain unidentifiable.
This helps simplify MCMC mixing issues. See Section \ref{sec: mvt finite vs infinite mixture models} of the Supplementary Materials.
\section{Model Flexibility}\label{sec: mvt model flexibility}
This section presents a theoretical study of the flexibility of the proposed models.
Proofs of the results are presented in the Supplementary Materials.
We focus on the deconvolution models for conditionally heteroscedastic measurement errors,
the case of independently distributed errors following as a special case.
First we show that componentwise our models for the density $f_{{\mathbf X}}$ of ${\mathbf X}$, the density $f_{\mbox{\boldmath $\epsilon$}}$ of the scaled errors $\mbox{\boldmath $\epsilon$}$, and the variance functions $v_{\ell}$ are all highly flexible.
Building on these results, we then show that our proposed deconvolution models can accommodate a large class of data generating processes.
Let the generic notation $\Pi$ denote a prior on some class of random functions.
Also let ${\cal T}$ denote the target class of functions to be modeled by $\Pi$.
The support of $\Pi$ throws light on the flexibility of $\Pi$.
For $\Pi$ to be a flexible prior, one would expect that ${\cal T}$ or a large subset of ${\cal T}$ would be contained in the support of $\Pi$.
For investigating the flexibility of priors for density functions, a relevant concept is that of Kullback-Leibler (KL) support.
The KL divergence between two densities $f_{0}$ and $f$, denoted by $d_{KL}(f_0,f)$, is defined as
$d_{KL}(f_0,f) = \int f_{0}(Z) ~ \hbox{log} ~\{f_{0}(Z)/f(Z)\} dZ$.
Let $\Pi_{f}$ denote a prior assigned to a random density $f$.
A density $f_0$ is said to belong to the KL support of $\Pi_{f}$ if
$\Pi_{f}\{f:d_{KL}(f_0,f)<\delta\}>0~\forall \delta>0$.
The class of densities in the KL support of $\Pi_{f}$ is denoted by $KL(\Pi_{f})$.
Let ${\cal F}$ be the class of target densities to be modeled by the prior $\Pi_{f}$.
Let ${\cal S}$ denote the support of ${\cal F}$ and $\widetilde{{\cal F}}\subseteq{\cal F}$ denote the class of densities that satisfy the following fairly minimal set of regularity conditions.
Since $\widetilde{\cal F}$ is a large subclass of ${\cal F}$, its inclusion in the KL support of $\Pi_{f}$ would establish the flexibility of $\Pi_{f}$.
\begin{Cond}\label{cond: mvt regularity conditions on the density}
1. $f_{0}$ is continuous on ${\cal S}$ except on a set of measure zero.\\
2. The second order moments of $f_{0}$ are finite.\\
3. For some $r>0$ and for all ${\mathbf z}\in{\cal S}$, there exist hypercubes $C_{r}({\mathbf z})$ with side length $r$ and ${\mathbf z} \in C_{r}({\mathbf z})$ such that
\begin{eqnarray*}
\int f_{0}({\mathbf z}) ~ \hbox{log} \left\{\frac{f_{0}({\mathbf z})}{\inf_{{\mathbf t}\in C_{r}({\mathbf z})} f_{0}({\mathbf t})}\right\} d{\mathbf z} < \infty.
\end{eqnarray*}
\end{Cond}
Let $\Pi_{{\mathbf X}}$ be a generic notation for both the MIW and the MLFA prior on $f_{{\mathbf X}}$
defined in Section \ref{sec: mvt density of interest}.
Similarly, let $\Pi_{\mbox{\boldmath $\epsilon$}}$ be a generic notation for both the MIW and the MLFA prior on $f_{\mbox{\boldmath $\epsilon$}}$
defined in Section \ref{sec: mvt density of errors}.
When the measurement errors are distributed independently of ${\mathbf X}$, the support of $f_{{\mathbf X}}$, say ${\cal X}$, may be taken to be any subset of $\mathbb{R}^{p}$.
For conditionally heteroscedastic measurement errors,
the variance functions $s_{\ell}^{2}(\cdot)$ that capture the conditional variability are modeled by mixtures of B-splines defined on closed intervals $[A_{k},B_{k}]$.
In this case, the support of $f_{{\mathbf X}}$ is assumed to be the closed hypercube ${\cal X} = [A_{1},B_{1}]\times \dots \times [A_{p},B_{p}]$.
Let ${\cal F}_{{\mathbf X}}$ denote the set of all densities on ${\cal X}$, the target class of densities to be modeled by $\Pi_{{\mathbf X}}$
and $\widetilde{\cal F}_{{\mathbf X}} \subseteq {\cal F}_{{\mathbf X}}$ denote the class of densities $f_{0{\mathbf X}}$ that satisfy Conditions \ref{cond: mvt regularity conditions on the density}.
Similarly, let ${\cal F}_{\mbox{\boldmath $\epsilon$}}$ denote the set of all densities on $\mathbb{R}^{p}$ that have mean zero
and $\widetilde{\cal F}_{\mbox{\boldmath $\epsilon$}} \subseteq {\cal F}_{\mbox{\boldmath $\epsilon$}}$ denote the class of densities $f_{0\mbox{\boldmath $\epsilon$}}$ that satisfy Conditions \ref{cond: mvt regularity conditions on the density}.
The following Lemma establishes the flexibility of the models for $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$.
\begin{Lem} \label{Lem: mvt KL support of the priors}
1. $\widetilde{{\cal F}}_{{\mathbf X}}\subseteq KL(\Pi_{{\mathbf X}})$
2. $\widetilde{{\cal F}}_{\mbox{\boldmath $\epsilon$}} \subseteq KL(\Pi_{\mbox{\boldmath $\epsilon$}})$.
\end{Lem}
For investigating the flexibility of models for general classes of functions, a relevant concept is that of sup norm support.
The sup norm distance between two functions $g_{0}$ and $g$, denoted by $||g_0-g||_{\infty}$, is defined as
$||g_0-g||_{\infty} = \sup_{Z} |g_{0}(Z)-g(Z)|$.
Let $\Pi_{g}$ denote a prior assigned to a random function $g$.
A function $g_0$ is said to belong to the sup norm support of $\Pi_{g}$ if
$\Pi_{g}(g: ||g_0-g||_{\infty}<\delta)>0~\forall \delta>0$.
The class of functions in the sup norm support of $\Pi_{g}$ is denoted by $SN(\Pi_{g})$.
Let $\Pi_{{\mathbf V}}$ denote the prior on the variance functions based on mixtures of B-spline basis functions defined in Section \ref{sec: mvt density of heteroscedastic errors}.
For notational convenience we consider the case of a univariate variance function supported on $[A,B]$.
Extension to the multivariate case with variance functions supported on ${\cal X}$ is technically trivial.
Let ${\cal C}_{+}[A,B]$ denote the set of continuous functions from $[A,B]$ to $\mathbb{R}^{+}$.
Also, for $\alpha \leq (q+1)$, let ${\cal C}_{+}^{\alpha}[A,B] \subseteq {\cal C}_{+}[A,B]$ denote the set of functions
that are $\alpha_{0}$ times continuously differentiable, and for all $v_{0} \in {\cal C}_{+}^{\alpha}[A,B]$, $\norm{v_{0}}_{\alpha}<\infty$,
where $\alpha_{0}$ is the largest integer less than or equal to $\alpha$ and the seminorm is defined by
$\norm{v_{0}}_{\alpha} = \sup_{X,X'\in[A,B], X\neq X'} \{ |v_{0}^{(\alpha_0)}(X)-v_{0}^{(\alpha_0)}(X')| / |X-X'|^{\alpha-\alpha_0}\}$.
The local support properties of B-splines make the models for the variance functions very flexible as is indicated by the following lemma.
\begin{Lem} \label{Lem: mvt sup norm support of priors on variance functions}
$ {\cal C}_{+}^{\alpha}[A,B] \subseteq {\cal C}_{+}[A,B] \subseteq SN(\Pi_{{\mathbf V}})$.
\end{Lem}
Although technically the sup norm distance between linear combinations of B-splines and any continuous function can be made arbitrarily small by increasing the number of knots,
for obvious reasons the actual bounds for the sup norm distance may not be very sharp if the function to be modeled is wiggly.
However, for most applications of practical importance, the true variance function may be assumed to be smooth,
that is, to belong to some ${\cal C}_{+}^{\alpha}[A,B]$ with $\alpha\geq 1$.
Therefore, for practical reasons, it is only important that the smaller H\"{o}lder class of functions ${\cal C}_{+}^{\alpha}[A,B]$ belongs to the sup norm support of $\Pi_{{\mathbf V}}$.
As shown in Section \ref{sec: mvt proof of sup norm support of priors on variance functions} of the Supplementary Materials,
the bounds for sup norm distance in this case will also be much sharper.
Since the models for the variance functions $v_{\ell}$ and the models for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$ are separately very flexible,
under model (\ref{eq: mvt multiplicative structure}) on the measurement errors,
the implied conditional and joint densities are also expected to be very flexible.
This is investigated in the next lemma.
For a given ${\mathbf X}$, let $\Pi_{{\mathbf U}\vert {\mathbf X}}$ denote the prior for $f_{{\mathbf U}\vert {\mathbf X}}$ induced by $\Pi_{\mbox{\boldmath $\epsilon$}}$ and $\Pi_{{\mathbf V}}$ under model (\ref{eq: mvt multiplicative structure}).
Define $\widetilde{\cal F}_{{\mathbf U}\vert {\mathbf X}} = \{f_{0{\mathbf U}\vert {\mathbf X}}: f_{0{\mathbf U}\vert {\mathbf X}}({\mathbf U}) = \prod_{k=1}^{p}s_{0k}^{-1}(X_{k}) f_{0\mbox{\boldmath $\epsilon$}}\{{\mathbf S}_{0}^{-1}({\mathbf X}){\mathbf U}\}, s_{0k}^{2}\in {\cal C}_{+}[A_{k},B_{k}]~\hbox{for}~ k=1,\dots,p, f_{0\mbox{\boldmath $\epsilon$}}\in \widetilde{{\cal F}}_{\mbox{\boldmath $\epsilon$}}\}$.
Also let $\Pi_{{\mathbf U}\vert {\mathbf V}}$ denote the prior for the unknown conditional density of ${\mathbf U}$ induced by $\Pi_{\mbox{\boldmath $\epsilon$}}$ and $\Pi_{{\mathbf V}}$ under model (\ref{eq: mvt multiplicative structure}).
Define $\widetilde{\cal F}_{{\mathbf U}\vert \bullet} = \{f_{0{\mathbf U}\vert \bullet}: ~\hbox{for any given}~{\mathbf X}\in{\cal X}, ~ f_{0{\mathbf U}\vert\bullet} = f_{0{\mathbf U}\vert {\mathbf X}}\in \widetilde{\cal F}_{{\mathbf U}\vert {\mathbf X}}\}$.
Finally, let $\Pi_{{\mathbf X},{\mathbf U}}$ denote the prior for the joint density of $({\mathbf X},{\mathbf U})$ induced by $\Pi_{{\mathbf X}}$,
$\Pi_{\mbox{\boldmath $\epsilon$}}$ and $\Pi_{{\mathbf V}}$ under model (\ref{eq: mvt multiplicative structure}).
Define $\widetilde{\cal F}_{{\mathbf X},{\mathbf U}} = \{f_{0,{\mathbf X},{\mathbf U}}: f_{0,{\mathbf X},{\mathbf U}}({\mathbf X},{\mathbf U}) = f_{0,{\mathbf X}}({\mathbf X})f_{0,{\mathbf U} \vert {\mathbf X}}({\mathbf U}\vert {\mathbf X}),~\hbox{where}~ f_{0{\mathbf X}}\in \widetilde{\cal F}_{{\mathbf X}} ~\hbox{and}~ f_{0{\mathbf U}\vert {\mathbf X}} \in \widetilde{\cal F}_{{\mathbf U}\vert {\mathbf X}} ~\hbox{for all}~ {\mathbf X}\in {\cal X}\}$.
\begin{Lem} \label{Lem: mvt KL support of the prior on the density of U|X}
1. $\widetilde{\cal F}_{{\mathbf U}\vert {\mathbf X}} \subseteq KL(\Pi_{{\mathbf U}\vert {\mathbf X}})$ for any given ${\mathbf X}\in {\cal X}$.\\
2. For any $f_{0{\mathbf U}\vert \bullet}\in\widetilde{\cal F}_{{\mathbf U}\vert \bullet}$, $\Pi_{{\mathbf U}\vert {\mathbf V}}\{\sup_{{\mathbf X}\in{\cal X}} d_{KL}(f_{0{\mathbf U}\vert {\mathbf X}},f_{{\mathbf U}\vert {\mathbf X}})<\delta\}>0$ for all $\delta>0$.\\
3. $\widetilde{\cal F}_{{\mathbf X},{\mathbf U}} \subseteq KL(\Pi_{{\mathbf X},{\mathbf U}})$.
\end{Lem}
The flexibility of the implied model for the marginal density $f_{{\mathbf W}}$ is the subject of our final result.
Since the only observed quantities are ${\mathbf W}_{ij}$, the support of the induced prior on $f_{{\mathbf W}}$ tells us about the types of likelihood functions the model can approximate.
Let $\Pi_{{\mathbf W}}$ denote the prior for the density of ${\mathbf W}$ induced by $\Pi_{{\mathbf X}}$, $\Pi_{\mbox{\boldmath $\epsilon$}}$ and $\Pi_{{\mathbf V}}$ under model (\ref{eq: mvt multiplicative structure}).
Also let $\widetilde{\cal F}_{{\mathbf W}} = \{f_{0{\mathbf W}}: f_{0{\mathbf W}}({\mathbf W}) = \int f_{0{\mathbf X}}({\mathbf X})f_{0{\mathbf U}\vert {\mathbf X}}({\mathbf W}-{\mathbf X})d{\mathbf X}, f_{0{\mathbf X}} \in \widetilde{\cal F}_{{\mathbf X}}, f_{0{\mathbf U}\vert \bullet} \in \widetilde{\cal F}_{{\mathbf U}\vert \bullet}\}$,
the class of densities $f_{0{\mathbf W}}$ that can be obtained as the convolution of two densities $f_{0{\mathbf X}}$ and $f_{0{\mathbf U}\vert \bullet}$, where $f_{0{\mathbf X}}\in \widetilde{\cal F}_{{\mathbf X}}$ and $f_{0{\mathbf U}\vert \bullet} \in \widetilde{\cal F}_{{\mathbf U}\vert \bullet}$.
Since the supports of $\Pi_{{\mathbf X}}$ and $\Pi_{{\mathbf U}\vert {\mathbf X}}$ are large,
it is expected that the support of $\Pi_{{\mathbf W}}$ will also be large.
However, because convolution is involved, investigation of KL support of $\Pi_{{\mathbf W}}$ is a difficult problem.
A weaker but relevant concept is that of $L_1$ support.
The $L_1$ distance between two densities $f_{0}$ and $f$, denoted by $||f_{0}-f||_{1}$, is defined as
$||f_0-f||_{1} = \int |f_{0}(Z) - f(Z)| dZ$.
A density $f_0$ is said to belong to the $L_1$ support of $\Pi_{f}$ if
$\Pi_{f}(f: ||f_0-f||_{1}<\delta)>0~\forall \delta>0$.
The class of densities in the $L_1$ support of $\Pi_{f}$ is denoted by $L_{1}(\Pi_{f})$.
The following theorem shows that the $L_1$ support of $\Pi_{{\mathbf W}}$ is large.
\begin{Thm} \label{Thm: mvt L1 support of induced prior on density of W}
$\widetilde{\cal F}_{{\mathbf W}} \subseteq L_{1}(\Pi_{{\mathbf W}})$.
\end{Thm}
The proofs of these results are deferred to Section \ref{sec: mvt proofs of theoretical results} of the Supplementary Materials.
The proofs require that the number of mixture components $K$ be allowed to vary over $\mathbb{N}$, the set of all positive integers,
through priors, denoted by the generic notation $P_{0}(K)$, that assign positive probability to all $K\in \mathbb{N}$.
Posterior computation for such methods will be computationally intensive, especially in a complicated multivariate set up like ours.
In our implementation, we thus keep the number of mixture components fixed at finite values.
\section{Simulation Experiments} \label{sec: mvt simulation studies}
The mean integrated squared error (MISE) of estimation of $f_{{\mathbf X}}$ by $\widehat{f}_{{\mathbf X}}$ is defined as $MISE = E_{f_{{\mathbf X}}} \int \{f_{{\mathbf X}}({\mathbf X})-\widehat{f}_{{\mathbf X}}({\mathbf X})\}^{2}d{\mathbf X}$.
Based on $B$ simulated data sets, a Monte Carlo estimate of MISE is given by
$MISE_{est} = ~ (BM)^{-1}~\sum_{b=1}^{B}\sum_{m=1}^{M}\{f_{{\mathbf X}}({\mathbf X}_{b,m})-\widehat{f}_{{\mathbf X}}^{(b)}({\mathbf X}_{b,m})\}^{2}/p_{0}({\mathbf X}_{b,m})$,
where $\{{\mathbf X}_{b,m}\}_{b=1,m=1}^{B,M}$ are random samples from the density $p_{0}$.
We designed simulation experiments to evaluate the MISE performance of the proposed models for a wide range of possibilities.
The MISEs we report here are all based on $100$ simulated data sets
and $M=10^6$ samples generated from each of the two densities (a) $p_{0} = f_{{\mathbf X}}$, the true density of ${\mathbf X}$,
and (b) $p_{0}$ that is uniform on the hypercube with edges $\min_{k}\{\mbox{\boldmath $\mu$}_{{\mathbf X},k}-3{\mathbf 1}_{p}\}$ and $\max_{k}\{\mbox{\boldmath $\mu$}_{{\mathbf X},k}+3{\mathbf 1}_{p}\}$.
With carefully chosen initial values and proposal densities for the MH steps, we were able to achieve quick convergence for the MCMC samplers.
The use of exchangeable Dirichlet priors helped simplify mixing issues (Geweke, 2007).
See Section \ref{sec: mvt computational complexity} in the Supplementary Materials for additional discussions.
We programmed our methods in {R}.
In each case, we ran $3000$ MCMC iterations and discarded the initial $1000$ iterations as burn-in.
The post burn-in samples were thinned by a thinning interval of length $5$.
For the univariate samplers, $1000$ MCMC iterations with a burn-in of $500$ sufficed to produce stable estimates of the variance functions.
In our experiments with much larger iteration numbers and burn-ins, the MISE performances remained practically the same.
This being the first article that tries to solve the problem of multivariate density deconvolution when the measurement error density is unknown,
the proposed MIW and MLFA models have no competitors.
We thus compared our models with a naive Bayesian method that ignores measurement errors and treats the subject specific means
as precisely measured observations instead, modeling $f_{{\mathbf X}}$ by a finite mixture of multivariate normals as in (\ref{eq: mixture model for f_X})
with inverse Wishart priors on the component specific covariance matrices.
We considered two choices for the sample size $n = 500, 1000$.
For each subject, we simulated $m_{i}=3$ replicates.
The true density of ${\mathbf X}$ was chosen to be
$f_{{\mathbf X}}({\mathbf X}) = \sum_{k=1}^{K_{{\mathbf X}}} \pi_{{\mathbf X},k}~ \hbox{MVN}_{p}({\mathbf X} \vert \mbox{\boldmath $\mu$}_{{\mathbf X},k},\mbox{\boldmath $\Sigma$}_{{\mathbf X},k})$ with $p=4$, $K_{{\mathbf X}}=3$, $\mbox{\boldmath $\pi$}_{{\mathbf X}} = (0.25,0.50,0.25)^{\rm T}$, $\mbox{\boldmath $\mu$}_{{\mathbf X},1} = (0.8,6,4,5)^{\rm T}$, $\mbox{\boldmath $\mu$}_{{\mathbf X},2} = (2.5,4,5,6)^{\rm T}$ and $\mbox{\boldmath $\mu$}_{{\mathbf X},3} = (6,4,2,4)^{\rm T}$.
For the density of the measurement errors $f_{\mbox{\boldmath $\epsilon$}}$ we considered two choices, namely
\begin{enumerate}
\item $f_{\mbox{\boldmath $\epsilon$}}^{(1)}(\mbox{\boldmath $\epsilon$}) = \hbox{MVN}_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$}})$, and
\item $f_{\mbox{\boldmath $\epsilon$}}^{(2)}(\mbox{\boldmath $\epsilon$}) = \sum_{k=1}^{K_{\mbox{\boldmath $\epsilon$}}} \pi_{\mbox{\boldmath $\epsilon$},k}~ \hbox{MVN}_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},k},\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},k})$ with $K_{\mbox{\boldmath $\epsilon$}}=3$, $\mbox{\boldmath $\pi$}_{\mbox{\boldmath $\epsilon$}} = (0.2,0.6,0.2)^{\rm T}$, $\mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},1} = (-0.3,0,0.3,0)^{\rm T}$, $\mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},2} = (-0.5,0.4,0.5,0)^{\rm T}$ and
$\mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},3} = -(\pi_{\mbox{\boldmath $\epsilon$},1}\mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},1}+\pi_{\mbox{\boldmath $\epsilon$},2}\mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},2})/\pi_{\mbox{\boldmath $\epsilon$},3}$.
\end{enumerate}
For the component specific covariance matrices, we set $\mbox{\boldmath $\Sigma$}_{{\mathbf X},k} = {\mathbf D}_{{\mathbf X}}\mbox{\boldmath $\Sigma$}_{{\mathbf X},0}{\mathbf D}_{{\mathbf X}}$ for each $k$, where ${\mathbf D}_{{\mathbf X}} = \hbox{diag}(0.75^{1/2},\dots,0.75^{1/2})$.
Similarly, $\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$}, k} = {\mathbf D}_{\mbox{\boldmath $\epsilon$}}\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},0}{\mathbf D}_{\mbox{\boldmath $\epsilon$}}$ for each $k$, where ${\mathbf D}_{\mbox{\boldmath $\epsilon$}} = \hbox{diag}(0.3^{1/2},\dots,0.3^{1/2})$.
For each pair of $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$, we considered four types of covariance structures for $\mbox{\boldmath $\Sigma$}_{{\mathbf X},0} = \{(\sigma_{ij}^{{\mathbf X},0})\}$ and $\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},0} = \{(\sigma_{ij}^{\mbox{\boldmath $\epsilon$},0})\}$, namely
\begin{enumerate}
\item Identity (I): $\mbox{\boldmath $\Sigma$}_{{\mathbf X},0} = \mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},0} = \hbox{I}_{p}$,
\item Latent Factor (LF): $\mbox{\boldmath $\Sigma$}_{{\mathbf X},0} = \mbox{\boldmath $\Lambda$}_{{\mathbf X}}\mbox{\boldmath $\Lambda$}_{{\mathbf X}}^{\rm T} + \mbox{\boldmath $\Omega$}_{{\mathbf X}}$, with $\mbox{\boldmath $\Lambda$}_{{\mathbf X}} = (0.7,\dots,0.7)^{\rm T}$ and $\mbox{\boldmath $\Omega$}_{{\mathbf X}} = \hbox{diag}(0.51,\dots,0.51)$, and $\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},0} = \mbox{\boldmath $\Lambda$}_{\mbox{\boldmath $\epsilon$}}\mbox{\boldmath $\Lambda$}_{\mbox{\boldmath $\epsilon$}}^{\rm T} + \mbox{\boldmath $\Omega$}_{\mbox{\boldmath $\epsilon$}}$, with $\mbox{\boldmath $\Lambda$}_{\mbox{\boldmath $\epsilon$}} = (0.5,\dots,0.5)^{\rm T}$ and $\mbox{\boldmath $\Omega$}_{\mbox{\boldmath $\epsilon$}}=\hbox{diag}(0.75,\dots,0.75)$,
\item Autoregressive (AR): $\sigma_{ij}^{{\mathbf X},0} = 0.7^{\abs{i-j}}$ and $\sigma_{ij}^{\mbox{\boldmath $\epsilon$},0} = 0.5^{\abs{i-j}}$ for each $(i,j)$, and
\item Exponential (EXP): $\sigma_{ij}^{{\mathbf X},0} = \exp(-0.5\abs{i-j})$ and $\sigma_{ij}^{\mbox{\boldmath $\epsilon$},0} = \exp(-0.9\abs{i-j})$ for each $(i,j)$.
\end{enumerate}
The parameters were chosen to produce a wide variety of one and two dimensional marginal densities,
see Figure \ref{fig: mvt simulation results XS d4 n1000 m3 MLFA X1 E1 Ind} and also Figure \ref{fig: mvt simulation results ES d4 n1000 m3 MLFA X1 E1 Ind}.
Scale adjustments by multiplication with ${\mathbf D}_{{\mathbf X}}$ and ${\mathbf D}_{\mbox{\boldmath $\epsilon$}}$ were done so that the simulated values of each component of ${\mathbf X}$ fall essentially in the range $(-2,6)$
and the simulated values of all components of $\mbox{\boldmath $\epsilon$}$ fall essentially in the range $(-3,3)$.
For conditionally heteroscedastic measurement errors, we set the true variance functions at $s_{\ell}^{2}(X)=(1+X/4)^{2}$ for each component $\ell$.
A total of $16~ (2\times 1\times 2\times 4)$ cases were thus considered for both independent and conditionally heteroscedastic measurement errors.
We first discuss the results of the simulation experiments when the measurement errors ${\mathbf U}$ were independent of ${\mathbf X}$.
The estimated MISEs are presented in Table \ref{tab: mvt MISEs homoscedastic}.
When the true $f_{\mbox{\boldmath $\epsilon$}}$ was a single component multivariate normal,
the MLFA model produced the lowest MISE when the true covariance matrices were diagonal.
In all other cases the MIW model produced the best results.
When the true $f_{\mbox{\boldmath $\epsilon$}}$ was a mixture of multivariate normals, the model complexity increases and the performance of the MIW model started to deteriorate.
In this case, the MLFA model dominated the MIW model when the true covariance matrices were either diagonal or had a latent factor characterization.
The estimated MISEs for the cases when ${\mathbf U}$ were conditionally heteroscedastic
are presented in Table \ref{tab: mvt MISEs heteroscedastic}.
Models that accommodate conditional heteroscedasticity are significantly more complex
compared to models that assume independence of the measurement errors from ${\mathbf X}$.
The numerically more stable MLFA model thus out-performed the MIW model in all 32 cases.
The improvements were particularly significant when the true covariance matrices were sparse and the number of subjects was small ($n=500$).
The true and estimated univariate and bivariate marginals of $f_{{\mathbf X}}$
produced by the MIW and the MLFA methods
when the true density of the scaled errors was a mixture of multivariate normals ($f_{\mbox{\boldmath $\epsilon$}}^{(2)}$) and the component specific covariance matrices were diagonal ($\hbox{I}$)
are summarized in Figure \ref{fig: mvt simulation results XS d4 n1000 m3 MIW X1 E1 Ind} and Figure \ref{fig: mvt simulation results XS d4 n1000 m3 MLFA X1 E1 Ind}, respectively.
The true and estimated univariate and bivariate marginals for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$ for this case
produced by the two methods
are summarized in Figure \ref{fig: mvt simulation results ES d4 n1000 m3 MIW X1 E1 Ind} and Figure \ref{fig: mvt simulation results ES d4 n1000 m3 MLFA X1 E1 Ind}, respectively.
The true and the estimated variance functions produced by the univariate submodels are summarized in Figure \ref{fig: mvt simulation results VFn d4 n1000 m3 MLFA X1 E1 Ind}.
Comparisons between Figure \ref{fig: mvt simulation results XS d4 n1000 m3 MIW X1 E1 Ind} and Figure \ref{fig: mvt simulation results XS d4 n1000 m3 MLFA X1 E1 Ind}
illustrate the limitations of the MIW models in capturing high dimensional sparse covariance matrices and the improvements that can be achieved by the MLFA models.
The estimates of $f_{\mbox{\boldmath $\epsilon$}}$ produced by the two methods are in better agreement.
This may be attributed to the fact that many more residuals are available for estimating $f_{\mbox{\boldmath $\epsilon$}}$ than there are ${\mathbf X}_{i}$'s to estimate $f_{{\mathbf X}}$.
Figure \ref{fig: mvt simulation results VFn d4 n1000 m3 MLFA X1 E1 Ind} in the main paper and Figures \ref{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 E1 AR} and \ref{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 HT_E0 Ind} in the Supplementary Materials
show that the univariate submodels can recover the true variance functions well.
Additional figures when the true covariance matrices had auto-regressive structure (AR) are presented in the Supplementary Materials.
In this case the true covariance matrices were not sparse.
The MLFA method still vastly dominated the MIW method when the sample size was small ($n=500$).
When the sample size was large ($n=1000$) the two methods produced comparable results.
The proposed deconvolution methods, in particular the MLFA method, are highly scalable.
In small scale simulations, not reported here, we tried $p=6,8$ and $10$ and observed good empirical performance.
We have focused here on $p=4$ dimensional problems since with $p=4$ the numbers of univariate and bivariate marginals, $p=4$ and $\binom{p}{2}=6$,
remain manageable and the results are conveniently graphically summarized.
Additional small scale simulations for a variety of other distributions with similar MISE patterns are presented in the Supplementary Materials.
\section{Example} \label{sec: mvt data analysis}
Dietary habits are known to be leading causes of many chronic diseases.
Accurate estimation of the distributions of dietary intakes is thus important
in nutritional epidemiologic surveillance and epidemiology.
Nutritionists are typically interested not just in the consumption patterns of individual dietary components but also in their joint consumption patterns.
By the very nature of the problem, ${\mathbf X}$, the average long term daily intakes of the dietary components, can never be directly observed.
Data are thus typically collected from a representative sample of the population in the form of dietary recalls,
the subjects participating in the study remembering and reporting the type and amount of food they had consumed in the past 24 hours.
The problem of estimating the joint consumption pattern of the dietary components from the contaminated 24-hour recalls then becomes a problem of multivariate density deconvolution.
A large scale epidemiologic study conducted by the National Cancer Institute,
the Eating at America's Table (EATS) study (Subar, et al. 2001),
serves as the motivation for this paper.
In this study $n=965$ participants were interviewed $m_i=4$ times over the course of a year
and their 24 hour dietary recalls (${\mathbf W}_{ij}$'s) were recorded.
The goal is to estimate the joint consumption patterns of the true daily intakes (${\mathbf X}_{i}$'s).
To illustrate our methodology, we consider the problem of estimating the joint consumption pattern of four dietary components,
namely (a) carbohydrate, (b) fiber, (c) protein and (d) the mineral potassium.
Figure \ref{fig: mvt EATS data results VFn} shows the plots of subject-specific means versus subject-specific variances for daily intakes of the dietary components
with the estimates of the variance functions produced by univariate submodels superimposed over them.
As is clearly identifiable from this plot, conditional heteroscedasticity is a very prominent feature of the measurements errors contaminating the 24 hour recalls.
The estimated univariate and bivariate marginal densities of average long term daily intakes of the dietary components produced
by the MIW method and the MLFA method are summarized in Figure \ref{fig: mvt EATS data results XS}.
The estimated univariate and bivariate marginal densities for the scaled errors are summarized in Figure \ref{fig: mvt EATS data results ES}.
The estimated marginals of ${\mathbf X}$ produced by the two methods look quite different,
while the estimated marginals of $\mbox{\boldmath $\epsilon$}$ are in close agreement.
The estimated univariate and bivariate marginal densities of the long term intakes of the dietary components produced by the MIW model look irregular and unstable,
whereas the estimates produced by the MLFA model look relatively more regular and stable.
In experiments not reported here, we observed that the estimates produced by the MIW method were sensitive to the choice of the number of mixture components,
but the estimates produced by the MLFA model were quite robust.
The trace plots and the frequency distributions of the numbers of nonempty mixture components are summarized
in Figures \ref{fig: mvt EATS data results Trace Plots MIW} and \ref{fig: mvt EATS data results Trace Plots MLFA} in the Supplementary Materials
and provide some idea about the relative stability of the two methods.
These observations are similar to that made in Section \ref{sec: mvt simulation studies} for conditionally heteroscedastic measurement errors and sparse covariance matrices.
We next comment only on the estimates produced by the MLFA method assuming them to be closer to the truth.
The estimates show that the long term daily intakes of the four dietary components are strongly correlated.
The shapes of the bivariate consumption patterns suggest deviations from normality.
Similarly, the shapes of the bivariate marginals for the scaled errors suggest that the measurement errors in the reported 24 hour recalls are positively correlated and deviate from normality.
People who consume more are expected to do so for most dietary components.
Strong correlations between the intakes of the dietary components are thus somewhat expected.
The correlations among different components of the measurement errors suggest that people usually have a tendency to either over-report or under-report the daily intakes.
These findings illustrate the importance of robust but numerically stable multivariate deconvolution methods in nutritional epidemiologic studies.
Additional discussions on potentially far-reaching impact of our work on nutritional epidemiology studies are deferred to
Section \ref{sec: mvt potential impact} in the Supplementary Materials.
\section{Discussion} \label{sec: mvt discussion}
We considered the problem of multivariate density deconvolution when the measurement error density is not known
but replicated proxies are available for some individuals.
We used flexible finite mixtures of multivariate normal kernels with symmetric Dirichlet priors on the mixture probabilities to model both the density of interest and the density of the measurement errors.
We proposed a novel technique to make the model for the density of the errors satisfy a zero mean restriction.
We showed that the dense parametrization of inverse Wishart priors is not suitable for modeling covariance matrices in the presence of measurement errors.
We proposed a numerically more stable approach based on latent factor characterization of the covariance matrices with sparsity inducing priors on the factor loading matrices.
We built models for conditionally heteroscedastic additive measurement errors that also automatically accommodate multivariate multiplicative measurement errors.
The methodological contributions of this article are not limited to deconvolution problems.
Mixtures of latent factor analyzers with sparsity inducing priors on the factor loading matrices can be used in other high dimensional applications including ordinary density estimation.
The techniques proposed in Section \ref{sec: mvt density of homoscedastic errors} to enforce the mean zero moment restriction on the measurement errors can be readily used to model multivariate regression errors that are distributed independently of the predictors.
The technique can also be adapted to relax the strong assumption of multivariate normality made by Hoff and Niu (2012) and Fox and Dunson (2016) in covariance regression problems.
As explained in Sections \ref{sec: mvt density of heteroscedastic errors} and \ref{sec: mvt multiplicative errors} in the main paper and also in Section \ref{sec: mvt comments on the model for U given X} in the Supplementary Materials,
the structural separability assumption (\ref{eq: mvt multiplicative structure})
arises naturally in both additive and multiplicative multivariate measurement error settings.
It would still be interesting, in future work, to consider more general covariance models that allow $\hbox{var}(U_{ij\ell}\vert {\mathbf X})$
to be explained primarily by $X_{i\ell}$, as in the current approach,
but would allow the residual variability to be explained by the remaining components $\{X_{im}\}_{m\neq \ell}$ of ${\mathbf X}$.
The current MCMC based implementation of the proposed methodology is computationally intensive.
We are pursuing the development of faster algorithms for approximate posterior inference as the subject of a separate manuscript.
The question of consistency of Bayesian procedures is intimately related to the flexibility of the priors.
For instance, in ordinary density estimation problems inclusion of the true density in the KL support of the prior is a sufficient condition to ensure weak consistency
via the Schwartz theorem.
In density deconvolution problems such a condition is still necessary but no longer sufficient.
The results from Section \ref{sec: mvt model flexibility} thus provide crucial first steps in that direction.
We have not pursued the question of consistency of the proposed deconvolution methods any further in this article.
It remains an important direction for future research.
\baselineskip=14pt
\section*{Supplementary Materials}
The Supplementary Materials discuss
the choice of hyper-parameters and MCMC algorithms to sample from the posterior,
including the two-stage estimation procedure for conditionally heteroscedastic measurement errors.
The Supplementary Materials also present our arguments in favor of finite mixture models,
pointing out how their close connections and their subtle differences with possible infinite dimensional alternatives
are exploited to achieve significant reduction in computational complexity
while retaining the major advantages of infinite dimensional mixture models including model flexibility and automated model selection and model averaging.
The Supplementary Materials additionally present
discussions on the contrasts between regression and measurement errors
that preclude the use of covariance regression techniques to model conditionally heteroscedastic measurement errors,
the proofs of the theoretical results presented in Section \ref{sec: mvt model flexibility},
some additional figures,
and results of additional simulation experiments.
R programs implementing the deconvolution methods for conditionally heteroscedastic errors are included as part of the Supplementary Materials.
The EATS data analyzed in Section \ref{sec: mvt data analysis} can be accessed from National Cancer Institute by arranging a Material Transfer Agreement.
A simulated data set, simulated according to one of the designs described in Section \ref{sec: mvt simulation studies},
and a `readme' file providing additional details are also included in the Supplementary Materials.
\baselineskip=14pt
\section*{Acknowledgments}
Pati's research was supported by Award No. N00014-14-1-0186 from the Office of Naval Research.
Carroll's research was supported in part by a grant U01-CA057030 from the National Cancer Institute.
Mallick's research was supported in part by National Cancer Institute of the National Institutes of Health under award number R01CA194391.
We acknowledge the Texas A\&M University Brazos HPC cluster that contributed to the research reported here.
\section*{References}
\par\vskip 2mm\noindent\refhg
Bhattacharya, A. and Dunson, D. B. (2011). Sparse Bayesian infinite factor models.
{\it Biometrika}, 98, 291-306.
\par\vskip 2mm\noindent\refhg
Bhattacharya, A., Pati, D., Pillai, N. and Dunson, D. B. (2014). Bayesian shrinkage.
\emph{Unpublished manuscript}.
\par\vskip 2mm\noindent\refhg
Brown, P. J. and Griffin, J. E. (2010). Inference with normal-gamma prior distributions in regression problems.
{\it Bayesian Analysis}, 5, 171-188.
\par\vskip 2mm\noindent\refhg
Bovy, J., Hogg, D. W. and Rowies, S. T. (2011). Extreme deconvolution: inferring complete distribution functions from noisy, heterogeneous and incomplete observations.
{\it Annals of Applied Statistics}, 5, 1657-1677.
\par\vskip 2mm\noindent\refhg
Buonaccorsi, J. P. (2010). \emph{Measurement Error: Models, Methods and Applications}.
New York: \emph{Chapman and Hall/CRC}.
\par\vskip 2mm\noindent\refhg
Carroll, R. J. and Hall, P. (1988). Optimal rates of convergence for deconvolving a density.
{\it Journal of the American Statistical Association}, 83, 1184-1186.
\par\vskip 2mm\noindent\refhg
Carroll, R. J. and Hall, P. (2004). Low order approximations in deconvolution and regression with errors in variables.
{\it Journal of the Royal Statistical Society, Series B}, 66, 31-46.
\par\vskip 2mm\noindent\refhg
Carroll, R. J., Ruppert, D., Stefanski, L. A. and Crainiceanu, C. M. (2006).
\emph{Measurement Error in Nonlinear Models} (2nd ed.). Boca Raton: \emph{Chapman and Hall/CRC Press}.
\par\vskip 2mm\noindent\refhg
Carvalho, M. C., Polson, N. G. and Scott, J. G. (2010). The horseshoe estimator for sparse signals.
{\it Biometrika}, 97, 465-480.
\par\vskip 2mm\noindent\refhg
Comte, F. and Lacour, C. (2013). Anisotropic adaptive density deconvolution.
\emph{Annales de l'Institut Henri Poincar\'e - Probabilit\'{e}s et Statistiques}, 49, 569-609.
\par\vskip 2mm\noindent\refhg
Devroye, L. (1989). Consistent deconvolution in density estimation.
{\it Canadian Journal of Statistics}, 17, 235-239.
\par\vskip 2mm\noindent\refhg
Diggle, P. J. and Hall, P. (1993). A Fourier approach to nonparametric deconvolution of a density estimate.
{\it Journal of the Royal Statistical Society, Series B}, 55, 523-531.
\par\vskip 2mm\noindent\refhg
Eilers, P. H. C. and Marx, B. D. (1996). Flexible smoothing with B-splines and penalties.
{\it Statistical Science}, 11, 89-121.
\par\vskip 2mm\noindent\refhg
Fan, J. (1991a). On the optimal rates of convergence for nonparametric deconvolution problems.
{\it Annals of Statistics}, 19, 1257-1272.
\par\vskip 2mm\noindent\refhg
Fan, J. (1991b). Global behavior of deconvolution kernel estimators.
{\it Statistica Sinica}, 1, 541-551.
\par\vskip 2mm\noindent\refhg
Fan, J. (1992). Deconvolution with supersmooth distributions.
{\it Canadian Journal of Statistics}, 20, 155-169.
\par\vskip 2mm\noindent\refhg
Fokou\'{e}, E. and Titterington, D. M. (2003). Mixtures of factor analyzers. Bayesian estimation and inference by stochastic simulation.
\emph{Machine Learning}, 50, 73-94.
\par\vskip 2mm\noindent\refhg
Fox, E. B. and Dunson, D. (2016). Bayesian nonparametric covariance regression.
To appear in \emph{Journal of Machine Learning Research}.
\par\vskip 2mm\noindent\refhg
Fr\"{u}hwirth-Schnatter, S. (2006). \emph{Finite Mixture and Markov Switching Models}.
New York: \emph{Springer}.
\par\vskip 2mm\noindent\refhg
Geweke, J. (2007). Interpretation and inference in mixture models: Simple MCMC works.
{\it Computational Statistics and Data Analysis}, 51, 3529-3550.
\par\vskip 2mm\noindent\refhg
Hazelton, M.L. and Turlach, B.A. (2009). Nonparametric density deconvolution by weighted kernel estimators.
{\it Statistics and Computing}, 19, 217-228.
\par\vskip 2mm\noindent\refhg
Hazelton, M.L. and Turlach, B.A. (2010). Semiparametric density deconvolution.
{\it Scandinavian Journal of Statistics}, 37, 91-108.
\par\vskip 2mm\noindent\refhg
Hesse, C. H. (1999). Data driven deconvolution.
{\it Journal of Nonparametric Statistics}, 10, 343-373.
\par\vskip 2mm\noindent\refhg
Hoff, P. D. and Niu, X. (2012). A covariance regression model.
{\it Statistica Sinica}, 22, 729-753.
\par\vskip 2mm\noindent\refhg
Hu, Y. and Schennach, S. (2008). Instrumental variable treatment of nonclassical measurement error models. \emph{Econometrica}, 76, 195-216.
\par\vskip 2mm\noindent\refhg
Li, T. and Vuong, Q. (1998). Nonparametric estimation of the measurement error model using multiple indicators.
{\it Journal of Multivariate Analysis}, 65, 139-165.
\par\vskip 2mm\noindent\refhg
Liu, M. C. and Taylor, R. L. (1989). A consistent nonparametric density estimator for the deconvolution problem.
{\it Canadian Journal of Statistics}, 17, 427-438.
\par\vskip 2mm\noindent\refhg
Masry, E. (1991). Multivariate probability density deconvolution for stationary random processes.
{\it IEEE Transactions on Information Theory}, 37, 1105-1115.
\par\vskip 2mm\noindent\refhg
Mengersen, K. L., Robert, C. P. and Titterington, D. M. (eds) (2011). \emph{Mixtures - Estimation and Applications}.
Chichester: \emph{John Wiley}.
\par\vskip 2mm\noindent\refhg
Neumann, M. H. (1997). On the effect of estimating the error density in nonparametric deconvolution.
{\it Journal of Nonparametric Statistics}, 7, 307-330.
\par\vskip 2mm\noindent\refhg
Sarkar, A., Mallick, B. K., Staudenmayer, J., Pati, D. and Carroll, R. J. (2014).
Bayesian semiparametric density deconvolution in the presence of conditionally heteroscedastic measurement errors.
{\it Journal of Computational and Graphical Statistics}, 23, 1101-1125.
\par\vskip 2mm\noindent\refhg
Schennach, S. (2004). Nonparametric regression in the presence of measurement error.
{\it Econometric Theory}, 20, 1046-1093.
\par\vskip 2mm\noindent\refhg
Staudenmayer, J., Ruppert, D. and Buonaccorsi, J. P. (2008). Density estimation in the presence of heteroscedastic measurement error.
{\it Journal of the American Statistical Association}, 103, 726-736.
\par\vskip 2mm\noindent\refhg
Subar, A. F., Thompson, F. E., Kipnis, V., Midthune, D., Hurwitz, P., McNutt, S., McIntosh, A. and Rosenfeld, S. (2001).
Comparative validation of the Block, Willett, and National Cancer Institute food frequency questionnaires.
{\it American Journal of Epidemiology}, 154, 1089-1099.
\par\vskip 2mm\noindent\refhg
Youndj\'e, E. and Wells, M. T. (2008). Optimal bandwidth selection for multivariate kernel deconvolution.
\emph{TEST}, 17, 138-162.
\newgeometry{left=2cm,right=2.5cm,top=2.5cm,bottom=0.1cm}
\thispagestyle{empty}
\begin{table}[!ht]\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c c c|}
\hline
\multirow{2}{75pt}{True Error Distribution} & \multirow{2}{50pt}{Covariance Structure} & \multirow{2}{*}{Sample Size} & \multicolumn{3}{|c|}{MISE $\times 10^4$} \\ \cline{4-6}
& & & MLFA & MIW & Naive \\ \hline\hline
\multirow{8}{75pt}{(a) Multivariate Normal} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{1.24} & 3.05 & 8.01 \\
& & 1000 & \bf{0.59} & 1.33 & 6.58 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering LF} & 500 & 6.88 & \bf{6.33} & 33.41 \\
& & 1000 & 5.15 &\bf{3.10} & 32.42 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering AR} & 500 & 11.91 & \bf{5.51} & 27.17 \\
& & 1000 & 9.82 & \bf{2.78} & 26.01 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering EXP} & 500 & 7.15 & \bf{4.40} & 17.82 \\
& & 1000 & 5.46 & \bf{2.19} & 17.40 \\ \hline
\multirow{8}{75pt}{(b) Mixture of Multivariate Normal} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{1.28} & 3.24 & 5.97 \\
& & 1000 & \bf{0.64} & 1.37 & 4.99 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering LF} & 500 & \bf{7.28} & 7.51 & 31.62 \\
& & 1000 & \bf{4.17} & 4.34 & 31.48 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering AR} & 500 & 10.43 & \bf{6.66} & 30.74 \\
& & 1000 & 7.75 & \bf{4.35} & 28.90 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering EXP} & 500 & 7.16 & \bf{5.18} & 17.85 \\
& & 1000 & 4.87 & \bf{2.66} & 17.26 \\ \hline
\end{tabular}
\caption{\baselineskip=10pt Mean integrated squared error (MISE) performance
of MLFA (mixtures of latent factor analyzers) and MIW (mixtures with inverse Wishart priors) density deconvolution models described in Section \ref{sec: mvt density deconvolution models} of this article
for {\bf homoscedastic} errors
compared with a naive method that ignores measurement errors
for different measurement error distributions.
The minimum value in each row is highlighted.
}
\label{tab: mvt MISEs homoscedastic}
\end{center}
\end{table}
\thispagestyle{empty}
\begin{table}[!ht]\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c c c|}
\hline
\multirow{2}{75pt}{True Error Distribution} & \multirow{2}{50pt}{Covariance Structure} & \multirow{2}{*}{Sample Size} & \multicolumn{3}{|c|}{MISE $\times 10^4$} \\ \cline{4-6}
& & & MLFA & MIW & Naive \\ \hline\hline
\multirow{8}{75pt}{(a) Multivariate Normal} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{2.53} & 19.08 & 10.64 \\
& & 1000 & \bf{1.15} & 9.43 & 9.14 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering LF} & 500 & \bf{11.46} & 34.21 & 21.33 \\
& & 1000 & \bf{5.78} & 15.98 & 20.75 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering AR} & 500 & \bf{17.11} & 30.83 & 36.44 \\
& & 1000 & \bf{10.77} & 12.46 & 36.37 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering EXP} & 500 & \bf{11.63} & 26.99 & 24.28 \\
& & 1000 & \bf{6.67} & 10.56 & 23.36 \\ \hline
\multirow{8}{75pt}{(b) Mixture of Multivariate Normal} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{2.79} & 22.17 & 20.16 \\
& & 1000 & \bf{1.38} & 10.55 & 19.39 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering LF} & 500 & \bf{13.39} & 35.67 & 43.43 \\
& & 1000 & \bf{7.50} & 20.86 & 43.28 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering AR} & 500 & \bf{18.27} & 35.70 & 75.26 \\
& & 1000 & \bf{12.06} & 16.64 & 77.55 \\ \cline{2-6}
& \multirow{2}{50pt}{\centering EXP} & 500 & \bf{12.11} & 34.50 & 48.76 \\
& & 1000 & \bf{7.59} & 13.74 & 50.02 \\ \hline
\end{tabular}
\caption{\baselineskip=10pt Mean integrated squared error (MISE) performance
of MLFA (mixtures of latent factor analyzers) and MIW (mixtures with inverse Wishart priors) density deconvolution models described in Section \ref{sec: mvt density deconvolution models} of this article
for {\bf conditionally heteroscedastic} errors
compared with a naive method that ignores measurement errors
for different measurement error distributions.
The minimum value in each row is highlighted.
}
\label{tab: mvt MISEs heteroscedastic}
\end{center}
\end{table}
\restoregeometry
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=12cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{VFn_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_92.pdf}
\end{center}
\caption{\baselineskip=10pt Results for conditional variability $\hbox{var}(U \vert X)=s^{2}(X)\hbox{var}(\epsilon)$ produced by the univariate density deconvolution method for each component of ${\mathbf X}$ for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices. The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets for the MLFA (mixtures of latent factor analyzers) method.
For each component of ${\mathbf X}$, the true variance function is $s^{2}(X) = (1+X/4)^{2}$.
See Section \ref{sec: mvt density of heteroscedastic errors} and Section \ref{sec: mvt estimation of variance functions} for additional details.
In each panel, the true (lighter shaded green lines) and the estimated (darker shaded blue lines) variance functions
are superimposed over a plot of subject specific sample means vs subject specific sample variances.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results VFn d4 n1000 m3 MLFA X1 E1 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{XS_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_82.pdf}
\end{center}
\caption{\baselineskip=10pt Results for $f_{{\mathbf X}}$ produced by the MIW (mixtures with inverse Wishart priors) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices. The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{X_{i},X_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results XS d4 n1000 m3 MIW X1 E1 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{XS_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_92.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the $f_{{\mathbf X}}$ produced by the MLFA (mixtures of latent factor analyzers) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices. The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{X_{i},X_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results XS d4 n1000 m3 MLFA X1 E1 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{ES_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_82.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the density of the scaled measurement errors $f_{\mbox{\boldmath $\epsilon$}}$ produced by the MIW (mixtures with inverse Wishart priors) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices. The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{\epsilon_{i},\epsilon_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results ES d4 n1000 m3 MIW X1 E1 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{ES_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_92.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the density of the scaled measurement errors $f_{\mbox{\boldmath $\epsilon$}}$ produced by the MLFA (mixtures of latent factor analyzers) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices. The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{\epsilon_{i},\epsilon_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results ES d4 n1000 m3 MLFA X1 E1 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{EATS_DATA_MVT_DECON_HTR_Var_Fns_R_CARBO_R_FIBER_R_PROTEIN_R_POTASSIU.pdf}
\end{center}
\caption{\baselineskip=10pt Estimated variance functions $\hbox{var}(U\vert X) = s^{2}(X)\hbox{var}(\epsilon)$ produced by the univariate density deconvolution method for each component of ${\mathbf X}$ for the EATS data set with sample size $n=965$, $m_{i}=4$ replicates for each subject.
See Section \ref{sec: mvt data analysis} for additional details.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt EATS data results VFn}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{EATS_DATA_Plots_1_R_CARBO_R_FIBER_R_PROTEIN_R_POTASSIU_XS.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the EATS data set for the $f_{{\mathbf X}}$.
The off-diagonal panels show the contour plots of two-dimensional marginals estimated by the MIW method (upper triangular panels) and the MLFA method (lower triangular panels).
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{X_{i},X_{j}}$ are plotted in those panels.
The diagonal panels show the one dimensional marginal densities estimated by the MIW method (darker shaded blue lines)
and the MLFA method (lighter shaded green lines).
The figure is in color in the electronic version of this article.
}
\label{fig: mvt EATS data results XS}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{EATS_DATA_Plots_1_R_CARBO_R_FIBER_R_PROTEIN_R_POTASSIU_ES.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the EATS data set for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
The off-diagonal panels show the contour plots of two-dimensional marginals estimated by the MIW method (upper triangular panels) and the MLFA method (lower triangular panels).
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{\epsilon_{i},\epsilon_{j}}$ are plotted in those panels.
The diagonal panels show the one dimensional marginal densities estimated by the MIW method (darker shaded blue lines)
and the MLFA method (lighter shaded green lines).
The figure is in color in the electronic version of this article.
}
\label{fig: mvt EATS data results ES}
\end{figure}
\pagebreak
\pagestyle{fancy}
\fancyhf{}
\rhead{\bfseries S.\arabic{page}}
\lhead{\bfseries SUPPLEMENTARY MATERIALS}
\baselineskip 15pt
\begin{center}
{\Large{Supplementary Materials}
for\\ {\bf Bayesian Semiparametric Multivariate Density Deconvolution}}
\end{center}
\setcounter{equation}{0}
\setcounter{page}{1}
\setcounter{table}{1}
\setcounter{figure}{0}
\setcounter{section}{0}
\numberwithin{table}{section}
\renewcommand{\theequation}{S.\arabic{equation}}
\renewcommand{\thesubsection}{S.\arabic{section}.\arabic{subsection}}
\renewcommand{\thesection}{S.\arabic{section}}
\renewcommand{\thepage}{S.\arabic{page}}
\renewcommand{\thetable}{S.\arabic{table}}
\renewcommand{\thefigure}{S.\arabic{figure}}
\baselineskip=15pt
\begin{center}
Abhra Sarkar\\
Department of Statistical Science, Duke University, Durham, NC 27708-0251, USA\\
[email protected] \\
Debdeep Pati\\
Department of Statistics, Florida State University, Tallahassee, FL 32306-4330, USA\\
[email protected]\\
Bani K. Mallick\\% and Raymond J. Carroll\\
Department of Statistics, Texas A\&M University, 3143 TAMU, College Station,\\ TX 77843-3143, USA\\
[email protected]\\% and [email protected]\\
Raymond J. Carroll\\
Department of Statistics, Texas A\&M University, 3143 TAMU, College Station,\\ TX 77843-3143, USA\\
and School of Mathematical and Physical Sciences, University of Technology Sydney, Broadway NSW 2007, Australia\\
[email protected]\\
\end{center}
\baselineskip=14pt
The Supplementary Materials are organized as follows.
Section \ref{sec: mvt choice of hyper-parameters} discusses the choice of hyper-parameters.
In Section \ref{sec: mvt posterior computation}, we describe a Gibbs sampler for drawing samples from the posterior
of the deconvolution model for multivariate independently distributed homoscedastic errors, described in Section \ref{sec: mvt density of homoscedastic errors} of the main paper.
In Section \ref{sec: mvt estimation of variance functions}, we detail a two stage estimation procedure for drawing samples from the posterior
of the deconvolution model for multivariate conditionally heteroscedastic measurement errors described in Section \ref{sec: mvt density of heteroscedastic errors} of the main paper.
Section \ref{sec: mvt two-stage sampler} provides heuristic justification for the two-stage sampler.
In Section \ref{sec: mvt comments on the model for U given X}, we provide additional detailed discussion
of the model for multivariate conditionally heteroscedastic measurement errors described in Section \ref{sec: mvt density of heteroscedastic errors} of the main paper,
contrasting it with models for multivariate conditionally varying regression errors (Section \ref{sec: mvt regression vs measurement errors}),
its connections with latent factor models (Section \ref{sec: latent factor models for different covariance classes}),
its flexibility, limitations, and plausible generalizations (Section \ref{sec: mvt cov mat model}),
and tools for model adequacy checks (Section \ref{sec: mvt model adequacy checks}).
Section \ref{sec: mvt finite vs infinite mixture models} presents our arguments in favor of finite mixture models,
pointing out how their close connections and their subtle differences with possible infinite dimensional alternatives
are exploited to achieve significant reduction in computational complexity (Section \ref{sec: mvt computational complexity})
while retaining the major advantages of infinite dimensional mixture models including model flexibility (Section \ref{sec: mvt supp mat model flexibility})
and automated model selection and model averaging (Section \ref{sec: mvt model selection and model averaging}).
Section \ref{sec: mvt proofs of theoretical results} details proofs of the theoretical results presented in Section \ref{sec: mvt model flexibility} of the main paper.
Section \ref{sec: mvt additional figures} presents additional figures related to the simulation experiments discussed in Section \ref{sec: mvt simulation studies} of the main paper.
Section \ref{sec: mvt additional simulation studies} presents results of additional simulation experiments.
Section \ref{sec: mvt potential impact} discusses potentially far-reaching impact of our work in nutritional epidemiology.
\baselineskip=17pt
\section{Choice of Hyper-Parameters} \label{sec: mvt choice of hyper-parameters}
We discuss the choice of hyper-parameters in this section.
To avoid unnecessary repetition, in this section and onwards,
symbols sans the subscripts ${\mathbf X}$ and $\mbox{\boldmath $\epsilon$}$ are sometimes used as generics for similar components and parameters of the models.
For example, $K$ is a generic for $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$; $\mbox{\boldmath $\mu$}_{k}$ is a generic for $\mbox{\boldmath $\mu$}_{{\mathbf X},k}$ and $\mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},k}$; and so on.
\begin{enumerate}[topsep=0pt,itemsep=-1ex,partopsep=2ex,parsep=2ex, leftmargin=0cm, rightmargin=0cm, wide=3ex]
\item {\bf Number of mixture components:}
Practical application of our method requires that a decision be made on the number of mixture components $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$ in the models for the densities $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$, respectively.
Our simulation experiments suggest that when the true densities are finite mixtures of multivariate normals and $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$ are assigned values greater than the corresponding true numbers,
the MCMC chain often quickly reaches a steady state where the redundant components become empty.
See Figures \ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 E1 Ind},
\ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MIW X1 E1 AR} and
\ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 E1 AR} in the Supplementary Materials for illustrations.
These observations are similar to those made in the context of ordinary density estimation by Rousseau and Mengersen (2011)
who studied the asymptotic behavior of the posterior for overfitted mixture models and showed that when $\alpha/K< L/2$,
where $L$ denotes the number of parameters specifying the component kernels,
the posterior is stable and concentrates in regions with empty redundant components.
We set $\alpha_{{\mathbf X}}=\alpha_{\mbox{\boldmath $\epsilon$}} =1$ so that the condition $\alpha/K < L/2$ is satisfied.
Educated guesses about $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$ may nevertheless be useful in safeguarding against gross overfitting that would result in a wastage of computation time and resources.
The following simple strategies may be employed.
Model based cluster analysis techniques as implemented by the mclust package in R (Fraley and Raftery, 2007) may be applied to
the starting values of ${\mathbf X}_{i}$ and the corresponding residuals, obtained by fitting univariate submodels for each component of ${\mathbf X}$,
to get some idea about $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$.
The chain may be started with larger values of $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$ and after a few hundred iterations the redundant empty components may be deleted on the fly.
As shown in Section \ref{sec: mvt model flexibility},
our methods can approximate a large class of data generating densities,
and we found the strategy described above to be very effective in all cases we experimented with.
The parameter $\alpha$ now plays the role of a smoothing parameter, smaller values favoring a smaller number of mixture components and thus smoother densities.
In simulation experiments involving multivariate t and multivariate Laplace distributions reported in the Supplementary Materials,
and in some other cases not reported here, the values $\alpha_{{\mathbf X}}=\alpha_{\mbox{\boldmath $\epsilon$}} =1$ worked well.
As we discuss in Section \ref{sec: mvt simulation studies}, the MIW method becomes highly numerically unstable
when the measurement errors are conditionally heteroscedastic and the true covariance matrices are highly sparse.
In these cases in particular, the MIW method usually requires much larger sample sizes for the asymptotic results to hold
and in finite samples the above mentioned strategy usually overestimates the required number of mixture components.
See Figure \ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MIW X1 E1 Ind} in the Supplementary Materials for an illustration.
Since mixtures based on $(K+1)$ components are at least as flexible as mixtures based on $K$ components, as far as model flexibility is concerned, such overestimation is not an issue.
But since this also results in clusters of smaller sizes,
the estimates of the component specific covariance matrices become numerically even more unstable,
further compounding the stability issues of the MIW model.
In contrast, for the numerically more stable MLFA model, for the exact opposite reasons,
the asymptotic results are valid for moderate sample sizes and such models are also more robust to overestimation of the number of nonempty clusters.
\item {\bf Number of latent factors:}
For the MLFA method, the MCMC algorithm summarized in Section \ref{sec: mvt posterior computation} also requires that the component specific infinite factor models be truncated at some appropriate truncation level.
The shrinkage prior again makes the model highly robust to overfitting allowing us to adopt a simple strategy.
Since a latent factor characterization leads to a reduction in the number of parameters only when $q_{k} \leq \lceil (p+1)/2 \rceil$,
where $\lceil s \rceil$ denotes the smallest integer greater than or equal to $s$,
we simply set the truncation level at $q_{k} = q = \max\{2,\lceil (p+1)/2 \rceil\}$ for all the components.
We also experimented by setting the truncation level at $q_{k} = q = p$ for all $k$ with the results remaining practically the same.
The shrinkage prior, being continuous in nature, does not set the redundant columns to exact zeroes,
but it adaptively shrinks the redundant parameters sufficiently towards zero, thus producing stable and efficient estimates of the densities being modeled.
\item {\bf Other hyper-parameters:}
We take an empirical Bayes type approach to assign values to other hyper-parameters.
We set $\mbox{\boldmath $\mu$}_{{\mathbf X},0} = \overline{{\mathbf X}}^{(0)}$, the overall mean of ${{\mathbf X}}_{1:n}^{(0)}$,
where ${\mathbf X}_{1:n}^{(0)}$ denote the starting values of ${\mathbf X}_{1:n}$ for the MCMC sampler discussed in Section \ref{sec: mvt posterior computation}.
For the scaled errors we set $\mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},0} = {\mathbf 0}$.
For the MIW model we take $\nu_{0} = (p+2)$, the smallest possible integral value of $\nu_{0}$ for which the prior mean of $\mbox{\boldmath $\Sigma$}_{k}$ exists.
We then take $\mbox{\boldmath $\Sigma$}_{{\mathbf X},0}/2 = \mbox{\boldmath $\Psi$}_{{\mathbf X},0}=\hbox{cov}(\overline{{\mathbf X}}_{1:n}^{(0)})$.
These choices imply $E(\mbox{\boldmath $\Sigma$}_{{\mathbf X},k}) = \mbox{\boldmath $\Psi$}_{{\mathbf X},0} = \hbox{cov}(\overline{{\mathbf X}}^{(0)})$
and, since the variability of each component is expected to be significantly less than the overall variability, ensure noninformativeness.
Similarly, for the scaled errors we take $\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},0}/2 = \mbox{\boldmath $\Psi$}_{\mbox{\boldmath $\epsilon$},0} = \hbox{cov}(\mbox{\boldmath $\epsilon$}_{1:N}^{(0)})$.
For the MLFA model, the hyper-parameters specifying the prior for $\mbox{\boldmath $\Lambda$}$ are set at $a_{1} = 1, a_{h} = 2$ for all $h\geq2$, and $\nu = 1$.
Inverse gamma priors with parameters $a_{\sigma} = 1.1, b_{\sigma} = 1$ are placed on the elements of $\mbox{\boldmath $\Omega$}$.
For each $k$, the variance functions were modeled using quadratic (q=2) B-splines based on $(2\times2+5+1)=10$ equidistant knot points
on $[A_{k},B_{k}] = [\hbox{min}(\overline{{\mathbf W}}_{k,1:n})-0.1~\hbox{range}(\overline{{\mathbf W}}_{k,1:n}),\hbox{max}(\overline{{\mathbf W}}_{k,1:n})+0.1~\hbox{range}(\overline{{\mathbf W}}_{k,1:n})]$,
where $\overline{{\mathbf W}}_{k,1:n}$ denotes the subject specific means corresponding to the $k^{th}$ component.
\end{enumerate}
\section{Posterior Computation} \label{sec: mvt posterior computation}
Samples from the posterior can be drawn using Gibbs sampling techniques.
In what follows $\mbox{\boldmath $\zeta$}$ denotes a generic variable that collects the observed proxies ${\mathbf W}_{1:N}$
and all the parameters of a model, including the imputed values of ${\mathbf X}_{1:n}$ and $\mbox{\boldmath $\epsilon$}_{1:N}$, that are not explicitly mentioned.
Carefully chosen starting values can facilitate convergence of the sampler.
The posterior means of the $X_{i\ell}$'s, obtained by fitting univariate submodels,
are used as the starting values for the multivariate sampler.
The number of mixture components are initialized at $K_{{\mathbf X}}=(m_{{\mathbf X}}+2)$, where $m_{{\mathbf X}}$ denotes the optimal number of clusters
returned by model based clustering algorithm implemented by the mclust package in R applied to the corresponding initial values ${\mathbf X}_{1:n}^{(0)}$.
The component specific mean vectors of the nonempty clusters are set at the mean of ${\mathbf X}_{i}^{(0)}$ values that belong to that cluster.
The component specific mean vectors of the two empty clusters are set at $\overline{{\mathbf X}}^{(0)}$, the overall mean of ${{\mathbf X}}_{1:n}^{(0)}$.
For the MIW model, the initial values of the cluster specific covariance matrices are chosen in a similar fashion.
The mixture probability for the $k^{th}$ nonempty cluster is set at $\mbox{\boldmath $\pi$}_{{\mathbf X},k} = n_{k}/n$, where $n_{k}$ denotes the number of ${\mathbf X}_{i}^{(0)}$ belonging to the $k^{th}$ cluster.
The mixture probabilities of the empty clusters are initialized at zero.
For the MLFA method, the starting values of all elements of $\mbox{\boldmath $\Lambda$}$ and $\mbox{\boldmath $\eta$}$ are set at zero.
The starting values for the elements of $\mbox{\boldmath $\Omega$}$ are chosen to equal the variances of the corresponding starting values.
The parameters specifying the density of the scaled errors are initialized in a similar manner.
The MCMC iterations comprise the following steps.
We suppress the subscript $\mbox{\boldmath $\epsilon$}$ to keep the notation clean as in the main paper.
\begin{enumerate}[topsep=0pt,itemsep=-1ex,partopsep=2ex,parsep=2ex, leftmargin=0cm, rightmargin=0cm, wide=3ex]
\item {\bf Updating the parameters specifying $f_{{\mathbf X}}$:}
For the MIW model the parameters specifying the density $f_{{\mathbf X}}$ are updated using the following steps.
\begin{eqnarray*}
(\mbox{\boldmath $\pi$}\vert \mbox{\boldmath $\zeta$}) &\sim& {\cal D}ir(\alpha/K+n_{1},\alpha/K+n_{2},\dots,\alpha/K+n_{K}), \\
(C_{i}\vert \mbox{\boldmath $\zeta$}) &\sim& {\cal M}ult(1, p_{i1},p_{i2},\dots,p_{iK}),\\
(\mbox{\boldmath $\mu$}_k\vert \mbox{\boldmath $\zeta$}) &\sim& {\cal M}VN_{p}(\mbox{\boldmath $\mu$}_{k}^{(n)},\mbox{\boldmath $\Sigma$}_{k}^{(n)}), \\
(\mbox{\boldmath $\Sigma$}_{k} \vert \mbox{\boldmath $\zeta$}) &\sim& \hbox{IW}_{p}\{n_{k}+\nu_{0},\textstyle\sum_{i:C_{i}=k}({\mathbf X}_{i}-\mbox{\boldmath $\mu$}_{k})({\mathbf X}_{i}-\mbox{\boldmath $\mu$}_{k})^{\rm T}+\mbox{\boldmath $\Psi$}_{0}\},
\end{eqnarray*}
where
$n_{k} = \sum_{i}1(C_{i}=k)$,
$p_{ik} \propto \pi_{k} \times {\cal M}VN_{p}({\mathbf X}_{i}\vert\mbox{\boldmath $\mu$}_{k},\mbox{\boldmath $\Sigma$}_{k})$,
$\mbox{\boldmath $\Sigma$}_{k}^{(n)} = (\mbox{\boldmath $\Sigma$}_{0}^{-1}+n_{k}\mbox{\boldmath $\Sigma$}_{k}^{-1})^{-1}$
and $\mbox{\boldmath $\mu$}_{k}^{(n)} = \mbox{\boldmath $\Sigma$}_{k}^{(n)} \left\{\mbox{\boldmath $\Sigma$}_{k}^{-1}\textstyle\sum_{i:C_{i}=k}{\mathbf X}_{i}+\mbox{\boldmath $\Sigma$}_{0}^{-1}\mbox{\boldmath $\mu$}_{0}\right\}$.
To update the parameters specifying the covariance matrices in the MLFA model, the sampler cycles through the following steps.
\begin{eqnarray*}
(\mbox{\boldmath $\lambda$}_{k,j}\vert \mbox{\boldmath $\zeta$}) &\sim& {\cal M}VN_{q}\{({\mathbf D}_{k,j}^{-1}+\sigma_{j}^{-2}\mbox{\boldmath $\eta$}_{k}^{\rm T}\mbox{\boldmath $\eta$}_{k})^{-1}\sigma_{j}^{-2}\mbox{\boldmath $\eta$}_{k}^{\rm T}({\mathbf X}_{k}^{(j)}-\mbox{\boldmath $\mu$}_{k}^{(j)}),({\mathbf D}_{k,j}^{-1}+\sigma_{j}^{-2}\mbox{\boldmath $\eta$}_{k}^{\rm T}\mbox{\boldmath $\eta$}_{k})^{-1}\}, \\
(\mbox{\boldmath $\eta$}_{i}\vert C_{i}=k,\mbox{\boldmath $\zeta$}) &\sim& {\cal M}VN_{q}\{(\hbox{I}_{q}+\mbox{\boldmath $\Lambda$}_{k}^{\rm T}\mbox{\boldmath $\Omega$}^{-1}\mbox{\boldmath $\Lambda$}_{k})^{-1}\mbox{\boldmath $\Lambda$}_{k}^{\rm T}\mbox{\boldmath $\Omega$}^{-1}({\mathbf X}_{i}-\mbox{\boldmath $\mu$}_{k}),(\hbox{I}_{q}+\mbox{\boldmath $\Lambda$}_{k}^{\rm T}\mbox{\boldmath $\Omega$}^{-1}\mbox{\boldmath $\Lambda$}_{k})^{-1}\}, \\
(\sigma_{j}^{2}\vert \mbox{\boldmath $\zeta$}) &\sim& \hbox{Inv-Ga} \left\{a_{\sigma}+n/2,b_{\sigma}+(1/2)\textstyle\sum_{i=1}^{n}(X_{ij}-\mbox{\boldmath $\mu$}_{C_{i},j}-\mbox{\boldmath $\lambda$}_{C_i,j}^{\rm T}\mbox{\boldmath $\eta$}_{i})^{2}\right\}, \\
(\phi_{k,jh} \vert \mbox{\boldmath $\zeta$}) &\sim& {\cal G}a\{(\nu+1)/2,(\nu+\tau_{k,h}\lambda_{k,jh}^{2})/2\}, \\
(\delta_{k,h}\vert \mbox{\boldmath $\zeta$}) &\sim& \textstyle {\cal G}a\{a_{h}+p(q-h+1)/2,1+\sum_{\ell=1}^{q}\tau_{k,\ell}^{(h)} \sum_{j=1}^{p}\phi_{k,j\ell}\lambda_{k,j\ell}^{2}/2\},
\end{eqnarray*}
where
$D_{k,j}^{-1} = \hbox{diag}(\phi_{k,j1}\tau_{k,1},\dots,\phi_{k,jq}\tau_{k,q})$,
$\tau_{k,\ell}^{(h)} = \prod_{t=1,t\neq h}^{\ell}\delta_{k,t}$,
${\mathbf X}_{k}^{(j)} = (X_{i_{1} j},X_{i_{2} j},\dots,X_{i_{n_k} j})^{\rm T}$,
$\mbox{\boldmath $\eta$}_{k}^{n_{k}\times q} = (\mbox{\boldmath $\eta$}_{i_{1}},\mbox{\boldmath $\eta$}_{i_{2}},\dots,\mbox{\boldmath $\eta$}_{i_{n_k}})^{\rm T}$, $\{i_{1}, i_{2},\dots,i_{n_{k}}\} = \{i: C_{i}=k\}$.
\item {\bf Updating the parameters specifying $f_{\mbox{\boldmath $\epsilon$}}$:}
The unconstrained full conditionals of the parameters specifying $f_{\mbox{\boldmath $\epsilon$}}$ are very similar.
For instance, for the MIW model they are given by
\begin{eqnarray*}
(\mbox{\boldmath $\pi$}\vert \mbox{\boldmath $\zeta$}) &\sim& {\cal D}ir(\alpha/K+N_{1},\alpha/K+N_{2},\dots,\alpha/K+N_{K}), \\
(C_{ij}\vert \mbox{\boldmath $\zeta$}) &\sim& {\cal M}ult(1, p_{ij1},p_{ij2},\dots,p_{ijK}),\\
(\mbox{\boldmath $\mu$}_k\vert \mbox{\boldmath $\zeta$}) &\sim& {\cal M}VN_{p}(\mbox{\boldmath $\mu$}_{k}^{(N)},\mbox{\boldmath $\Sigma$}_{k}^{(N)}), \\
(\mbox{\boldmath $\Sigma$}_{k} \vert \mbox{\boldmath $\zeta$}) &\sim& \hbox{IW}_{p}\{N_{k}+\nu_{0},\textstyle\sum_{ij:C_{ij}=k}(\mbox{\boldmath $\epsilon$}_{ij}-\mbox{\boldmath $\mu$}_{k})(\mbox{\boldmath $\epsilon$}_{ij}-\mbox{\boldmath $\mu$}_{k})^{\rm T}+\mbox{\boldmath $\Psi$}_{0}\},
\end{eqnarray*}
where
$N_{k} = \sum_{i,j}1(C_{ij}=k)$,
$p_{ijk} \propto \pi_{k} \times {\cal M}VN_{p}(\mbox{\boldmath $\epsilon$}_{ij}\vert\mbox{\boldmath $\mu$}_{k},\mbox{\boldmath $\Sigma$}_{k})$,
$\mbox{\boldmath $\Sigma$}_{k}^{(N)} = (\mbox{\boldmath $\Sigma$}_{0}^{-1}+N_{k}\mbox{\boldmath $\Sigma$}_{k}^{-1})^{-1}$
and $\mbox{\boldmath $\mu$}_{k}^{(N)} = \mbox{\boldmath $\Sigma$}_{k}^{(N)} \left\{\mbox{\boldmath $\Sigma$}_{k}^{-1}\textstyle\sum_{ij:C_{ij}=k}\mbox{\boldmath $\epsilon$}_{ij}+\mbox{\boldmath $\Sigma$}_{0}^{-1}\mbox{\boldmath $\mu$}_{0}\right\}$.
Samples from the constrained posterior $(\{\mbox{\boldmath $\mu$}_{k}\}_{k=1}^{K} \vert \sum_{k=1}^{K}\pi_{k}\mbox{\boldmath $\mu$}_{k}=0, \mbox{\boldmath $\zeta$})$
are then obtained from the unconstrained full conditionals $(\mbox{\boldmath $\mu$}_{k} \vert \mbox{\boldmath $\zeta$})$ given above
using the simple additional steps described in Section \ref{sec: mvt density of heteroscedastic errors} of the main paper.
The steps to update the parameters specifying the covariance matrices in the MLFA model are similarly obtained and are excluded.
\item {\bf Updating the values of ${\mathbf X}$:}
When the measurement errors are independent of ${\mathbf X}$, the ${\mathbf X}_{i}$ have closed form full conditionals given by
\begin{eqnarray*}
({\mathbf X}_{i}\vert C_{{\mathbf X},i}=k,C_{\mbox{\boldmath $\epsilon$},i1}=k_{1},\dots, C_{\mbox{\boldmath $\epsilon$},im_{i}}=k_{m_{i}},\mbox{\boldmath $\zeta$}) &\sim& {\cal M}VN_{p}(\mbox{\boldmath $\mu$}_{{\mathbf X}}^{(n)},\mbox{\boldmath $\Sigma$}_{{\mathbf X}}^{(n)}),
\end{eqnarray*}
where
$\mbox{\boldmath $\Sigma$}_{{\mathbf X}}^{(n)} = (\mbox{\boldmath $\Sigma$}_{{\mathbf X},k}^{-1} + \sum_{j=1}^{m_{i}}\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},k_{j}}^{-1})^{-1}$
and $\mbox{\boldmath $\mu$}_{{\mathbf X}}^{(n)} = \mbox{\boldmath $\Sigma$}_{{\mathbf X}}^{(n)} (\mbox{\boldmath $\Sigma$}_{{\mathbf X},k}^{-1}\mbox{\boldmath $\mu$}_{{\mathbf X},k} + \sum_{j=1}^{m_{i}}\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},k_{j}}^{-1}{\mathbf W}_{ij})$.
For conditionally heteroscedastic measurement errors, the full conditionals are given by
\begin{eqnarray*}
&&\hspace{-1cm}({\mathbf X}_{i}\vert C_{{\mathbf X},i}=k,C_{\mbox{\boldmath $\epsilon$},i1}=k_{1},\dots, C_{\mbox{\boldmath $\epsilon$},im_{i}}=k_{m_{i}},\mbox{\boldmath $\zeta$}) \\
&&\propto {\cal M}VN_{p}({\mathbf X}_{i}\vert \mbox{\boldmath $\mu$}_{{\mathbf X},k},\mbox{\boldmath $\Sigma$}_{{\mathbf X},k}) \times \textstyle\prod_{j=1}^{m_{i}} {\cal M}VN_{p}\{{\mathbf W}_{ij} \vert {\mathbf X}_{i}+{\mathbf S}({\mathbf X}_{i}) \mbox{\boldmath $\mu$}_{\mbox{\boldmath $\epsilon$},k_{j}},{\mathbf S}({\mathbf X}_{i})\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$},k_{j}}{\mathbf S}({\mathbf X}_{i})\}.
\end{eqnarray*}
The full conditionals do not have closed forms.
Metropolis-Hastings (MH) steps with multivariate truncated normal proposals are used within the Gibbs sampler.
\item {\bf Updating the parameters specifying $s_{\ell}$: }
When the measurement errors are conditionally heteroscedastic,
we first estimate the variance functions $s_{\ell}^{2}(X_{i\ell})$ by fitting univariate submodels $W_{ij\ell} = X_{i\ell} + s_{\ell}(X_{i\ell})\epsilon_{ij\ell}$ for each $\ell$.
The details are provided in Section \ref{sec: mvt estimation of variance functions}.
The parameters characterizing other components of the full model are then sampled using the Gibbs sampler described above, keeping the estimates of the variance functions fixed.
An alternative class of algorithms integrates out the mixture probabilities $\mbox{\boldmath $\pi$}$ and works with the resulting Polya urn scheme (Neal, 2000).
We did not consider such algorithms as they render the labels $C_{i}$ a-priori dependent, requiring the prior conditionals $(C_{i}\vert {\mathbf C}_{-i})$ to be recomputed each time any $C_{i}$ is updated.
Importantly, we also need the sampled values of $\mbox{\boldmath $\pi$}$ to enforce the zero mean restriction $\sum_{k=1}^{K}\pi_{k}\mbox{\boldmath $\mu$}_{k}=0$ on the measurement errors.
\end{enumerate}
\section{Estimation of the Variance Functions} \label{sec: mvt estimation of variance functions}
When the measurement errors are conditionally heteroscedastic,
we need to update the parameters $\mbox{\boldmath $\xi$}_{\ell}$ that specify the variance functions $s_{\ell}^{2}(X_{i\ell})$.
These parameters do not have closed form full conditionals.
MCMC algorithms, where we tried to integrate MH steps for $\mbox{\boldmath $\xi$}_{\ell}$ with the sampler for the parameters specifying $f_{\mbox{\boldmath $\epsilon$}}$,
were numerically unstable and failed to converge sufficiently quickly.
We need to supply the values of the scaled errors $\epsilon_{ij\ell}$ to step 2 of the algorithm described in Section \ref{sec: mvt posterior computation}
and the instability stems from the operation $\mbox{\boldmath $\epsilon$}_{ij} = {\mathbf S}({\mathbf X}_{i})^{-1}{\mathbf U}_{ij}$ required to calculate the scaled residuals $\epsilon_{ij\ell}$,
as we try to divide $U_{ij\ell}$ by the quantity $s_{\ell}(X_{i\ell})$, which may be very small for certain values of $X_{i\ell}$,
for example, for values of $X_{i\ell}$ near zero for the EATS data application. See Figure \ref{fig: mvt EATS data results VFn}.
To solve the problem, we adopt a novel two-stage procedure.
First, for each $k$, we estimate the functions $s_{\ell}^{2}(X_{i\ell})$ by fitting the univariate submodels $W_{ij\ell} = X_{i\ell} + s_{\ell}(X_{i\ell})\epsilon_{ij\ell}$.
The problem of numerical instability arising out of the operation to determine the values of the scaled errors remains in these univariate subproblems too.
But the following lemma from Pelenis (2014), presented here for easy reference, provides us with an escape route by allowing us to avoid this operation in the first place.
\begin{Lem}\label{Lem: mvt lemma from Pelenis}
Let $\mbox{\boldmath $\theta$}_{1:K} = \{(\pi_{k},\mu_{k},\sigma_{k}^{2})\}_{k=1}^{K}$ be such that
\begin{eqnarray}
\textstyle f_{1}(\epsilon\vert\mbox{\boldmath $\theta$}_{1:K}) = \sum_{k=1}^{K} \pi_{k}~{\cal N}ormal(\epsilon\vert \mu_{k},\sigma_{k}^{2}), ~~~\hbox{with}~~ \sum_{k=1}^{K} \pi_{k} = 1, ~~~ \sum_{k=1}^{K} \pi_{k}\mu_{k} = 0.
\label{eq: mvt density of scaled errors model 1}
\end{eqnarray}
Then there exists a set of parameters $\mbox{\boldmath $\theta$}_{1:(K-1)}^{\star}=\{(\pi_{k}^{\star},p_{k,r}^{\star},\mu_{k,r}^{\star},\sigma_{k,r}^{\star 2})\}_{r=1,k=1}^{2,K-1}$ such that
\begin{eqnarray}
&&\textstyle \hspace{-0.85cm} f_{1}(\epsilon\vert\mbox{\boldmath $\theta$}_{1:K}) = f_{2}(\epsilon\vert \mbox{\boldmath $\theta$}_{1:(K-1)}^{\star}) = \sum_{k=1}^{K-1} \pi_{k}^{\star}~\sum_{r=1}^{2} p_{k,r}^{\star}{\cal N}ormal(\epsilon\vert \mu_{k,r}^{\star},\sigma_{k,r}^{\star 2}), \label{eq: mvt density of scaled errors model 2}\\
&&\textstyle \sum_{k=1}^{K-1} \pi_{k}^{\star} = 1, ~~~ \sum_{r=1}^{2} p_{k,r}^{\star} = 1, ~~~ \sum_{r=1}^{2} p_{k,r}^{\star}\mu_{k,r}^{\star} = 0~ \forall k. \nonumber
\end{eqnarray}
\end{Lem}
Lemma \ref{Lem: mvt lemma from Pelenis} implies that the univariate submodels for the density of the scaled errors given by (\ref{eq: mvt density of scaled errors model 1})
has a reparametrization (\ref{eq: mvt density of scaled errors model 2}) where each component is itself a two-component normal mixture with its mean restricted at zero.
The reparametrization (\ref{eq: mvt density of scaled errors model 2}) thus replaces the zero mean restriction on (\ref{eq: mvt density of scaled errors model 1})
by similar restrictions on each of its components.
These restrictions also imply that each mixture component in (\ref{eq: mvt density of scaled errors model 2}) can be further reparametrized by only four free parameters.
One such parametrization could be in terms of $\widetilde{\mbox{\boldmath $\theta$}}_{k} = (\widetilde{p}_{k},\widetilde\mu_{k},\widetilde\sigma_{k,1}^{2},\widetilde\sigma_{k,2}^{2})$,
where $(p_{k,1}^{\star},\sigma_{k,1}^{\star2},\sigma_{k,2}^{\star 2}) = (\widetilde{p}_{k},\widetilde\sigma_{k,1}^{2},\widetilde\sigma_{k,2}^{2})$
and $\mu_{k,r}^{\star}=c_{k,r}\widetilde{\mu}_{k}$,
where $c_{k,1}=(1-\widetilde{p}_{k})/\{\widetilde{p}_{k}^2+(1-\widetilde{p}_{k})^{2}\}^{1/2}$ and $c_{k,2} = -\widetilde{p}_{k}/\{\widetilde{p}_{k}^2+(1-\widetilde{p}_{k})^{2}\}^{1/2}$.
Letting $p_{0}$ denote the prior assigned to $\widetilde{\mbox{\boldmath $\theta$}}_{k}$, the full conditional of
$\widetilde{\mbox{\boldmath $\theta$}}_{k}$ in terms of the conditional likelihood $f_{U\vert X}$ is proportional to
$p_{0} (\widetilde{\mbox{\boldmath $\theta$}}_{k}) \textstyle\prod_{ij: C_{\epsilon,ij\ell}=k}f_{U\vert X}(U_{ij\ell}\vert X_{i\ell}, \mbox{\boldmath $\xi$}_{\ell}, \widetilde{\mbox{\boldmath $\theta$}}_{k}, \mbox{\boldmath $\zeta$})$.
The problem of numerical instability can now be tackled by using MH steps
to update not only the parameters $\mbox{\boldmath $\xi$}_{\ell}$ specifying the variance functions but also the parameters $\{\widetilde{\mbox{\boldmath $\theta$}}_{k}\}_{k}$ characterizing the density $f_{\epsilon}$
using the conditional likelihood $f_{U\vert X}$ (and not $f_{\epsilon}$ itself), thus escaping the need to separately determine the values of the scaled errors.
The priors and the hyper-parameters for the univariate submodels are chosen following the suggestions of Sarkar, et al. (2014)
who used an infinite dimensional extension of this reparametrized finite dimensional submodel.
The strategy of exploiting the properties of overfitted mixture models to determine the number of mixture components described in Section \ref{sec: mvt choice of hyper-parameters}
can also be applied to the univariate subproblems.
High precision estimates of the variance functions can be obtained using these reparametrized finite dimensional univariate deconvolution models.
See Figure \ref{fig: mvt simulation results VFn d4 n1000 m3 MLFA X1 E1 Ind}
and also Figures \ref{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 E1 AR} and \ref{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 HT_E0 Ind} in the Supplementary Materials for illustrations.
A similar reparametrization exists for the multivariate problem too,
but the strategy would not be very effective in a multivariate set up as it would require updating the mean vectors and the covariance matrices involved in $f_{\mbox{\boldmath $\epsilon$}}$ through MH steps
which are not efficient in simultaneous updating of large numbers of parameters.
After estimating the parameters characterizing the variance functions from the univariate submodels,
we therefore keep these estimates fixed and sample the other parameters using the Gibbs sampler described in Section \ref{sec: mvt posterior computation}.
Additional details follow.
As discussed in Section \ref{sec: mvt density of heteroscedastic errors} of the main paper,
the variance functions $s_{\ell}^{2}$'s can not be uniquely determined
without additional identifiability restrictions on the variance of $\epsilon_{ij\ell}$.
This, however, does not pose any problem to assess $\hbox{var}(U_{ij\ell}\vert X_{i\ell})$ which can be estimated as
$\widehat{v}_{\ell}(X_{i\ell}) = \sum_{m=1}^{M} v_{\ell}^{(m)}(X_{i\ell}) \hbox{var}^{(m)}(\epsilon_{ij\ell})/M$,
where $v_{\ell}^{(m)}(X_{i\ell})$
and $\hbox{var}^{(m)}(\epsilon_{ij\ell})$ are estimates of $s_{\ell}^{2}(X_{i\ell})$ and $\hbox{var}(\epsilon_{ij\ell})$
based on the $m^{th}$ sample drawn from the posterior of the $\ell^{th}$ univariate submodel in the first stage.
The final estimate of $\mbox{\boldmath $\xi$}_{\ell}$ is then obtained as
$\textstyle \widehat{\mbox{\boldmath $\xi$}}_{\ell,opt} = \arg\min_{\mbox{\boldmath $\xi$}_{\ell}} \sum_{r=1}^{R_{\ell}}\left\{\widehat{v}_{\ell}(X_{r\ell}^{\Delta}) - {\mathbf B}_{q,J_{\ell},\ell}(X_{r\ell}^{\Delta}) \exp(\mbox{\boldmath $\xi$}_{\ell})\right\}^{2}$,
where $\{X_{r\ell}^{\Delta}\}_{r=1}^{R_{\ell}}$ is a set of grid points on the support $[A_{\ell},B_{\ell}]$ of the variance functions.
In the second stage, we keep these estimates $\widehat{\mbox{\boldmath $\xi$}}_{\ell,opt}$ fixed
and sample the other parameters using the Gibbs sampler described in Section \ref{sec: mvt posterior computation}.
At the $m^{th}$ MCMC iteration of the Gibbs sampler, the scaled errors to be used in step 2 of the algorithm are obtained as
$\epsilon_{ij\ell}^{(m)}=(W_{ij\ell}-X_{i\ell}^{(m)})/\widehat{s}_{\ell}(X_{i\ell}^{(m)})$, where $\widehat{s}_{\ell}(X_{i\ell}^{(m)})=\{{\mathbf B}_{q,J_{\ell},\ell}(X_{i\ell}^{(m)}) \exp(\widehat{\mbox{\boldmath $\xi$}}_{\ell,opt})\}^{1/2}$ and $X_{i\ell}^{(m)}$ is sampled value of $X_{i\ell}$ at the $m^{th}$ iteration.
Appropriate scale adjustments are made to make the estimate $\widehat{f}_{\mbox{\boldmath $\epsilon$}}$ comparable to the true $f_{\mbox{\boldmath $\epsilon$}}$ in simulation experiments.
Specifically, $\widehat{f}_{\mbox{\boldmath $\epsilon$}} = \sum_{m=1}^{M}\sum_{k=1}^{K}\pi_{k}^{(m)}{\cal M}VN({\mathbf D}\mbox{\boldmath $\mu$}_{k}^{(m)},{\mathbf D}\mbox{\boldmath $\Sigma$}_{k}^{(m)}{\mathbf D})/M$,
where ${\mathbf D}=\hbox{diag}(\sigma_{true,1},\dots,\sigma_{true,p})$,
$\sigma_{true,\ell}^{2}$ is the variance of $\epsilon_{ij\ell}$ under the true $f_{\mbox{\boldmath $\epsilon$}}$ used to generate them,
and $\{\pi_{k}^{(m)},\mbox{\boldmath $\mu$}_{k}^{(m)},\mbox{\boldmath $\Sigma$}_{k}^{(m)}\}_{k=1}^{K}$ are the $m^{th}$ sampled values from the posterior of the parameters $\{\pi_{k},\mbox{\boldmath $\mu$}_{k},\mbox{\boldmath $\Sigma$}_{k}\}_{k=1}^{K}$ specifying $f_{\mbox{\boldmath $\epsilon$}}$.
\section{The Two-Stage Sampler} \label{sec: mvt two-stage sampler}
Over the last two decades, MCMC techniques have remained at the forefront of Bayesian inference.
The literature on the topic is already vast and is still rapidly expanding.
While the research on exact MCMC methods is still highly active,
owing to numerous practical challenges,
approximate computation methods are becoming increasingly popular.
For a recent review of traditional exact methods and more recent approximate tools, see Green, et al. (2015).
The basic idea of the two-stage sampler described above, while being simple and intuitive,
is a novel addition to the growing literature on the topic.
We are studying its properties in greater detail in simpler settings in a separate manuscript.
Figure \ref{fig: mvt approximate sampler} below provides some heuristics.
\begin{figure}[!ht]
\centering
\includegraphics[height=10cm, width=11cm, trim=0cm 0cm 0cm 0cm, clip=true]{Approximate_Sampler.pdf}
\caption{\baselineskip=5pt Heuristics of the two-stage sampler.
The brown elliptical region shows the joint posterior $p(\theta_{1},\theta_{2} \vert {\mathbf D})$ of two parameters $\theta_{1}$ and $\theta_{2}$ given data ${\mathbf D}$.
The light blue curve shows $p(\theta_{1} \vert {\mathbf D})$, the marginal posterior of $\theta_{1}$ given data ${\mathbf D}$.
The blue curve shows $p(\theta_{1} \vert \theta_{20}, {\mathbf D})$, the posterior of $\theta_{1}$, where $\theta_{20}$, the `true' value of $\theta_{2}$, is known.
The red curve shows $p(\theta_{1} \vert \widehat{\theta}_{2}, {\mathbf D})$, the pseudo-posterior of $\theta_{1}$ given $\widehat{\theta}_{2}$, an estimate of $\theta_{2}$.
$p(\theta_{1} \vert \widehat{\theta}_{2}, {\mathbf D})$ will be close to $p(\theta_{1} \vert \theta_{20}, {\mathbf D})$ when $\widehat{\theta}_{2}$ is close to $\theta_{20}$.
}
\label{fig: mvt approximate sampler}
\end{figure}
Consider the problem of drawing samples from the posterior $p(\theta_{1},\theta_{2} \vert {\mathbf D})$ of two parameters $\theta_{1}$ and $\theta_{2}$ given data ${\mathbf D}$.
The basic MCMC sampler iterates between sampling from
(A) $p(\theta_{1} \vert \theta_{2}, {\mathbf D})$ and (B) $p(\theta_{2} \vert \theta_{1}, {\mathbf D})$.
If, however, the `true' value of $\theta_{2}$ (in a frequentist sense), say $\theta_{20}$, is known,
we only require step (A), which becomes $p(\theta_{1} \vert \theta_{20}, {\mathbf D})$.
And if we substitute $\theta_{2}$ by a point estimate $\widehat{\theta}_{2}$, step (A) becomes $p(\theta_{1} \vert \widehat{\theta}_{2}, {\mathbf D})$.
While an uncertainty assessment based on $p(\theta_{1} \vert \widehat{\theta}_{2}, {\mathbf D})$ will be overly optimistic
compared to that based on the actual marginal posterior $p(\theta_{1} \vert {\mathbf D})$,
$p(\theta_{1} \vert \widehat{\theta}_{2}, {\mathbf D})$ and $p(\theta_{1} \vert \theta_{20}, {\mathbf D})$ will be close when $\widehat{\theta}_{2}$ is close to $\theta_{20}$, and
samples drawn from $p(\theta_{1} \vert \widehat{\theta}_{2}, {\mathbf D})$ may be used for approximate Bayesian inference on $\theta_{1}$.
The two-stage sampler can also be explained using the following heuristics.
Under suitable regularity conditions and considering parametric models
(observe that Bayesian nonparametric models are usually large parametric models),
the posterior distribution $p(\theta_{1},\theta_{2} \vert {\mathbf D})$ can be approximated by a Gaussian distribution centered at the true value $\mbox{\boldmath $\theta$}_0 = (\theta_{10}, \theta_{20})$
and variance equal to the inverse of the Fisher information matrix ${\mathbf I}(\mbox{\boldmath $\theta$}_0)$.
The justification of this argument is usually tedious and follows from Bernstein von-Mises (BvM) theorems.
Refer, for example, to Johnstone (2010), Bontemps (2011), Bickel and Kleijn (2012), Spokoiny (2013) and Castillo and Nickl (2014) for recent literature on BvM theorems
in nonparametric Bayesian models and growing parametric Bayesian models.
For the sake of convenience, let us assume such results are true for $p(\theta_{1},\theta_{2} \vert {\mathbf D})$. Hence the marginal posterior distribution
$p(\theta_{1} \vert {\mathbf D})$ is similar to a Gaussian distribution with mean $\theta_{10}$ and variance
$[{\mathbf I}(\mbox{\boldmath $\theta$}_0)]^{-1}_{11}$, the $(1, 1)^{th}$ block of the inverse of ${\mathbf I}(\mbox{\boldmath $\theta$}_0)$.
Assuming $\widehat{\theta}_{2}$ to be a consistent estimate of $\theta_{20}$,
the conditional posterior distribution in step (A) can be approximated by $p(\theta_{1} \vert \theta_{20}, {\mathbf D})$
which in turn is similar to a Gaussian distribution centered at
$\theta_{10}$ with precision matrix ${\mathbf I}(\theta_{10} \vert \theta_{20})$,
the conditional Fisher information matrix assuming $\theta_{20}$ to be known.
In classical inference, it is well known that
$[{\mathbf I}(\mbox{\boldmath $\theta$}_0)]^{-1}_{11} \geq [{\mathbf I}(\theta_{10} \vert \theta_{20})]^{-1}$ in the sense that the difference is non-negative definite,
since knowing $\theta_{20}$ results in a higher value of the `information'.
While confidence intervals based on samples drawn by the two-stage algorithm will be optimistic,
the draws will be centered around the true value $\theta_{10}$
and hence may be used for approximate `mean' inference on $\theta_{1}$.
\pagebreak
\section{Comments on the Model for ${{\mathbf U} \vert {\mathbf X}}$} \label{sec: mvt comments on the model for U given X}
As shown in Sarkar, et al. (2014), even in univariate deconvolution settings, due to the nonavailability of precise information about $X$,
variations in higher order conditional moments of $(U\vert X)$ are extremely difficult to capture even in large data sets.
Semiparametric approaches that focus separately on the first two moments, namely $E(U\vert X)=0$ and $\hbox{var}(U\vert X)$, and the shape of $f_{U\vert X}$,
are thus more efficient than possible fully nonparametric approaches even when the truth closely follows the setup of the nonparametric model.
See their Section 4.3.
This will certainly remain true in the significantly more difficult multivariate deconvolution problem.
In building models for $f_{{\mathbf U}\vert{\mathbf X}}$, we may thus concentrate on the class of models that separates the problem of modeling $\hbox{cov}({\mathbf U}\vert{\mathbf X})$
from that of modeling the shape and other properties of $f_{{\mathbf U}\vert{\mathbf X}}$.
Recent advances in covariance regression models,
where the covariance of the multivariate regression errors are allowed to vary flexibly with precisely measured and possibly multivariate predictors,
provide us with clues about how this may be achieved.
However, as we explain in the following section, there are major differences between conditionally varying multivariate regression errors
and conditionally varying multivariate measurement errors.
As an implication, covariance regression methods may not be exactly appropriate for modeling conditionally varying covariance matrices $\hbox{cov}({\mathbf U}\vert{\mathbf X})$ in measurement error settings.
\subsection{Regression Errors vs Measurement Errors}\label{sec: mvt regression vs measurement errors}
Consider the problem of flexible modeling of conditionally heteroscedastic regression errors where the response and the covariates are both univariate.
Consider also the problem of modeling conditionally heteroscedastic measurement errors in a univariate deconvolution set up.
From a modeling perspective, Bayesian hierarchical framework allows us to treat these two problems on par
by treating both the covariate in the regression problem and the variable of interest in the deconvolution problem simply as conditioning variables.
Of course in the regression problem $X$ is precisely measured, whereas in the deconvolution problem $X$ would be latent,
but in either case we are required to flexibly model the density of $(U\vert X)$ subject to $E(U\vert X) = 0$, where $U$, depending upon the context, denotes either regression or measurement errors.
See Figure \ref{fig: univriate graphical models supplementary materials}.
Models for regression errors that allow their variance to vary with the values of the covariate (Pati and Dunson, 2013; Pelenis, 2014) can thus be tried as
potential candidates for models for univariate conditionally heteroscedastic measurement errors.
Conversely, the models for conditionally heteroscedastic univariate measurement errors (Staudenmayer, et al. 2008; Sarkar, et al. 2014) can also be employed to model univariate conditionally heteroscedastic regression errors.
\vskip 10pt
\begin{figure}[!ht]
\hspace*{1.5cm}\includegraphics[height=2cm, width=12cm, trim=1cm 1.25cm 1cm 1.25cm, clip=true]{Graphical_Model_Univariate.pdf}
\caption{
(a) Dependency structure in a univariate deconvolution model with latent variable of interest $X$, associated measurement errors $U$ and replicates $W$.
(b) Dependency structure in a univariate regression model with response $Y$,
associated regression errors $U$ and a univariate observed predictor $X$.
In both panels, the filled rectangular regions focus on the dependency structures between the conditionally varying errors $U$ and the conditioning variable $X$.
The unfilled and the shaded nodes signify latent and observable variables, respectively.
}
\label{fig: univriate graphical models supplementary materials}
\end{figure}
This is not quite true in a multivariate set up.
Interpreting the variables of interest ${\mathbf X}$ broadly as conditioning variables, one can again loosely connect the problem of modeling conditionally heteroscedastic multivariate measurement errors to the problem of covariance regression (Hoff and Niu, 2012; Fox and Dunson, 2016 etc.),
where the goal is to develop models that allow the covariance of multivariate regression errors to vary flexibly with precisely measured and possibly multivariate predictors.
In covariance regression problems, the dimension of the regression errors is typically unrelated to the dimension of the predictors.
Different components of the regression errors are assumed to be equally influenced by different components of the predictors
and hence independent reordering of the components of ${\mathbf X}_{i}$ will not change the dependency structure.
In multivariate deconvolution problems, in contrast,
the $\ell^{th}$ component $U_{ij\ell}$ is the measurement error associated exclusively with $X_{i\ell}$.
Here the dimension of ${\mathbf U}_{ij}$ is the same as the dimension of ${\mathbf X}_{i}$
and any reordering of the components of ${\mathbf X}_{i}$ would require that
the components of ${\mathbf U}_{ij}$ and ${\mathbf W}_{ij}$ be also reordered using the same relabeling scheme.
See Figure \ref{fig: graphical models supplementary materials}.
While different components of the measurement error vectors ${\mathbf U}_{ij}$ may be correlated,
this exclusive association between $U_{ij\ell}$ and $X_{i\ell}$ implies the plausibility that the dependence of $U_{ij\ell}$ on ${\mathbf X}_{i}$ can be explained primarily through $X_{i\ell}$.
Figure \ref{fig: mvt EATS data results VFn}, for instance, suggests strong conditional heteroscedasticity patterns
and it is plausible to assume that the conditional variability in $U_{ij\ell}$ can be explained primarily by $X_{i\ell}$ only.
The dependency structure of conditionally varying multivariate measurement errors are, therefore, different from that of conditionally varying multivariate regression errors.
Additionally, the aforementioned covariance regression approaches all assume multivariate normality of the regression errors.
As is well established in the literature, parametric distributional assumptions on the errors can be particularly restrictive in measurement error problems.
\vskip 0pt
\begin{figure}[!ht]
\centering
\includegraphics[height=5.5cm, width=15cm, trim=2cm 1cm 1cm 1cm, clip=true]{Graphical_Model2.pdf}
\vskip -5pt
\caption{
(a) Dependency structure in a trivariate deconvolution model with latent variable of interest ${\mathbf X}=(X_{1},X_{2},X_{3})^{\rm T}$, associated measurement errors ${\mathbf U}=(U_{1},U_{2},U_{3})^{\rm T}$ and replicates ${\mathbf W}=(W_{1},W_{2},W_{3})^{\rm T}$.
The solid black and the dashed gray edges signify strong and weak dependencies, respectively.
(b) Dependence relationships in a trivariate deconvolution problem implied by the `separable' measurement error model $({\mathbf U}\vert {\mathbf X})={\mathbf S}({\mathbf X})\mbox{\boldmath $\epsilon$}$
with $\mbox{\boldmath $\epsilon$}$ independent of ${\mathbf X}$ and ${\mathbf S}({\mathbf X})=\hbox{diag}\{s_{1}(X_{1}),s_{2}(X_{2}),s_{3}(X_{3})\}$.
Unlike panel (a), possible weak relationships
between $U_{\ell}$ and $\{X_{m}\}_{m\neq \ell}$
are ignored.
(c) Dependency structure in a trivariate regression model with response ${\mathbf Y}=(Y_{1},Y_{2},Y_{3})$,
associated regression errors ${\mathbf U}=(U_{1},U_{2},U_{3})^{\rm T}$ and an observed bivariate predictor ${\mathbf X}=(X_{1},X_{2})^{\rm T}$
where ${\mathbf X}_{\sigma}=(X_{\sigma(1)},X_{\sigma(2)})^{\rm T}$ denotes arbitrary reordering of ${\mathbf X}$.
In both panels, the filled rectangular regions focus on the dependency structures between the conditionally varying errors ${\mathbf U}$ and the conditioning variable ${\mathbf X}$.
The unfilled and the shaded nodes signify latent and observable variables, respectively.
The directed and the undirected edges represent one-way and two-way relationships, respectively.
}
\label{fig: graphical models supplementary materials}
\end{figure}
These issues preclude direct application of existing covariance regression approaches to model conditionally heteroscedastic multivariate measurement errors.
Models for conditionally varying multivariate measurement errors $({\mathbf U} \vert {\mathbf X})$ should highlight their unique features,
accommodate distributional flexibility, enforce the mean zero restriction
and, to be practically effective, should be computationally stable even in the absence of precise information on the conditioning variable ${\mathbf X}$.
While we reiterate that, for both modeling and computational reasons,
the covariance regression methodology of Fox and Dunson (2016) is not suitable for our purposes,
they still provide clues about how the problems of flexible modeling $\hbox{cov}({\mathbf U}\vert{\mathbf X})$
and that of modeling the shape of $f_{{\mathbf U}\vert{\mathbf X}}$ can be separated.
The following section explains.
\subsection{Latent Factor Models for Different Covariance Classes} \label{sec: latent factor models for different covariance classes}
Lemma \ref{lem: cholesky} gives a slightly modified version of Lemma 2.1 of Fox and Dunson (2016).
\begin{Lem} \label{lem: cholesky}
Any conditionally varying covariance matrix $\hbox{cov}({\mathbf U}\vert{\mathbf X})= \mbox{\boldmath $\Sigma$}({\mathbf X})$ can be represented as $\mbox{\boldmath $\Sigma$}({\mathbf X})=\mbox{\boldmath $\Lambda$}({\mathbf X})\mbox{\boldmath $\Lambda$}^{\rm T}({\mathbf X})$ for some lower triangular matrix $\mbox{\boldmath $\Lambda$}({\mathbf X})=((\lambda_{\ell, m}({\mathbf X})))$.
\end{Lem}
\begin{proof}
The proof follows from straightforward application of Cholesky factorization.
\end{proof}
\vskip 5pt
Following Lemma \ref{lem: cholesky},
introducing a latent factor $\mbox{\boldmath $\epsilon$}$, we can write $({\mathbf U}\vert{\mathbf X},\mbox{\boldmath $\epsilon$})=\mbox{\boldmath $\Lambda$}({\mathbf X})\mbox{\boldmath $\epsilon$}$,
that is, $(U_{\ell}\vert{\mathbf X},\mbox{\boldmath $\epsilon$})=\sum_{m=1}^{\ell}\lambda_{\ell,m}({\mathbf X})\epsilon_{m}$,
with $\mbox{\boldmath $\epsilon$} \perp {\mathbf X}$ and $\hbox{cov}(\mbox{\boldmath $\epsilon$})={\mathbf I}_{p}$.
Completely unrestricted covariance functions can thus be modeled via such latent variable framework by flexibly modeling $\mbox{\boldmath $\Lambda$}({\mathbf X})$.
$E({\mathbf U}\vert{\mathbf X})={\mathbf 0}$ can be achieved by setting $E(\mbox{\boldmath $\epsilon$})={\mathbf 0}$.
The general nature of the latent factor formulation having been established,
we formulate the subsequent results in terms of additional restrictions on such models.
Following the discussion in Section \ref{sec: mvt regression vs measurement errors},
we now focus specifically on covariance functions $\hbox{cov}({\mathbf U}\vert{\mathbf X})$ for measurement error problems,
where ${\mathbf U}$ and ${\mathbf X}$ are of the same dimension, each component $U_{\ell}$ of ${\mathbf U}$ being related to the corresponding component $X_{\ell}$ of the conditioning vector ${\mathbf X}$.
We consider first the situation when $(U_{\ell}\vert {\mathbf X}, \mbox{\boldmath $\epsilon$})$ depends exclusively on $X_{\ell}$ but not on $\{X_{m}\}_{m\neq\ell}$.
\begin{Lem} \label{lem: cholesky me}
Let $({\mathbf U}\vert{\mathbf X},\mbox{\boldmath $\epsilon$})=\mbox{\boldmath $\Lambda$}({\mathbf X})\mbox{\boldmath $\epsilon$}$, where $\mbox{\boldmath $\Lambda$}({\mathbf X})=((\lambda_{\ell, m}({\mathbf X})))$ is lower-triangular,
$\mbox{\boldmath $\epsilon$} \perp {\mathbf X}$ and $\hbox{cov}(\mbox{\boldmath $\epsilon$})={\mathbf I}_{p}$.
If $(U_{\ell} \vert {\mathbf X},\mbox{\boldmath $\epsilon$}) = (U_{\ell} \vert X_{\ell},\mbox{\boldmath $\epsilon$})$ for all $\ell$,
then $\lambda_{\ell, m}({\mathbf X})=\lambda_{\ell, m}(X_{\ell})$ for all $\ell, m$.
\end{Lem}
\begin{proof}
The proof follows trivially by noting that $(U_{\ell}\vert{\mathbf X},\mbox{\boldmath $\epsilon$})=\sum_{m=1}^{\ell} \lambda_{\ell, m}({\mathbf X})\epsilon_{m} = (U_{\ell}\vert X_{\ell},\mbox{\boldmath $\epsilon$})$,
if and only if, for all $m\leq \ell$, $\lambda_{\ell, m}({\mathbf X})$ is a function of $X_{\ell}$ only.
\end{proof}
\vskip 5pt
As an immediate corollary of Lemma \ref{lem: cholesky me}, the conditional moments $m_{\ell}^{r}({\mathbf X})=E (U_{\ell}^{r}\vert{\mathbf X})$ are functions of $X_{\ell}$ only
and the conditional cross-moments $m_{\ell,m}^{r,s}({\mathbf X})=E (U_{\ell}^{r}U_{m}^{s}\vert{\mathbf X})$ are functions of $X_{\ell}$ and $X_{m}$ only.
Modeling variations in the conditional cross-moments is a daunting task in multivariate settings, particularly in the absence of precise information on ${\mathbf X}$.
The next result allows the cross-moments $m_{\ell,m}^{r,s}({\mathbf X})$ to vary with $X_{\ell}$ and $X_{m}$,
but assumes the correlations $\hbox{corr}(U_{\ell},U_{m} \vert {\mathbf X})$ to remain constant across ${\mathbf X}$.
\begin{Lem} \label{lem: cholesky diagonal}
Let $({\mathbf U}\vert{\mathbf X},\mbox{\boldmath $\epsilon$})=\mbox{\boldmath $\Lambda$}({\mathbf X})\mbox{\boldmath $\epsilon$}$, where $\mbox{\boldmath $\Lambda$}({\mathbf X})=((\lambda_{\ell, m}({\mathbf X})))$ is lower-triangular,
$\mbox{\boldmath $\epsilon$} \perp {\mathbf X}$ and $\hbox{cov}(\mbox{\boldmath $\epsilon$})={\mathbf I}_{p}$.
Also, let $(U_{\ell} \vert {\mathbf X},\mbox{\boldmath $\epsilon$}) = (U_{\ell} \vert X_{\ell},\mbox{\boldmath $\epsilon$})$ for all $\ell$,
and $\hbox{corr}(U_{\ell},U_{m} \vert {\mathbf X})$ does not vary with ${\mathbf X}$ for all $\ell\neq m$.
Then, $\mbox{\boldmath $\Lambda$}({\mathbf X})=\mbox{\boldmath $\Lambda$}_{1}({\mathbf X}){\mathbf C}$ for some diagonal matrix $\mbox{\boldmath $\Lambda$}_{1}({\mathbf X})=\hbox{diag}\{\lambda_{1}(X_{1}),\dots,\lambda_{p}(X_{p})\}$
and some lower-triangular matrix ${\mathbf C}$.
\end{Lem}
\begin{proof}
\iffalse
\\
\begin{eqnarray*}
\mbox{\boldmath $\Lambda$}({\mathbf X}) = \left(\begin{array}{cccc}
\lambda_{1,1}(X_{1}) & 0 & \dots & 0\\
\lambda_{2,1}(X_{2}) & \lambda_{2,2}(X_{2}) & \dots & 0\\
\vdots & \vdots & \dots & \vdots\\
\lambda_{p,1}(X_{p}) & \lambda_{p,2}(X_{p}) & \dots & \lambda_{p,p}(X_{p})\\
\end{array}\right).
\end{eqnarray*}
\\
\fi
From Lemma \ref{lem: cholesky me}, we have
$\lambda_{\ell, m}({\mathbf X})=\lambda_{\ell, m}(X_{\ell})$ for all $\ell, m$,
and $\hbox{corr}(U_{\ell},U_{m}\vert{\mathbf X})$ varies with $X_{\ell}$ and $X_{m}$ only.
Under the additional assumption of Lemma \ref{lem: cholesky diagonal},
we first prove that $ \lambda_{\ell,m}(X_{\ell}) = c_{\ell,m} \lambda_{\ell,\ell}(X_{\ell})$ for some constant $c_{\ell,m}$ for all $m < \ell$ and all $\ell=2,\dots,p$.
Without loss of generality, we assume that $\hbox{corr}(U_{\ell},U_{m}\vert {\mathbf X}) = r_{\ell,m} \neq 0$ for all $\ell\neq m$.
We have
\\
\begin{eqnarray}
\hbox{corr}(U_{1},U_{2}\vert {\mathbf X})=\frac{\lambda_{2,1}(X_{2})}{\{\lambda_{2,1}^{2}(X_{2})+\lambda_{2,2}^{2}(X_{2})\}^{1/2}} = r_{1,2} ~ \Rightarrow ~
\lambda_{2,2}^{2}(X_{2}) = \frac{(1-r_{1,2}^{2})}{r_{1,2}^{2}} \lambda_{2,1}^{2}(X_{2}). ~ \label{eq: cholesky diagonal 1}
\end{eqnarray}
\\
So the proposition holds true for $\ell=2$.
Next, assume that it holds for $\ell=2,\dots,h-1$ for some $h > 2$.
Also, from (\ref{eq: cholesky diagonal 1}), $\hbox{var}(U_{2}\vert{\mathbf X}) = \textstyle\sum_{m=1}^{2} \lambda_{2,m}^{2}(X_{2}) = \lambda_{2,1}^{2}(X_{2}) / r_{1,2}^{2}$.
This is, in fact, more generally true for all $\ell$.
For instance, for $\ell=h$,
\begin{eqnarray}
&& \hbox{corr}(U_{1},U_{h}\vert {\mathbf X})=\frac{\lambda_{h,1}(X_{h})}{\{\sum_{m=1}^{h} \lambda_{h,m}^{2}(X_{h})\}^{1/2}} = r_{1,h} ~~ \Rightarrow ~~
\sum_{m=2}^{h} \lambda_{h,m}^{2}(X_{h}) = \frac{(1-r_{1,h}^{2})}{r_{1,h}^{2}} \lambda_{h,1}^{2}(X_{h}) \nonumber\\
&& ~~ \Rightarrow ~~
\hbox{var}(U_{h}\vert{\mathbf X}) = \textstyle\sum_{m=1}^{h} \lambda_{h,m}^{2}(X_{h}) = \lambda_{h,1}^{2}(X_{h}) / r_{1,h}^{2}. \label{eq: cholesky diagonal 2}\\
&&\text{Then,} ~~
\hbox{corr}(U_{2},U_{h}\vert {\mathbf X})=\frac{\lambda_{2,1}(X_{2})\lambda_{h,1}(X_{h})+\lambda_{2,2}(X_{2})\lambda_{h,2}(X_{h})}{\{\sum_{m=1}^{2} \lambda_{2,m}^{2}(X_{2})\}^{1/2} \{\sum_{m=1}^{h} \lambda_{h,m}^{2}(X_{h})\}^{1/2}} = r_{2,h} \nonumber\\
&&~~ \Rightarrow~~ \frac{\lambda_{2,2}(X_{2})\{c_{2,1}\lambda_{h,1}(X_{h})+\lambda_{h,2}(X_{h})\}}{\abs{c_{2,1}\lambda_{2,2}(X_{2})} \abs{\lambda_{h,1}(X_{h})}} = \frac{r_{2,h}}{\abs{r_{1,2} r_{1,h}}}. \nonumber\\
&&~~ \Rightarrow~~ \lambda_{h,2}(X_{h}) = \widetilde{c}_{h,2} \lambda_{h,1}(X_{h})~\text{for some constant}~\widetilde{c}_{h,2}. \label{eq: cholesky diagonal 3}\\
&&\text{Next,} ~~
\hbox{corr}(U_{3},U_{h}\vert {\mathbf X})=\frac{\sum_{m=1}^{3}\lambda_{3,m}(X_{3})\lambda_{h,m}(X_{h})}{\{\sum_{m=1}^{3} \lambda_{3,m}^{2}(X_{3})\}^{1/2} \{\sum_{m=1}^{h} \lambda_{h,m}^{2}(X_{h})\}^{1/2}} = r_{3,h} \nonumber\\
&&~~ \Rightarrow~~ \frac{\lambda_{3,3}(X_{3})\{c_{3,1}\lambda_{h,1}(X_{h})+c_{3,2}\widetilde{c}_{h,2}\lambda_{h,1}(X_{h})+\lambda_{h,3}(X_{h})\}}{\abs{c_{3,1}\lambda_{3,3}(X_{3})} \abs{\lambda_{h,1}(X_{h})}} = \frac{r_{3,h}}{\abs{r_{1,3} r_{1,h}}} \nonumber\\
&&~~ \Rightarrow~~ \lambda_{h,3}(X_{h}) = \widetilde{c}_{h,3} \lambda_{h,1}(X_{h})~\text{for some constant}~\widetilde{c}_{h,3}. \label{eq: cholesky diagonal 4}\\
&&\text{Finally,}~~\hbox{corr}(U_{h-1},U_{h}\vert {\mathbf X})=\frac{\sum_{m=1}^{h-1}\lambda_{h-1,m}(X_{h-1})\lambda_{h,m}(X_{h})}{\{\sum_{m=1}^{h-1} \lambda_{h-1,m}^{2}(X_{h-1})\}^{1/2} \{\sum_{m=1}^{h} \lambda_{h,m}^{2}(X_{h})\}^{1/2}} = r_{h-1,h} \nonumber\\
&&~~ \Rightarrow~~ \frac{\lambda_{h-1,h-1}(X_{h-1})\{c_{h-1,1}\lambda_{h,1}(X_{h})+c_{h-1,2}\widetilde{c}_{h,2}\lambda_{h,1}(X_{h})+\dots+\lambda_{h,h}(X_{h})\}}{\abs{c_{h-1,1}\lambda_{h-1,1}(X_{h-1})} \abs{\lambda_{h,1}(X_{h})}} = \frac{r_{h-1,h}}{\abs{r_{1,h-1} r_{1,h}}} \nonumber\\
&&~~ \Rightarrow~~ \lambda_{h,h-1}(X_{h}) = \widetilde{c}_{h,h-1} \lambda_{h,1}(X_{h})~\text{for some constant}~\widetilde{c}_{h,h-1}. \label{eq: cholesky diagonal 5}
\end{eqnarray}
\\
Combining (\ref{eq: cholesky diagonal 3}), (\ref{eq: cholesky diagonal 4}), (\ref{eq: cholesky diagonal 5}) etc. with (\ref{eq: cholesky diagonal 2}),
the proposition follows by principles of mathematical induction.
This implies $\mbox{\boldmath $\Lambda$}({\mathbf X})=\mbox{\boldmath $\Lambda$}_{1}({\mathbf X}){\mathbf C}$ where $\mbox{\boldmath $\Lambda$}_{1}({\mathbf X})=\hbox{diag}\{\lambda_{1}(X_{1}),\dots,\lambda_{p}(X_{p})\}$ with $\lambda_{\ell}(X_{\ell})=\lambda_{\ell,\ell}(X_{\ell})$ for all $\ell$ and ${\mathbf C}=((c_{\ell,m}))$ is a lower triangular matrix with $c_{\ell,\ell}=1$ for all $\ell$.
\end{proof}
\iffalse
\\
\begin{eqnarray*}
&& \mbox{\boldmath $\Lambda$}({\mathbf X}) = \left(\begin{array}{cccc}
\lambda_{1,1}(X_{1}) & 0 & \dots & 0\\
c_{2,1}\lambda_{2,2}(X_{2}) & \lambda_{2,2}(X_{2}) & \dots & 0\\
\vdots & \vdots & \dots & \vdots\\
c_{p,1}\lambda_{p,p}(X_{p}) & c_{p,2}\lambda_{p,p}(X_{p}) & \dots & \lambda_{p,p}(X_{p})\\
\end{array}\right) \\
&& =
\left(\begin{array}{cccc}
\lambda_{1,1}(X_{1}) & 0 & \dots & 0\\
0 & \lambda_{2,2}(X_{2}) & \dots & 0\\
\vdots & \vdots & \dots & \vdots\\
0 & 0 & \dots & \lambda_{p,p}(X_{p})\\
\end{array}\right)
\left(\begin{array}{cccc}
1 & 0 & \dots & 0\\
c_{2,1} & 1 & \dots & 0\\
\vdots & \vdots & \dots & \vdots\\
c_{p,1} & c_{p,2} & \dots & 1\\
\end{array}\right).
\end{eqnarray*}
\\
\fi
\vskip 5pt
Under the conditions of Lemma \ref{lem: cholesky diagonal}, we thus have $\hbox{cov}({\mathbf U}\vert{\mathbf X}) = \mbox{\boldmath $\Sigma$}({\mathbf X})= \mbox{\boldmath $\Lambda$}_{1}({\mathbf X}) \mbox{\boldmath $\Sigma$}_{1} \mbox{\boldmath $\Lambda$}_{1}^{\rm T}({\mathbf X})$ with $\mbox{\boldmath $\Sigma$}_{1}={\mathbf C}{\mathbf C}^{\rm T}$.
Introducing a latent factor $\mbox{\boldmath $\epsilon$}$, we can now write $({\mathbf U}\vert{\mathbf X},\mbox{\boldmath $\epsilon$})=\mbox{\boldmath $\Lambda$}_{1}({\mathbf X})\mbox{\boldmath $\epsilon$}$ with $\mbox{\boldmath $\epsilon$} \perp {\mathbf X}$ and $\hbox{cov}(\mbox{\boldmath $\epsilon$})=\mbox{\boldmath $\Sigma$}_{1}$.
Due to the diagonal nature of $\mbox{\boldmath $\Lambda$}_{1}({\mathbf X})$, each component $\epsilon_{\ell}$ of $\mbox{\boldmath $\epsilon$}$ is exclusively associated with the corresponding component $U_{\ell}$ of ${\mathbf U}$
and may be treated as a scaled version of $U_{\ell}$.
Starting with a general latent factor model framework,
with two additional restrictions that are particularly relevant in multivariate measurement error settings,
we have now arrived at model (\ref{eq: mvt multiplicative structure}).
The problems of modeling $\hbox{cov}({\mathbf U}\vert{\mathbf X})$ and the shape of $f_{{\mathbf U}\vert{\mathbf X}}$ can now be achieved by separately modeling $\mbox{\boldmath $\Lambda$}_{1}({\mathbf X})$ and $f_{\mbox{\boldmath $\epsilon$}}$.
And $E({\mathbf U}\vert{\mathbf X})={\mathbf 0}$ can be achieved by enforcing $E(\mbox{\boldmath $\epsilon$})={\mathbf 0}$.
\subsection{Models for ${\mathbf U}\vert {\mathbf X}$ and $\hbox{cov}({\mathbf U}\vert{\mathbf X})$} \label{sec: mvt cov mat model}
In this section, we first revisit the models for conditionally varying measurement errors developed in Section \ref{sec: mvt density of errors} of the main paper.
A few plausible alternatives and generalizations,
the implied covariance structures, their strengths, limitations and connections with the adopted model are also discussed.
The model (\ref{eq: mvt multiplicative structure}) for conditionally varying measurement errors developed in Section \ref{sec: mvt density of errors} of the main paper assumes $({\mathbf U}_{ij}\vert{\mathbf X}_{i})={\mathbf S}({\mathbf X}_{i})\mbox{\boldmath $\epsilon$}_{ij}$ where ${\mathbf S}({\mathbf X}_{i})=\hbox{diag}\{s_{1}(X_{i1}),\dots,s_{p}(X_{ip})\}$ and $\mbox{\boldmath $\epsilon$}_{ij}$ are distributed independently of ${\mathbf X}$ with $E(\mbox{\boldmath $\epsilon$}_{ij})={\mathbf 0}$.
This `separability' of ${\mathbf X}_{i}$ and $\mbox{\boldmath $\epsilon$}_{ij}$ allows us to incorporate distributional flexibility and enforce the mean zero restriction using the techniques developed for independent errors in Section \ref{sec: mvt density of homoscedastic errors} in the main paper.
The diagonal structure of ${\mathbf S}$ highlights the exclusive associations between $U_{ij\ell}$ and $X_{i\ell}$
but ignores weak dependencies of $U_{ij\ell}$ on $\{X_{im}\}_{m\neq \ell}$.
The general shape of $f_{{\mathbf U}\vert{\mathbf X}}$ as well as correlations between different components of ${\mathbf U}_{ij}$ are inherited from $f_{\mbox{\boldmath $\epsilon$}}$.
The associated dependency structure is summarized in Figure \ref{fig: graphical models supplementary materials}(b).
The novel two-stage procedure described in Sections \ref{sec: mvt posterior computation} and \ref{sec: mvt estimation of variance functions}
produces efficient and numerically stable posterior estimates.
As discussed in Section \ref{sec: mvt multiplicative errors}, the model also arises naturally in multivariate multiplicative measurement error settings ${\mathbf W}_{ij} = {\mathbf X}_{i} \circ \widetilde{\mathbf U}_{ij}$
where the errors $\widetilde{\mathbf U}_{ij}$ are distributed independently of ${\mathbf X}_{i}$ with $E(\widetilde{\mathbf U}_{ij})={\mathbf 1}$.
The model can be reformulated as ${\mathbf W}_{ij} = {\mathbf X}_{i} + {\mathbf U}_{ij}$,
where ${\mathbf U}_{ij}={\mathbf S}({\mathbf X}_{i})\mbox{\boldmath $\epsilon$}_{ij}$, ${\mathbf S}({\mathbf X}_{i})=\hbox{diag}\{X_{i1},\dots,X_{ip}\}$ and $\mbox{\boldmath $\epsilon$}_{ij}=(\widetilde{\mathbf U}_{ij}-{\mathbf 1})$ with $E(\mbox{\boldmath $\epsilon$}_{ij})={\mathbf 0}$.
It thus conforms to the conditionally varying additive measurement error model (\ref{eq: mvt multiplicative structure}) described above.
These results and the ones provided in Section \ref{sec: latent factor models for different covariance classes}
establish the fairly general nature of model (\ref{eq: mvt multiplicative structure}) and are also informative about cases outside its support.
A few such cases that are particularly relevant to measurement error problems and form part of our research aspirations
but are not pursued in detail in this article are briefly discussed below.
As informed by Lemma \ref{lem: cholesky me}, another class that implies $\hbox{var}(U_{ij\ell}\vert{\mathbf X}_{i})=s_{\ell}^{2}(X_{i\ell})$
and allows $\hbox{corr}(U_{ij\ell},U_{ijm}\vert{\mathbf X}_{i})$ to vary with $X_{i\ell}$ and $X_{im}$
is obtained by letting ${\mathbf U}_{ij}=\mbox{\boldmath $\Lambda$}({\mathbf X}_{i}) \mbox{\boldmath $\epsilon$}_{ij}$ with $\mbox{\boldmath $\Lambda$}({\mathbf X}_{i})=((\lambda_{\ell,m}(X_{i\ell})))_{\ell=1,m=1}^{p,p}$.
The model highlights the exclusive associations between $U_{ij\ell}$ and $X_{i\ell}$
- $\hbox{var}(U_{ij\ell}\vert{\mathbf X}_{i})$ depends on $X_{i\ell}$ and $\hbox{cov}(U_{ij\ell},U_{ijm}\vert {\mathbf X}_{i})$ depends on $X_{i\ell}$ and $X_{im}$.
Modeling variations in conditional cross-moments is a daunting task in multivariate settings,
more so in the absence of precise information about ${\mathbf X}_{i}$.
Towards a more parsimonious representation, the off-diagonal elements $\{\lambda_{\ell,m}(X_{i\ell})\}_{\ell \neq m}$ may be shrunk towards zero,
resulting in a model that associates each $U_{ij\ell}$ with its own latent factor component $\epsilon_{ij\ell}$.
That is, $\mbox{\boldmath $\Lambda$}({\mathbf X}_{i})$ should be shrunk towards $\mbox{\boldmath $\Lambda$}_{0}({\mathbf X}_{i})=\hbox{diag}\{\lambda_{1,1}(X_{i1}),\dots,\lambda_{p,p}(X_{ip})\}$.
This limiting case still allows $\hbox{var}(U_{ij\ell}\vert {\mathbf X}_{i})$ to vary flexibly with $X_{i\ell}$, and $\hbox{cov}(U_{ij\ell},U_{ijm}\vert{\mathbf X})$ to vary with $X_{i\ell}$ and $X_{im}$,
but assumes the correlations $\hbox{corr}(U_{ij\ell},U_{ijm}\vert{\mathbf X}_{i})$ to not vary with ${\mathbf X}_{i}$.
Another flexible class of models for $({\mathbf U}_{ij}\vert{\mathbf X}_{i})$
that conforms to the dependency structure depicted in Figure \ref{fig: graphical models supplementary materials}(a)
is obtained by letting ${\mathbf U}_{ij}=\mbox{\boldmath $\Lambda$}({\mathbf X}_{i}) \mbox{\boldmath $\epsilon$}_{ij}$
with $\mbox{\boldmath $\Lambda$}({\mathbf X}_{i})=((\lambda_{\ell,m}(X_{im})))_{\ell=1,m=1}^{p,p}$.
The implied covariance structure is given by $\hbox{cov}({\mathbf U}_{ij}\vert{\mathbf X}_{i})=\mbox{\boldmath $\Sigma$}({\mathbf X}_{i})=\mbox{\boldmath $\Lambda$}({\mathbf X}_{i})\mbox{\boldmath $\Sigma$}_{\mbox{\boldmath $\epsilon$}}\mbox{\boldmath $\Lambda$}^{\rm T}({\mathbf X}_{i})$.
Specifically, we have $(U_{ij\ell} \vert {\mathbf X}_{i}) = \textstyle \sum_{m}\lambda_{\ell,m}(X_{im})\epsilon_{ijm}$ with
\\
\begin{eqnarray*}
&& \hbox{cov}(U_{ij\ell_{1}},U_{ij\ell_{2}}\vert {\mathbf X}_{i}) = \textstyle \sum_{m_{1},m_{2}} \lambda_{\ell_{1},m_{1}}(X_{im_{1}}) \lambda_{\ell_{2},m_{2}}(X_{im_{2}}) \sigma_{m_{1},m_{2}}\\
&& ~~~ = \textstyle \lambda_{\ell_{1},\ell_{1}}(X_{i\ell_{1}}) \lambda_{\ell_{2},\ell_{2}}(X_{i\ell_{2}}) \sigma_{\ell_{1},\ell_{2}} + \sum_{m_{1}\neq \ell_{1},m_{2} \neq \ell_{2}} \lambda_{\ell_{1},m_{1}}(X_{im_{1}}) \lambda_{\ell_{2},m_{2}}(X_{im_{2}}) \sigma_{m_{1},m_{2}} \\
&&\text{and}~~~\hbox{var}(U_{ij\ell}\vert {\mathbf X}_{i}) = \textstyle \lambda_{\ell,\ell}^{2}(X_{i\ell}) \sigma_{\ell,\ell} + \sum_{m_{1}\neq \ell,m_{2} \neq \ell} \lambda_{\ell,m_{1}}(X_{im_{1}}) \lambda_{\ell,m_{2}}(X_{im_{2}}) \sigma_{m_{1},m_{2}}.
\end{eqnarray*}
\\
Ideally, to highlight the exclusive strong association between $U_{ij\ell}$ and $X_{i\ell}$,
the diagonal elements of $\mbox{\boldmath $\Lambda$}({\mathbf X}_{i})$, namely $\lambda_{\ell,\ell}(X_{i\ell})$, should dominate
and the remaining off-diagonal elements $\{\lambda_{\ell,m}(X_{im})\}_{\ell \neq m}$ may be shrunk towards zero.
That is, $\mbox{\boldmath $\Lambda$}({\mathbf X}_{i})$ should be shrunk towards $\mbox{\boldmath $\Lambda$}_{0}({\mathbf X}_{i})=\hbox{diag}\{\lambda_{1,1}(X_{i1}),\dots,\lambda_{p,p}(X_{ip})\}$.
Since measurement error problems are well known to be inherently computationally unstable,
it is not clear whether any practical gain in efficiency can be achieved
by modeling a large number of off-diagonal functions in $\mbox{\boldmath $\Lambda$}({\mathbf X}_{i})$ at the expense of significantly increased model complexity.
Model (\ref{eq: mvt multiplicative structure}) considered in this article instead focuses on the special limiting cases with ${\mathbf S}({\mathbf X}_{i})=\mbox{\boldmath $\Lambda$}_{0}({\mathbf X}_{i})$.
Another extension results from mixtures of multiplicative and independent additive errors.
In univariate settings, such models were considered in Rocke and Durbin (2001) for studying gene expression levels measured by DNA slides.
In multivariate settings, we have ${\mathbf U}_{ij}={\mathbf X}_{i} \circ \mbox{\boldmath $\epsilon$}_{ij}^{(1)}+\mbox{\boldmath $\epsilon$}_{ij}^{(2)}$,
where $\mbox{\boldmath $\epsilon$}_{ij}^{(k)}$, $k=1,2$ are distributed independently of ${\mathbf X}_{i}$.
With $\hbox{cov}(\mbox{\boldmath $\epsilon$}_{ij}^{(k)})=\mbox{\boldmath $\Sigma$}_{k}=((\sigma_{\ell,m}^{(k)}))_{m=1,\ell=1}^{p,p}$ for $k=1,2$,
the implied covariance structure is given by $\hbox{cov}({\mathbf U}_{ij}\vert{\mathbf X}_{i})={\mathbf S}({\mathbf X}_{i})\mbox{\boldmath $\Sigma$}_{1}{\mathbf S}({\mathbf X}_{i})+\mbox{\boldmath $\Sigma$}_{2}$,
where ${\mathbf S}({\mathbf X}_{i})=\hbox{diag}\{X_{i1},\dots,X_{ip}\}$, as above.
The model conforms to the dependency structure of Figure \ref{fig: graphical models supplementary materials}(b)
but can not be strictly written as model (\ref{eq: mvt multiplicative structure}).
However, as can be seen from Figure \ref{fig: mvt EATS data results VFn},
in our motivating nutritional epidemiology application,
smaller average consumptions naturally result in more precise 24 hour recalls,
the variability approaching 0 as the true consumption approaches 0.
Under the assumption of continuity, $\lim_{{\mathbf X}\to{\mathbf 0}}\mbox{\boldmath $\Sigma$}({\mathbf X}) \to {\mathbf 0}^{p\times p}$ implies $\mbox{\boldmath $\Sigma$}_{2}={\mathbf 0}^{p\times p}$,
resulting in model (\ref{eq: mvt multiplicative structure}).
\subsection{Model Adequacy Checks}\label{sec: mvt model adequacy checks}
In Figure \ref{fig: mvt EATS data results VFn} in the main paper, we showed the plots of subject specific means $\overline{W}_{i\ell}$ of the replicates vs the corresponding subject-specific variances $S_{W,i\ell}^{2}$ for each of the four dietary components included in our analysis in Section \ref{sec: mvt data analysis}.
These plots suggest very strong conditional heteroscedasticity patterns in the measurement errors.
If we consider the plots of subject specific means $\overline{W}_{i\ell}$ vs subject specific variances $S_{W,im}^{2}$ for all possible pairs $(\ell,m)$,
we will see similar monotone increasing patterns not just for the pairs with $\ell=m$, but in pairs with $\ell\neq m$ too.
This can be explained by the high correlation between different components of ${\mathbf X}_{i}$, see Figure \ref{fig: mvt EATS data results XS},
and does not necessarily imply that the conditional variability in $U_{ij\ell}$ depends on other components of ${\mathbf X}_{i}$, not just $X_{i\ell}$.
As discussed in the previous subsections,
since the $\ell^{th}$ component $U_{ij\ell}$ is the measurement error associated exclusively with $X_{i\ell}$,
it is plausible to assume that the conditional variability of $U_{ij\ell}$
can be modeled mostly as a function of $X_{i\ell}$ only.
We present here some diagnostic plots to further validate the practical adequacy of this structural assumption.
Figure \ref{fig: mvt EATS data covariance model justification} shows the plots of $\widehat{X}_{i\ell}$ vs subject specific variances $\widehat{S}_{\epsilon,im}^{2}$ of $\widehat\epsilon_{ijm}$,
where $\widehat{X}_{i\ell}$ represent the posterior means of $X_{i\ell}$ values and $\widehat\epsilon_{ijm} = (W_{ijm}-\widehat{X}_{im})/\widehat{s}_{m}(\widehat{X}_{im})$
represent the corresponding scaled measurement error residuals
produced by the univariate submodels for the EATS data set analyzed in Section \ref{sec: mvt data analysis} of the main paper.
The figure indicates constant variance of the scaled measurement error residuals $\widehat\epsilon_{ij\ell}$ over the entire range of $X_{im}$ values for all $(\ell,m)$ pairs.
Nonparametric Eubank-Hart tests of no covariate effect (Eubank and Hart, 1992) applied to $(\widehat{X}_{i\ell},\widehat{S}_{\epsilon,im}^{2})$ for all $(\ell,m)$ pairs
(treating $\widehat{X}_{i\ell}$ as the covariate and $\widehat{S}_{\epsilon,im}^{2}$ as the response)
produced a minimum Benjamini-Hochberg adjusted p-value of $0.096$, suggesting that there is no residual heteroscedasticity left in $U_{ij\ell}$
after accounting for the variability in $U_{ij\ell}$ that can be sufficiently explained through $X_{i\ell}$ only.
See Table \ref{tab: mvt Eubank-Hart p-values}.
It may thus be concluded that for the EATS data application model (\ref{eq: mvt multiplicative structure})
developed in Section \ref{sec: mvt density of heteroscedastic errors} of the main paper
that implies $\hbox{var}(U_{ij\ell}\vert {\mathbf X}_{i}) = s_{\ell}^{2}(X_{i\ell})\hbox{var}(\epsilon_{ij\ell})$
suffices to explain the conditional variability in the measurement errors.
Model (\ref{eq: mvt multiplicative structure}) also assumed that only the conditional variability of ${\mathbf U}_{ij}$ depends on ${\mathbf X}_{i}$,
and derived other features of ${\mathbf U}_{ij}$ like skewness, multimodality, heavy-tails etc. from the scaled errors $\mbox{\boldmath $\epsilon$}_{ij}$.
As shown in Sarkar, et al. (2014), even in the much simpler univariate set up, in the absence of precise information on $X_{i\ell}$,
variations in other features of $U_{ij\ell}$ for varying values of $X_{i\ell}$, if any, are extremely difficult to detect.
More importantly, semiparametric methods that make the multiplicative structural assumption $(U_{ij\ell}\vert X_{i\ell}) = s_{\ell}(X_{i\ell})\epsilon_{ij\ell}$
are highly robust to departures from this assumption
and significantly outperform possible nonparametric alternatives that allow all order moments of $U_{ij\ell}$ to vary flexibly with $X_{i\ell}$, not just the conditional variance,
even in scenarios where the true data generating process closely conforms to these nonparametric alternatives.
\vskip 10pt
\begin{table}[!ht]
\centering
\begin{tabular}{|c|c|cccc|}
\hline
& Panel & p-values & BFN & BH & BY \\
\hline
1 & 1,1 & 0.991 & 1.000 & 0.991 & 1.000 \\
2 & 1,2 & 0.764 & 1.000 & 0.873 & 1.000 \\
3 & 1,3 & 0.251 & 1.000 & 0.446 & 1.000 \\
4 & 1,4 & 0.129 & 1.000 & 0.446 & 1.000 \\
5 & 2,1 & 0.598 & 1.000 & 0.736 & 1.000 \\
6 & 2,2 & 0.266 & 1.000 & 0.446 & 1.000 \\
7 & 2,3 & 0.037 & 0.592 & 0.197 & 0.667 \\
8 & 2,4 & 0.990 & 1.000 & 0.991 & 1.000 \\
9 & 3,1 & 0.224 & 1.000 & 0.446 & 1.000 \\
10 & 3,2 & 0.012 & 0.192 & 0.096 & 0.325 \\
11 & 3,3 & \bf{0.011} & \bf{0.176} & \bf{0.096} & \bf{0.325} \\
12 & 3,4 & 0.497 & 1.000 & 0.692 & 1.000 \\
13 & 4,1 & 0.519 & 1.000 & 0.692 & 1.000 \\
14 & 4,2 & 0.163 & 1.000 & 0.446 & 1.000 \\
15 & 4,3 & 0.279 & 1.000 & 0.446 & 1.000 \\
16 & 4,4 & 0.244 & 1.000 & 0.446 & 1.000 \\
\hline
\end{tabular}
\caption{\baselineskip=10pt
The original and adjusted p-values (BFN=Bonferroni, BH=Benjamini-Hochberg, BY=Benjamini-Yekutieli) returned by nonparametric Eubank-Hart tests of no covariate effect applied to $(\widehat{X}_{i\ell},\widehat{S}_{\epsilon,im}^{2})$ for all $(\ell,m)$ pairs
treating $\widehat{X}_{i\ell}$ as the covariate and $\widehat{S}_{\epsilon,im}^{2}$ as the response.
The minimum values corresponding to panel $(3,3)$ are highlighted.
See Section \ref{sec: mvt model adequacy checks} and Figure \ref{fig: mvt EATS data covariance model justification} in the Supplementary Materials for additional details.
}
\label{tab: mvt Eubank-Hart p-values}
\end{table}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{EATS_Data_Variance_Model_Justification.pdf}
\end{center}
\caption{\baselineskip=10pt Panel $(\ell,m)$ shows the plot of estimates $\widehat{X}_{i\ell}$ of $X_{i\ell}$ vs subject specific variances $\widehat{S}_{\epsilon,im}^{2}$ of scaled measurement error residuals $\widehat\epsilon_{ijm}$,
produced by univariate deconvolution methods.
See Section \ref{sec: mvt model adequacy checks} of the Supplementary Materials for additional details.
The darker horizontal lines in each panel represent the upper 10\% trimmed mean of the subject specific variances $\widehat{S}_{\epsilon,i\ell}^{2}$.
The lighter solid lines in each panel represent nonparametric lowess fits.
}
\label{fig: mvt EATS data covariance model justification}
\end{figure}
\section{Finite vs Infinite Mixture Models} \label{sec: mvt finite vs infinite mixture models}
In this article, we modeled the $f_{{\mathbf X}}$ and the density of the scaled measurement errors $f_{\mbox{\boldmath $\epsilon$}}$
using mixtures of fixed finite number of multivariate normal kernels.
Alternative approaches that escape the need to prespecify the number of mixture components include models with potentially infinite number of mixture components,
models induced by Dirichlet processes (Ferguson, 1973; Escobar and West, 1995) being perhaps the most popular among such techniques.
Apart from flexibility, one major advantage of such techniques comes from the ability of associated MCMC machinery to perform model selection and model averaging implicitly and semiautomatically.
Model averaging is achieved by allowing the number of mixture components to vary from one MCMC iteration to the other.
The number of mixture components that is visited the maximum number of times by the sampler then provides
a maximum a-posteriori (MAP) estimate of the number of mixture components required to approximate the target density.
However, in complicated multivariate set up like ours, MCMC algorithms for such infinite dimensional models become computationally highly intensive.
Mixtures based on fixed finite number of components, on the other hand, can greatly reduce computational complexity.
Recent studies of asymptotic properties of the posterior of overfitted mixture models (Rousseau and Mengersen, 2011)
suggest that mixture models with sufficiently large number of components can perform automatic model selection and model averaging just like infinite dimensional models.
Additionally, as the proofs of the results in Section \ref{sec: mvt model flexibility} imply,
the use of mixture models with fixed finite number of components does not necessarily imply a compromise on the issue of flexibility.
The approaches adopted in this article try to take the best from both worlds.
Computational burden is reduced by keeping the number of mixture components fixed at some finite values.
At the same time, simultaneous semiautomatic model selection and model averaging is achieved by exploiting properties of overfitted mixture models.
We elaborate our arguments below,
pointing out the close connections and the subtle differences our adopted finite dimensional models have with the aforementioned infinite dimensional alternatives.
\subsection{Infinite Mixture Models as Limits of Finite Mixture Models} \label{sec: mvt limits of finite mixture models}
Let $G_{K}=\sum_{k=1}^{K}\pi_{k}\delta_{\theta_{k}}$ with $(\pi_{1},\dots,\pi_{K})\sim {\cal D}ir(\alpha/K,\dots,\alpha/K)$ and $\theta_{k} \sim H$.
Also, let $G_{\infty} \sim {\cal D}P(\alpha,H)$, a Dirichlet process with concentration parameter $\alpha$ and base measure $H$.
Then, $G_{\infty}$ can be represented as $G_{\infty}=\sum_{k=1}^{\infty}\widetilde\pi_{k}\delta_{\theta_{k}}$ with $\widetilde\pi_{k} =V_{k}\prod_{\ell=1}^{k-1}(1-V_{\ell}), V_{\ell} \sim {\cal B}eta(1,\alpha)$ and $\theta_{k} \sim H$ (Sethuraman, 1994).
As $K\to \infty$, $\int g(\theta) dG_{K}(\theta) \overset{d}{\to} \int g(\theta) dG_{\infty}(\theta)$
for any measurable function $g$ integrable with respect to $H$ (Ishwaran and Zarepour, 2000, 2002).
The finite mixtures of multivariate normal kernels with symmetric Dirichlet priors
that we used in this article to model both $f_{{\mathbf X}}$ and the density of the scaled measurement errors $f_{\mbox{\boldmath $\epsilon$}}$
have close connections with infinite dimensional Dirichlet process based mixture models.
Specifically, taking $g(\theta)={\cal M}VN(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$ and appealing to the above result, we have $f_{{\mathbf X}}=\sum_{k=1}^{K_{{\mathbf X}}}\pi_{{\mathbf X},k} {\cal M}VN(\mbox{\boldmath $\mu$}_{{\mathbf X},k},\mbox{\boldmath $\Sigma$}_{{\mathbf X},k}) \overset{d}{\to} \sum_{k=1}^{\infty}\widetilde\pi_{{\mathbf X},k}{\cal M}VN(\mbox{\boldmath $\mu$}_{{\mathbf X},k},\mbox{\boldmath $\Sigma$}_{{\mathbf X},k})$ as $K_{{\mathbf X}} \to \infty$.
Our proposed mechanism to enforce the mean zero restriction on $f_{\mbox{\boldmath $\epsilon$}}$ specifically requires a finite dimensional symmetric prior on the mixture probabilities
and therefore does not admit a straightforward infinite dimensional extension.
But in the limit, as $K_{\mbox{\boldmath $\epsilon$}}\to \infty$, a reformulation of the model results in a complicated multivariate version of the infinite dimensional model of Sarkar, et al. (2014)
(See Lemma \ref{Lem: mvt lemma from Pelenis} in Section \ref{sec: mvt estimation of variance functions}).
\subsection{Computational Complexity} \label{sec: mvt computational complexity}
The implementation of complex infinite dimensional models, specially the complicated mean restricted model for the scaled errors,
will be computationally intensive in a multivariate setting like ours.
The computational simplicity of the finite dimensional methods proposed in this article make them particularly suitable for multivariate problems.
In this paragraph, we discuss additional mixing issues that render infinite dimensional models,
particularly the ones with non or semiconjugate priors on the component specific parameters (like our MLFA model), unsuitable for multivariate applications.
There are two main types of MCMC algorithms for fitting infinite dimensional mixture models - conditional methods and marginal methods.
In the conditional scheme, the mixture probabilities are sampled.
The mixture labels are then updated independently, conditional on the mixture probabilities.
The mixture probabilities in infinite dimensional mixture models can be stochastically ordered.
For instance, mixture probabilities in a Dirichlet process mixture model satisfy $E(\widetilde\pi_{k})>E(\widetilde\pi_{k+1})$ and $\Pr(\widetilde\pi_{k}>\widetilde\pi_{k+1})>0.5$ for all $k\in\mathbb{N}$.
This imposes weak identifiability on the mixture labels resulting in a complicated model space comprising many local modes of varying importance.
Different permutations of the mixture labels are not equivalent and exploration of the entire model space becomes important for valid inference.
In high dimensional and large data settings it is difficult to achieve even by sophisticated MCMC algorithms with carefully designed label switching moves (Hastie, et al. 2013).
The problem can be avoided with marginal methods (Neal, 2000) that integrate out the mixture probabilities and work with the resulting Polya urn scheme,
rendering the mixture labels dependent but nonidentifiable.
Unfortunately, such integration is possible only when conjugate priors are assigned to the component specific parameters.
Typically for infinite dimensional models with non or semiconjugate priors on the component specific parameters, good mixing is thus difficult to achieve,
particularly in complicated multivariate setup like ours.
Such issues also plague finite dimensional truncation based approximations to Dirichlet process mixture models
where the mixture probabilities are constructed as $\widetilde\pi_{k} =V_{k}\prod_{\ell=1}^{k-1}(1-V_{\ell}), V_{\ell} \sim {\cal B}eta(1,\alpha), k=1,\dots,(K-1)$, and $V_{K}=1$ (Ishwaran and James, 2002) and the mixture components remain weakly identifiable.
On the contrary, the issues of mixing and convergence become much less important for finite mixture models with symmetric priors
$(\pi_{1},\dots,\pi_{K})\sim {\cal D}ir(\alpha/K,\dots,\alpha/K)$ on the mixture probabilities.
With $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$ mixture components for the densities $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$, respectively,
the posterior is still multimodal but comprises $K_{{\mathbf X}}! \times K_{\mbox{\boldmath $\epsilon$}}!$ modal regions that are exact copies of each other.
For inference on the overall density or any other functions of interest that are invariant to permutations of the mixture labels,
it is only important that the MCMC sampler visits and explores at least one of the modal regions well
and label switching (or the lack of it) does not present any problem (Geweke, 2007).
\subsection{Model Selection and Model Averaging}\label{sec: mvt model selection and model averaging}
As mentioned at the beginning of Section \ref{sec: mvt finite vs infinite mixture models},
a major advantage of infinite dimensional mixture models is their ability to implicitly and semiautomatically perform model selection and model averaging.
Properties of overfitted mixture models can be exploited to achieve the same in finite dimensional models with sufficiently large number of components.
Recently Rousseau and Mengersen (2011) studied the asymptotic behavior of the posterior for overfitted mixture models with Dirichlet prior ${\cal D}ir(\alpha_{1},\dots,\alpha_{K})$ on the mixture probabilities
in a measurement error free set up and showed that
the hyper parameter $(\alpha_{1},\dots,\alpha_{k})$ strongly influences the way the posterior handles overfitting.
In particular, when $\max_{k=1,\dots,K}\alpha_{k} < L/2$, where $L$ denotes the number of parameters specifying the component kernels,
the posterior is asymptotically stable and concentrates in regions with empty redundant components.
In this article, we chose symmetric Dirichlet priors ${\cal D}ir(\alpha/K,\dots,\alpha/K)$ on the mixture probabilities
to model both the $f_{{\mathbf X}}$ and the density of the scaled measurement errors $f_{\mbox{\boldmath $\epsilon$}}$.
We set $\alpha_{{\mathbf X}}=\alpha_{\mbox{\boldmath $\epsilon$}} =1$ so that the condition $\alpha/K < L/2$ is satisfied for both $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$.
In simulation experiments reported in Section \ref{sec: mvt simulation studies}, the behavior of the posterior was similar to that observed by Rousseau and Mengersen (2011) in measurement error free set up.
That is, when $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$ were assigned sufficiently large values,
the MCMC chain quickly reached a stable stage where the redundant components became empty.
See Figure \ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 E1 Ind} in the main article
and Figure \ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MIW X1 E1 AR} and \ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 E1 AR} in the Supplementary Materials
for illustrations, where, with some abuse of nomenclature, the $k^{th}$ component is called empty if the associated mixture probability $\pi_{k} \leq 0.05$.
Since such overfitted mixture models allow the number of nonempty mixture components to vary from one MCMC iteration to the next,
model averaging is automatically achieved.
MAP estimates of the numbers of mixture components required to approximate the target densities are given by the numbers of components
which are visited the maximum number of times by the MCMC sampler,
as in the case of infinite mixture models.
As discussed in the main paper, for the MIW method, when the measurement errors are conditionally heteroscedastic and the true covariance matrices are highly sparse,
the strategy usually overestimates the number of non-empty mixture components required to approximate the target densities well.
In these cases, the MIW method becomes highly numerically unstable and much larger sample sizes are required for the asymptotic results to hold.
See Figure \ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MIW X1 E1 Ind} in the main article for an illustration.
This may be regarded more as a limitation of the MIW method than a limitation of the adopted strategy to determine $K_{{\mathbf X}}$ and $K_{\mbox{\boldmath $\epsilon$}}$.
For the numerically more stable MLFA model, the asymptotic results are valid even for moderate sample sizes and such models are also more robust to overestimation of the number of nonempty clusters.
\subsection{Model Flexibility}\label{sec: mvt supp mat model flexibility}
The proofs of the support results presented in Section \ref{sec: mvt model flexibility} require that
the number of mixture components of the corresponding mixture models be allowed to vary over the set of all positive integers.
However, as the technical details of the proofs reveal,
the use of mixture models with fixed finite number of components does not necessarily imply a compromise on the issue of flexibility.
Indeed, a common recurring idea in the proofs of all these results, including those for the variance functions, is to show that
any function coming from the target class can be approximated with any desired level of accuracy by the corresponding finite mixture models
provided the models comprise sufficiently large number of mixture components and the function satisfies some fairly minimal regularity conditions.
The requirement that the priors on the number of mixture components assign positive probability to all positive integers only helps us
reach the final conclusions as immediate consequences.
For any given data set of finite size, the number of mixture components required to approximate a target density
will always be bounded above by the number of latent or observed variables generated by the target density.
For most practical applications the required number would actually be much smaller than the number of variables generated by the target.
Even if one applies mixture models that a-priori allow potentially infinitely many mixture components,
the posterior will essentially concentrate on a finite set comprising moderately small positive integers.
This means that for all practical purposes, solutions based on finite mixture models with fixed but sufficiently large number of mixture components will essentially be as robust as
solutions based on their infinite or varying dimensional counterparts
while at the same time being significantly less burdensome from a computational viewpoint.
The requirement that the priors on the number of mixture components assign positive mass on \emph{all} positive integers may thus be relegated to the requirement
that the priors assign positive mass on sets of the form $\{1,\dots,K\}$, where $K$ is sufficiently large.
Posterior computation for such models might be even much more intensive and complex requiring reversible jump moves.
Since a mixture model with $K$ components is at least as flexible as a model with $(K-1)$ components,
properties of overfitted mixture models discussed in Section \ref{sec: mvt model selection and model averaging}
allow us to adopt a much simpler strategy.
We can simply keep the number of mixture components fixed at sufficiently large values for all MCMC iterations.
Carefully chosen priors for the mixture probabilities then result in a posterior that concentrates in regions favoring empty redundant components,
essentially eliminating the need to assign any priors on the number of mixture components.
We will still need some mechanism, preferably an automated and data adaptive one, to determine what values of $K$ would be sufficiently large.
This issue is discussed in the section on hyper-parameter choices in Section \ref{sec: mvt choice of hyper-parameters}.
The discussions of Section \ref{sec: mvt finite vs infinite mixture models} suggest that finite mixture models with
sufficiently large number of mixture components and carefully chosen priors for the mixture probabilities
can essentially retain the major advantages of infinite dimensional alternatives including flexibility, automated model averaging and model selection
while at the same time being computationally much less burdensome,
making them our preferred choice for complicated high dimensional problems.
\pagebreak
\section{Proofs of Theoretical Results of Section \ref{sec: mvt model flexibility}} \label{sec: mvt proofs of theoretical results}
\subsection{Proof of Lemma \ref{Lem: mvt KL support of the priors}}
Proof of part 1 of Lemma \ref{Lem: mvt KL support of the priors} follows mostly by modifications of the results of Norets and Pelenis (2012).
We present here only the proof of part 2 that requires additional modifications along the lines of Pelenis (2014)
to accommodate the mean zero restriction on the density of the measurement errors.
The first step is to construct finite mixture models of the form
\\
\begin{eqnarray*}
f_{m}({\mathbf z}\vert \mbox{\boldmath $\theta$}_{m}) = \sum_{k=1}^{m+2}\pi_{m,k}~{\cal M}VN_{p}({\mathbf z} \vert \mbox{\boldmath $\mu$}_{m,k},\mbox{\boldmath $\Sigma$}_{m,k}) ~~~ \text{with}~~~\sum_{k=1}^{m+2}\pi_{m,k} \mbox{\boldmath $\mu$}_{m,k} = {\mathbf 0} \nonumber
\end{eqnarray*}
\\
that can approximate any given density $f_{0}$ that has mean zero and satisfies Conditions \ref{cond: mvt regularity conditions on the density} with any desired level of accuracy.
The continuity of $f_{m}(\cdot\vert\mbox{\boldmath $\theta$})$ implies that the KL distance between $f_{0}$ and $f_{m}$ remains small on sufficiently small open neighborhoods around $\mbox{\boldmath $\theta$}_{m}$.
Both the MIW and the MLFA priors assign positive probability to open neighborhoods around $\mbox{\boldmath $\theta$}_{m}$.
The conclusion of part 2 of Lemma \ref{Lem: mvt KL support of the priors} follows since the prior probability of having $(m+2)$ mixture components is also positive for all $m\in \mathbb{N}$.
\begin{Lem} \label{Lem: mvt KL3}
For any $f_{0} \in \widetilde{{\cal F}}_{\mbox{\boldmath $\epsilon$}}$ and $\eta>0$, there exists $\mbox{\boldmath $\theta$}_{m}$ such that
$d_{KL}\{f_{0}(\cdot),f_{m}(\cdot|\mbox{\boldmath $\theta$}_{m})\} < \eta$.
\end{Lem}
\begin{proof}
Let $\{A_{m,k}\}_{k=1}^{m}$ be adjacent cubes with side length $h_m$, and $A_{m,0} = \mathbb{R}^{p}-\cup_{k=1}^{m}A_{m,k}$
such that $h_{m} \downarrow 0$ but $\cup_{k=1}^{m}A_{m,k} \uparrow \mathbb{R}^{p}$ as $m \rightarrow \infty$.
So $\{A_{m,k}\}_{k=1}^{m}$ becomes finer but $\cup_{k=1}^{m}A_{m,k}$ covers more of $\mathbb{R}^{p}$ as $m$ increases.
Additionally, let the partition be constructed in such a way that for all $m$ sufficiently large, if $\mbox{\boldmath $\epsilon$} \in A_{m,0}$,
then $C_{r}(\mbox{\boldmath $\epsilon$})\cap A_{m,0}$ contains a hypercube $C_{0}(\mbox{\boldmath $\epsilon$})$ with side length $r/2$ and a vertex at $\mbox{\boldmath $\epsilon$}$;
and if $\mbox{\boldmath $\epsilon$} \notin A_{m,0}$, then $C_{r}(\mbox{\boldmath $\epsilon$})\cap (\mathbb{R}^{p}-A_{m,0})$ contains a hypercube $C_{1}(\mbox{\boldmath $\epsilon$})$ with side length $r/2$ and a vertex at $\mbox{\boldmath $\epsilon$}$.
Consider the model
\\
\begin{eqnarray*}
f_{m}({\mathbf z}) &=& f_{m}({\mathbf z}\vert \mbox{\boldmath $\theta$}_{m}) = \sum_{k=1}^{m+2}\pi_{m,k}~{\cal M}VN_{p}({\mathbf z} \vert \mbox{\boldmath $\mu$}_{m,k},\mbox{\boldmath $\Sigma$}_{m,k}).
\end{eqnarray*}
\\
Set $\pi_{m,k} = \int_{A_{m,k}} f_{0}({\mathbf z})d{\mathbf z}$ for $k=1,2,\dots,m$ and $\pi_{m,k} = P_{f_0}(A_{m,0})/2 = \int_{A_{m,0}} f_{0}({\mathbf z})d{\mathbf z}/2$ for $k=(m+1),(m+2)$.
Then $\sum_{k=1}^{m+2}\pi_{m,k} = \int_{\mathbb{R}^p}f_{0}({\mathbf z})d{\mathbf z} = 1$.
Define $g({\mathbf d})=\sum_{k=1}^{m}\pi_{m,k}({\mathbf c}_{m,k}+{\mathbf d}) + \int_{A_{m,0}} {\mathbf z} f_{0}({\mathbf z}) d{\mathbf z}$, where ${\mathbf c}_{m,k}$ is the center of $A_{m,k}$ for $k=1,2,\dots,m$.
\\
\begin{eqnarray*}
g(h_{m}{\mathbf 1}_{p}/2) &=& \sum_{k=1}^{m}\pi_{m,k}({\mathbf c}_{m,k}+h_{m}{\mathbf 1}_{p}/2) + \int_{A_{m,0}}{\mathbf z} f_{0}({\mathbf z})d{\mathbf z} \\
&=& \sum_{k=1}^{m} \int_{A_{m,k}} ({\mathbf c}_{m,k}+h_{m}{\mathbf 1}_{p}/2) f_{0}({\mathbf z})d{\mathbf z} + \int_{A_{m,0}}{\mathbf z} f_{0}({\mathbf z}) d{\mathbf z} \\
&\geq& \sum_{k=1}^{m} \int_{A_{m,k}} {\mathbf z} f_{0}({\mathbf z}) d{\mathbf z} + \int_{A_{m,0}}{\mathbf z} f_{0}({\mathbf z}) d{\mathbf z} = \int_{\mathbb{R}^p} {\mathbf z} f_{0}({\mathbf z})d{\mathbf z} = {\mathbf 0}.
\end{eqnarray*}
\\
Similarly $g(-h_{m}{\mathbf 1}_{p}/2)\leq {\mathbf 0}$.
Since $g(\cdot)$ is continuous, there exists ${\mathbf d}_{m}\in[-h_{m}/2,h_{m}/2]^{p}$ such that $g({\mathbf d}_{m})={\mathbf 0}$.
Set $\mbox{\boldmath $\mu$}_{m,k} = ({\mathbf c}_{m,k}+{\mathbf d}_{m})$ for $k=1,2,\dots,m$.
Also set $\mbox{\boldmath $\mu$}_{m,m+1} = 2 \int_{A_{m,0}} {\mathbf z} f_{0}({\mathbf z})d{\mathbf z}/\int_{A_{m,0}} f_{0}({\mathbf z})d{\mathbf z}$ and $\mbox{\boldmath $\mu$}_{m,m+2} = {\mathbf 0}$
when $\int_{A_{m,0}}f_{0}({\mathbf z})d{\mathbf z}>0$, and $\mbox{\boldmath $\mu$}_{m,m+1} = \mbox{\boldmath $\mu$}_{m,m+2} = {\mathbf 0}$ otherwise.
Then $\sum_{k=1}^{m+2}\pi_{m,k}\mbox{\boldmath $\mu$}_{m,k} = g({\mathbf d}_{m}) = {\mathbf 0}$.
Also set $\mbox{\boldmath $\Sigma$}_{m,k} = \sigma_{m}^{2}\hbox{I}_{p}$ for $k=1,2,\dots,m$ with $\sigma_{m}\to 0$, and $\mbox{\boldmath $\Sigma$}_{m,m+1} = \mbox{\boldmath $\Sigma$}_{m,m+2} = \sigma_{0}^{2}\hbox{I}_p$.
Consider a sequence $\{\delta_{m}\}_{m=1}^{\infty}$ satisfying $\delta_{m}>6p^{1/2}h_{m}$ and $\delta_{m}\to 0$.
Fix $\mbox{\boldmath $\epsilon$}\in \mathbb{R}^{p}$.
Define $C_{\delta_{m}}(\mbox{\boldmath $\epsilon$}) = [\mbox{\boldmath $\epsilon$}-\delta_{m}{\mathbf 1}_{p}/2,\mbox{\boldmath $\epsilon$}+\delta_{m}{\mathbf 1}_{p}/2]$.
For $m$ sufficiently large $C_{\delta_{m}}(\mbox{\boldmath $\epsilon$}) \subseteq \cup_{k=1}^{m}A_{m,k}$, $C_{\delta_{m}}(\mbox{\boldmath $\epsilon$}) \cap A_{m,0} = \phi$
and the set $\{k: 1\leq k \leq m, A_{m,k}\subset C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})\}$ is non-empty.
For $k=1,\dots,m$, when $A_{m,k}\subset C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})$,
$\pi_{m,k} \geq \inf_{{\mathbf z}\in C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z}) h_{m}^{p}$.
Therefore,
\\
\begin{eqnarray*}
f_{m}(\mbox{\boldmath $\epsilon$}) &\geq& \sum_{\{k:1\leq k \leq m, A_{m,k}\subset C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})\}} ~ \pi_{m,k} ~ {\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{m,k},\sigma_{m}^{2}\hbox{I}_{p}) \\
&\geq& \inf_{z\in C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z}) \sum_{\{k:A_{m,k}\subset C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})\}} h_{m}^{p} ~ {\cal M}VN_{p}(\mbox{\boldmath $\epsilon$}\vert {\mathbf c}_{m,k}+{\mathbf d}_{m},\sigma_{m}^{2}\hbox{I}_{p}) \\
&\geq& \inf_{z\in C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z}) ~~ \left\{1-\frac{6p^{3/2}h_{m}\delta_{m}^{p-1}}{(2\pi)^{p/2}\sigma_{m}^{p}}-\frac{8p\sigma_{m}}{(2\pi)^{1/2}\delta_{m}}\right\},
\end{eqnarray*}
where the last step follows from Lemma 1 and Lemma 2 of Norets and Pelenis (2012).
Let $h_{m},\delta_{m},\sigma_{m}$ further satisfy $h_{m}/\sigma_{m}^{p} \to 0, \sigma_{m}/\delta_{m} \to 0$.
Then for any $\eta>0$ there exists an $M_{1}$ large enough such that for all $m>M_{1}$
\\
\begin{eqnarray*}
f_{m}(\mbox{\boldmath $\epsilon$}) \geq \inf_{{\mathbf z}\in C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z}) \cdot (1-\eta).
\end{eqnarray*}
\\
Without loss of generality, we may assume $f_{0}(\mbox{\boldmath $\epsilon$})>0$.
Since $f_{0}(\cdot)$ is continuous and $\delta_{m} \rightarrow 0$, there also exists an $M_{2}$ such that for all $m>M_{2}$
we have $\inf_{{\mathbf z}\in C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z})>0$ and
\\
\begin{eqnarray*}
\frac{f_{0}(\mbox{\boldmath $\epsilon$})}{\inf_{{\mathbf z}\in C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})}f_{0}({\mathbf z})} \leq (1+\eta).
\end{eqnarray*}
\\
Therefore, for all $m>\max\{M_{1},M_{2}\}$, we have
\\
\begin{eqnarray*}
1 \leq \max \left\{1,\frac{f_{0}(\mbox{\boldmath $\epsilon$})}{f_{m}(\mbox{\boldmath $\epsilon$})} \right\} \leq \max\left\{1, \frac{f_{0}(\mbox{\boldmath $\epsilon$})}{\inf_{z\in C_{\delta_{m}}(\mbox{\boldmath $\epsilon$})}f_{0}(z) \cdot (1-\eta)}\right\} \leq \frac{(1+\eta)}{(1-\eta)}.
\end{eqnarray*}
\\
Thus, $\hbox{log}\max\{1,f_{0}(\mbox{\boldmath $\epsilon$})/f_{m}(\mbox{\boldmath $\epsilon$})\} \rightarrow 0$ as $m \rightarrow \infty$.
Pointwise convergence is thus established.
Next, we will find an integrable upper bound for $\hbox{log}\max\{1,f_{0}(\mbox{\boldmath $\epsilon$})/f_{m}(\mbox{\boldmath $\epsilon$})\}$.
For point wise convergence we can assume $\mbox{\boldmath $\epsilon$}\notin A_{m,0}$ for sufficiently large $m$.
But to find integrable upper bound, we have to consider both the cases $\mbox{\boldmath $\epsilon$} \in A_{m,0}$ and $\mbox{\boldmath $\epsilon$} \notin A_{m,0}$.
When $\mbox{\boldmath $\epsilon$}\in A_{m,0}$, we have $\hbox{P}_{f_0}(A_{m,0}) = \int_{A_{m,0}}f_0({\mathbf z})d{\mathbf z}
\geq \int_{A_{m,0}\cap C_{r}(\mbox{\boldmath $\epsilon$})}f_0({\mathbf z})d{\mathbf z} \geq \lambda\{A_{m,0}\cap C_{r}(\mbox{\boldmath $\epsilon$})\} \inf_{{\mathbf z}\in A_{m,0}\cap C_{r}(\mbox{\boldmath $\epsilon$})}f_{0}({\mathbf z})
\geq (r/2)^{p} \inf_{{\mathbf z}\in C_{r}(\mbox{\boldmath $\epsilon$})}f_{0}({\mathbf z})$, since $\lambda\{A_{m,0}\cap C_{r}(\mbox{\boldmath $\epsilon$})\} \geq \lambda\{C_{0}(\mbox{\boldmath $\epsilon$})\} \geq (r/2)^{p}$.
Using part 4 of Conditions \ref{cond: mvt regularity conditions on the density} and Lemma 1 and Lemma 2 of Norets and Pelenis (2012) again,
if $\mbox{\boldmath $\epsilon$} \notin A_{m,0}$, for $m$ sufficiently large
\\
\begin{eqnarray*}
&&\hspace{-1cm} \sum_{\{k:A_{m,k}\subset C_{r}(\mbox{\boldmath $\epsilon$})\}} h_{m}^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{m,k},\sigma_{m}^{2}\hbox{I}_{p})
\geq \sum_{\{k:A_{m,k}\subset C_{1}(\mbox{\boldmath $\epsilon$})\}} h_{m}^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{m,k},\sigma_{m}^{2}\hbox{I}_{p}) \\
&& \geq \int_{C_{1}(\mbox{\boldmath $\epsilon$})}~{\cal M}VN_{p}({\mathbf z}\vert\mbox{\boldmath $\epsilon$},\sigma_{m}^{2}\hbox{I}_{p})d{\mathbf z} - \frac{3p^{3/2}(r/2)^{p-1}h_{m}}{(2\pi)^{p/2}\sigma_{m}^{p}} \\
&& \geq \left\{\frac{1}{2^p} - \frac{8p\sigma_{m}}{2^{p}(2\pi)^{1/2}r} - \frac{3p^{3/2}h_{m}r^{p-1}}{2^{p-1}(2\pi)^{p/2}\sigma_{m}^{p}}\right\} \geq \frac{1}{2^{p+1}}.
\end{eqnarray*}
\\
This implies
\\
\begin{eqnarray*}
f_{m}(\mbox{\boldmath $\epsilon$}) &=& \sum_{k=1}^{m}P_{f_0}(A_{m,k})~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{m,k},\sigma_{m}^{2}\hbox{I}_{p}) + \sum_{k=m+1}^{m+2}(1/2) P_{f_0}(A_{m,0})~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{m,k},\sigma_{0}^{2}\hbox{I}_{p}) \\
&\geq& \sum_{k=1}^{m}P_{f_0}(A_{m,k})~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{m,k},\sigma_{m}^{2}\hbox{I}_{p}) + (1/2) P_{f_0}(A_{m,0})~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p}) \\
&\geq& \{1-1(\mbox{\boldmath $\epsilon$}\in A_{m,0})\} ~ \inf_{{\mathbf z} \in C_{r}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z})\sum_{\{k:A_{m,k}\subset C_{r}(\mbox{\boldmath $\epsilon$})\}} \lambda(A_{m,k})~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert \mbox{\boldmath $\mu$}_{m,k},\sigma_{m}^{2}\hbox{I}_{p}) \\
&& + ~ 1(\mbox{\boldmath $\epsilon$}\in A_{m,0}) (1/2) P_{f_0}(A_{m,0})~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p}) \\
&\geq& (1/2)\{1-1(\mbox{\boldmath $\epsilon$}\in A_{m,0})\} ~ \inf_{{\mathbf z} \in C_{r}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z}) \\
&& + ~ 1(\mbox{\boldmath $\epsilon$}\in A_{m,0}) ~ (1/2) (r/2)^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p})~\inf_{{\mathbf z} \in C_{r}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z})\\
&\geq& (1/2) (r/2)^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p})~\inf_{{\mathbf z} \in C_{r}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z}).
\end{eqnarray*}
\\
The last step followed by choosing $\sigma_{0}^{2}$ large enough so that
$(r/2)^{p}\sup_{\mbox{\boldmath $\epsilon$}\in \mathbb{R}^p}{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p}) < (r/2)^{p}~\sigma_{0}^{-p}<2^{-(p+1)}<1$.
Therefore,
\\
\begin{eqnarray*}
&&\hspace{-0.7cm} \hbox{log}\max\left\{1,\frac{f_{0}(\mbox{\boldmath $\epsilon$})}{f_{m}(\mbox{\boldmath $\epsilon$})}\right\} \leq \hbox{log}\max\left\{1,\frac{f_{0}(\mbox{\boldmath $\epsilon$})}{(1/2) (r/2)^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p})~\inf_{{\mathbf z} \in C_{r}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z})}\right\} \\
&&\hspace{-0.4cm} \leq \hbox{log} \left[\frac{1}{ (1/2) (r/2)^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p})} \max\left\{ (1/2) (r/2)^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p}), \frac{f_{0}(\mbox{\boldmath $\epsilon$})}{\inf_{{\mathbf z} \in C_{r}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z})}\right\}\right] \\
&&\hspace{-0.4cm} \leq - \hbox{log}\left\{ (1/2) (r/2)^{p}~{\cal M}VN_{p}(\mbox{\boldmath $\epsilon$} \vert {\mathbf 0},\sigma_{0}^{2}\hbox{I}_{p})\right\} + \hbox{log} \left\{\frac{f_{0}(\mbox{\boldmath $\epsilon$})}{\inf_{{\mathbf z} \in C_{r}(\mbox{\boldmath $\epsilon$})} f_{0}({\mathbf z})}\right\}.
\end{eqnarray*}
\\
The first and the second terms are integrable by part 2 and part 3 of Conditions \ref{cond: mvt regularity conditions on the density}, respectively.
Since $\int f_{0}(\mbox{\boldmath $\epsilon$})\hbox{log}\{f_{0}(\mbox{\boldmath $\epsilon$})/f_{m}(\mbox{\boldmath $\epsilon$})\} d\mbox{\boldmath $\epsilon$} \leq \int f_{0}(\mbox{\boldmath $\epsilon$})\hbox{log} \max \{1, f_{0}(\mbox{\boldmath $\epsilon$})/f_{m}(\mbox{\boldmath $\epsilon$})\} d\mbox{\boldmath $\epsilon$}$,
the proof of Lemma \ref{Lem: mvt KL3} is completed applying dominated convergence theorem (DCT).
\end{proof}
Let $\eta>0$ be given.
According to Lemma \ref{Lem: mvt KL3},
there exists $\mbox{\boldmath $\theta$}_{m}^{\star} = (\mbox{\boldmath $\pi$}_{1:(m+2)}^{\star},\mbox{\boldmath $\mu$}_{1:(m+2)}^{\star},\mbox{\boldmath $\Sigma$}_{1:(m+2)}^{\star})$
with $\mbox{\boldmath $\Sigma$}_{k}^{\star}=\sigma_{m}^{2\star}\hbox{I}_{p}$ for $k=1,\dots,m$ and $\mbox{\boldmath $\Sigma$}_{k}^{\star}=\sigma_{0}^{2\star}\hbox{I}_{p}$ for $k=(m+1),(m+2)$
such that $d_{KL}\{f_{0}(\cdot),f_{m}(\cdot\vert\mbox{\boldmath $\theta$}_{m}^{\star})\}<\eta/2$.
We have, for any $\mbox{\boldmath $\theta$}_{m}$,
\\
\begin{eqnarray*}
&&\hspace{-1cm}\int f_{0}(\mbox{\boldmath $\epsilon$}) ~\hbox{log}\left\{\frac{f_{0}(\mbox{\boldmath $\epsilon$})}{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m})}\right\}d\mbox{\boldmath $\epsilon$}
= \int f_{0}(\mbox{\boldmath $\epsilon$})~\hbox{log}\left\{\frac{f_{0}(\mbox{\boldmath $\epsilon$})}{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m}^{\star})}\right\}d\mbox{\boldmath $\epsilon$}
+ \int f_{0}(\mbox{\boldmath $\epsilon$})~\hbox{log}\left\{\frac{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m}^{\star})}{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m})}\right\}d\mbox{\boldmath $\epsilon$}.
\end{eqnarray*}
Let the second term in the above expression be denoted by $g(\mbox{\boldmath $\theta$}_{m})$.
The prior puts positive mass on arbitrarily small open neighborhoods around $\mbox{\boldmath $\theta$}_{m}^{\star}$.
The result will follow if there exists an open neighborhood ${\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})$ around $\mbox{\boldmath $\theta$}_{m}^{\star}$ such that
$\sup_{\mbox{\boldmath $\theta$}_{m}\in {\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})} g(\mbox{\boldmath $\theta$}_{m}) < \eta/2$.
Since $g(\mbox{\boldmath $\theta$}_{m}^{\star}) = 0$, it suffices to show that the function $g(\mbox{\boldmath $\theta$}_{m})$ is continuous at $\mbox{\boldmath $\theta$}_{m}^{\star}$.
Now $g(\mbox{\boldmath $\theta$}_{m})$ is continuous at $\mbox{\boldmath $\theta$}_{m}^{\star}$ if for every sequence $\{\mbox{\boldmath $\theta$}_{m,n}\}_{n=1}^{\infty}$ with $\mbox{\boldmath $\theta$}_{m,n}\to\mbox{\boldmath $\theta$}_{m}^{\star}$,
we have $g(\mbox{\boldmath $\theta$}_{m,n}) \to g(\mbox{\boldmath $\theta$}_{m}^{\star})$.
For all $\mbox{\boldmath $\epsilon$}\in \mathbb{R}^{p}$, we have $\hbox{log}\{f_{m}(\mbox{\boldmath $\epsilon$}\vert \mbox{\boldmath $\theta$}_{m}^{\star})/f_{m}(\mbox{\boldmath $\epsilon$}\vert \mbox{\boldmath $\theta$}_{m,n})\} \to 0$ as $\mbox{\boldmath $\theta$}_{m,n} \to \mbox{\boldmath $\theta$}_{m}^{\star}$.
Continuity of $g(\mbox{\boldmath $\theta$}_{m})$ at $\mbox{\boldmath $\theta$}_{m}^{\star}$ will follow from DCT if we can show that $\abs{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m}^{\star})/f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m,n})}$ has an upper bound that is integrable with respect to $f_{0}$.
Since $\mbox{\boldmath $\theta$}_{m,n}\to \mbox{\boldmath $\theta$}_{m}^{\star}$, for any arbitrarily small open neighborhood ${\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})$ around $\mbox{\boldmath $\theta$}_{m}^{\star}$,
we must have $\mbox{\boldmath $\theta$}_{m,n}\in {\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})$ for all $n$ sufficiently large.
Let $\mbox{\boldmath $\theta$}_{m} = (\mbox{\boldmath $\pi$}_{1:(m+2)},\mbox{\boldmath $\mu$}_{1:(m+2)},\mbox{\boldmath $\Sigma$}_{1:(m+2)}) \in {\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})$.
Since the eigenvalues of a real symmetric matrix depend continuously on the matrix,
we must have
$(\lambda_{1}(\mbox{\boldmath $\Sigma$}_{k}),\lambda_{p}(\mbox{\boldmath $\Sigma$}_{k}))\subset (\underline\sigma_{m}^{2\star},\overline\sigma_{m}^{2\star})$ for $k=1,\dots,m$ and
$(\lambda_{1}(\mbox{\boldmath $\Sigma$}_{k}),\lambda_{p}(\mbox{\boldmath $\Sigma$}_{k}))\subset (\underline\sigma_{0}^{2\star},\overline\sigma_{0}^{2\star})$ for $k=(m+1),(m+2)$,
where $\underline\sigma_{m}^{2\star} < \sigma_{m}^{2\star}< \overline\sigma_{m}^{2\star}$ and $\underline\sigma_{0}^{2\star} < \sigma_{0}^{2\star}< \overline\sigma_{0}^{2\star}$.
Let $\underline\sigma^{2\star} = \min\{\underline\sigma_{m}^{2\star},\underline\sigma_{0}^{2\star}\}$ and $\overline\sigma^{2\star} = \max\{\overline\sigma_{m}^{2\star},\overline\sigma_{0}^{2\star}\}$.
Then $(\lambda_{1}(\mbox{\boldmath $\Sigma$}_{k}),\lambda_{p}(\mbox{\boldmath $\Sigma$}_{k}))\subset (\underline\sigma^{2\star},\overline\sigma^{2\star})$ for $k=1,\dots,(m+2)$.
Similarly, for some finite $\mu^{\star}$, we must have $\mbox{\boldmath $\mu$}_{m,k} \in (-\mu^{\star}{\mathbf 1}_{p},\mu^{\star}{\mathbf 1}_{p}) = {\cal N}_{\mu^{\star}}$ for $k=1,\dots,(m+2)$.
For any real positive definite matrix $\mbox{\boldmath $\Sigma$}$, we have ${\mathbf z}^{\rm T} \mbox{\boldmath $\Sigma$}^{-1} {\mathbf z} \leq \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$}) \norm{{\mathbf z}}^{2}$.
Therefore, for any $\mbox{\boldmath $\epsilon$} \in \mathbb{R}^{p}$ and for all $k=1,\dots,(m+2)$,
we must have $(\mbox{\boldmath $\epsilon$} - \mbox{\boldmath $\mu$}_{m,k})^{\rm T} \mbox{\boldmath $\Sigma$}_{m,k}^{-1} (\mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}_{m,k}) \leq \underline\sigma^{-2\star}\{1(\mbox{\boldmath $\epsilon$}\in {\cal N}_{\mu^{\star}}) 2^{p}\mu^{\star p}+ 1(\mbox{\boldmath $\epsilon$}\notin {\cal N}_{\mu^{\star}})\norm{\mbox{\boldmath $\epsilon$} + \hbox{sign}(\mbox{\boldmath $\epsilon$})\mu^{\star}}^{2}\}$, where $\hbox{sign}(\mbox{\boldmath $\epsilon$}) = \{\hbox{sign}(\epsilon_{1}),\dots,\hbox{sign}(\epsilon_{p})\}^{\rm T}$.
Therefore, for any $\mbox{\boldmath $\theta$}_{m}\in {\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})$, we have
\\
\begin{eqnarray*}
[1(\mbox{\boldmath $\epsilon$}\in {\cal N}_{\mu^{\star}}) {\cal M}VN_{p}(2\mu^{\star}{\mathbf 1}_{p}\vert{\mathbf 0},\underline\sigma^{2\star}\hbox{I}_{p}) + 1(\mbox{\boldmath $\epsilon$}\notin {\cal N}_{\mu^{\star}}) {\cal M}VN_{p}\{\mbox{\boldmath $\epsilon$} + \hbox{sign}(\mbox{\boldmath $\epsilon$})\mu^{\star}\vert{\mathbf 0},\underline\sigma^{2\star}\hbox{I}_{p}\}]/\overline\sigma^{\star} \\
\leq f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $^{th}eta$}_{m}) \leq 1/\underline\sigma^{\star}.
\end{eqnarray*}
\\
The upper bound is a constant and the logarithm of the lower bound is integrable
since, by part 2 of Conditions \ref{cond: mvt regularity conditions on the density}, the second order moments of $\mbox{\boldmath $\epsilon$}$ exist.
An $f_{0}$-integrable upper bound for the function $\sup_{\mbox{\boldmath $\theta$}_{m}\in {\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})}\abs{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m})}$ thus exists.
Finally, DCT applies because
\\
\begin{eqnarray*}
\int f_{0}(\mbox{\boldmath $\epsilon$})~\abs{\hbox{log}\left\{\frac{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m}^{\star})}{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m,n})}\right\}}d\mbox{\boldmath $\epsilon$}
&\leq&
\sup_{\mbox{\boldmath $\theta$}_{m}\in {\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})} \int f_{0}(\mbox{\boldmath $\epsilon$})~\abs{\hbox{log}\left\{\frac{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m}^{\star})}{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m})}\right\}}d\mbox{\boldmath $\epsilon$} \\
&\leq& 2 \sup_{\mbox{\boldmath $\theta$}_{m}\in {\cal N}(\mbox{\boldmath $\theta$}_{m}^{\star})} \int f_{0}(\mbox{\boldmath $\epsilon$})~\abs{f_{m}(\mbox{\boldmath $\epsilon$}\vert\mbox{\boldmath $\theta$}_{m})}d\mbox{\boldmath $\epsilon$}.
\end{eqnarray*}
\\
The conclusion of part 2 of Lemma \ref{Lem: mvt KL support of the priors} follows since
the prior probability of having $(m+2)$ mixture components is positive for all $m\in \mathbb{N}$.
\subsection{Proof of Lemma \ref{Lem: mvt sup norm support of priors on variance functions}} \label{sec: mvt proof of sup norm support of priors on variance functions}
Given $q$, let $\Pi_{q}$ denote a prior on $\mathbb{N}_{q} = \{q+1,q+2,\dots\}$ such that $\Pi_{q}(J) >0 ~ \forall J\in \mathbb{N}_{q}$.
Let $||\cdot||_{2}$ denote the Euclidean norm.
Let $\mathbb{R}^{+} = (0,\infty)$. Given $J\sim \Pi_{q}$, also let $\Pi_{\beta\vert J}$ be a prior on $\mathbb{R}^{+J}$
such that $\Pi_{\beta\vert J}\{N_{\delta}(\mbox{\boldmath $\beta$}_{0})\} >0 $ for any $\delta>0$ and any $\mbox{\boldmath $\beta$}_{0}\in \mathbb{R}^{J}$,
where $N_{\delta}(\mbox{\boldmath $\beta$}_{0}) = \{\mbox{\boldmath $\beta$}: \mbox{\boldmath $\beta$}\in \mathbb{R}^{+J}, ||\mbox{\boldmath $\beta$}-\mbox{\boldmath $\beta$}_{0}||_{2} < \delta\}$.
Define ${\cal S}_{q,J} = \{v_{s}: v_{s} = {\mathbf B}_{q,J} \mbox{\boldmath $\beta$} = \sum_{j=1}^{J}b_{q,j}\beta_{j} ~ \hbox{for some}~ \mbox{\boldmath $\beta$} \in\mathbb{R}^{+J}\}$.
Then $\Pi_{{\mathbf V}} = \Pi_{q}\times \Pi_{\beta\vert J}$ is the induced prior on ${\cal S}_{q} = \cup_{J={q+1}}^{\infty}{\cal S}_{q,J}$.
Define $\psi(v_{0},h) = \sup_{X,X'\in[A,B], |X-X'|\leq h}|v_{0}(X)-v_{0}(X')|$.
Let $\lfloor\alpha\rfloor = \min\{n: n\in\mathbb{N}, n\geq\alpha\}$.
For any $X$,
$(i)~ b_{q,j}(X) \geq 0 ~ \forall j$,
$(ii)~ \sum_{j=1}^{J}b_{q,j}(X) = 1$,
$(iii)~ b_{q,j}$ is positive only inside the interval $[t_{j},t_{j+q+1}]$,
and $(iv)$ for $j\in\{(q+1), (q+2), \dots, (q+K)\}$, for any $X\in (t_j,t_{j+1})$, only $(q+1)$ B-splines $b_{q,j-q}(X),b_{q,j-q+1}(X),\dots,b_{q,j}(X)$ are positive.
Using these local support properties of B-splines, the results on page 147 of de Boor (2000) can be modified to show that, for any $v_{0}\in{\cal C}_{+}[A,B]$,
\\
\begin{eqnarray*}
\inf_{v_{s}\in {\cal S}_{q,J}} ||v_{0}-v_{s}||_{\infty} \leq \lfloor (q+1)/2\rfloor ~ \psi(v_{0},\Delta_{\max}) \rightarrow 0 ~~\hbox{as}~\Delta_{\max} \rightarrow 0.
\end{eqnarray*}
\\
Also, if $q\geq(\alpha-1)$, we can modify the results on page 149 of de Boor (2000) to show that, for any $v\in {\cal C}_{+}^{\alpha}[A,B]$,
\\
\begin{eqnarray*}
\inf_{v_{s}\in {\cal S}_{q,J}} ||v_{0}-v_{s}||_{\infty} &\leq& c(q) c(q-1) \dots c(q-\alpha_0+1) ~ ||v_{0}^{(\alpha_0)}||_{\infty} ~ \Delta_{\max}^{\alpha_0},
\end{eqnarray*}
\\
where $c(q) = \lfloor (q+1)/2\rfloor$.
For any two functions $g_1$ and $g_2$, $\sup| g_1g_2| \leq \sup |g_1|\sup|g_2|$.
Taking $g_1(X,X')= \{v_{0}^{(\alpha_0)}(X)-v_{0}^{(\alpha_0)}(X')\}/(X-X')^{(\alpha-\alpha_0)}$ and $g_2(X,X') = (X-X')^{(\alpha-\alpha_0)}$, we have
$||v_{0}^{(\alpha_0)}||_{\infty} \leq ||v_{0}||_{\alpha} (B-A)^{(\alpha-\alpha_0)}$.
Therefore,
\\
\begin{eqnarray*}
\inf_{v_{s}\in {\cal S}_{q,J}} ||v_{0}-v_{s}||_{\infty} &\leq& c(q,\alpha_{0}) ~ (B-A)^{(\alpha-\alpha_0)} ~ ||v_{0}||_{\alpha} ~ \Delta_{\max}^{\alpha_0}.
\end{eqnarray*}
\\
Furthermore, when the knot points $\{t_{q+1+j}\}_{j=0}^{K}$ are equidistant
\\
\begin{eqnarray*}
\inf_{v_{s}\in {\cal S}_{q,J}} ||v_{0}-v_{s}||_{\infty} \leq c(q,\alpha_{0}) ||v_{0}^{(\alpha)}||_{\infty} \frac{(B-A)^{\alpha}}{K^{\alpha_0}}\leq c(q,\alpha)||v_{0}||_{\alpha}K^{-\alpha}.
\end{eqnarray*}
Given any $v_{0}\in C_{+}[A,B] (\hbox{or}~C_{+}^{\alpha}[A,B])$ and $\delta>0$, find $J\in \mathbb{N}_{q}$ and $\mbox{\boldmath $\beta$}_{0}\in \mathbb{R}^{+J}$ such that
$||v_{0}-{\mathbf B}_{q,J}\mbox{\boldmath $\beta$}_{0}||_{\infty} = \inf_{v_{s}\in {\cal S}_{q,J}} ||v_{0}-v_{s}||_{\infty}<\delta/2$.
Next consider a neighborhood $N_{\eta}(\mbox{\boldmath $\beta$}_{0})$ such that for any $\mbox{\boldmath $\beta$}\in N_{\eta}(\mbox{\boldmath $\beta$}_{0})$, we have
$||{\mathbf B}_{q,J}\mbox{\boldmath $\beta$}-{\mathbf B}_{q,J}\mbox{\boldmath $\beta$}_{0}||_{\infty} < \delta/2$.
Then for any $\mbox{\boldmath $\beta$}\in N_{\eta}(\mbox{\boldmath $\beta$}_{0})$, we have
$||{\mathbf B}_{q,J}\mbox{\boldmath $\beta$}-v_{0}||_{\infty} \leq ||{\mathbf B}_{q,J}\mbox{\boldmath $\beta$}-{\mathbf B}_{q,J}\mbox{\boldmath $\beta$}_{0}||_{\infty} + ||{\mathbf B}_{q,J}\mbox{\boldmath $\beta$}_{0}-v_{0}||_{\infty} <\delta$.
Also $\Pi_{{\mathbf V}}(||v-v_{0}||_{\infty} <\delta) \geq \Pi_{q}(J) ~ \Pi_{\beta\vert J}\{N_{\eta}(\mbox{\boldmath $\beta$}_{0})\} > 0$.
Proof of Lemma \ref{Lem: mvt sup norm support of priors on variance functions} then follows as a special case
taking $\mbox{\boldmath $\beta$} = \exp(\mbox{\boldmath $\xi$})$ and taking $\Pi_{q}$ and $\Pi_{\beta\vert J}$ to be the priors on $J$ and $\mbox{\boldmath $\beta$}$ induced by $P_{0}(K)$ and $P_{0}(\mbox{\boldmath $\xi$}|K,\sigma_{\xi}^{2})$, respectively.
\subsection{Proof of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}}
We first prove some additional lemmas to be used in the proof of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}.
\begin{Lem}
$\Pi_{{\mathbf V}}(||v-v_{0}||_{\infty} <\delta)>0 ~ \forall \delta>0$ implies that
$\Pi_{{\mathbf V}}(||g\circ {v}-g\circ {v}_{0}||_{\infty} <\delta)>0 ~ \forall \delta>0$ for every continuous function $g: \mathbb{R} \rightarrow \mathbb{R}$.
\end{Lem}
\begin{proof}
Let ${v}:[A,B] \rightarrow [C_1,D_1]$ and ${v}_{0}:[A,B]\rightarrow [C_2,D_2]$.
Then $({v}-{v}_{0}):[A,B] \rightarrow [C_1-D_2,D_1-C_2]=[C,D]$, say.
Then $g:[C,D]\rightarrow \mathbb{R}$ is a uniformly continuous function.
Therefore, given any $\delta>0$, there exists an $\eta>0$ such that $|g(Z_1)-g(Z_2)|<\delta$ whenever $|Z_1-Z_2|<\eta$.
Now let $||{v}-{v}_{0}||_{\infty} = \sup_{X\in[A,B]} |{v}(X)-{v}_{0}(X)| <\eta$.
This implies, for all $X\in[A,B]$, $|{v}(X)-{v}_{0}(X)| <\eta$.
Therefore, for all $X\in[A,B]$, $|g\{{v}(X)\}-g\{{v}_{0}(X)\}|<\delta$, and hence $||g\circ{v}-g\circ{v}_{0}||_{\infty}\leq\delta$.
Hence the proof.
\end{proof}
\begin{Cor} \label{Cor: mvt sup norm support of priors on sqrt of variance functions}
In particular, taking $g(Z)=Z^{1/2} ~ \forall Z>0$ and $g(Z) = 0$ otherwise, we have $\Pi_{{\mathbf V}}( ||v^{1/2}-v_{0}^{1/2}||_{\infty} <\delta) = \Pi_{{\mathbf V}}( ||s-s_{0}||_{\infty} <\delta)>0 ~ \forall \delta>0$ for all ${v}_{0}\in {\cal C}_{+}[A,B] (\hbox{or}~ {\cal C}_{+}^{\alpha}[A,B])$.
\end{Cor}
Let $P_{\mbox{\boldmath $\epsilon$},K} \{(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})\vert \mbox{\boldmath $\pi$}_{1:K},\mbox{\boldmath $\mu$}_{1:K},\mbox{\boldmath $\Sigma$}_{1:K}\} = \sum_{k=1}^{K}\pi_{k}\delta_{(\mbox{\boldmath $\mu$}_{k},\mbox{\boldmath $\Sigma$}_{k})}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$, where $\delta_{\mbox{\boldmath $\theta$}}$ denotes a point mass at $\mbox{\boldmath $\theta$}$.
We have, with the hyper-parameters implicit, $P_{0}(\mbox{\boldmath $\pi$}_{1:K},\mbox{\boldmath $\mu$}_{1:K},\mbox{\boldmath $\Sigma$}_{1:K}) = P_{0\pi}(\mbox{\boldmath $\pi$}_{1:K}) P_{0\mu}(\mbox{\boldmath $\mu$}_{1:K}\vert \mbox{\boldmath $\pi$}_{1:K}) P_{0\Sigma}(\mbox{\boldmath $\Sigma$}_{1:K})$.
We denote $P_{\mbox{\boldmath $\epsilon$},K} \{(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})\vert \mbox{\boldmath $\pi$}_{1:K},\mbox{\boldmath $\mu$}_{1:K},\mbox{\boldmath $\Sigma$}_{1:K}\}$ simply by $P_{\mbox{\boldmath $\epsilon$},K} (\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$.
Let $c$ be a generic for constants that are not of direct interest.
For any square matrix ${\mathbf A}$ of order $p$, let $\lambda_{1}({\mathbf A})\leq\dots\leq\lambda_{p}({\mathbf A})$ denote the ordered eigenvalues of ${\mathbf A}$.
The following lemma proves some properties of $P_{\mbox{\boldmath $\epsilon$},K}$ and $f_{\mbox{\boldmath $\epsilon$}}$.
\begin{Lem} \label{Lem: mvt moments of fe}
1. $\int \norm{\mbox{\boldmath $\mu$}}_{2}^{2}dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) < \infty$ a.s.
~~~2. $\int \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$}) dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) < \infty$ a.s. \\
3. $\int \abs{\mbox{\boldmath $\Sigma$}}^{-1/2}dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) < \infty$ a.s.
\end{Lem}
\begin{proof}
1.
The prior $P_{0\mu}(\mbox{\boldmath $\mu$}_{1:K}\vert \mbox{\boldmath $\pi$}_{1:K})$ is of the form (\ref{eq: conditional mvt normal posterior of the mean vector}), that is,
$P_{0\mu}(\mbox{\boldmath $\mu$}_{1:K}\vert \mbox{\boldmath $\pi$}_{1:K}) = {\cal M}VN_{Kp}({\mathbf 0},\mbox{\boldmath $\Sigma$}^{0}-\mbox{\boldmath $\Sigma$}_{1,R}^{0}\mbox{\boldmath $\Sigma$}_{R,R}^{-1}\mbox{\boldmath $\Sigma$}_{R,1}^{0})$,
where $\mbox{\boldmath $\Sigma$}^{0}$ is a $Kp\times Kp$ block-diagonal matrix independent of $\mbox{\boldmath $\pi$}_{1:K}$, all $K$ principal blocks of order $p\times p$ being $\mbox{\boldmath $\Sigma$}_{0}$.
The matrix $\mbox{\boldmath $\Sigma$}_{1,R}^{0}\mbox{\boldmath $\Sigma$}_{R,R}^{-1}\mbox{\boldmath $\Sigma$}_{R,1}^{0}$ depends on $\mbox{\boldmath $\pi$}_{1:K}$ and is nonnegative definite so that its diagonal elements are all nonnegative.
Let $\mbox{\boldmath $\Sigma$}_{0}=((\sigma_{0,ij}))$ and $\mbox{\boldmath $\Sigma$}_{1,R}^{0}\mbox{\boldmath $\Sigma$}_{R,R}^{-1}\mbox{\boldmath $\Sigma$}_{R,1}^{0} = ((\sigma_{R,ij}))$.
Then,
$\int \norm{\mbox{\boldmath $\mu$}_{k}}_{2}^{2} dP_{0\mu}(\mbox{\boldmath $\mu$}_{1:K}\vert \mbox{\boldmath $\pi$}_{1:K}) = \left\{ \sum_{j=1}^{p}\sigma_{0,jj} - \sum_{j=(k-1)p+1}^{kp}\sigma_{R,jj} \right\} \leq \sum_{j=1}^{p}\sigma_{0,jj} = \hbox{trace}(\mbox{\boldmath $\Sigma$}_{0})$.
Therefore,
\\
\begin{eqnarray*}
&&\hspace{-1cm}
\int \int \norm{\mbox{\boldmath $\mu$}}_{2}^{2}dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) dP_{0}(\mbox{\boldmath $\pi$}_{1:K},\mbox{\boldmath $\mu$}_{1:K},\mbox{\boldmath $\Sigma$}_{1:K})
= \sum_{k=1}^{K} \int \pi_{k} \norm{\mbox{\boldmath $\mu$}_{k}}_{2}^{2} dP_{0\mu}(\mbox{\boldmath $\mu$}_{1:K}\vert \mbox{\boldmath $\pi$}_{1:K}) dP_{0\pi}(\mbox{\boldmath $\pi$}_{1:K}) \\
&& \leq \hbox{trace}(\mbox{\boldmath $\Sigma$}_{0}) <\infty.
\end{eqnarray*}
\noindent 2.
We have $\int \int \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$}) dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) dP_{0}(\mbox{\boldmath $\pi$}_{1:K},\mbox{\boldmath $\mu$}_{1:K},\mbox{\boldmath $\Sigma$}_{1:K}) = \int \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$}) dP_{0\Sigma}(\mbox{\boldmath $\Sigma$})$.
When $\mbox{\boldmath $\Sigma$} \sim \hbox{IW}_{p}(\nu_{0},\mbox{\boldmath $\Psi$}_{0})$, we have $\mbox{\boldmath $\Psi$}_{0}^{-1/2}\mbox{\boldmath $\Sigma$}^{-1}\mbox{\boldmath $\Psi$}_{0}^{-1/2} \sim \hbox{Wish}_{p}(\nu_{0},\hbox{I})$ and $\hbox{trace}(\mbox{\boldmath $\Psi$}_{0}^{-1}\mbox{\boldmath $\Sigma$}^{-1}) = \hbox{trace}(\mbox{\boldmath $\Psi$}_{0}^{-1/2}\mbox{\boldmath $\Sigma$}^{-1}\mbox{\boldmath $\Psi$}_{0}^{-1/2}) \sim \chi^{2}_{p\nu_{0}}$.
Here $\hbox{Wish}_{p}(\nu,\mbox{\boldmath $\Psi$})$ denotes a Wishart distribution with degrees of freedom $\nu$ and mean $\nu\mbox{\boldmath $\Psi$}$.
For any two positive semidefinite matrices ${\mathbf A}$ and ${\mathbf B}$, we have
$\lambda_{1}({\mathbf A})\hbox{trace}({\mathbf B}) \leq \hbox{trace}({\mathbf A}{\mathbf B}) \leq \lambda_{p}({\mathbf A}) \hbox{trace}({\mathbf B})$.
Therefore,
$\lambda_{1}(\mbox{\boldmath $\Psi$}_{0}^{-1}) E \{\hbox{trace}(\mbox{\boldmath $\Sigma$}^{-1})\} \leq E\{\hbox{trace}(\mbox{\boldmath $\Psi$}_{0}^{-1}\mbox{\boldmath $\Sigma$}^{-1})\} = p\nu_{0}$.
Hence, $\int \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$}) dP_{0\Sigma}(\mbox{\boldmath $\Sigma$}) = E \lambda_{p}(\mbox{\boldmath $\Sigma$}^{-1}) \leq E\{\hbox{trace}(\mbox{\boldmath $\Sigma$}^{-1})\} < \infty$.
When $\mbox{\boldmath $\Sigma$} = \mbox{\boldmath $\Omega$} + \mbox{\boldmath $\Lambda$} \mbox{\boldmath $\Lambda$}^{\rm T}$ with $\mbox{\boldmath $\Omega$} = \hbox{diag}(\sigma_{1}^{2},\dots,\sigma_{p}^{2})$, we have
$\hbox{trace}(\mbox{\boldmath $\Sigma$}^{-1}) = \hbox{trace}\{\mbox{\boldmath $\Omega$}^{-1}-\mbox{\boldmath $\Omega$}^{-1}\mbox{\boldmath $\Gamma$}(\hbox{I}_{p}+\mbox{\boldmath $\Gamma$}^{\rm T}\mbox{\boldmath $\Omega$}^{-1}\mbox{\boldmath $\Gamma$})^{-1}\mbox{\boldmath $\Gamma$}^{\rm T}\mbox{\boldmath $\Omega$}^{-1}\}
\leq \hbox{trace}(\mbox{\boldmath $\Omega$}^{-1}) = \sum_{j=1}^{p}\sigma_{j}^{-2}$,
where $\mbox{\boldmath $\Gamma$}$ is a $p\times p$ matrix satisfying $\mbox{\boldmath $\Gamma$}\mbox{\boldmath $\Gamma$}^{\rm T} = \mbox{\boldmath $\Lambda$}\mbox{\boldmath $\Lambda$}^{\rm T}$.
Thus, $\int \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$}) dP_{0\Sigma}(\mbox{\boldmath $\Sigma$}_{1:K}) = E \lambda_{p}(\mbox{\boldmath $\Sigma$}^{-1}) \leq E\{\hbox{trace}(\mbox{\boldmath $\Sigma$}^{-1})\} \leq \sum_{j=1}^{p}E\sigma_{j}^{-2} < \infty$
whenever $\sigma_{j}^{2}\sim\hbox{Inv-Ga}(a,b)$ with $a>1$.
\noindent 3.
When $\mbox{\boldmath $\Sigma$} \sim \hbox{IW}_{p}(\nu_{0},\mbox{\boldmath $\Psi$}_{0})$, we have $\lambda_{1}^{p/2}(\mbox{\boldmath $\Psi$}_{0}^{-1}) E \{\hbox{trace}(\mbox{\boldmath $\Sigma$}^{-1})\}^{p/2} \leq E\{\hbox{trace}(\mbox{\boldmath $\Psi$}_{0}^{-1}\mbox{\boldmath $\Sigma$}^{-1})\}^{p/2} < \infty$.
Hence, $\int \abs{\mbox{\boldmath $\Sigma$}}^{-1/2} dP_{0\Sigma}(\mbox{\boldmath $\Sigma$}) = \int \prod_{j=1}^{p}\lambda_{j}^{1/2}(\mbox{\boldmath $\Sigma$}^{-1})dP_{0\Sigma}(\mbox{\boldmath $\Sigma$})
\leq \int \lambda_{p}^{p/2}(\mbox{\boldmath $\Sigma$}^{-1}) dP_{0\Sigma}(\mbox{\boldmath $\Sigma$}) = E \lambda_{p}^{p/2}(\mbox{\boldmath $\Sigma$}^{-1}) \leq E\{\hbox{trace}(\mbox{\boldmath $\Sigma$}^{-1})\}^{p/2} < \infty$.
For any two positive semidefinite matrices ${\mathbf A}$ and ${\mathbf B}$, we have $\abs{{\mathbf A}+{\mathbf B}}\geq \abs{{\mathbf A}}$.
Therefore, when $\mbox{\boldmath $\Sigma$} = \mbox{\boldmath $\Omega$} + \mbox{\boldmath $\Lambda$} \mbox{\boldmath $\Lambda$}^{\rm T}$, we have
$\int \abs{\mbox{\boldmath $\Sigma$}}^{-1/2} dP_{0\Sigma}(\mbox{\boldmath $\Sigma$}_{1:K}) \leq \int \abs{\mbox{\boldmath $\Omega$}}^{-1/2}dP_{0\Sigma}(\mbox{\boldmath $\Sigma$}_{1:K})
= \int \prod_{j=1}^{p}\sigma_{j}^{-1}dP_{0\Sigma}(\mbox{\boldmath $\Sigma$}_{1:K}) = \prod_{j=1}^{p}E\sigma_{j}^{-1} < \infty$, whenever $\sigma_{j}^{2}\sim\hbox{Inv-Ga}(a,b)$ independently.
\end{proof}
The following lemma proves a property of $f_{\mbox{\boldmath $\epsilon$}}= \int \int f_{c\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$}\vert \mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})dP_{0}(K)$.
Here $P_{0}(K)$ denotes the prior on $K$, the number of mixture components.
\begin{Lem} \label{Lem: mvt limit of KL divergence of involving fe}
Let $f_{0\mbox{\boldmath $\epsilon$}}\in\widetilde{\cal F}_{\mbox{\boldmath $\epsilon$}}$ and $f_{\mbox{\boldmath $\epsilon$}} \sim \Pi_{\mbox{\boldmath $\epsilon$}}$ and ${\mathbf D}(\mbox{\boldmath $\tau$})=\hbox{diag}(\tau_{1},\tau_{2},\dots,\tau_{p})$.
Then
\\
\begin{eqnarray*}
\lim_{\mbox{\boldmath $\tau$}\rightarrow {\mathbf 1}} \int f_{0\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$}) ~ \hbox{log} \left[ \frac{f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})}{\abs{{\mathbf D}(\mbox{\boldmath $\tau$})}^{-1}f_{\mbox{\boldmath $\epsilon$}}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}\}} \right]~ d\mbox{\boldmath $\epsilon$} = 0.
\end{eqnarray*}
\end{Lem}
\begin{proof}
We have $\abs{{\mathbf D}(\mbox{\boldmath $\tau$})}^{-1} f_{c\mbox{\boldmath $\epsilon$}}\{{\mathbf D}(\mbox{\boldmath $\tau$})\mbox{\boldmath $\epsilon$}\} \to f_{c\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})$ as $\mbox{\boldmath $\tau$}\to{\mathbf 1}$.
Since $\mbox{\boldmath $\tau$}\rightarrow {\mathbf 1}$, without loss of generality, we may assume $\abs{{\mathbf D}(\mbox{\boldmath $\tau$})}>1/2$.
Define $c = \int \abs{\mbox{\boldmath $\Sigma$}}^{-1/2}dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$. Then $c < \infty$.
Also $\int \abs{{\mathbf D}(\mbox{\boldmath $\tau$})}^{-1} f_{c\mbox{\boldmath $\epsilon$}}\{{\mathbf D}(\mbox{\boldmath $\tau$})\mbox{\boldmath $\epsilon$}\vert \mbox{\boldmath $\theta$}\} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \leq \int 2(2\pi)^{-p/2}\abs{\mbox{\boldmath $\Sigma$}}^{-1/2} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) < 2c < \infty$.
Applying DCT, $\abs{{\mathbf D}(\mbox{\boldmath $\tau$})}^{-1} f_{\mbox{\boldmath $\epsilon$}}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}\} \rightarrow f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})$ as $\mbox{\boldmath $\tau$} \rightarrow {\mathbf 1}$.
Therefore, for any $\mbox{\boldmath $\epsilon$}\in \mathbb{R}^{p}$,
\\
\begin{eqnarray*}
\hbox{log} \left[\frac{f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})}{\abs{{\mathbf D}(\mbox{\boldmath $\tau$})}^{-1}f_{\mbox{\boldmath $\epsilon$}}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}\}}\right] \rightarrow 0 ~~~\hbox{as}~ \mbox{\boldmath $\tau$} \rightarrow {\mathbf 1}.
\end{eqnarray*}
\\
To find an upper bound, integrable with respect to $f_{0\mbox{\boldmath $\epsilon$}}$, for
$\hbox{log} \left[\abs{{\mathbf D}(\mbox{\boldmath $\tau$})}f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})/f_{\mbox{\boldmath $\epsilon$}}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}\}\right]$, we use Lemma \ref{Lem: mvt moments of fe}.
To do so, we can ignore the prior $P_{0}(K)$
since the upper bounds obtained in Lemma \ref{Lem: mvt moments of fe} do not depend on the specific choice of $K$.
We have, using part 3 of Lemma \ref{Lem: mvt moments of fe},
\\
\begin{eqnarray*}
\int \abs{\mbox{\boldmath $\Sigma$}}^{-1/2}\exp\left[-\frac{1}{2}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\}^{\rm T}\mbox{\boldmath $\Sigma$}^{-1}\{{\mathbf D}(\mbox{\boldmath $\tau$})\mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\}\right] dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \\
\leq \int \abs{\mbox{\boldmath $\Sigma$}}^{-1/2} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \leq c.
\end{eqnarray*}
\\
Since $\mbox{\boldmath $\tau$} \rightarrow {\mathbf 1}$, without loss of generality we may also assume $\tau_{k}<2$ for all $k$.
Therefore,
\\
\begin{eqnarray*}
&& \hspace{-0.5cm} |\hbox{log} ~ f_{\mbox{\boldmath $\epsilon$}}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}\}| \\
&& \leq \hbox{log} (2\pi)^{p/2} + \left| \hbox{log} \int \abs{\mbox{\boldmath $\Sigma$}}^{-1/2}\exp\left[-\frac{1}{2}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\}^{\rm T}\mbox{\boldmath $\Sigma$}^{-1}\{{\mathbf D}(\mbox{\boldmath $\tau$})\mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\}\right] dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \right| \\
&&\leq \hbox{log} (2\pi)^{p/2} + |\hbox{log}~c| \\
&&~~~~~~~~~~~ - \hbox{log} \int c^{-1}\abs{\mbox{\boldmath $\Sigma$}}^{-1/2}\exp\left[-\frac{1}{2}\{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\}^{\rm T}\mbox{\boldmath $\Sigma$}^{-1}\{{\mathbf D}(\mbox{\boldmath $\tau$})\mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\}\right] dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \\
&&\leq \hbox{log} \{c(2\pi)^{p/2}\} + |\hbox{log}~c| \\
&&~~~~~~~~~~~ + \frac{1}{2} \int \hbox{log} \abs{\mbox{\boldmath $\Sigma$}} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) + \frac{1}{2} \int \{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\}^{\rm T}\mbox{\boldmath $\Sigma$}^{-1}\{{\mathbf D}(\mbox{\boldmath $\tau$})\mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}\} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \\
&&\leq \hbox{log} \{c(2\pi)^{p/2}\} + |\hbox{log}~c| \\
&&~~~~~~~~~~~ + \frac{1}{2} \int \hbox{log} \abs{\mbox{\boldmath $\Sigma$}} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) + \frac{1}{2} \int \norm{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}-\mbox{\boldmath $\mu$}}_{2}^{2}\lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$})dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \\
&&\leq \hbox{log} \{c(2\pi)^{p/2}\} + |\hbox{log}~c| \\
&&~~~~~~~~~~~ + \frac{1}{2} \int \hbox{log} \abs{\mbox{\boldmath $\Sigma$}} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) + \int \{\norm{{\mathbf D}(\mbox{\boldmath $\tau$}) \mbox{\boldmath $\epsilon$}}_{2}^{2}+\norm{\mbox{\boldmath $\mu$}}_{2}^{2}\}\lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$})dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \\
&&\leq \hbox{log} \{c(2\pi)^{p/2}\} + |\hbox{log}~c| + \frac{1}{2} \int \hbox{log} \abs{\mbox{\boldmath $\Sigma$}} dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \\
&&~~~~~~~~~~~+ \norm{2\mbox{\boldmath $\epsilon$}}_{2}^{2} \int \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$})dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) + \int \norm{\mbox{\boldmath $\mu$}}_{2}^{2}dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}) \int \lambda_{1}^{-1}(\mbox{\boldmath $\Sigma$})dP_{\mbox{\boldmath $\epsilon$},K}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$}),
\end{eqnarray*}
\\
where the third step followed from application of Jensen's inequality on $g(Z) = -\hbox{log}~Z$.
The regularity assumptions on $f_{0\mbox{\boldmath $\epsilon$}}$ and Lemma \ref{Lem: mvt moments of fe} imply that the RHS above is $f_{0\mbox{\boldmath $\epsilon$}}$ integrable.
The conclusion of Lemma \ref{Lem: mvt limit of KL divergence of involving fe} follows from an application of DCT again.
\end{proof}
To prove Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}, let $f_{{\mathbf U}\vert {\mathbf S}}$ denote the density of ${\mathbf U} = {\mathbf S}({\mathbf X}) \mbox{\boldmath $\epsilon$}$,
where ${\mathbf S} = \hbox{diag}(s_{1},\dots,s_{p})$.
Then $f_{{\mathbf U}\vert {\mathbf X}} = f_{{\mathbf U}\vert {\mathbf S}({\mathbf X})}$.
We have $f_{{\mathbf U}\vert {\mathbf S}}({\mathbf U}) = \abs{{\mathbf S}}^{-1} f_{\mbox{\boldmath $\epsilon$}}({\mathbf S}^{-1}{\mathbf U})$.
This implies
\\
\begin{eqnarray*}
\int f_{0{\mathbf U}\vert {\mathbf S}_{0}}({\mathbf U}) \hbox{log}\frac{f_{0{\mathbf U}\vert {\mathbf S}_{0}}({\mathbf U})}{f_{{\mathbf U}\vert {\mathbf S}}({\mathbf U})} d{\mathbf U} = \int f_{0{\mathbf U}\vert {\mathbf S}_{0}}({\mathbf U}) \hbox{log}\frac{f_{0{\mathbf U}\vert {\mathbf S}_{0}}({\mathbf U})}{f_{{\mathbf U}\vert {\mathbf S}_{0}}({\mathbf U})} d{\mathbf U} + \int f_{0{\mathbf U}\vert {\mathbf S}_{0}}({\mathbf U}) \hbox{log}\frac{f_{{\mathbf U}\vert {\mathbf S}_{0}}({\mathbf U})}{f_{{\mathbf U}\vert {\mathbf S}}({\mathbf U})} d{\mathbf U} \\
= \int f_{0\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$}) \hbox{log}\frac{f_{0\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})}{f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})} d\mbox{\boldmath $\epsilon$} + \int f_{0\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$}) \hbox{log} \frac{f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})}{\abs{{\mathbf S}}^{-1}\abs{{\mathbf S}_{0}}f_{\mbox{\boldmath $\epsilon$}}({\mathbf S}^{-1}{\mathbf S}_{0}\mbox{\boldmath $\epsilon$})} d\mbox{\boldmath $\epsilon$}.
\end{eqnarray*}
\\
Let $\delta>0$ be given.
By part 2 of Lemma \ref{Lem: mvt KL support of the priors}, $\Pi_{\mbox{\boldmath $\epsilon$}}\{f_{\mbox{\boldmath $\epsilon$}}: d_{KL}(f_{0\mbox{\boldmath $\epsilon$}},f_{\mbox{\boldmath $\epsilon$}})<\delta/2\}>0$.
Let ${\mathbf s} = (s_{1},\dots,s_{p})^{\rm T}$ and ${\mathbf s}_{0} = (s_{01},\dots,s_{0p})^{\rm T}$.
By Lemma \ref{Lem: mvt limit of KL divergence of involving fe}, there exists $\eta>0$ such that $\norm{{\mathbf s}_{0}-{\mathbf s}}_{\infty}<\eta$ implies
$\int f_{0\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$}) ~ \hbox{log} [f_{\mbox{\boldmath $\epsilon$}}(\mbox{\boldmath $\epsilon$})/\{\abs{{\mathbf S}}^{-1}\abs{{\mathbf S}_{0}} f_{\mbox{\boldmath $\epsilon$}}({\mathbf S}^{-1}{\mathbf S}_{0}\mbox{\boldmath $\epsilon$})\}] ~ d\mbox{\boldmath $\epsilon$}<\delta/2$ for every $f_{\mbox{\boldmath $\epsilon$}}\sim \Pi_{\mbox{\boldmath $\epsilon$}}$.
Using a straightforward multivariate extension of Corollary \ref{Cor: mvt sup norm support of priors on sqrt of variance functions},
we have $\Pi_{{\mathbf V}}(||{\mathbf s}_{0}-{\mathbf s}||_{\infty}<\eta) > 0$.
Combining these results,
$\Pi_{{\mathbf U}\vert {\mathbf V}}\{\sup_{{\mathbf X}\in{\cal X}}d_{KL}(f_{0{\mathbf U}\vert {\mathbf X}},f_{{\mathbf U}\vert {\mathbf X}})<\delta\} \geq \Pi_{\mbox{\boldmath $\epsilon$}}\{d_{KL}(f_{0\mbox{\boldmath $\epsilon$}},f_{\mbox{\boldmath $\epsilon$}})<\delta/2\} ~ \Pi_{{\mathbf V}}(||{{\mathbf s}}_{0}-{\mathbf s}||_{\infty}<\eta) > 0$.
Hence the proof of part 2 of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}.
Part 1 of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X} follows trivially from part 2 of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}
since $||{\mathbf s}_{0}-{\mathbf s}||_{\infty}<\eta$ implies $\norm{{\mathbf s}_{0}({\mathbf X})-{\mathbf s}({\mathbf X})}_{\infty}<\eta$ for any ${\mathbf X}\in {\cal X}$.
To prove part 3 of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}, note that
\\
\begin{eqnarray*}
&& \hspace{-1cm} d_{KL}(f_{0,{\mathbf X},{\mathbf U}},f_{{\mathbf X},{\mathbf U}}) = \int_{{\cal X}\times \mathbb{R}^{p}} f_{0,{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X}) f_{0,{\mathbf X}}({\mathbf X}) ~\hbox{log}\frac{f_{0,{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X})f_{0,{\mathbf X}}({\mathbf X})}{f_{{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X})f_{\mathbf X}({\mathbf X})} d{\mathbf X} d{\mathbf U} \\
&=& \int_{{\cal X}} f_{0,{\mathbf X}}({\mathbf X}) \int_{\mathbb{R}^{p}}f_{0,{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X}) ~\hbox{log}\frac{f_{0,{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X})}{f_{{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X})} d{\mathbf U} d{\mathbf X} + \int_{{\cal X}} f_{0,{\mathbf X}}({\mathbf X}) ~\hbox{log}\frac{f_{0,{\mathbf X}}({\mathbf X})}{f_{{\mathbf X}}({\mathbf X})} d{\mathbf X} \\
&\leq& \sup_{{\mathbf X}\in{\cal X}} d_{KL}\{f_{0,{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X}),f_{{\mathbf U}\vert {\mathbf X}}({\mathbf U}\vert {\mathbf X})\} + d_{KL}(f_{0{\mathbf X}},f_{{\mathbf X}}).
\end{eqnarray*}
\\
Part 3 of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X} now follows from part 2 of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}
and part 1 of Lemma \ref{Lem: mvt KL support of the priors}.
\subsection{Proof of Theorem \ref{Thm: mvt L1 support of induced prior on density of W}}
Let $d_{H}(f_{0},f) = [\int \{f_{0}^{1/2}({\mathbf Z}) - f^{1/2}({\mathbf Z})\}^{2}d{\mathbf Z}]^{1/2}$ denote the Hellinger distance between any two densities $f_{0}$ and $f$.
From Chapter 1 of Ghosh and Ramamoorthi (2010), we have
\\
\begin{eqnarray}
d_{H}^{2}(f_{0},f) \leq ||f_{0}-f||_{1} \leq 2~d_{KL}^{1/2}(f_{0},f). \label{eq: mvt inequalities among distances 1}
\end{eqnarray}
\\
Using (\ref{eq: mvt inequalities among distances 1}), we have,
\\
\begin{eqnarray*}
&~&\hspace{-1cm} ||f_{0{\mathbf W}}-f_{{\mathbf W}}||_{1} = \int |f_{0{\mathbf W}}({\mathbf W})-f_{{\mathbf W}}({\mathbf W})|d{\mathbf W} \\
&=& \int \left|\int f_{0{\mathbf X}}({\mathbf X})f_{0{\mathbf W}\vert {\mathbf X}}({\mathbf W})d{\mathbf X} - \int f_{{\mathbf X}}({\mathbf X})f_{{\mathbf W}\vert {\mathbf X}}({\mathbf W})d{\mathbf X}\right| d{\mathbf W} \\
&\leq& \int \left|\int f_{0{\mathbf X}}({\mathbf X})f_{0{\mathbf W}\vert {\mathbf X}}({\mathbf W})d{\mathbf X} - \int f_{{\mathbf X}}({\mathbf X})f_{0{\mathbf W}\vert {\mathbf X}}({\mathbf W})d{\mathbf X}\right|d{\mathbf W} \\
&& + \int\left|\int f_{{\mathbf X}}({\mathbf X})f_{0{\mathbf W}\vert {\mathbf X}}({\mathbf W})d{\mathbf X} - \int f_{{\mathbf X}}({\mathbf X})f_{{\mathbf W}\vert {\mathbf X}}({\mathbf W})d{\mathbf X}\right|d{\mathbf W} \\
&\leq& \int \int |f_{0{\mathbf X}}({\mathbf X})-f_{{\mathbf X}}({\mathbf X})| f_{0{\mathbf W}\vert {\mathbf X}}({\mathbf W})d{\mathbf X} d{\mathbf W} \\
&& + \int\int f_{{\mathbf X}}({\mathbf X})|f_{0{\mathbf W}\vert {\mathbf X}}({\mathbf W}) - f_{{\mathbf W}\vert {\mathbf X}}({\mathbf W})| d{\mathbf X} d{\mathbf W} \\
&=& \int |f_{0{\mathbf X}}({\mathbf X})-f_{{\mathbf X}}({\mathbf X})| d{\mathbf X} + \int f_{{\mathbf X}}({\mathbf X}) \int |f_{0{\mathbf W}\vert {\mathbf X}}({\mathbf W}) - f_{{\mathbf W}\vert {\mathbf X}}({\mathbf W})| d{\mathbf W} d{\mathbf X} \\
&=& \int |f_{0{\mathbf X}}({\mathbf X})-f_{{\mathbf X}}({\mathbf X})| d{\mathbf X} + \int f_{{\mathbf X}}({\mathbf X}) \int |f_{0{\mathbf U}\vert {\mathbf X}}({\mathbf W}-{\mathbf X}) - f_{{\mathbf U}\vert {\mathbf X}}({\mathbf W}-{\mathbf X})| d{\mathbf W} d{\mathbf X} \\
&\leq& ||f_{0{\mathbf X}} - f_{{\mathbf X}}||_{1} + \sup_{{\mathbf X}\in{\cal X}} ||f_{0{\mathbf U}\vert {\mathbf X}} - f_{{\mathbf U}\vert {\mathbf X}}||_{1} \\
&\leq& 2~d_{KL}^{1/2}(f_{0{\mathbf X}},f_{{\mathbf X}}) + 2 \sup_{{\mathbf X}\in{\cal X}} d_{KL}^{1/2}(f_{0{\mathbf U}\vert {\mathbf X}},f_{{\mathbf U}\vert {\mathbf X}}).
\end{eqnarray*}
\\
The proof of Theorem \ref{Thm: mvt L1 support of induced prior on density of W} follows by
combining part 1 of Lemma \ref{Lem: mvt KL support of the priors} and part 2 of Lemma \ref{Lem: mvt KL support of the prior on the density of U|X}.
\section{Additional Figures} \label{sec: mvt additional figures}
We first present, in Subsection \ref{sec: mvt additional figures Ind}, some additional figures summarizing the results of the simulation experiments for diagonal covariance matrices discussed in Section \ref{sec: mvt simulation studies} of the main paper.
Then in Subsection \ref{sec: mvt additional figures AR}, we present figures that summarize the results of simulation experiments for covariance matrices with AR structure.
Finally in Subsection \ref{sec: mvt additional figures for EATS data set}, we present some additional figures summarizing the results of the EATS data set analyzed in Section \ref{sec: mvt data analysis} of the main paper.
\subsection{Additional Figures Summarizing the Results of the Simulation Experiments for Diagonal Covariance Structure} \label{sec: mvt additional figures Ind}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=14cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{Trace_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_82.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the MIW (mixtures with inverse Wishart priors) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
See Section \ref{sec: mvt simulation studies} for additional details.
The results correspond to the simulation instance that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets,
when the number of mixture components for $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=6$ and $K_{\mbox{\boldmath $\epsilon$}}=5$.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
The true number of mixture components were $K_{{\mathbf X}} = 3$ and $K_{\mbox{\boldmath $\epsilon$}} = 3$.
As can be seen from Figure \ref{fig: mvt simulation results ES d4 n1000 m3 MIW X1 E1 Ind},
a mixture model with $2$ nonempty clusters can approximate the true density of the scaled errors well.
}
\label{fig: mvt simulation results Trace Plots d4 n1000 m3 MIW X1 E1 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{Trace_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_92.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the MLFA (mixtures of latent factor analyzers) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
See Section \ref{sec: mvt simulation studies} for additional details.
The results correspond to the simulation instance that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets,
when the number of mixture components for $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=6$ and $K_{\mbox{\boldmath $\epsilon$}}=5$.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
The true number of mixture components were $K_{{\mathbf X}} = 3$ and $K_{\mbox{\boldmath $\epsilon$}} = 3$.
As can be seen from Figure \ref{fig: mvt simulation results ES d4 n1000 m3 MLFA X1 E1 Ind},
a mixture model with $2$ nonempty clusters can approximate the true density of the scaled errors well.
}
\label{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 E1 Ind}
\end{figure}
\subsection{Additional Figures Summarizing the Results of the Simulation Experiments for AR Covariance Structure} \label{sec: mvt additional figures AR}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=12cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{VFn_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_AR_simsize_3000_burnin_1000_seedno_104.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the variance functions $s^{2}(X)$ produced by the univariate density deconvolution method for each component of ${\mathbf X}$ for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and component specific covariance matrices with autoregressive structure (AR).
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets for the MIW (mixtures with inverse Wishart priors) method.
For each component of ${\mathbf X}$, the true variance function is $s^{2}(X) = (1+X/4)^{2}$.
See Section \ref{sec: mvt density of heteroscedastic errors} and Section \ref{sec: mvt estimation of variance functions} for additional details.
In each panel, the true (lighter shaded green lines) and the estimated (darker shaded blue lines) variance functions
are superimposed over a plot of subject specific sample means vs subject specific sample variances.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 E1 AR}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{XS_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_AR_simsize_3000_burnin_1000_seedno_104.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the $f_{{\mathbf X}}$ produced by the MIW (mixtures with inverse Wishart priors) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and component specific covariance matrices with autoregressive structure (AR).
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{X_{i},X_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results XS d4 n1000 m3 MIW X1 E1 AR}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{XS_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_AR_simsize_3000_burnin_1000_seedno_103.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the $f_{{\mathbf X}}$ produced by the MLFA (mixtures of latent factor analyzers) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and component specific covariance matrices with autoregressive structure (AR). The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{X_{i},X_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results XS d4 n1000 m3 MLFA X1 E1 AR}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{ES_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_AR_simsize_3000_burnin_1000_seedno_104.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$ produced by the MIW (mixtures with inverse Wishart priors) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and component specific covariance matrices with autoregressive structure (AR). The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{\epsilon_{i},\epsilon_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results ES d4 n1000 m3 MIW X1 E1 AR}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{ES_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_AR_simsize_3000_burnin_1000_seedno_103.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$ produced by the MLFA (mixtures of latent factor analyzers) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and component specific covariance matrices with autoregressive structure (AR).
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{\epsilon_{i},\epsilon_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results ES d4 n1000 m3 MLFA X1 E1 AR}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{Trace_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_AR_simsize_3000_burnin_1000_seedno_104.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the MIW (mixtures with inverse Wishart priors) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and component specific covariance matrices with autoregressive structure (AR).
See Section \ref{sec: mvt simulation studies} for additional details.
The results correspond to the simulation instance that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets,
when the number of mixture components for both $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=6$ and $K_{\mbox{\boldmath $\epsilon$}}=5$.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
The true number of mixture components were $K_{{\mathbf X}} = 3$ and $K_{\mbox{\boldmath $\epsilon$}} = 3$.
As can be seen from Figure \ref{fig: mvt simulation results ES d4 n1000 m3 MIW X1 E1 AR},
a mixture model with $2$ nonempty clusters can approximate the true density of the scaled errors well.
}
\label{fig: mvt simulation results Trace Plots d4 n1000 m3 MIW X1 E1 AR}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{Trace_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_E1_Cov_Struct_AR_simsize_3000_burnin_1000_seedno_103.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the MLFA (mixtures of latent factor analyzers) method for the conditionally heteroscedastic error distribution $f_{\mbox{\boldmath $\epsilon$}}^{(2)}$ with sample size $n=1000$, $m_{i}=3$ replicates for each subject and component specific covariance matrices with autoregressive structure (AR).
See Section \ref{sec: mvt simulation studies} for additional details.
The results correspond to the simulation instance that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets,
when the number of mixture components for $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=6$ and $K_{\mbox{\boldmath $\epsilon$}}=5$.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
The true number of mixture components were $K_{{\mathbf X}} = 3$ and $K_{\mbox{\boldmath $\epsilon$}} = 3$.
As can be seen from Figure \ref{fig: mvt simulation results ES d4 n1000 m3 MLFA X1 E1 AR},
a mixture model with $2$ nonempty clusters can approximate the true density of the scaled errors well.
}
\label{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 E1 AR}
\end{figure}
\subsection{Additional Figures Summarizing the Results for the EATS Data Set Analyzed in Section \ref{sec: mvt data analysis} of the Main Paper} \label{sec: mvt additional figures for EATS data set}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{EATS_DATA_Plots_1_R_CARBO_R_FIBER_R_PROTEIN_R_POTASSIU_Trace_MIW.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the MIW (mixtures with inverse Wishart priors) method for the EATS data example.
See Section \ref{sec: mvt data analysis} for additional details.
The number of mixture components for both $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=K_{\mbox{\boldmath $\epsilon$}}=7$.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
}
\label{fig: mvt EATS data results Trace Plots MIW}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{EATS_DATA_Plots_1_R_CARBO_R_FIBER_R_PROTEIN_R_POTASSIU_Trace_MLFA.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the MLFA (mixtures of latent factor analyzers) method for the EATS data example.
See Section \ref{sec: mvt data analysis} for additional details.
The number of mixture components for both $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=K_{\mbox{\boldmath $\epsilon$}}=7$.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
}
\label{fig: mvt EATS data results Trace Plots MLFA}
\end{figure}
\section{Additional Simulation Experiments} \label{sec: mvt additional simulation studies}
This section presents the results of additional simulation experiments for multivariate t and multivariate Laplace distributed measurement errors.
Cases when $f_{{\mathbf X}}$ is multivariate t or mixture of multivariate t are also considered.
For easy reference, brief descriptions of these distributions are provided below.
\subsection{Multivariate t Distribution}
A random variable $Z$ following a Student's t-distribution with degrees of freedom $\nu$, mean $\mu$ and variance $\nu b/(\nu-2)$ can be represented as $Z=\mu+\nu^{1/2}b^{1/2}X/Y^{1/2}$,
where $Y$ and $X$ are independent, $Y$ follows a chi-square distribution with $\nu$ degrees of freedom, denoted by $Y\sim \chi^{2}_{\nu}$, and $X$ follows a standard normal distribution.
A natural extension to multivariate set up is given by
${\mathbf Z} = \mbox{\boldmath $\mu$} + \nu^{1/2} \mbox{\boldmath $\Sigma$}^{1/2}{\mathbf X}/Y^{1/2}$,
where $Y\sim \chi^{2}_{\nu}$ and ${\mathbf X}\sim {\cal M}VN_{p}({\mathbf 0},{\mathbf I})$ independently.
The random vector ${\mathbf Z}$ is then said to follow a multivariate t-distribution (Kotz and Nadarajah, 2004) with degrees of freedom $\nu$, mean $\mbox{\boldmath $\mu$}$ and covariance $\nu\mbox{\boldmath $\Sigma$}/(\nu-2)$, denoted by ${\cal M}VT_{p}(\nu,\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$.
The above characterization can be used to sample from a ${\cal M}VT_{p}(\nu,\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$ density.
The density of ${\mathbf Z}$ is given by
\\
\begin{eqnarray*}
f_{{\mathbf Z}}({\mathbf z}) = \frac{\Gamma{\{(\nu+p)/2\}}}{\Gamma{(\nu/2)}(\nu\pi)^{p/2}\abs{\mbox{\boldmath $\Sigma$}}^{1/2}} \cdot {\{1+({\mathbf z}-\mbox{\boldmath $\mu$})^{\rm T}\mbox{\boldmath $\Sigma$}^{-1}({\mathbf z}-\mbox{\boldmath $\mu$})/\nu\}^{-(\nu+p)/2}}.
\end{eqnarray*}
\\
The characteristic function is given by
\\
\begin{eqnarray*}
\phi({\mathbf t}) = \exp(i{\mathbf t}^{\rm T}\mbox{\boldmath $\mu$}) \cdot \frac{|| \nu^{1/2}\mbox{\boldmath $\Sigma$}^{1/2}{\mathbf t} ||^{\nu/2}}{2^{\nu/2-1}\Gamma(\nu/2)}\cdot H_{\nu/2}(|| \nu^{1/2}\mbox{\boldmath $\Sigma$}^{1/2}{\mathbf t} ||),~~~{\mathbf t}\in \mathbb{R}^{p},
\end{eqnarray*}
\\
where $H_{\alpha}$ denotes a McDonald's function of order $\alpha(>1/2)$ and admits the integral representation
\\
\begin{eqnarray*}
H_{\alpha}(t) = (2/t)^{\alpha} \cdot \frac{\Gamma(\alpha+1/2)}{\pi^{1/2}} \int_{0}^{\infty}(1+u^{2})^{-(\alpha+1/2)}\cos(tu)\,du,~~~t>0.
\end{eqnarray*}
\\
When $\mbox{\boldmath $\Sigma$}={\mathbf I}$, the identity matrix, the components $Z_{i}$ and $Z_{j}$ are uncorrelated, but not statistically independent.
With $\mbox{\boldmath $\mu$}=(\mu_{1},\dots,\mu_{p})^{\rm T}$ and $\mbox{\boldmath $\Sigma$} = ((\sigma_{ij}))$,
the $i^{th}$ random variable $Z_{i}$ marginally follows a univariate Student's t-distribution with degrees of freedom $\nu$, mean $\mu_{i}$ and variance $\nu\sigma_{ii}/(\nu-2)$.
\subsection{Multivariate Laplace Distribution}
A random variable $Z$ following a Laplace distribution with mean $\mu$ and variance $b$ has the density
\\
\begin{eqnarray*}
f_{Z}(z) = (2b)^{-1/2} \exp(-2^{1/2}b^{-1/2}\abs{z-\mu}).
\end{eqnarray*}
\\
$Z$ can be represented as $Z=\mu+Y^{1/2}b^{1/2}X$,
where $Y$ and $X$ are independent and follow standard exponential and standard normal distributions, respectively.
A natural extension to multivariate set up is given by
${\mathbf Z} = \mbox{\boldmath $\mu$} + Y^{1/2} \mbox{\boldmath $\Sigma$}^{1/2}{\mathbf X}$,
where $Y$ follows a standard exponential density and ${\mathbf X}\sim {\cal M}VN_{p}({\mathbf 0},{\mathbf I})$ independently of $Y$.
The random vector ${\mathbf Z}$ is then said to follow a multivariate Laplace distribution (Eltoft, et al. 2006) with mean $\mbox{\boldmath $\mu$}$ and covariance $\mbox{\boldmath $\Sigma$}$, denoted by ${\cal M}VL_{p}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$.
The above characterization can be used to sample from a ${\cal M}VL_{p}(\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$ density.
The density of ${\mathbf Z}$ is then given by
\\
\begin{eqnarray*}
f_{{\mathbf Z}}({\mathbf z}) = \frac{2}{(2\pi)^{p/2}\abs{\mbox{\boldmath $\Sigma$}}^{1/2}} \cdot \frac{K_{p/2-1}\{2^{1/2}h^{1/2}({\mathbf z})\}}{\{h({\mathbf z})/2\}^{p/4-1/2}},
\end{eqnarray*}
\\
where $h({\mathbf z}) = ({\mathbf z}-\mbox{\boldmath $\mu$})^{\rm T}\mbox{\boldmath $\Sigma$}^{-1}({\mathbf z}-\mbox{\boldmath $\mu$})$ and $K_{m}$ denotes modified Bessel functions of the second kind of order $m$.
Using the asymptotic formula for the Bessel functions, namely $K_{m}(z) \approx \{\pi/(2z)\}^{1/2}\exp(-z)$ as $\abs{z}\to \infty$, we have
\\
\begin{eqnarray}gin{eqnarray*}
f_{{\mathbf Z}}({\mathbf z})
\approx \frac{\pi^{1/2}}{(2\pi)^{p/2}\abs{\mbox{\boldmath ${\cal S}igma$}}^{1/2}} \cdot \frac{2^{(p-1)/4}}{h^{(p-1)/4}({\mathbf z})} \cdot \exp\{-2^{1/2}h^{1/2}({\mathbf z})\}.
\end{eqnarray*}
\\
The characteristic function is given by $\phi({\mathbf t}) = \exp(i{\mathbf t}^{\rm T}\mbox{\boldmath $\mu$})(1+{\mathbf t}^{\rm T}\mbox{\boldmath ${\cal S}igma$}{\mathbf t}/2)^{-1}$ for ${\mathbf t}\in \mathbb{R}^{p}$.
For $p>1$, the density has a singularity at $\mbox{\boldmath $\mu$}$.
When $\mbox{\boldmath $\Sigma$}={\mathbf I}$, the identity matrix, the components $Z_{i}$ and $Z_{j}$ are uncorrelated, but not statistically independent.
With $\mbox{\boldmath $\mu$}=(\mu_{1},\dots,\mu_{p})^{\rm T}$ and $\mbox{\boldmath $\Sigma$} = ((\sigma_{ij}))$,
the $i^{th}$ random variable $Z_{i}$ marginally follows a univariate Laplace distribution with mean $\mu_{i}$ and variance $\sigma_{ii}$.
\subsection{Summary of Results}
The results of the simulation experiments when the measurement errors are distributed according to $f_{\mbox{\boldmath $\epsilon$}}^{(3)}={\rm MVT}_{4}(6,{\mathbf 0},\mbox{\boldmath $\Sigma$})$
and $f_{\mbox{\boldmath $\epsilon$}}^{(4)}={\rm MVL}_{4}({\mathbf 0},\mbox{\boldmath $\Sigma$})$ probability laws independently of ${\mathbf X}$ are presented in Table \ref{tab: mvt MISEs homoscedastic t and Laplace}.
The results for conditionally heteroscedastic measurement errors are presented in Table \ref{tab: mvt MISEs heteroscedastic t and Laplace}.
In both cases, ${\mathbf X}$ is distributed according to the mixture of multivariate normals described in Section \ref{sec: mvt simulation studies} of the main paper.
As in the main paper, in each case four different choices for the covariance matrix $\mbox{\boldmath $\Sigma$}$ were considered.
The general patterns of the estimated MISEs are similar to that observed in Table \ref{tab: mvt MISEs heteroscedastic} of the main paper
where the true measurement error distributions were finite mixtures of multivariate normal kernels.
While in theory the MLFA model described in the main paper can approximate distributions like the multivariate Laplace that puts significant mass around the origin,
in practice, since it assumes $\mbox{\boldmath $\Omega$}_{k}=\mbox{\boldmath $\Omega$}=\hbox{diag}\{\sigma_{1}^{2},\dots,\sigma_{p}^{2}\}$ for all $k$,
it often smooths out the spikes at the origin.
A mild variation, referred to as the ${\rm MLFA}_{2}$ model, that instead assumes $\mbox{\boldmath $\Omega$}_{k}=\sigma_{k}^{2}\hbox{I}_{p}$
and results in slight improvement in the MISE performance is also included in Table \ref{tab: mvt MISEs homoscedastic t and Laplace} and Table \ref{tab: mvt MISEs heteroscedastic t and Laplace}.
For the simulation experiments and the real data analysis presented in the main text, the two versions of the MLFA model perform very similarly and the latter version was not included.
Results for conditionally heteroscedastic multivariate Laplace errors with diagonal covariance structure are summarized in Figures \ref{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 HT_E0 Ind}-\ref{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 HT_E0 Ind}
with observations similar to those discussed in Section \ref{sec: mvt simulation studies} of the main paper.
\thispagestyle{empty}
\newgeometry{left=2cm,right=2.5cm,top=2.5cm,bottom=0.1cm}
\begin{table}[!ht]\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c c c c|}
\hline
\multirow{2}{80pt}{True Error Distribution} & \multirow{2}{50pt}{Covariance Structure} & \multirow{2}{*}{Sample Size} & \multicolumn{4}{|c|}{MISE $\times 10^4$} \\ \cline{4-7}
& & & ${\rm MLFA}_{2}$ & MLFA & MIW & Naive \\ \hline\hline
\multirow{8}{80pt}{(c) Multivariate t} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{1.06} & 1.38 & 3.98 & 12.32 \\
& & 1000 & \bf{0.53} & 0.65 & 1.54 & 9.91 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering LF} & 500 & \bf{6.62} & 8.26 & 7.57 & 47.22 \\
& & 1000 & 4.73 & 5.78 & \bf{3.65} & 45.70 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering AR} & 500 & 12.69 & 13.56 & \bf{6.14} & 40.76 \\
& & 1000 & 11.36 & 9.16 & \bf{3.45} & 39.59 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering EXP} & 500 & 7.84 & 8.42 & \bf{5.00} & 26.85 \\
& & 1000 & 6.26 & 6.64 & \bf{2.38} & 26.04 \\ \hline\hline
\multirow{8}{80pt}{(d) Multivariate Laplace} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{1.08} & 1.32 & 3.08 & 8.22 \\
& & 1000 & \bf{0.50} & 0.63 & 1.20 & 6.25 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering LF} & 500 & \bf{4.41} & 5.57 & 5.66 & 32.31 \\
& & 1000 & \bf{2.38} & 3.53 & 2.84 & 31.10 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering AR} & 500 & 8.38 & 8.72 & \bf{5.14} & 27.30 \\
& & 1000 & 6.08 & 6.19 & \bf{2.56} & 26.19 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering EXP} & 500 & 5.24 & 5.67 & \bf{4.14} & 17.57 \\
& & 1000 & 3.58 & 4.17 & \bf{1.98} & 16.86 \\ \hline
\end{tabular}
\caption{\baselineskip=10pt
Mean integrated squared error (MISE) performance
of MLFA (mixtures of latent factor analyzers) and MIW (mixtures with inverse Wishart priors) density deconvolution models
for {\bf homoscedastic} errors compared with a naive method that ignores measurement errors for different measurement error distributions.
See Section \ref{sec: mvt density deconvolution models} and Section \ref{sec: mvt additional simulation studies} for additional details.
The minimum value in each row is highlighted.
}
\label{tab: mvt MISEs homoscedastic t and Laplace}
\end{center}
\end{table}
\begin{table}[!ht]\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c c c c|}
\hline
\multirow{2}{80pt}{True Error Distribution} & \multirow{2}{50pt}{Covariance Structure} & \multirow{2}{*}{Sample Size} & \multicolumn{4}{|c|}{MISE $\times 10^4$} \\ \cline{4-7}
& & & ${\rm MLFA}_{2}$ & MLFA & MIW & Naive \\ \hline\hline
\multirow{8}{80pt}{(c) Multivariate t} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{2.78} & 3.25 & 24.48 & 19.10 \\
& & 1000 & \bf{1.39} & 1.53 & 13.40 & 17.75 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering LF} & 500 & \bf{12.65} & 14.72 & 52.77 & 69.64 \\
& & 1000 & \bf{6.71} & 8.43 & 25.66 & 66.49 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering AR} & 500 & \bf{20.54} & 23.2 & 43.22 & 64.07 \\
& & 1000 & \bf{13.53} & 18.41 & 21.42 & 59.81 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering EXP} & 500 & \bf{11.56} & 14.12 & 37.68 & 43.57 \\
& & 1000 & \bf{8.19} & 11.97 &18.22 & 41.66 \\ \hline\hline
\multirow{8}{80pt}{(d) Multivariate Laplace} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{1.81} & 2.32 & 9.60 & 10.31 \\
& & 1000 & \bf{0.97} & 1.20 & 4.20 & 8.86 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering LF} & 500 & \bf{7.33} & 10.30 & 17.52 & 41.89 \\
& & 1000 & \bf{3.99} & 5.28 & 7.65 & 40.93 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering AR} & 500 & \bf{9.79} & 14.13 & 15.64 & 35.50 \\
& & 1000 & \bf{5.54} & 9.32 & 6.59 & 34.91 \\ \cline{2-7}
& \multirow{2}{50pt}{\centering EXP} & 500 & \bf{7.26} & 9.90 & 13.93 & 23.71 \\
& & 1000 & \bf{3.90} & 5.12 & 5.19 & 22.78 \\ \hline
\end{tabular}
\caption{\baselineskip=10pt
Mean integrated squared error (MISE) performance
of MLFA (mixtures of latent factor analyzers) and MIW (mixtures with inverse Wishart priors) density deconvolution models
for {\bf conditionally heteroscedastic} errors compared with a naive method that ignores measurement errors for different measurement error distributions.
See Section \ref{sec: mvt density deconvolution models} and Section \ref{sec: mvt additional simulation studies} for additional details.
The minimum value in each row is highlighted.
}
\label{tab: mvt MISEs heteroscedastic t and Laplace}
\end{center}
\end{table}
\restoregeometry
\pagebreak
\thispagestyle{empty}
\baselineskip=17pt
We also extend the simulation experiments to scenarios when ${\mathbf X}$ is distributed according to
(B) $f_{{\mathbf X}}^{(3)} = {\rm MVT}_{4}(6,\mbox{\boldmath $\mu$}_{{\mathbf X}},\mbox{\boldmath $\Sigma$}_{{\mathbf X}}), \mbox{\boldmath $\mu$}_{{\mathbf X}}=(2,2,2,2)^{\rm T}$,
(C) $f_{{\mathbf X}}^{(4)} = \sum_{k=1}^{2}\pi_{{\mathbf X},k} {\rm MVT}_{4}(6,\mbox{\boldmath $\mu$}_{{\mathbf X},k},\mbox{\boldmath $\Sigma$}_{{\mathbf X}})$, $\mbox{\boldmath $\pi$}_{{\mathbf X}}=(0.75,0.25)^{\rm T}, \mbox{\boldmath $\mu$}_{{\mathbf X},1}=(2,4,2,2)^{\rm T}, \mbox{\boldmath $\mu$}_{{\mathbf X},2}=(4,2,4,2)^{\rm T}$.
In each case, four different choices for $\mbox{\boldmath $\Sigma$}_{{\mathbf X}}$ are considered as in Section \ref{sec: mvt simulation studies} of the main paper.
We focus on the case when the measurement errors are conditionally heteroscedastic.
Results are presented in Tables \ref{tab: mvt MISEs heteroscedastic mvt} and \ref{tab: mvt MISEs heteroscedastic mixture mvt}.
\begin{table}[!ht]\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c|c c c|}
\hline
\multirow{2}{85pt}{True Distribution of Interest $f_{{\mathbf X}}$} & \multirow{2}{75pt}{True Error Distribution $f_{\mbox{\boldmath $\epsilon$}}$} & \multirow{2}{50pt}{Covariance Structure} & \multirow{2}{*}{Sample Size} & \multicolumn{3}{|c|}{MISE $\times 10^4$} \\ \cline{5-7}
&&& & ${\rm MLFA}_{2}$ & MIW & Naive \\ \hline\hline
\multirow{32}{85pt}{(B) Multivariate t} & \multirow{8}{75pt}{(a) Multivariate Normal} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{4.35} & 20.36 & 18.17 \\
& & & 1000 & \bf{2.36} & 13.14 & 12.65 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{21.31} & 78.22 & 75.42 \\
& & & 1000 & \bf{15.57} & 52.73 & 67.77 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{33.18} & 59.77 & 63.33 \\
& & & 1000 & \bf{29.29} & 51.11 & 53.40 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{19.58} & 40.72 & 44.83 \\
& & & 1000 & \bf{17.78} & 32.01 & 37.58 \\ \cline{2-7}
& \multirow{8}{75pt}{(b) Mixture of Multivariate Normals} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{5.16} & 27.21 & 38.03 \\
& & & 1000 & \bf{2.87} & 18.17 & 35.99 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{27.89} & 73.75 & 159.29 \\
& & & 1000 & \bf{19.27} & 53.66 & 161.77 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{38.41} & 81.77 & 159.34 \\
& & & 1000 & \bf{34.22} & 55.25 & 156.05 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{21.95} & 45.76 & 100.33 \\
& & & 1000 & \bf{18.14} & 37.72 & 99.09 \\ \cline{2-7}
& \multirow{8}{80pt}{(c) Multivariate t} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{4.16} & 27.73 & 23.42 \\
& & & 1000 & \bf{2.34} & 19.87 & 20.36 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{22.83} & 91.04 & 90.39 \\
& & & 1000 & \bf{14.03} & 85.33 & 89.31 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{40.60} & 76.40 & 86.87 \\
& & & 1000 & \bf{36.93} & 70.76 & 75.19 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{26.36} & 55.65 & 61.25 \\
& & & 1000 & \bf{18.51} & 40.46 & 49.52 \\ \cline{2-7}
& \multirow{8}{80pt}{(d) Multivariate Laplace} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{3.93} & 16.48 & 16.14 \\
& & & 1000 & \bf{1.81} & 6.85 & 14.02 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{16.36} & 47.19 & 70.22 \\
& & & 1000 & \bf{12.13} & 27.64 & 59.48 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{29.46} & 42.44 & 63.79 \\
& & & 1000 & \bf{18.81} & 21.19 & 47.92 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{19.00} & 34.74 & 39.64 \\
& & & 1000 & \bf{13.30} & 16.24 & 32.76 \\ \hline
\end{tabular}
\caption{\baselineskip=11pt
Mean integrated squared error (MISE) performance
of MLFA (mixtures of latent factor analyzers) and MIW (mixtures with inverse Wishart priors) density deconvolution models
for {\bf conditionally heteroscedastic} errors compared with a naive method that ignores measurement errors for different measurement error distributions.
See Section \ref{sec: mvt density deconvolution models} and Section \ref{sec: mvt additional simulation studies} for additional details.
The minimum value in each row is highlighted.
}
\label{tab: mvt MISEs heteroscedastic mvt}
\end{center}
\end{table}
\thispagestyle{empty}
\begin{table}[!ht]\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c|c c c|}
\hline
\multirow{2}{85pt}{True Distribution of Interest $f_{{\mathbf X}}$} & \multirow{2}{75pt}{True Error Distribution $f_{\mbox{\boldmath $\epsilon$}}$} & \multirow{2}{50pt}{Covariance Structure} & \multirow{2}{*}{Sample Size} & \multicolumn{3}{|c|}{MISE $\times 10^4$} \\ \cline{5-7}
&&& & ${\rm MLFA}_{2}$ & MIW & Naive \\ \hline\hline
\multirow{32}{85pt}{(C) Mixture of Multivariate t} & \multirow{8}{75pt}{(a) Multivariate Normal} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{4.84} & 13.68 & 12.43 \\
& & & 1000 & \bf{2.82} & 7.41 & 10.15 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{21.62} & 30.01 & 47.95 \\
& & & 1000 & \bf{13.40} & 19.72 & 44.97 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{22.56} & 29.35 & 43.99 \\
& & & 1000 & \bf{19.80} & 25.59 & 39.63 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{18.36} & 27.27 & 28.00 \\
& & & 1000 & \bf{13.41} & 17.73 & 25.14 \\ \cline{2-7}
& \multirow{8}{75pt}{(b) Mixture of Multivariate Normals} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{5.39} & 14.64 & 22.90 \\
& & & 1000 & \bf{2.80} & 10.77 & 21.55 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{24.48} & 32.87 & 98.00 \\
& & & 1000 & \bf{15.62} & 20.52 & 98.79 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{26.73} & 31.09 & 90.78 \\
& & & 1000 & \bf{23.44} & 29.06 & 91.24 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{19.56} & 25.39 & 58.83 \\
& & & 1000 & \bf{13.90} & 18.29 & 59.93 \\ \cline{2-7}
& \multirow{8}{80pt}{(c) Multivariate t} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{4.91} & 18.09 & 16.30 \\
& & & 1000 & \bf{2.89} & 11.59 & 14.00 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{23.50} & 33.79 & 60.18 \\
& & & 1000 & \bf{15.85} & 25.83 & 58.20 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{26.98} & 33.78 & 54.07 \\
& & & 1000 & \bf{22.04} & 29.77 & 51.64 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{18.62} & 24.00 & 36.26 \\
& & & 1000 & \bf{12.64} & 18.57 & 33.61 \\ \cline{2-7}
& \multirow{8}{80pt}{(d) Multivariate Laplace} & \multirow{2}{50pt}{\centering I}
& 500 & \bf{4.76} & 9.34 & 15.96 \\
& & & 1000 & \bf{2.33} & 5.04 & 13.96 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering LF} & 500 & \bf{16.59} & 22.54 & 65.33 \\
& & & 1000 & \bf{11.69} & 13.41 & 59.25 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering AR} & 500 & \bf{24.73} & 26.21 & 58.87 \\
& & & 1000 & \bf{15.71} & 17.48 & 47.62 \\ \cline{3-7}
& & \multirow{2}{50pt}{\centering EXP} & 500 & \bf{14.26} & 19.12 & 34.53 \\
& & & 1000 & \bf{10.96} & 13.25 & 32.47 \\ \hline
\end{tabular}
\caption{\baselineskip=11pt
Mean integrated squared error (MISE) performance
of MLFA (mixtures of latent factor analyzers) and MIW (mixtures with inverse Wishart priors) density deconvolution models
for {\bf conditionally heteroscedastic} errors compared with a naive method that ignores measurement errors for different measurement error distributions.
See Section \ref{sec: mvt density deconvolution models} and Section \ref{sec: mvt additional simulation studies} for additional details.
The minimum value in each row is highlighted.
}
\label{tab: mvt MISEs heteroscedastic mixture mvt}
\end{center}
\end{table}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{VFn_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_HT_E0_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_82.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the variance functions $s^{2}(X)$
produced by the univariate density deconvolution method for each component of ${\mathbf X}$ for conditionally heteroscedastic multivariate Laplace ($f_{\mbox{\boldmath $\epsilon$}}^{(4)}$) distributed measurement errors with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets for the MIW (mixtures with inverse Wishart priors) method.
For each component of ${\mathbf X}$, the true variance function is $s^{2}(X) = (1+X/4)^{2}$.
See Section \ref{sec: mvt density of heteroscedastic errors} and Section \ref{sec: mvt estimation of variance functions} for additional details.
In each panel, the true (lighter shaded green lines) and the estimated (darker shaded blue lines) variance functions
are superimposed over a plot of subject specific sample means vs subject specific sample variances.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results VFn d4 n1000 m3 MIW X1 HT_E0 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{XS_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_HT_E0_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_82.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the $f_{{\mathbf X}}$
produced by the MIW (mixtures with inverse Wishart priors) method for conditionally heteroscedastic multivariate Laplace ($f_{\mbox{\boldmath $\epsilon$}}^{(4)}$) distributed measurement errors with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} and Section \ref{sec: mvt additional simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{X_{i},X_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results XS d4 n1000 m3 MIW X1 HT_E0 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{XS_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_HT_E0_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_64.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the $f_{{\mathbf X}}$
produced by the ${\rm MLFA}_{2}$ (mixtures of latent factor analyzers) method for conditionally heteroscedastic multivariate Laplace ($f_{\mbox{\boldmath $\epsilon$}}^{(4)}$) distributed measurement errors with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} and Section \ref{sec: mvt additional simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{X_{i},X_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results XS d4 n1000 m3 MLFA X1 HT_E0 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{ES_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_HT_E0_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_82.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$
produced by the MIW (mixtures with inverse Wishart priors) method for conditionally heteroscedastic multivariate Laplace ($f_{\mbox{\boldmath $\epsilon$}}^{(4)}$) distributed measurement errors with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} and Section \ref{sec: mvt additional simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{\epsilon_{i},\epsilon_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results ES d4 n1000 m3 MIW X1 HT_E0 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{ES_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_HT_E0_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_64.pdf}
\end{center}
\caption{\baselineskip=10pt Results for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$
produced by the ${\rm MLFA}_{2}$ (mixtures of latent factor analyzers) method for conditionally heteroscedastic multivariate Laplace ($f_{\mbox{\boldmath $\epsilon$}}^{(4)}$) distributed measurement errors with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
The results correspond to the data set that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets.
See Section \ref{sec: mvt simulation studies} and Section \ref{sec: mvt additional simulation studies} for additional details.
The upper triangular panels show the contour plots of the true two dimensional marginal densities.
The lower triangular diagonally opposite panels show the corresponding estimates.
The numbers $i,j$ at the bottom right corners of the off-diagonal panels show that the marginal densities $f_{\epsilon_{i},\epsilon_{j}}$ are plotted in those panels.
The diagonal panels show the true (lighter shaded green lines) and the estimated (darker shaded blue lines) one dimensional marginals.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt simulation results ES d4 n1000 m3 MLFA X1 HT_E0 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{Trace_IW_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_HT_E0_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_82.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the MIW (mixtures with inverse Wishart priors) method for conditionally heteroscedastic multivariate Laplace ($f_{\mbox{\boldmath $\epsilon$}}^{(4)}$) distributed measurement errors with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
See Section \ref{sec: mvt simulation studies} and Section \ref{sec: mvt additional simulation studies} for additional details.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
The results correspond to the simulation instance that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets,
when the number of mixture components for both $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=5$ and $K_{\mbox{\boldmath $\epsilon$}}=6$, respectively.
}
\label{fig: mvt simulation results Trace Plots d4 n1000 m3 MIW X1 HT_E0 Ind}
\end{figure}
\thispagestyle{empty}
\begin{figure}[!ht]
\begin{center}
\includegraphics[height=16cm, width=16cm, trim=0cm 0cm 0cm 0cm, clip=true]{Trace_MLFA_MVT_Decon_HTR_d_4_n_1000_reps_3_XS_density_X1_ES_density_HT_E0_Cov_Struct_Ind_simsize_3000_burnin_1000_seedno_64.pdf}
\end{center}
\caption{\baselineskip=10pt
Trace plots and frequency distributions of the number of nonempty clusters produced by the ${\rm MLFA}_{2}$ (mixtures of latent factor analyzers) method for conditionally heteroscedastic multivariate Laplace ($f_{\mbox{\boldmath $\epsilon$}}^{(4)}$) distributed measurement errors with sample size $n=1000$, $m_{i}=3$ replicates for each subject and identity matrix (I) for the component specific covariance matrices.
See Section \ref{sec: mvt simulation studies} and Section \ref{sec: mvt additional simulation studies} for additional details.
The upper panels are for the $f_{{\mathbf X}}$ and the lower panels are for the density of the scaled errors $f_{\mbox{\boldmath $\epsilon$}}$.
The results correspond to the simulation instance that produced the median of the estimated integrated squared errors (ISE) out of a total of 100 simulated data sets,
when the number of mixture components for $f_{{\mathbf X}}$ and $f_{\mbox{\boldmath $\epsilon$}}$ were kept fixed at $K_{{\mathbf X}}=7$ and $K_{\mbox{\boldmath $\epsilon$}}=5$, respectively.
}
\label{fig: mvt simulation results Trace Plots d4 n1000 m3 MLFA X1 HT_E0 Ind}
\end{figure}
\section{Potential Impact on Nutritional Epidemiology}\label{sec: mvt potential impact}
The joint distribution of long-term average intakes of different dietary components allows nutritionists
to study the dietary habits of the population of interest in fine detail.
The plots of pairwise marginal distributions presented in Figure \ref{fig: mvt EATS data results XS}, for instance,
provide detailed information on the joint consumption patterns of different pairs of dietary components.
While such graphical summaries of the joint distributions may not be available for more than two components,
numerical summaries of the joint distribution can provide answers to important questions
such as what proportion of the population consume certain dietary components above, between or below certain amounts etc.
The last question is particularly important as it relates to the proportion of the population that are deficient in certain dietary components.
Focusing again on a two-dimensional case for illustration, namely Fiber and Potassium,
Figure \ref{fig: mvt fiber and potassium cdf} below shows their marginal and joint cumulative distribution function (CDF) on a set of grid points
from which such proportions can be readily obtained.
Dietary components are often reported in different measurement units.
The figures presented in Section \ref{sec: mvt data analysis} are based on a linear scale transformation $W_{ij\ell}=20 \times \{W_{ij\ell,obs}-W_{ij\ell,obs,min}\} / \{W_{ij\ell,obs, max} - W_{ij\ell,obs,min}\}$
so that the $W_{ij\ell}$ for different components are unitless and fall between 0 and 20 units.
Figure \ref{fig: mvt fiber and potassium cdf} report the marginal and the joint CDF of fiber and potassium on a set of grid points in their original measurement units.
We can readily see that, considered jointly,
approximately $59\%$ of adult Americans consume less than 20.55 grams of fiber and 3338.55 milligrams of potassium,
whereas the corresponding marginal values are $71.2\%$ and $67.6\%$, respectively.
The focus of the nutritional epidemiology examples considered in this article were
on the estimation of joint consumption patterns of a set of regularly consumed dietary components
whose reported intakes were all continuously measured.
In contrast, for dietary components that are consumed episodically, the reported intakes equal zero on non-consumption days, and are positive on consumption days.
The methodology developed in this article paves the way to more sophisticated deconvolution methods that can accommodate such zero inflated data.
We are pursuing this problem as the subject of a separate study, with promising preliminary results.
This will be a crucial step forward towards providing a highly flexible statistical framework for estimating the distribution of the U.S. Department of Agriculture's Healthy Eating Index (HEI, \href{www.cnpp.usda.gov/HealthyEatingIndex.htm}{www.cnpp.usda.gov/HealthyEatingIndex.htm}).
HEI is a measure of diet quality that involves six episodically and seven regularly consumed dietary components and is used to assess compliance with the U.S. Dietary Guidelines for Americans (\href{www.health.gov/dietaryguidelines}{www.health.gov/dietaryguidelines}) and monitor changes in dietary patterns.
Efficient estimation of the distribution of HEI will allow nutritionists to answer public health questions that have important policy implications.
We expect successful implementation of our methods
to eventually replace the currently popular NCI method
\href{www.riskfactor.cancer.gov/diet/usualintakes/method.html}{(www.riskfactor.cancer.gov/diet/usualintakes/method.html)} for estimation of HEI.
\pagebreak
\thispagestyle{empty}
\newgeometry{left=2cm,right=2.5cm,top=1cm,bottom=1cm}
\begin{figure}[ht]
\begin{center}
\includegraphics[height=6cm, width=18cm, trim=0cm 0cm 1.5cm 1cm, clip=true]{EATS_DATA_Plots_1_R_FIBER_R_POTASSIU_MARGINAL_CDF.pdf}
\includegraphics[height=14cm, width=18cm, trim=0cm 0.25cm 0cm 0.5cm, clip=true]{EATS_DATA_Plots_1_R_FIBER_R_POTASSIU_JOINT_CDF.png}
\end{center}
\caption{\baselineskip=10pt Results for Fiber and Potassium in their commonly used measurement units.
The top two panels show their marginal cumulative distribution functions.
The bottom panel shows their joint cumulative distribution function for a set of grid points.
The figure is in color in the electronic version of this article.
}
\label{fig: mvt fiber and potassium cdf}
\end{figure}
\restoregeometry
\baselineskip=17pt
\section*{Additional References}
\par\vskip 2mm\noindent\refhg
Bickel, P. J. and Kleijn, B. J. K. (2012). The semiparametric Bernstein-von Mises theorem.
{\it Annals of Statistics}, 40, 206-237.
\par\vskip 2mm\noindent\refhg
Bontemps, D. (2011). Bernstein-von Mises theorems for Gaussian regression with increasing number of regressors.
{\it Annals of Statistics}, 39, 2557-2584.
\par\vskip 2mm\noindent\refhg
Carroll, R. J., Chen X. and Hu, Y. (2010). Identification and estimation of nonlinear models using two samples with nonclassical measurement errors.
{\it Journal of Nonparametric Statistics}, 22, 379-399.
\par\vskip 2mm\noindent\refhg
Castillo, I. and Nickl, R. (2014). On the Bernstein-von Mises phenomenon for nonparametric Bayes procedures.
{\it Annals of Statistics}, 42, 1941-1969.
\par\vskip 2mm\noindent\refhg
de Boor, C. (2000). \emph{A Practical Guide to Splines}.
New York: Springer.
\par\vskip 2mm\noindent\refhg
d'Haultfoeuille, X. (2011). On the completeness condition in nonparametric instrumental problems.
{\it Econometric Theory}, 27, 460-471.
\par\vskip 2mm\noindent\refhg
Eltoft, T., Kim, T. and Lee, T. W. (2006). On the multivariate Laplace distribution.
{\it IEEE Signal Processing Letters}, 13, 300-303.
\par\vskip 2mm\noindent\refhg
Escobar, M. D. and West, M. (1995). Bayesian density estimation and inference using mixtures.
{\it Journal of the American Statistical Association}, 90, 577-588.
\par\vskip 2mm\noindent\refhg
Eubank, R. L. and Hart, J. D. (1992). Testing goodness-of-fit in regression via order selection criteria.
{\it Annals of Statistics}, 20, 1412-1425.
\par\vskip 2mm\noindent\refhg
Ferguson, T. F. (1973). A Bayesian analysis of some nonparametric problems.
{\it Annals of Statistics}, 1, 209-230.
\par\vskip 2mm\noindent\refhg
Fraley, C. and Raftery, A. E. (2007). Model-based methods of classification: using the mclust software in chemometrics.
{\it Journal of Statistical Software}, 18, 1-13.
\par\vskip 2mm\noindent\refhg
Ghosh, J. K. and Ramamoorthi, R. V. (2010). \emph{Bayesian Nonparametrics}.
New York: Springer.
\par\vskip 2mm\noindent\refhg
Goldberg, R. R . (1961). \emph{Fourier transforms}. Volume 32. London: Cambridge.
\par\vskip 2mm\noindent\refhg
Green, P. J., Latuszynski, K., Pereyra, M. and Robert, C. P. (2015). Bayesian computation: a summary of the current state, and samples backwards and forwards.
{\it Statistics and Computing}, 25, 835-862.
\par\vskip 2mm\noindent\refhg
Hastie, D. I., Liverani, S. and Richardson, S. (2015). Sampling from Dirichlet process mixture models with unknown concentration parameter: mixing issues in large data implementations.
{\it Statistics and Computing}, 25, 1023-1037.
\par\vskip 2mm\noindent\refhg
Ishwaran, H. and James, L. F. (2002). Approximate Dirichlet process computing in finite normal mixtures: smoothing and prior information.
{\it Journal of Computational and Graphical Statistics}, 11, 508-532.
\par\vskip 2mm\noindent\refhg
Ishwaran, H. and Zarepour, M. (2000). Markov chain Monte Carlo in approximate Dirichlet and beta two-parameter process hierarchical models.
{\it Biometrika}, 87, 371-390.
\par\vskip 2mm\noindent\refhg
Ishwaran, H. and Zarepour, M. (2002). Exact and approximate sum-representations for the Dirichlet process.
{\it Canadian Journal of Statistics}, 30, 269-283.
\par\vskip 2mm\noindent\refhg
Johnstone, I. M. (2010). High dimensional Bernstein-von Mises: simple examples.
{\it Institute of Mathematical Statistics Collections}, 6, 87-98.
\par\vskip 2mm\noindent\refhg
Kotz, S. and Nadarajah, S. (2004). \emph{Multivariate t Distributions and Their Applications}.
Cambridge: Cambridge University Press.
\par\vskip 2mm\noindent\refhg
Neal, R. M. (2000). Markov chain sampling methods for Dirichlet process mixture models.
{\it Journal of Computational and Graphical Statistics}, 9, 249-265.
\par\vskip 2mm\noindent\refhg
Norets, A. and Pelenis, J. (2012). Bayesian modeling of joint and conditional distributions.
{\it Journal of Econometrics}, 168, 332-346.
\par\vskip 2mm\noindent\refhg
Pati, D. and Dunson, D. (2013). Bayesian nonparametric regression with varying residual density.
{\it Annals of the Institute of Statistical Mathematics}, 66, 1-13.
\par\vskip 2mm\noindent\refhg
Pelenis, J. (2014). Bayesian Regression with Heteroscedastic Error Density and Parametric Mean Function.
{\it Journal of Econometrics}, 178, 624-638.
\par\vskip 2mm\noindent\refhg
Rocke, D. and Durbin, B. (2001). {A model for measurement error for gene expression arrays}.
\emph{Journal of Computational Biology}, 8, 557-569.
\par\vskip 2mm\noindent\refhg
Rousseau, J. and Mengersen, K. (2011). Asymptotic behavior of the posterior distribution in overfitted mixture models.
{\it Journal of the Royal Statistical Society, Series B}, 73, 689-710.
\par\vskip 2mm\noindent\refhg
Sethuraman, J. (1994). A constructive definition of Dirichlet priors.
{\it Statistica Sinica}, 4, 639-650.
\par\vskip 2mm\noindent\refhg
Spokoiny, V. (2013). Bernstein-von Mises theorem for growing parameter dimension.
{\it arXiv preprint arXiv:1302.3430}.
\end{document}
|
\begin{document}
\title
{Finite sum of weighted composition operators with closed range}
\author{\sc S. Shamsigamchi}\author{\sc A. Alishahi} \author{\sc A. Ebadian}
\address{\sc Department of mathematics, Payame Noor University } \address{\sc Department of mathematics, Payame Noor University }
\email{[email protected]} \email{}\email{[email protected]}
\thanks{}
\subjclass[2010]{47B33}
\keywords{Weighted composition operators, closed range operators, invertible operators.}
\date{}
\dedicatory{}
\commby{}
\begin{abstract}
In this paper, first we characterize closedness of range of the finite sum of weighted composition operators between different $L^p$-spaces. Then we discuss polar decomposition and invertibility of these operators.
\end{abstract}
\maketitle
\section{introduction}
Weighted composition operators are a general class of operators and they appear naturally in the study of surjective isometries on most of the function spaces,
semigroup theory, dynamical systems, Brennan's conjecture, etc. This type of operators is a generalization of multiplication operators and composition operators.\\
There are many great papers on the investigation of weighted composition operators acting on the spaces of measurable functions. For instance, one can see \cite{bon,dls,ey,ej,sh,gh,j1,ls,lau6,n2}. Also, some basic properties of weighted composition operators on $L^{p}$-spaces were studied by Parrott \cite{la}, Nordgren \cite{la2}, Singh and Manhas \cite{lau}, Takagi \cite{n1} and some other mathematicians. As far as we know, finite sums of weighted composition operators were studied on $L^p$-spaces by Jabbarzadeh and Estaremi in \cite{gh1}. Also we investigated some basic properties of these operators in \cite{bon}.
Let $(X, \Sigma, \mu)$ be a $\sigma$-finite measure space.
We denote the linear space of all complex-valued
$\Sigma$-measurable functions on $X$ by $L^0(\Sigma)$. For any $\sigma$-finite subalgebra $\mathcal{A}\subseteq\Sigma$ such that $(X, \mathcal{A}, \mu_{\mathcal{A}})$ is also $\sigma$-finite, the
conditional expectation operator associated with $\mathcal{A}$ is
the mapping $f\rightarrow E^{\mathcal{A}}f$, defined for all
non-negative $f$ as well as for all $f\in L^p(\Sigma)$, $1\leq
p\leq \infty$, where $E^{\mathcal{A}}f$ is the unique
$\mathcal{A}$-measurable function satisfying
$$\int_{A}fd\mu=\int_{A}E^{\mathcal{A}}fd\mu, \ \ \ A\in \mathcal{A}.$$
As an operator on $L^{p}({\Sigma})$, $E^{\mathcal{A}}$ is
idempotent and $E^{\mathcal{A}}(L^p(\Sigma))=L^p(\mathcal{A})$.
For more details on the properties of $E^{\mathcal{A}}$ see
\cite{lambe} and \cite{rao}.
For a measurable function $u:X\rightarrow \mathbb{C}$ and a non-singular measurable transformation $\varphi:X\rightarrow X$, i.e., the measure $\mu\circ \varphi^{-1}$ is absolutely continuous with respect to $\mu$, we can define an operator $uC_{\varphi}:L^p(\Sigma)\rightarrow L^0(\Sigma)$ with $uC_{\varphi}(f)=u.f\circ \varphi$ and it is called a weighted composition operator. For non-singular measurable transformations $\{\varphi_i\}^{n}_{i=1}$, we put $W=\sum_{i=1}^{n}u_iC_{\varphi_i}$.\\
In this paper, we are going to give some sufficient and necessary condition for closedness of range of finite sum of weighted composition operators between different $L^p$-spaces. Moreover, we compute the polar decomposition of these operators on $L^2$. Finally we talk a bit about invertibility and injectivity.
\section{main results}
In this section first we give an equivalent condition for closedness of range on the Hilbert space $L^2$.
\begin{theorem}
Let $W=\sum_{i=1}^{n}u_{i}C_{\varphi_{i}}$ be a bounded operator on $L^{2}(\mu)$ and $u_{i}(\varphi_{j}^{-1})=0 ,~~~ i\neq j$. The following statements are equivalent.
\begin{enumerate}
\item[(a)] $W$ has closed range.
\item[(b)] There is a constant $c>0$ such that $J=\sum_{i=1}^{n}h_{i}E_{i}(|u_{i}|^{2})\circ \varphi_{i}^{-1} \geq c ~~~~~\mu - a.e$ on $Coz J=\{x\in X: J(x)\neq 0\}$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(b)\Rightarrow (a)$ Suppose that there is some constant $c>0$ such that $J\geq c ~~~~\mu-a.e$ on $Coz J$. We know that $\ker W \subseteq L^{2}_{|X\backslash Coz J}(\mu) $. Since $W^{*}Wf=Jf$ for every $f\in L^{2}(\mu)$,
\begin{eqnarray}
\nonumber
\|Wf\|_{2}^{2} &=& (Wf , Wf)\\
\nonumber & =& (W^{*}Wf , f)\\
\nonumber &=& \int_{X}J|f|^{2}d\mu = \int_{Coz J}J|f|^{2}d\mu+\int_{X\backslash Coz J}J|f|^{2}d\mu\\
\nonumber &\geq & c\|f\|_{2}^{2}.\\
\nonumber
\end{eqnarray}
Obviously $W_{| J}$ is injective and $W_{| J}(L^{2}_{\mid J}(\mu))$ is closed in $L^{2}(\mu)$, where $L^{2}_{\mid J}(\mu)=\{f\in L^{2}(\mu)~;~ f=0~ on~ X\backslash J\}$. Since $\ker W=L^{2}_{|X\backslash Coz J}(\mu) $, $W(L^{2}(\mu))$ must be closed in $L^{2}(\mu)$.\\
$(a)\Rightarrow (b)$ Assume $W$ has closed range. Then $W_{| J}(L^{2}_{| J}(\mu))$ is closed in $L^{2}(\mu)$. Since $W_{| J}$ is injective so there exists a constant $d>0$ such that $\|W_{| J}f\|_{2}\geq d \|f\|_{2}$ for any $f\in L^{2}(\mu)$. Take $c=\frac{d^{2}}{n}$, $(b)$ follows immediately once we show that for any $E\in \Sigma$ with $E\subset Coz J$, $\int_{E}J d\mu\geq c\mu(E)$. Pick any $E\in \Sigma$ with $E\subset Coz J$. We may assume $\mu(E)<\infty$. Then $\chi_{E}\in L^{2}_{| J}(\mu)$ and $n\int_{E}J d\mu=n\int_{X}J \chi_{E}d\mu\geq \|W_{| J}\chi_{E}\|_{2}^{2}\geq d^{2}\|\chi_{E}\|_{2}^{2}=d^{2}\mu(E)$ so $\int_{E}J d\mu \geq c \mu(E)$.
\end{proof}
Now we find some necessary and sufficient conditions for closedness of range when the operator act on the $L^p$ with $p>1$.
\begin{theorem}
Let $W=\sum_{i=1}^{m}u_{i}C_{\varphi_{i}}$ be a bounded operator on $L^{p}(\mu)$ with $p>1$. Then the following hold.
\begin{enumerate}
\item[(a)] If $J(B)=0, ~~\mu -a.e$ and $\sum_{i\in \mathcal{B}bb N}J(A_{i})\mu(A_{i})<\infty$ then $W$ has closed range.
\item[(b)] If $W$ has closed range and is injective then $J(B)=0, ~~\mu -a.e$ .
\item[(c)] Let $\mu(X)<\infty$. If $W$ has closed range and is injective then there exists a constant $\delta>0$ such that $u=\sum_{i=1}^{n}u_{i}^{p}\geq \delta$ on $X$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(a)$ Take any sequence $(Wf_{n})_{n \in \Bbb N}$ in $W(L^{p}(\mu))$ with $\|f_{n}\|<1$ and $\|Wf_n-g\|\rightarrow 0$. For a fixed $i\in \Bbb N$ the sequence $(f_{n}(A_{i}))_{n \in \Bbb N}$ is bounded by $\frac{1}{\sqrt[p]{\mu(A_{i})}}$. So we can find a subsequence $(f_{n_{k}})_{k \in \Bbb N}$ such that with each fixed $i$, $f_{n_{k}}(A_{i})\rightarrow \alpha_{i}$ for some $\alpha_{i} \in \Bbb C$. Define $f= \sum_{i=1}^{\infty}\alpha_{i} \chi_{A_{i}}$. By Fatou's lemma we have $\int_{X}|f|^{p}d\mu \leq \lim\inf_{k\rightarrow \infty}\int_{X}|f_{n_{k}}|^{p}d\mu \leq 1 $, or $f \in L^{p}(\mu)$. Then we have
\begin{eqnarray}
\nonumber \|g-Wf\|_{p}&\leq& \|g-Wf_{n}\|_{p}+\|Wf_{n}-Wf_{n_{k}}\|_{p}+\|Wf_{n_{k}}-Wf\|_{p}\\
\nonumber &\leq& \frac{\varepsilon}{3}+\frac{\varepsilon}{3}+\int_{X}|W(f_{n_{k}}-f)|^{p}d\mu\\
\nonumber &\leq& \frac{\varepsilon}{3}+\frac{\varepsilon}{3}+m^{p-1}\int_{X}J|f_{n_{k}}-f|^{p}d\mu\\
\nonumber &= &\frac{\varepsilon}{3}+\frac{\varepsilon}{3}+m^{p-1}\sum_{i=1}^{\infty}J(A_{i})|f_{n_{k}}(A_{i})-\alpha_{i}|^{p}\mu(A_{i})\\
\nonumber &\longrightarrow& 0\\
\nonumber
\end{eqnarray}
Obviously $W(L^{p}(\mu))$ is closed in $L^{p}(\mu)$. \\
$(b)$ Suppose on the contrary, $\mu(\{x\in B~:~J(x)>0\})>0$. Then there exists $\delta>0$ such that the set $G=\{x\in B~:~J(x)\geq\delta\}$ has positive measure. We assume $\mu(G)<\infty$. Moreover, as $G$ is non atomic, we can further assume that $\mu(X\backslash G)>0$. Consider the Banach space $L^{p}_{\mid_{G}}(\mu)$ and the operator $W_{\mid_{G}}$ defined on $L^{p}_{\mid_{G}}(\mu)$. We claim that $W_{\mid_{G}}(L^{p}_{\mid_{G}}(\mu))$ is closed in $L^{p}(\mu)$. To prove we take any convergent sequence $(W_{\mid_{G}}(f_{n}))_{n\in \mathcal{B}bb N}$ in $W\mid_{G}(L^{p}_{\mid_{G}}(\mu))$. Let $g\in L^{p}(\mu)$ satisfy
$\|W_{\mid_{G}}(f_{n})-g\|_{p}\rightarrow 0$ as $n\rightarrow \infty$. Note that $(W_{\mid_{G}}(f_{n}))_{n\in \mathcal{B}bb N}$ can be regarded as a sequence in $W(L^{p}(\mu))$. The closedness of range of $W$ yields an $f\in L^{p}(\mu)$ with $g=Wf~~ \mu - a.e$ On $X$. Then assume $W$ has closed range and is injective so there exists a constant $d>0$ such that $\|W_{\mid_{G}}(f_{n})-Wf\|_{p}\geq d\|f_{n}-f\|_{p}$. As $\|W_{\mid_{ G}}(f_{n})-g\|_{p}=\|W_{\mid_{G}}(f_{n})-Wf\|_{p}=0$ and $\|f_{n}-f\|_{p}^{p}=\int_{G}|f_{n}-f|^{p}d\mu+\int_{X\backslash G}|f_{n}-f|^{p}d\mu$ we have that $\int_{X\backslash G}|f|^{p}d\mu=0$ and so $f\in L^{p}_{\mid_{G}}(\mu)$. Then there exists some constant $c>0$ such that $\|W_{\mid_{G}}f\|_{p}\geq c\|f\|_{p}$ for all $f\in L^{p}_{\mid_{G}}(\mu)$. We claim that this is impossible by showing that for any $\alpha>0 $, there is some $f_{\alpha}\in L^{p}_{\mid_{G}}(\mu)$ satisfying $\|W_{\mid_{G}}f\|_{p}<c\|f\|_{p}$. For any $n\in \mathcal{B}bb N$, define $G_{n}=\{x\in G~;~\left(\frac{(n-1)\alpha}{m^{\frac{p-1}{p}}}\right)^{p}\leq J(x)\leq \left(\frac{n\alpha}{m^{\frac{p-1}{p}}}\right)^{p}\}$. Then $G=\left(\cup_{n\in \mathcal{B}bb N}G_{n}\right)\cup\{x\in G ~;~ J(x)=\infty\}$. Since $W$ is a bounded operator on $L^{p}(\mu)$ so $J$ is finite valued $\mu$-a.e on $X$, then we have $\mu(\{x\in G ~;~ J(x)=\infty\})=0$. Now as $\mu(G)>0$, $\mu(G_{N})>0$ for some $N\in \mathcal{B}bb N$. Since $G_{N}$ is non- atomic, for any $\alpha>0$, we can choose some set $E_{\alpha}\in \Sigma$ such that $E_{\alpha}\subseteq G_{N}$ and $\mu(E_{\alpha})\leq\mu (G_{N})$. Take $f_{\alpha}=\chi_{E_{\alpha}}$. Obviously $f_{\alpha}\in L^{p}_{\mid_{G}}(\mu)$. Moreover $\|W_{\mid_{G}}f_{\alpha}\|_{p}\leq m^{\frac{p-1}{p}}\left(\int_{X}J|f_{\alpha}|^{p}d\mu\right)^{\frac{1}{p}}<m^{\frac{p-1}{p}}\left(\frac{N\alpha}{m^{\frac{p-1}{p}}}\right)\|f_{\alpha}\|_{p}=N\alpha \|f_{\alpha}\|_{p}$. This prove our claim and therefore we must have $J=0, ~~\mu -a.e$ on $B$. \\
$(c)$ Assume $W$ has closed range and is injective so there exists a constant $d>0$ such that $\|Wf\|_{p}\geq d \|f\|_{p}$ for any $f\in L^{p}(\mu)$.
\begin{eqnarray}
\nonumber n^{p-1}\int_{X}\sum_{i=1}^{n}u_{i}^{p} d\mu&\geq & n^{p-1}\sum_{i=1}^{n}\int_{\varphirphi_{i}^{-1}(X)}u_{i}^{p}d\mu\\
\nonumber & = &n^{p-1}\sum_{i=1}^{n}\int_{X}u_{i}^{p}\chi_{\varphirphi_{i}^{-1}(X)}d\mu\\
\nonumber &\geq &\int_{X}|W\chi_{X}|^{p}d\mu\\
\nonumber &= &\|W\chi_{X}\|_{p}^{p}\\
\nonumber &\geq & d^{p}\|\chi_{X}\|_{p}^{p}=d^{p}\mu(X)\\
\nonumber
\end{eqnarray}
so $u \geq \delta$ on $X$ . The proof is now complete.
\end{proof}
Here we give some necessary and sufficient conditions for closedness of range when the operator projects the $L^p$ into $L^{q}$ when $1\leq q<p<\infty$.
\begin{theorem}
Suppose that $1\leq q< p<\infty$ and let $W$ be a bounded operator from $L^{p}(\mu)$ into $L^{q}(\mu)$. The followings hold.
\begin{enumerate}
\item[(a)] If $W$ has closed range and is injective then the set $\{i\in\Bbb N~;~J(=\sum_{r=1}^{n}h_{r}E_{r}(|u_{r}|^{q})\circ \varphi_{r}^{-1})(A_{i})>0\}$ is finite.
\item[(b)] If $J(B)=0$, $\mu-a.e$~~ and the set $\{i\in\Bbb N~;~J(=\sum_{r=1}^{n}h_{r}E_{r}(|u_{r}|^{q})\circ \varphi_{r}^{-1})(A_{i})>0\}$ is finite then $W$ has closed range.
\end{enumerate}
\end{theorem}
\begin{proof}
$(a)$ Suppose on the contrary, the set $\{i\in\mathcal{B}bb N~;~J(A_{i})>0\}$ is infinite. Since $W$ is injective and has closed range there exists $d>0$ such that $\|Wf\|_{q}\geq d\|f\|_{p}$ for all $f\in L^{p}(\mu)$. Thus for any $i\in \mathcal{B}bb N$, $\|W\chi_{A_{i}}\|_{q}^{q}\geq d^{q}\mu(A_{i})^{\frac{q}{p}}$ and so
\begin{eqnarray}
\nonumber d^{q}\mu(A_{i})^{\frac{q}{p}}&\leq & \|W\chi_{A_{i}}\|_{q}^{q}\\
\nonumber & = &\int_{X}|\sum_{r=1}^{n}u_{r}\chi_{A_{i}}\circledastrc \varphirphi_{r}|^{q}d\mu\\
\nonumber &\leq & n^{q-1}\int_{X}J\chi_{A_{i}}d\mu\\
\nonumber &= &n^{q-1}J(A_{i})\mu(A_{i}).\\
\nonumber
\end{eqnarray}
It follows from the preceding inequality that
\begin{eqnarray}
\nonumber \frac{d^{\frac{pq}{p-q}}}{n^{\frac{p(q-1)}{p-q}}}\leq J(A_{i})^{\frac{p}{p-q}}\mu(A_{i}).\\
\nonumber
\end{eqnarray}
Therefore,
\begin{eqnarray}
\nonumber \infty=\sum_{i\in\mathcal{B}bb N}\frac{d^{\frac{pq}{p-q}}}{n^{\frac{p(q-1)}{p-q}}}\leq \sum_{i\in\mathcal{B}bb N}J(A_{i})^{\frac{p}{p-q}}\mu(A_{i})<\infty.\\
\nonumber
\end{eqnarray}
This is a contradiction.\\
$(b)$ Let $g\in \overline{W(L^{p}(\mu))}$ then there exists a sequence $(Wf_{n})_{n\in\mathcal{B}bb N}\subseteq W(L^{p}(\mu))$ such that $Wf_{n}\longrightarrow g$ and $\|f_{n}\|<1$. If the set $\{i\in\mathcal{B}bb N~;~J(A_{i})>0\}$ is empty then $W$ is the zero operator . Otherwise we may assume there exists some $k\in \mathcal{B}bb N$ such that $J(A_{i})>0$ for $1\leq i \leq k$ and $J(A_{i})=0$ for any $i>k$. As $f_{n}\in L^{p}(\mu)$ for all $n$, $|f_{n}(A_{i})|\leq\frac{\|f_{n}\|_{p}}{\sqrt[p]{\mu(A_{i})}}\leq\frac{1}{\sqrt[p]{\mu(A_{i})}}$ for any $1\leq i\leq k$ and any $n\in \mathcal{B}bb N$. By Bolzano-Weierstrass there exists a subsequence of nutural number $(n_{j})_{j\in \mathcal{B}bb N}$ such that for each fixed $1\leq i\leq k$ the sequence $(f_{n_{j}}(A_{i}))_{j\in \mathcal{B}bb N}$ converges. Suppose $\lim_{j\rightarrow \infty}f_{n_{j}}(A_{i})=\varphirsigma_{j}(\in \mathcal{B}bb C)$ and define $f=\sum_{i=1}^{k}\varphirsigma_{j}\chi_{A_{i}}$. Then $f\in L^{p}(\mu)$. For every $\varphirepsilonilon>0$ we have that
\begin{eqnarray}
\nonumber \|g-Wf\|_{q}&\leq& \|g-Wf_{n}\|_{q}+\|Wf_{n}-Wf_{n_{j}}\|_{q}+\|Wf_{n_{j}}-Wf\|_{q}\\
\nonumber &\leq& \frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+\int_{X}|W(f_{n_{j}}-f)|^{q}d\mu\\
\nonumber &\leq& \frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+n^{q-1}\int_{X}J|f_{n_{j}}-f|^{q}d\mu\\
\nonumber &= &\frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+n^{q-1}\sum_{i=1}^{k}J(A_{i})|f_{n_{j}}(A_{i})-\varphirsigma_{j}|^{q}\mu(A_{i})\\
\nonumber &\longrightarrow& 0\\
\nonumber
\end{eqnarray}
\end{proof}
In the next theorem we obtain some necessary and sufficient conditions for closedness of range when the operator projects the $L^p$ into $L^{q}$ when $1\leq p<q<\infty$.
\begin{theorem}
Suppose that $1\leq p< q<\infty$ and let $W=\sum_{i=1}^{m}u_{i}C_{\varphi_{i}}$ be a bounded operator from $L^{p}(\mu)$ into $L^{q}(\mu)$. Then the following hold.
\begin{enumerate}
\item[(a)] If $J(B)=0, ~~\mu -a.e$ and $\sum_{i\in \mathcal{B}bb N}J(A_{i})\mu(A_{i})<\infty$ then $W$ has closed range.
\item[(b)] If $W$ has closed range and is injective then $J(B)=0, ~~\mu -a.e$ .
\item[(c)] Let $\mu(X)<\infty$. If $W$ has closed range and is injective then there exists a constant $\delta>0$ such that $u=\sum_{i=1}^{n}u_{i}^{p}\geq \delta$ on $X$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(a)$ Take any sequence $(Wf_{n})_{n \in \Bbb N}$ in $W(L^{p}(\mu))$ with $\|f_{n}\|<1$. For fixed $i\in \Bbb N$ the sequence $(f_{n}(A_{i}))_{n \in \Bbb N}$ is bounded by $\frac{1}{\sqrt[p]{\mu(A_{i})}}$. Applying Cantor's diagonalization process, we extract a subsequence $(f_{n_{k}})_{k \in \Bbb N}$ such that with each fixed $i$, $f_{n_{k}}(A_{i})\rightarrow \alpha_{i}$ for some $\alpha_{i} \in \Bbb C$. Define $f= \sum_{i=1}^{\infty}\alpha_{i} \chi_{A_{i}}$. By Fatou's lemma we have $\int_{X}|f|^{p}d\mu \leq \lim\inf_{k\rightarrow \infty}\int_{X}|f_{n_{k}}|^{p}d\mu \leq 1 $, or $f \in L^{p}(\mu)$. Then we have
\begin{eqnarray}
\nonumber \|g-Wf\|_{q}&\leq& \|g-Wf_{n}\|_{q}+\|Wf_{n}-Wf_{n_{k}}\|_{q}+\|Wf_{n_{k}}-Wf\|_{q}\\
\nonumber &\leq& \frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+\int_{X}|W(f_{n_{k}}-f)|^{q}d\mu\\
\nonumber &\leq& \frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+m^{q-1}\int_{X}J|f_{n_{k}}-f|^{q}d\mu\\
\nonumber &= &\frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+m^{q-1}\sum_{i=1}^{\infty}J(A_{i})|f_{n_{k}}(A_{i})-\alpha_{i}|^{q}\mu(A_{i})\\
\nonumber &\longrightarrow& 0\\
\nonumber
\end{eqnarray}
Obviously $W(L^{p}(\mu))$ is closed in $L^{q}(\mu)$. \\
$(b)$ Suppose on the contrary, $\mu(\{x\in B~:~J(x)>0\})>0$. Then there exists some $\delta>0$ such that the set $G=\{x\in B~:~J(x)\geq\delta\}$ has positive $\mu$- measure. We assume $\mu(G)<\infty$. Moreover, as $G$ is non atomic, we can further assume that $\mu(X\backslash G)>0$. Consider the Banach space $L^{p}_{\mid_{G}}(\mu)$ and the operator $W_{\mid_{G}}$ defined on $L^{p}_{\mid_{G}}(\mu)$. We claim that $W_{\mid_{G}}(L^{p}_{\mid_{G}}(\mu))$ is closed in $L^{q}(\mu)$. To prove we take any convergent sequence $(W_{\mid_{G}}(f_{n}))_{n\in \mathcal{B}bb N}$ in $W_{\mid_{G}}(L^{p}_{\mid_{G}}(\mu))$. Let $g\in L^{q}(\mu)$ satisfy
$\|W_{\mid_{G}}(f_{n})-g\|_{q}\rightarrow 0$ as $n\rightarrow \infty$. Note that $(W_{\mid_{G}}(f_{n}))_{n\in \mathcal{B}bb N}$ can be ragarded as a sequence in $W(L^{p}(\mu))$. The closedness of range of $W$ yeilds an $f\in L^{p}(\mu)$ with $g=Wf~~ \mu - a.e$ On $X$. Then assume $W$ has closed range and is injective so there exists a constant $d>0$ such that $\|W_{\mid_{G}}(f_{n})-Wf\|_{q}\geq d\|f_{n}-f\|_{p}$. As $\|W_{\mid_{ G}}(f_{n})-g\|_{q}=\|W_{\mid_{G}}(f_{n})-Wf\|_{q}=0$ and $\|f_{n}-f\|_{p}^{p}=\int_{G}|f_{n}-f|^{p}d\mu+\int_{X\backslash G}|f_{n}-f|^{p}d\mu$ we have that $\int_{X\backslash G}|f|^{p}d\mu=0$ and so $f\in L^{p}_{\mid_{G}}(\mu)$. Then there exists some conctant $c>0$ such that $\|W_{\mid_{G}}f\|_{q}\geq c\|f\|_{p}$ for all $f\in L^{p}_{\mid_{G}}(\mu)$. We claim that this is impossible by showing that for any $\alpha>0 $, there is some $f_{\alpha}\in L^{p}_{\mid_{G}}(\mu)$ satisfying $\|W_{\mid_{G}}f\|_{q}<c\|f\|_{p}$. For any $n\in \mathcal{B}bb N$, define $G_{n}=\{x\in G~;~n-1\leq J(x)\leq n\}$. Then $G=\left(\cup_{n\in \mathcal{B}bb N}G_{n}\right)\cup\{x\in G ~;~ J(x)=\infty\}$. Since $W$ is a bounded operator on $L^{p}(\mu)$ so $J$ is finite valued $\mu$-a.e on $X$, then we have $\mu(\{x\in G ~;~ J(x)=\infty\})=0$. Now as $\mu(G)>0$, $\mu(G_{N})>0$ for some $N\in \mathcal{B}bb N$. Since $G_{N}$ is non- atomic, for any $\alpha>0$, we can choose some set $E_{\alpha}\in \Sigma$ such that $E_{\alpha}\subseteq G_{N}$ and $\mu(E_{\alpha})=\frac{\mu (G_{N})}{K}$, where $K< \frac{N^{\frac{q}{q-p}}\mu(G_{N}}{\alpha^{\frac{pq}{q-p}}}$. Take $f_{\alpha}=\chi_{E_{\alpha}}$. Obviously $f_{\alpha}\in L^{p}_{\mid_{G}}(\mu)$. Moreover
\begin{eqnarray}
\nonumber
\|W_{\mid_{G}}f_{\alpha}\|_{p}&\leq & m^{\frac{q-1}{q}}\left(\int_{X}J|f_{\alpha}|^{q}d\mu\right)^{\frac{1}{q}}\\
\nonumber & < & m^{\frac{q-1}{q}}\left(\frac{N\mu(G_{N})}{K}\right)^{\frac{1}{q}}\\
\nonumber &=& m^{\frac{q-1}{q}}N^{\frac{1}{q}}\left(\frac{\mu(G_{N})}{K}\right)^{\frac{1}{p}+\frac{p-q}{pq}}\\
\nonumber &=& m^{\frac{q-1}{q}}N^{\frac{1}{q}}\|f_{\alpha}\|_{p}\left(\frac{K}{\mu(G_{N})}\right)^{\frac{q-p}{pq}}\\
\nonumber &<& \frac{N}{\alpha}\|f_{\alpha}\|_{p}.
\nonumber
\end{eqnarray}
This prove our claim and therefore we must have $J=0, ~~\mu -a.e$ on $B$. \\
$(c)$ Assume $W$ has closed range and is injective so there exists a constant $d>0$ such that $\|Wf\|_{p}\geq d \|f\|_{p}$ for any $f\in L^{p}(\mu)$.
\begin{eqnarray}
\nonumber n^{p-1}\int_{X}\sum_{i=1}^{n}u_{i}^{p} d\mu&\geq & n^{p-1}\sum_{i=1}^{n}\int_{\varphirphi_{i}^{-1}(X)}u_{i}^{p}d\mu\\
\nonumber & = &n^{p-1}\sum_{i=1}^{n}\int_{X}u_{i}^{p}\chi_{\varphirphi_{i}^{-1}(X)}d\mu\\
\nonumber &\geq &\int_{X}|W\chi_{X}|^{p}d\mu\\
\nonumber &= &\|W\chi_{X}\|_{p}^{p}\\
\nonumber &\geq & d^{p}\|\chi_{X}\|_{p}^{p}=d^{p}\mu(X)\\
\nonumber
\end{eqnarray}
so $u \geq \delta$ on $X$ . The proof is now complete.
\end{proof}
In the sequel we investigate the closedness of range the operator in from $L^{\infty}$ into $L^q$ and the converse. First we find some necessary and sufficient conditions for the case that $W$ is a bounded operator from $L^{\infty}$ into $L^q$ with $1<q<\infty$.
\begin{theorem}
Suppose that $1\leq q<\infty$. Let $J=\sum_{r=1}^{n}h_{r}E_{r}(|u_{r}|^{q})\circ \varphi_{r}^{-1}$ and $W$ be an operator from $L^{\infty}(\mu)$ into $L^{q}(\mu)$. The following hold.
\begin{enumerate}
\item[(a)]
If
\begin{enumerate}
\item[(1)] $W$ has closed range.
\item[(2)] $W$ is injective.
\item[(3)] $\sum_{i\in\mathcal{B}bb N}J(A_{i})\mu(A_{i})<\infty.$
\end{enumerate}
Then the set $\{i\in\mathcal{B}bb N~;~J(A_{i})>0\}$ is finite.
\item[(b)] If $J(B)=0$, $\mu-a.e$~~ and the set $\{i\in\mathcal{B}bb N~;~J(A_{i})>0\}$ is finite then $W$ has closed range.
\end{enumerate}
\end{theorem}
\begin{proof}
$(a)$ Suppose on the contrary, the set $\{i\in\Bbb N~;~J(A_{i})>0\}$ is infinite. Since $W$ has closed range and is injective we can find some constant $d>0$ such that $\|Wf\|_{q}\geq d\|f\|_{\infty}$ for all $f\in L^{\infty}(\mu)$. Thus for any $i\in \Bbb N$, $\|W\chi_{A_{i}}\|_{q}^{q}\geq d^{q}$ and so we have
\begin{eqnarray}
\nonumber d^{q}&\leq & \|W\chi_{A_{i}}\|_{q}^{q}\\
\nonumber & = &\int_{X}|\sum_{r=1}^{n}u_{r}\chi_{A_{i}}\circledastrc \varphirphi_{r}|^{q}d\mu\\
\nonumber &\leq & n^{q-1}\int_{X}J\chi_{A_{i}}d\mu\\
\nonumber &= &n^{q-1}J(A_{i})\mu(A_{i})\\
\nonumber
\end{eqnarray}
It follows from the preceding inequality that
\begin{eqnarray}
\nonumber \frac{d^{q}}{n^{q-1}}\leq J(A_{i})\mu(A_{i})\\
\nonumber
\end{eqnarray}
Therefore,
\begin{eqnarray}
\nonumber \infty=\sum_{i\in\mathcal{B}bb N}\frac{d^{q}}{n^{q-1}}\leq \sum_{i\in\mathcal{B}bb N}J(A_{i})\mu(A_{i})<\infty\\
\nonumber
\end{eqnarray}
contradiction arises. \\
$(b)$ Let $g\in \overline{W(L^{\infty}(\mu))}$ then there exists a sequence $(Wf_{n})_{n\in\mathcal{B}bb N}\subseteq W(L^{\infty}(\mu))$ such that$Wf_{n}\longrightarrow g$ with $\|f_{n}\|<1$. If the set $\{i\in\mathcal{B}bb N~;~J(A_{i})>0\}$ is empty then $W$ is the zero operator . Otherwise we may assume there exists some $k\in \mathcal{B}bb N$ such that $J(A_{i})>0$ for $1\leq i \leq k$ and $J(A_{i})=0$ for any $i>k$. As $f_{n}\in L^{\infty}(\mu)$ for all $n$, $|f_{n}(A_{i})|\leq\|f_{n}\|_{\infty}$ for any $1\leq i\leq k$ and any $n\in \mathcal{B}bb N$. By Bolzano-Weierstrass there exists a subsequence of nutural number $(n_{j})_{j\in \mathcal{B}bb N}$ such thst for each fixed $1\leq i\leq k$ the sequence $(f_{n_{j}}(A_{i}))_{j\in \mathcal{B}bb N}$ converjes. Suppose $\lim_{j\rightarrow \infty}f_{n_{j}}(A_{i})=\varphirsigma_{j}(\in \mathcal{B}bb C)$ and define $f=\sum_{i=1}^{k}\varphirsigma_{j}\chi_{A_{i}}$. Then $f\in L^{\infty}(\mu)$. For every $\varphirepsilonilon>0$ we have that
\begin{eqnarray}
\nonumber \|g-Wf\|_{q}&\leq& \|g-Wf_{n}\|_{q}+\|Wf_{n}-Wf_{n_{j}}\|_{q}+\|Wf_{n_{j}}-Wf\|_{q}\\
\nonumber &\leq& \frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+\int_{X}|W(f_{n_{j}}-f)|^{q}d\mu\\
\nonumber &\leq& \frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+n^{q-1}\int_{X}J|f_{n_{j}}-f|^{q}d\mu\\
\nonumber &= &\frac{\varphirepsilonilon}{3}+\frac{\varphirepsilonilon}{3}+n^{q-1}\sum_{i=1}^{k}J(A_{i})|f_{n_{j}}(A_{i})-\varphirsigma_{j}|^{q}\mu(A_{i})\\
\nonumber &\longrightarrow& 0\\
\nonumber
\end{eqnarray}
\end{proof}
Now we find some necessary and sufficient conditions for the case that $W$ is a bounded operator from $L^p$ into $L^{\infty}$ with $1<p<\infty$.
\begin{theorem}
Let the $u_{i}$'s be nonnegative and $\mu(X)<\infty$. Suppose that $1\leq p<\infty$ and let $W$ be an operator from $L^{p}(\mu)$ into $L^{\infty}(\mu)$. The following hold.
\begin{enumerate}
\item[(a)] If $(X, \Sigma, \mu)$ is a purely atomic space and $W$ is a bounded operator then $W$ has closed range.
\item[(b)] If $W$ has closed range and is injective then there exists a constant $\delta>0$ such that $u=\sum_{i=1}^{n}u_{i}^{p}\geq \delta$ on $X$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(a)$ Take any sequence $(Wf_{n})_{n \in \mathbb{N}}$ in $W(L^{p}(\mu))$ with $\|f_{n}\|<1$. For fixed $i\in \mathbb{N}$ the sequence $(f_{n}(A_{i}))_{n \in \mathbb{N}}$ is bounded by $\frac{1}{\sqrt[p]{\mu(A_{i})}}$. Applying Cantor's diagonalization process, we extract a subsequence $(f_{n_{k}})_{k \in \mathbb{N}}$ such that for each fixed $i$, $f_{n_{k}}(A_{i})\rightarrow \alpha_{i}$ for some $\alpha_{i} \in \mathbb{C}$. Define $f= \sum_{i=1}^{\infty}\alpha_{i} \chi_{A_{i}}$. By Fatou's lemma we have $\int_{X}|f|^{p}d\mu \leq \liminf_{k\rightarrow \infty}\int_{X}|f_{n_{k}}|^{p}d\mu \leq 1 $, so $f \in L^{p}(\mu)$. Then we have
\begin{eqnarray}
\nonumber \|g-Wf\|_{\infty}&\leq& \|g-Wf_{n}\|_{\infty}+\|Wf_{n}-Wf_{n_{k}}\|_{\infty}+\|Wf_{n_{k}}-Wf\|_{\infty}\\
\nonumber &\leq& \frac{\varepsilon}{3}+\frac{\varepsilon}{3}+\|W\|\int_{X}|f_{n_{k}}-f|^{q}d\mu\\
\nonumber &\leq& \frac{\varepsilon}{3}+\frac{\varepsilon}{3}+\|W\|\int_{\cup_{i\in \mathbb{N}}A_{i}}|f_{n_{k}}-f|^{q}d\mu\\
\nonumber &= &\frac{\varepsilon}{3}+\frac{\varepsilon}{3}+\|W\|\sum_{i=1}^{\infty}|f_{n_{k}}(A_{i})-\alpha_{i}|^{q}\mu(A_{i})\\
\nonumber &\longrightarrow& 0\\
\nonumber
\end{eqnarray}
Obviously $W(L^{p}(\mu))$ is closed in $L^{\infty}(\mu)$. \\
$(b)$ Assume $W$ has closed range and is injective, so there exists a constant $d>0$ such that $\|Wf\|_{\infty}\geq d \|f\|_{p}$ for any $f\in L^{p}(\mu)$. Take $\delta=\frac{d^{p}\mu(X)}{n^{p-1}}$. Then,
\begin{eqnarray}
\nonumber |\sum_{i=1}^{n}u_{i}\chi_{X}\circ \varphi_{i}|^{p} &\leq & n^{p-1}\sum_{i=1}^{n}u_{i}^{p}\\
\nonumber
\end{eqnarray}
Therefore,
\begin{eqnarray}
\nonumber n^{p-1}\sum_{i=1}^{n}u_{i}^{p}&\geq& (\sum_{i=1}^{n}u_{i})^{p}\\
\nonumber &\geq&\|W\chi_{X}\|_{\infty}^{p} \\
\nonumber &\geq & d^{p}\|\chi_{X}\|_{p}^{p}=d^{p}\mu(X)\\
\nonumber
\end{eqnarray}
so $u \geq \delta$ on $X$. The proof is now complete.
\end{proof}
Here we consider $W$ as a bounded operator on $L^{\infty}$.
\begin{theorem}
Let the $u_{i}$'s be nonnegative and $\mu(X)<\infty$. Suppose that $W$ is a bounded operator from $L^{\infty}(\mu)$ into $L^{\infty}(\mu)$. The following hold.
\begin{enumerate}
\item[(a)] If $(X, \Sigma, \mu)$ is a purely atomic measure space, then $W$ has closed range.
\item[(b)] If $W$ has closed range and is injective then there exists a constant $\delta>0$ such that $u=\sum_{i=1}^{n}u_{i}\geq \delta$ on $X$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(a)$ Take any sequence $(Wf_{n})_{n \in \mathbb{N}}$ in $W(L^{\infty}(\mu))$ with $\|f_{n}\|<1$. For fixed $i\in \mathbb{N}$ the sequence $(f_{n}(A_{i}))_{n \in \mathbb{N}}$ is bounded, since $|f_{n}(A_{i})|\leq \|f_{n}\|<1$. Applying Cantor's diagonalization process, we extract a subsequence $(f_{n_{k}})_{k \in \mathbb{N}}$ such that for each fixed $i$, $f_{n_{k}}(A_{i})\rightarrow \alpha_{i}$ for some $\alpha_{i} \in \mathbb{C}$. Define $f= \sum_{i=1}^{\infty}\alpha_{i} \chi_{A_{i}}$. Then we have
\begin{eqnarray}
\nonumber \|g-Wf\|_{\infty}&\leq& \|g-Wf_{n}\|_{\infty}+\|Wf_{n}-Wf_{n_{k}}\|_{\infty}+\|Wf_{n_{k}}-Wf\|_{\infty}\\
\nonumber &\leq& \frac{\varepsilon}{3}+\frac{\varepsilon}{3}+\|W\|\|f_{n_{k}}-f\|_{\infty}\\
\nonumber &\leq& \frac{\varepsilon}{3}+\frac{\varepsilon}{3}+\|W\|\sup_{i\in \mathbb{N}}|f_{n_{k}}(A_{i})-\alpha_{i}|\\
\nonumber &\longrightarrow& 0\\
\nonumber
\end{eqnarray}
Obviously $W(L^{\infty}(\mu))$ is closed in $L^{\infty}(\mu)$. \\
$(b)$ Assume $W$ has closed range and is injective, so there exists a constant $d>0$ such that $\|Wf\|_{\infty}\geq d \|f\|_{\infty}$ for any $f\in L^{\infty}(\mu)$. Take $\delta=d$. Then,
\begin{eqnarray}
\nonumber |\sum_{i=1}^{n}u_{i}\chi_{X}\circ \varphi_{i}| &\leq & \sum_{i=1}^{n}u_{i}\\
\nonumber
\end{eqnarray}
Therefore,
\begin{eqnarray}
\nonumber \sum_{i=1}^{n}u_{i} &\geq& \sum_{i=1}^{n}u_{i}\chi_{X}\circ \varphi_{i}\\
\nonumber &\geq & d\\
\nonumber
\end{eqnarray}
so $u \geq \delta$ on $X$. The proof is now complete.
\end{proof}
In the next theorem we obtain the polar decomposition of $W$ as a bounded operator on the Hilbert space $L^2$.
\begin{theorem}
Suppose $u_{i}(\varphi_{j}^{-1})= 0, ~~~~i\neq j$. The unique polar decomposition of $W=\sum_{i=1}^{n}u_{i}C_{\varphi_{i}}$ is $V|W|$ where $|W|(f)=M_{\sqrt{J}}f$, $V(g)=\sum_{i=1}^{n}u_{i}\frac{\chi_{B}g}{\sqrt{J}}\circ \varphi_{i}$ and $B=Coz (J=\sum_{i=1}^{n}h_{i}E_{i}(u_{i}^{2})\circ \varphi_{i}^{-1})$.
\end{theorem}
\begin{proof}
We have that
\begin{eqnarray}
\nonumber
\|Wf\|_{2}^{2} &=& (Wf , Wf)\\
\nonumber & =& (W^{*}Wf , f)\\
\nonumber &=& \int_{X}J|f|^{2}d\mu = \int_{B}J|f|^{2}d\mu+\int_{X\backslash B}J|f|^{2}d\mu\\
\nonumber &=& \int_{B}J|f|^{2}d\mu
\nonumber
\end{eqnarray}
where $J=\sum_{i=1}^{n}h_{i}E_{i}(u_{i})^{2}\circ \varphi_{i}^{-1}$. Then $\ker W=L^{2}(X\backslash B)=(L^{2}(B))^{\bot}$. For each $f\in L^{2}(\mu)$ write $f=\chi_{B}f+\chi_{X\backslash B}f$ so that $Wf=W\chi_{B}f$. We may define a partial isometry $V$ with initial space $(\ker W)^{\bot}=L^{2}(B)$ and final space $Ran W$ by $V(g)=\sum_{i=1}^{n}u_{i}\frac{\chi_{B}g}{\sqrt{J}}\circ \varphi_{i}, ~~~~~g\in L^{2}(\mu)$. Then the unique polar decomposition of $W$ is given by $W=VM_{\sqrt{J}}$.
\end{proof}
Finally, in the next two assertions we investigate the invertibility of $W$.
\begin{theorem}
Let $(X, \Sigma, \mu)$ be a purely atomic measure space, $0\neq u_{i}\in L^{\infty}(\mu)$, and let $W$ be a finite sum of weighted composition operators on $L^{p}(\mu)$. If there is a positive integer $N_{i}$ such that $\varphi_{i}^{N_{i}}(A_{n})=A_{n}$ up to a null set for all $n\geq 1$ and $u_{i}(\varphi_{j})=0,~~~i\neq j$, then
\begin{enumerate}
\item[(a)] $W$ is invertible.
\item[(b)] The set function $E$ that is defined as $E(B)=M_{\chi_{B}\circ v}$ for all Borel sets $B$ of $\mathbb{C}$ is a spectral measure, where $v=\sum_{i=1}^{n}u_{i}\,u_{i}\circ \varphi_{i}\cdots u_{i}\circ \varphi_{i}^{N-1}$, $N=[N_{1}, \cdots, N_{n}]$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(a)$ Note that $\ker W^{r}\subseteq \ker W^{r+1}$ and $W^{r+1}(L^{p}(\mu))\subseteq W^{r}(L^{p}(\mu))$. If there is a positive integer $N_{i}$ such that $\varphi_{i}^{N_{i}}(A_{n})=A_{n}$ up to a null set for all $n\geq 1$, then $W^{N}$ is a multiplication operator induced by the function $v=\sum_{i=1}^{n}u_{i}\,u_{i}\circ \varphi_{i}\cdots u_{i}\circ \varphi_{i}^{N-1}$, where $N=[N_{1}, \cdots, N_{n}]$.\\
If $f\in \ker W^{N}$ then $W^{N}f(A_{n})=0$ for all $n\geq 1$. We have that $vf(A_{n})=0$, therefore $f=0$, $\mu-a.e$ on $X$. So $W$ is injective.\\
Let $g\in L^{p}(\mu)$; then $W^{N}(\frac{g}{v})(A_{n})=g(A_{n})$ for all $n\geq 1$. So $W$ is surjective.\\
$(b)$ As observed by Rho and Yoo (\cite{Rho}, Example 1), the multiplication operator $M_{v}$ is spectral. In fact the spectral measure $E$ is given by $E(B)=M_{\chi_{B}\circ v }$ for all Borel sets $B$ of $\mathbb{C}$.
\end{proof}
\begin{theorem}
Let $W=\sum_{i=1}^{n}u_{i}C_{\varphi_{i}}$ be a bounded operator on $L^{2}(\mu)$ and $u_{i}(\varphi_{j}^{-1})=0 ,~~~ i\neq j$. The following statements are equivalent.
\begin{enumerate}
\item[(a)] $W$ is injective.
\item[(b)] $J=\sum_{i=1}^{n}h_{i}E_{i}(|u_{i}|^{2})\circ \varphi_{i}^{-1} >0 ~~~~~\mu - a.e$ on $X$.
\item[(c)] Whenever $J(E)=0$ for $E\in \Sigma$, $\mu(E)=0$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(b)\Rightarrow (a)$ Take any $f\in\ker W$; then we have
\begin{eqnarray}
\nonumber
0=\|Wf\|_{2}^{2} &=& (Wf , Wf)\\
\nonumber & =& (W^{*}Wf , f)\\
\nonumber &=& \int_{X}J|f|^{2}d\mu = \int_{Coz f}J|f|^{2}d\mu+\int_{X\backslash Coz f}J|f|^{2}d\mu\\
\nonumber &=& \int_{Coz f}J|f|^{2}d\mu\\
\nonumber
\end{eqnarray}
Since $J >0 ~~~~~\mu - a.e$ on $Coz f$, it follows that $\mu(Coz f)=0$ or $f=0~~~\mu - a.e$ on $X$.\\
$(a)\Rightarrow (c)$ Let $E\in \Sigma$ satisfy $J(E)=0$; we may also assume $\mu(E)<\infty$. Then $\chi_{E} \in L^{2}(\mu)$ and $\|W\chi_{E}\|_{2}^{2}=\int_{X}J\chi_{E}d\mu=\int_{E}Jd\mu=0$. Now the injectivity of $W$ implies that $\chi_{E}=0, ~~~\mu - a.e$ on $X$. Hence $\mu(E)=0$. \\
$(c)\Rightarrow (b)$ Put $B=Coz J$. Clearly, $X\backslash B\in \Sigma$. Moreover, since $J(X\backslash B)=0$
we must have $\mu(X\backslash B)=0$. This shows that $J >0, ~~~~~\mu - a.e$ on $X$.
\end{proof}
\end{document}
|
\begin{document}
\title{Confined Contextuality in Neutron Interferometry: \\ Observing the Quantum Pigeonhole Effect}
\author{Mordecai Waegell}
\affiliation{Institute for Quantum Studies, Chapman University, Orange, CA 92866, USA}
\affiliation{Schmid College of Science and Technology, Chapman University, Orange, CA 92866, USA}
\author{Tobias Denkmayr}
\affiliation{Atominstitut, TU-Wien, Stadionallee 2, 1020 Vienna, Austria}
\author{Hermann Geppert}
\affiliation{Atominstitut, TU-Wien, Stadionallee 2, 1020 Vienna, Austria}
\author{David Ebner}
\affiliation{Atominstitut, TU-Wien, Stadionallee 2, 1020 Vienna, Austria}
\author{Tobias Jenke}
\affiliation{Institut Laue-Langevin 6, Rue Jules Horowitz, 38042 Grenoble Cedex 9, France}
\author{Yuji Hasegawa}
\affiliation{Atominstitut, TU-Wien, Stadionallee 2, 1020 Vienna, Austria}
\author{Stephan Sponar}
\affiliation{Atominstitut, TU-Wien, Stadionallee 2, 1020 Vienna, Austria}
\author{Justin Dressel}
\affiliation{Institute for Quantum Studies, Chapman University, Orange, CA 92866, USA}
\affiliation{Schmid College of Science and Technology, Chapman University, Orange, CA 92866, USA}
\author{Jeff Tollaksen}
\affiliation{Institute for Quantum Studies, Chapman University, Orange, CA 92866, USA}
\affiliation{Schmid College of Science and Technology, Chapman University, Orange, CA 92866, USA}
\begin{abstract}
Previous experimental tests of quantum contextuality based on the Bell-Kochen-Specker (BKS) theorem have demonstrated that not all observables among a given set can be assigned noncontextual eigenvalue predictions, but have never identified which specific observables must fail such assignment. We now remedy this shortcoming by showing that BKS contextuality can be confined to particular observables by pre- and postselection, resulting in anomalous weak values that we measure using modern neutron interferometry. We construct a confined contextuality witness from weak values, which we measure experimentally to obtain a $5\sigma$ average violation of the noncontextual bound, with one contributing term violating an independent bound by more than $99\sigma$. This weakly measured confined BKS contextuality also confirms the quantum pigeonhole effect, wherein eigenvalue assignments to contextual observables apparently violate the classical pigeonhole principle.
\end{abstract}
\maketitle
\section{Introduction}
Quantum contextuality, as introduced by Bell, Kochen and Specker (BKS) \cite{Bell2,KS}, forbids all observable properties of a system from being predefined independently from how they are observed. This phenomenon is one of the most counterintuitive aspects of quantum mechanics, and finds itself at the heart of recent quantum information processing applications \cite{howard2014contextuality, bechmann2000quantum,cabello2011hybrid,abbott2012strong, spekkens2009preparation,W_Primitive}.
The BKS theorem is proved by exhibiting a \emph{BKS-set} of observables \cite{WA_3qubits,WA_Nqubits} that contains geometrically related and mutually commuting subsets (or measurement \emph{contexts}) that result in a logical incompatibility: Any noncontextual hidden variable theory (NCHVT) that pre-assigns eigenvalues globally to the entire BKS-set (i.e., \emph{non}contextually) results in a contradiction with the predictions of quantum mechanics. That is, at least one eigenvalue in a global assignment to a BKS-set cannot be predefined without violating a constraint on the product of eigenvalues within some context, which we call the \emph{contradictory context}. See Appendix \ref{A:Theory} for a review of the BKS theorem and how the BKS-sets used in this article prove it.
Previous contextuality experiments \cite{kirchmair2009state,bartosik2009,d2013experimental} have confirmed such a global contradiction. However, neither the BKS theorem, nor these experiments, specify which contexts are contradictory.
In this article, using recently developed weak measurement techniques in neutron interferometry \cite{rauch00,rauch02,hasegawa03,klepp2014,denkmayr2014,sponar2015,denkmayr2016experimental}, we experimentally demonstrate
which specific measurement context within a BKS-set (Fig.~\ref{SquareSteps}a) must contain contradictory value assignments, essentially \emph{confining} the contextuality \cite{waegell2015contextuality}. Like squeezing a balloon, we condition the BKS-set through pre- and postselection (Fig.~\ref{SquareSteps}b) to force the contradiction to appear in a particular context (Fig.~\ref{SquareSteps}c) \cite{cabello1997no,PhysRevLett.95.200405,pusey2015logical}. Remarkably, measuring the weak values \cite{aharonov1988result} within that context explicitly reveals the contradiction that is left implicit in the original BKS proof. The measured weak values violate the classical pigeonhole principle \cite{aharonov2016quantum}, and contradict NCHVT value assignments to the projectors in that context, which we call \emph{forbidden projectors}. We show that the confinement of contextuality in the quantum pigeonhole effect forces some of the forbidden projectors to have negative weak values. The appearance of these negative weak values thus witnesses the confined contextuality, making the forbidden projectors \emph{witness observables} for contextuality. These witnesses corroborate recent results \cite{pusey2014anomalous,mazurek2015experimental,Piacentini2016} that link negative projector weak values to contextuality using Spekkens' generalization of contextuality \cite{spekkens2005}, which encompasses the original notion of BKS.
\begin{figure}
\caption{Confining Bell-Kochen-Specker (BKS) contextuality in the 3-spin Square. Each row or column (measurement context) of the Square mutually commutes. (a) According to quantum mechanics, the product of the three 3-spin measurement outcomes in each row is $+1$ (thin line), while their product in each column is $-1$ (thick line). (b) A particular preparation and postselection fixes the values of two rows. (c) In any noncontextual hidden variable theory, the remaining values must be $-1$, which confines the BKS contradiction to the top row (blue dashed line). This also demonstrates the quantum pigeonhole paradox: all pairs in the row appear anticorrelated, which violates the classical pigeonhole principle. Weak measurements confirm the paradox, revealing the correlation of each pair to be $-0.972 \pm 0.132$, $-1.050 \pm 0.140$, and $-1.020 \pm 0.137$, from left to right.}
\label{SquareSteps}
\end{figure}
In our experiment, we witnessed the BKS-contextuality of neutron spin. We measured the spin using neutron interferometry by performing path-dependent spin rotations, making the path a weakly-coupled meter for the spin (Fig.~\ref{fig:setup}); conditioning the path measurements on spin postselections then reveals the desired weak values \cite{sponar2015}. We collected seventeen independent data sets of neutron spin measurements, indexed $n=1,\ldots,17$. We use these single-spin data sets to show confined contextuality within the $N$-spin Wheel BKS-sets \cite{WA_Nqubits} (see Appendix \ref{A:Theory}) for odd numbers of spins $N=3,5,\ldots,17$, using data sets $n=1,\ldots,N$ and the following simplification. While most contexts in an $N$-spin Wheel are entangled, we use separable pre- and postselection to fix the eigenvalues of certain observables (as in Fig.~\ref{SquareSteps}b), and by the definition of noncontextuality, any NCHVT must assign the same eigenvalues to those observables in the entangled contexts as it does in the separable contexts --- just as in \cite{cabello1997no}. This would be true even if we were considering $N$ 2-level quantum systems of all different types --- photons, superconducting qubits, trapped ions, diamond NV-centers, and so on, regardless of where or when they are located with respect to one another --- it is a logical consequence of noncontextuality, which has nothing to do with actually performing joint measurements. We weakly measure only the remaining separable observables, noting that the weak values are noncontextual by definition. As such, we are able to treat each of the single-spin data sets as representing a distinct spin within an $N$-spin Wheel BKS-set. We are not claiming this is the same as performing genuine $N$-spin measurements; we are claiming that $N$ single-spin measurements are sufficient to reveal the contradiction inherent in the $N$-spin Wheel BKS set, between quantum mechanics and NCHVTs.
Weak values do not appear shot-by-shot in our experiment, but only as conditioned averages from ensembles of identically preselected and postselected data. The fact that we are constructing $N$-spin weak values from single-spin weak value measurements may seem odd, but this construction is generally valid for any averages from probability distributions describing independent (separable) systems --- including our distinct sets of neutron measurements. That is, the complete set of collected single-neutron data was naturally divided into 17 smaller and independent subsets, each collected sequentially in time to minimize experimental drift, and each chosen to be sufficiently large to achieve acceptable statistical error for estimating a single-spin weak value. The number 17 was limited only by the total collected statistics, which was limited by the neutron flux from the reactor and the stability of the experimental setup. The measured $N$-spin witnesses violate their noncontextuality bounds by $\agt\!5\sigma$, showing that the contextuality was indeed confined in our experiment (Fig.~\ref{WeakValues}). One particular 5-spin weak value exceeded its bound by more than $99 \sigma$. The $N$-spin confined contextuality observed here also violates the classical pigeonhole principle for putting $N$ pigeons in 2 boxes. Our experiment verifies the quantum predictions, and rules out NCHVTs of quantum mechanics.
This article is organized as follows. In Section~\ref{sec:confine}, we discuss how to confine contextuality to particular contexts with pre- and postselection, and the relationship between such confined contextuality and the quantum pigeonhole effect. In Section~\ref{sec:witness} we show how the confined contextuality of the Wheel family of BKS-Sets permits the construction of $N$-spin contextuality witnesses that may be factored into measurable single-spin weak values. In Section~\ref{sec:procedure}, we detail the experimental procedure used to measure the single-spin weak values using neutron interferometry. In Section~\ref{sec:results}, we summarize the main results for the $N$-spin contextuality witnesses. We conclude in Section~\ref{sec:conclusion}. For completeness, we also include two appendices. In Appendix~\ref{A:Theory}, we describe the construction of the Wheel family of BKS-sets used in the main text. In Appendix~\ref{B:Experiment}, we provide additional details about the experimental determination of the single-spin weak values used to construct the results reported in the main text.
\section{Confining BKS contextuality}\label{sec:confine}
BKS contextuality confinement follows from the Aharonov-Bergmann-Lebowitz (ABL) formula \cite{aharonov1964time}, which gives the probability of obtaining a particular strong measurement outcome $j$, between a preparation $\ket{\psi}$ and a postselection $\bra{\phi}$. The outcome $j$ corresponds to a projection operator $\Pi_j$ that is part of a complete measurement basis $\mathcal{B}$ (i.e., context) such that $\sum_{j \in \mathcal{B}} \Pi_j = I$. The ABL formula can be expressed in terms of weak values $(\Pi_j)_w = \langle \phi | \Pi_j | \psi \rangle / \langle \phi | \psi \rangle$ \cite{waegell2015contextuality},
\begin{equation}
P_{\textrm{ABL}}(\Pi_j=1 \; \vert \; \psi, \phi, \mathcal{B}) = \frac{ |(\Pi_{j})_w|^2 } { \sum_{k\in\mathcal{B}} | (\Pi_{k})_w|^2 }. \label{ABL}
\end{equation}
It then follows from $\sum_{k\in\mathcal{B}} (\Pi_k)_w = 1$ that $P_{\textrm{ABL}}(\Pi_j=1 \; \vert \; \psi, \phi, \mathcal{B}) = 1$ implies $(\Pi_j)_w = 1$. Furthermore, if $\mathcal{B}$ contains only two outcomes, then the converse also follows: $(\Pi_j)_w = 1$ implies $P_{\textrm{ABL}}(\Pi_j=1 \; \vert \; \psi, \phi, \mathcal{B}) = 1$. As shown in Ref.~\cite{pusey2015logical}, the ABL formula constrains any NCHVT since a projection with an ABL probability of 1 must also be assigned a value of 1 in any NCHVT. Thus, in this case, measuring a projector weak value $(\Pi_j)_w$ of $1$ implies that any NCHVT must also assign $\Pi_j$ a value of $1$ --- and a value of $0$ to all projectors orthogonal to $\Pi_j$.
Specifying to $N$ independent neutron spins, we use $I,X,Y,Z$ to denote the independent spin components (Pauli matrices). We prepare the spins in the product state $|\psi\rangle = |{+}X\rangle^{\otimes N}$ (all $X$ eigenvalues +1), and postselect onto the product state $|\phi\rangle = |{+}Y\rangle^{\otimes N}$. Since the predictions of products of $X$ and $Y$ by an NCHVT must be consistent with these boundary conditions, only products involving $Z$ are left undetermined (see Fig.~\ref{SquareSteps}b). The ABL rule then determines these values, as we now explain.
For our specific case of $N>2$ spins, consider a product of any two spin operators $ZZ$, with spectral decomposition $ZZ = (+1)\Pi_{\rm even} + (-1)\Pi_{\rm odd}$ in terms of the rank-2 parity projectors,
\begin{align}\label{ZZ}
\Pi_{\rm even}
&= \Pi_+\!\otimes\Pi_+ + \Pi_-\!\otimes\Pi_-, \\
\Pi_{\rm odd}
&= \Pi_+\!\otimes\Pi_- + \Pi_-\!\otimes\Pi_+, \nonumber
\end{align}
with $\Pi_\pm \equiv |{\pm}Z\rangle\langle{\pm}Z| = (1 \pm Z)/2$. Given $|\psi\rangle$ and $|\phi\rangle$ defined above, $(\Pi_{\rm even})_w = 0$ and $(\Pi_{\rm odd})_w = 1$, and thus $(ZZ)_w = -1$. The ABL rule in Eq.~\eqref{ABL} then implies $ZZ = -1$ for all pairs of spins in any NCHVT, as illustrated in Fig.~\ref{SquareSteps}.
This pairwise constraint is the \emph{quantum pigeonhole effect} \cite{aharonov2016quantum}. To see this, let the spin eigenstates $\ket{\pm Z}$ correspond to two boxes in which pigeons may be placed. The projectors in Eq.~\eqref{ZZ} describe definite numbers of pigeons in each box, up to an exchange of boxes; i.e., $\Pi_{\rm even}$ denotes two pigeons in one box, while $\Pi_{\rm odd}$ denotes one pigeon in each. The pigeonhole principle states that if $N>2$ pigeons are placed in two boxes, then at least one box must contain multiple pigeons. However, the constraint $ZZ = -1$ for all pairs implies that, regardless of how many pigeons are placed in the two boxes, no two pigeons are ever in the same box!
\section{Witnessing BKS contextuality}\label{sec:witness}
Following the pigeon analogy, all NCHVT assignments of definite numbers of pigeons to each box are forbidden
. The projectors corresponding to such forbidden assignments for $N=3$ are
\begin{align}\label{ZZZ}
\Pi^{(3)}_0
&= \Pi_+\!\otimes\Pi_+\!\otimes\Pi_+ + \Pi_-\!\otimes\Pi_-\!\otimes\Pi_-, \\
\Pi^{(3)}_1
&= \Pi_+\!\otimes\Pi_+\!\otimes\Pi_- + \Pi_-\!\otimes\Pi_-\!\otimes\Pi_+ ,
\nonumber \\
\Pi^{(3)}_2
&= \Pi_+\!\otimes\Pi_-\!\otimes\Pi_+ + \Pi_-\!\otimes\Pi_+\!\otimes\Pi_-,
\nonumber \\
\Pi^{(3)}_3
&= \Pi_+\!\otimes\Pi_-\!\otimes\Pi_- + \Pi_-\!\otimes\Pi_+\!\otimes\Pi_+ ,
\nonumber
\end{align}
which are the invariant eigenspaces of the first row of Fig.~\ref{SquareSteps} ($\Pi^{(3)}_0$ indicates all three pigeons in one box, while $\Pi^{(3)}_{1,2,3}$ are the permutations of two and one). Any NCHVT assigns 0 to all forbidden projectors. We call complete sets of forbidden projectors like this \emph{contextual bases}.
Crucially, such forbidden BKS value assignments manifest as anomalous projector weak values (with real part outside the range $[0,1]$) in the contextual basis of Eq.~\eqref{ZZZ} \cite{waegell2015contextuality}---classical assignments of pigeons to boxes must respect the range $[0,1]$. An anomaly indicates contradictory noncontextual value assignments to the corresponding context. Thus, the forbidden projectors constitute witness observables such that negative weak values imply confined BKS contextuality, and contradict the assignment of 0 by an NCHVT. These witnesses promote the logical contradiction of the quantum pigeonhole effect into an experimentally robust inequality.
For the explicit example in Fig.~\ref{SquareSteps}, the weak value of $\Pi^{(3)}_0$ is
\begin{align}\label{wv3}
(\Pi_0^{(3)})_w
&= \prod_{n=1}^3 \frac{1 + Z_w^{(n)}}{2} + \prod_{n=1}^3 \frac{1 - Z_w^{(n)}}{2} = -\frac{1}{2},
\end{align}
with $\ket{\psi}$ and $\bra{\phi}$ above, where each $n$ is a distinct spin, and $Z_w = \langle {+}Y|\, Z\, |{+}X\rangle / \langle {+}Y|{+}X\rangle = i$ is a purely imaginary single-spin weak value, implying $(ZZ)_w=(Z)_w(Z)_w = -1$. This example also illustrates a subtle point about the connection between anomalous weak values and contextuality. As discussed above, a projector weak value for a separable composite system with a negative real part is a witness of contextuality. However, the projector weak values for each spin are $(\Pi_\pm)_w = (1 \pm Z_w)/2 = e^{\pm i \pi/4}/\sqrt{2}$, which have positive real parts. Nevertheless, the product of three such weak values, $(\Pi_\pm^{\otimes 3})_w = (\Pi_\pm)^3_w = e^{\pm i 3\pi/4}/\sqrt{8}$, has a negative real part, enabling the contextuality witness. In this sense, the observation of a nonzero phase for a projector weak value on a single system already implies a contextuality witness on a larger composite system.
The logic for the above construction for $N=3$ generalizes to odd $N > 3$ (see the family of Wheel BKS-sets \cite{WA_Nqubits}). That is, all classical assignments of $N$ pigeons to 2 boxes are forbidden. Analogously to Fig.~\ref{SquareSteps}, the contextuality is confined to a context in the $N$-spin Wheel-set consisting of $N$ pairwise observables $ZZ$ arranged in a ring. All pairs $(ZZ)_w=-1$ as before, since each $Z_w = i$. We label the invariant eigenspace projectors corresponding to this context by defining the $N$-digit binary sequences $x^{(N)}_j$, $j \in 0 \ldots 2^{N-1}-1$, e.g., $x^{(3)}_0 = (0,0,0)$, $x^{(3)}_1 = (0,0,1)$, $x^{(3)}_2 = (0,1,0)$, $x^{(3)}_3 = (0,1,1)$. The weak values of the $2^{N-1}$ forbidden projectors (witness observables) in this contextual basis are then
\begin{equation}\label{Proj}
(\Pi^{(N)}_j)_w = \prod^{N}_{n = 1} \frac{1 + (-1)^{x^{(N)}_{j,n}}\, Z_w^{(n)}}{2} + \prod^{N}_{n = 1} \frac{1 - (-1)^{x^{(N)}_{j,n}}\,Z_w^{(n)}}{2},
\end{equation}
where $x^{(N)}_{j,n}$ is the $n$th digit of $x^{(N)}_j$. As in Eq.~\eqref{wv3}, these projector weak values may be computed from $N$ single-spin $Z_w$ values. This great simplification enables us to construct all forbidden projectors for any number of spins by measuring single-spin $Z_w$. All projector weak values then evaluate to $(\Pi_j^{(N)})_w = \pm2^{-(N-1)/2}$, with a sign depending on the index.
Finally, we construct an \emph{unbiased contextuality witness} $C^{(N)}$, using all $2^{N-1}$ rank-2 projectors in an $N$-spin contextual basis, that aggregates the contextuality of the entire basis,
\begin{equation}\label{C}
C^{(N)} = I^{(N)} - \sum_{j=0}^{2^{N-1}-1} s_j\, \Pi^{(N)}_j,
\end{equation}
with $s_j = \textrm{sign}[\text{Re}(\Pi_j^{(N)})_w]$, using the predicted value of $(\Pi_j^{(N)})_w$. Regardless of the signs $s_j$, if all $0 \leq \text{Re}(\Pi_j^{(N)})_w \leq 1$, then $\text{Re}\,C^{(N)}_w \geq 0$. Observing $\text{Re}\,C^{(N)}_w < 0$ is thus an experimental witness of confined BKS-contextuality. This choice of the signs $s_j$ optimizes $C^{(N)}_w$ by accumulating anomalous parts of the weak values (below 0 or above 1), producing the ideal values $\text{Re}\,C^{(N)}_w = 1 - 2^{(N-1)/2}$.
\begin{figure}
\caption{Experimental setup. The unpolarized neutron beam passes a magnetically birefringent prism (P) that permits only spin-up neutrons to fulfill the Bragg condition for entering the interferometer. To prevent depolarization, a magnetic guide field (GF) is applied over the whole setup. A DC coil (DC1) aligns the incoming neutron spin along the positive $x$ direction. Inside the interferometer, the neutrons split into two paths (P1) and (P2), where two spin rotators (SRs) can independently rotate the neutron spin in the $xy$ plane. A cadmium slab (CD) can optionally block one of the paths. To tune the relative phase $\chi$ between the path eigenstates, a phase shifter (PS) is inserted into the interferometer. After the interferometer, a second DC coil (DC2) mounted on a translation stage, in combination with a polarizing supermirror (A), postselects a specific spin component. The neutrons are detected by $^3$He detectors (O \& H).}
\label{fig:setup}
\end{figure}
\section{Experimental procedure}\label{sec:procedure}
In our experiment, we measure the weak value $Z_w$ of the neutron spin in the $z$-direction using an interferometer. The neutron's path is used as a pointer to measure both the real and imaginary parts of $Z_w$. This approach has already been successfully used to completely determine weak values of massive systems~\cite{sponar2015}.
The experiment was conducted at the instrument S18 at the high flux research reactor of the Institute Laue-Langevin (ILL) in Grenoble, France. The experimental setup is depicted in Fig.~\ref{fig:setup}.
A perfect silicon crystal selects neutrons with a wavelength of $\lambda_0=1.91$~\AA\, ($\lambda/ \lambda_0\sim 0.02$) by Bragg reflection from a white neutron beam \cite{sponar2015}. Between the monochromator and the interferometer crystal, two magnetically birefringent prisms (P) split the unpolarized beam in two beams, one with the neutron spin aligned parallel to the positive $z$-direction and one aligned antiparallel. Even though the angular separation is just four seconds of arc (exaggerated in Fig. \ref{fig:setup}), only the beam with spin up component fulfills Bragg's condition at the interferometer's first plate. The degree of polarization is above 99\% with the neutron spin state given by $\ket{+Z}$. The other beam passes through unaffected and does not further contribute to the experiment.
\begin{figure*}
\caption{Experimental results witnessing confined contextuality for $N$-spin Wheel BKS-sets, for $N=3,5,7,\ldots,17$. While noncontextual hidden variable predictions for the witness observables lie above the classical bound of 0 (red, dashed), quantum predictions (blue, solid) and experimental data (orange, error bars showing one standard deviation) violate this bound. (a) Unbiased witnesses $C^{(N)}$.}
\label{WeakValues}
\end{figure*}
A DC coil (DC1) in front of the interferometer generates a constant magnetic field $B_y$ in $y$-direction. After entering the coil, the neutron experiences a non-adiabatic field change and its spin starts to precess around $B_y$. If the magnetic field magnitude is adjusted accordingly, the neutron spin will turn by exactly $\pi/2$ in the coil. This changes the initial spin state from $\ket{+Z}$ to $\ket{+X}$, completing the spin preselection.
At the first interferometer plate, the beam is coherently split by amplitude division. In each path, (P1) and (P2), a spin rotator (SR)---small coil in a Helmholtz configuration---produces a weak magnetic field in the $\pm z$ direction. To prevent thermal stress on the interferometer, the coils are water cooled. The weak magnetic fields lead to path-dependent spin rotations around the field axis, causing (weak) entanglement between the spin and path degrees of freedom of each neutron. For all measurements, the angle of rotation was set to $\alpha=15^\circ$. The infidelity $\mathcal{I}=\sin^2\alpha$ between the partial path states corresponding to $z = \pm 1$ quantifies the measurement strength. Our weak measurement has infidelity $\mathcal{I} = 0.067$ \cite{sponar2015}, compared to a strong measurement with $\alpha = 90^\circ$ and $\mathcal{I}=1$.
Between the second and final interferometer plate, a sapphire phase shifter (PS) is inserted. A phase shifter in combination with a Cadmium beam block (CD) mounted on a rotational stage provides full control over the neutron's path for the pointer readout. The phase shifter can change the path state in the equatorial plane of the Bloch sphere, while the beam block permits access to the path eigenstates at the poles.
At the final interferometer plate, the two paths are recombined. A second DC coil (DC2), in combination with a CoTi supermirror array \cite{mezei1976,mezei1977} (A), enables arbitrary spin-state postselection.
The neutrons are detected by $^3$He counter tubes (O) and (H). Of the two outgoing ports, only (O) is analyzed to postselect the spin state $\bra{+Y}$.
All measurements were performed using an IN/OUT method. For each fixed phase shifter position, the intensity is recorded with the spin-path coupling field turned on (IN), and then the intensity is recorded with the coupling field turned off (OUT). Background intensities are also recorded in order to calibrate the counters. This method permits the spin-independent relative phase $\chi$ to be determined for the path postselection state $(\Ket{P1} + e^{i\chi}\Ket{P2})/\sqrt{2}$. After curve-fitting an intensity scan over $\chi$ on the Bloch sphere equator (see Appendix \ref{B:Experiment}), the intensities for the $y$-eigenstates at points $\chi=\pi/2,\,3\pi/2$ are identified by inserting the phase values obtained from the OUT measurements into the IN measurement fit functions. These intensities determine the real part of the spin weak value $\text{Re}\, Z_w$ (Eq.~(19) of \cite{sponar2015}). This method also maximally reduces the influences of phase drift in the interferogram (due to unavoidable instability of the apparatus). To determine the imaginary part $\text{Im}\, Z_w$, it is also necessary to postselect neutron path eigenstates (Eq.~(20) of \cite{sponar2015}), which is accomplished by blocking one path at a time. If an intensity is recorded while path $P2$ is blocked, a postselection onto the state $\Ket{P1}$ is performed, and vice versa. For our choice of pre- and postselection, the expected weak value is $Z_w = i$. The negligible phase shift observed between the IN and OUT interferograms confirms that $\text{Re}Z_w \approx 0$. In contrast to that, the imaginary part shifts the pointer state towards the Bloch sphere poles, changing the relative path intensities.
In the experiment the weak values $Z_w$ of 17 individual spins were determined with high precision. To extract $Z_w$ from one neutron spin data set, two $\chi$-scans were recorded, as well as two single intensities. Together with the required background measurements, a total collection time of ${\sim}10000$ seconds was needed to determine the real and imaginary part of each $Z_w$.
\section{Results}\label{sec:results}
The measured $Z_w$ are used to construct the pairwise anticorrelations $(ZZ)_w \approx -1$ (see Appendix \ref{B:Experiment}), and the $N$-spin witnesses in Eq.~\eqref{Proj} and \eqref{C}.
Fig.~\ref{WeakValues}a,b shows the final results that violate the noncontextuality bound $\text{Re}\,C^{(N)}_w \geq 0$. Fig.~\ref{WeakValues}c,d shows final results that violate independent noncontextuality bounds $\text{Re}(\Pi^{(N)}_j)_w \geq 0$. The contextuality witnesses $C^{(N)}_w$ and $(\Pi^{(N)}_j)_w$ were calculated using Eqs.~\eqref{C} and \eqref{Proj}, respectively, for all odd numbers of spins from $N=3$ to 17. Note that the pair of forbidden projectors $\Pi^{(5)}_0$ and $\Pi^{(13)}_0$ have the remarkable geometric property that first order errors vanish when $Z_w = i$, explaining the small statistical standard deviation $\sigma$ observed in the experimental data. The chosen witnesses for other $N$ in Fig.~\ref{WeakValues}c,d are the projectors $\Pi^{(3)}_0$, $\Pi^{(7)}_1$, $\Pi^{(9)}_3$, $\Pi^{(11)}_7$, $\Pi^{(15)}_1$, and $\Pi^{(17)}_3$. The data for $N=5$ is most statistically significant, with $\text{Re}\,C^{(5)}_w = -2.85 \pm 0.41$ violating the bound of 0 by ${\sim}7\sigma$, and $\text{Re}(\Pi^{(5)}_0)_w = -0.2508 \pm 0.0025$ by ${\sim}99\sigma$.
\section{Concluding remarks}\label{sec:conclusion}
We have experimentally shown the confinement of contextuality within a BKS-set of observables to a particular measurement context, using modern techniques in neutron interferometry to measure weak-valued contextuality witnesses. Using $N$-spin Wheel BKS-sets \cite{WA_Nqubits}, we have reduced the problem of witnessing contextuality to weakly measuring a particular context, consisting of neighboring pairs of observables $ZZ$ arranged in a ring, with the remaining observables in the BKS-set fixed by a particular pre- and postselection. It follows that $(ZZ)_w=-1$ for all such pairs, implying anticorrelation that violates the classical pigeonhole principle \cite{aharonov2016quantum}. Moreover, the weak values of the invariant subspace projectors $(\Pi_j^{(N)})_w$ of this context contain anomalies, witnessing the failure of classical value assignments. Our unbiased contextuality witness $C^{(N)}_w$ uses all such projector weak values within the context to witness the same failure.
Unlike the implicit global contradictions inherent to existing BKS experiments \cite{kirchmair2009state,bartosik2009,d2013experimental}, our method confines the apparent contradiction to a particular context, where its physical consequences may be explicitly revealed through weak measurements. Notably, unlike existing approaches to demonstrating BKS-contextuality \cite{pusey2014anomalous}, our witness does not require entangled preparations or measurements, or indeed any interaction between the different spins at all. The entangled measurement contexts that would normally be required have values that are forced by the pre- and postselection according to the geometry of the BKS-set itself, so they need not be measured. In this way, confining the contextuality serves to simplify its experimental observation.
Such a simplification not only raises interesting foundational questions \cite{aharonov2016quantum}, but may also suggest future quantum information processing applications \cite{howard2014contextuality,abbott2012strong}.
\begin{acknowledgments}
This research was supported (in part) by the Fetzer-Franklin Fund of the John E. Fetzer Memorial Trust and the Austrian Science Fund (FWF): Projects No. P25795-N20 and No. P24973-N20.
T.D., H.G., D.E., T.J., S.S., and Y.H. performed the experiment and analyzed the data. T.D., M.W. and J.D. performed the error analysis and generated figures. T.D., M.W., and J.D. co-wrote the manuscript. M.W. and J.T. developed the original theory.
\end{acknowledgments}
\appendix
\section{Theory}\label{A:Theory}
\begin{figure}
\caption{The 5-spin Wheel: (a) The product of the five observables in each ring is $+I$ (thin line), and the product of the observables in each spoke is $-I$ (thick line). (b) A particular preparation and postselection fixes the values of two rings. (c) In any noncontextual hidden variable theory, the remaining values must be $-1$, which confines the BKS contradiction to the outer ring (blue dashed circle). This also demonstrates the quantum pigeonhole paradox: all pairs in the ring appear anticorrelated, which violates the classical pigeonhole principle.}
\label{Wheel5_0}
\label{Wheel5_1}
\label{Wheel5_2}
\label{Wheel5}
\end{figure}
The family of $N$-spin Wheel KS sets \cite{WA_Nqubits} proves the BKS theorem \cite{Bell2,KS} for all odd $N \geq 3$, with the 3-spin Wheel presented as the 3-spin Square \cite{WA_3qubits} in the main text for compactness, and the 5-spin Wheel shown in Fig. \ref{Wheel5}. Each Wheel set contains three rings composed of the $N$ pairwise Pauli observables $ZZ$, $XX$, and $YY$ respectively, of neighboring pairs in a ring of $N$ spins. Each Wheel also contains $N$ `spokes,' which contain the three observables $ZZ$, $XX$, and $YY$ for a particular neighboring pair in the ring. Each ring and spoke contains a set of mutually commuting observables that define a joint measurement basis. The product of the observables in each ring (spoke) is $+I$ ($-I$), and thus quantum mechanics predicts that the product of the measurement outcomes for the observables in each ring (spoke) is $+1$ ($-1$).
A noncontextual hidden variable theory that assigns an eigenvalue prediction $ \pm 1$ to each of the $3N$ observables must violate at least one of these product predictions, which proves the BKS theorem. To see this, consider the overall product of the predicted eigenvalues along each ring and along each spoke. According to the quantum predictions, this product must be $-1$, since there is an odd number of spokes. However, for any noncontextual value assignment this product is $+1$, since each observable appears in one ring and one spoke, and thus all of the $3N$ eigenvalue predictions are squared in the overall product.
Preparation of $|+X\rangle^{\otimes N}$ and postselection of $|+Y\rangle^{\otimes N}$ fixes all of the pairwise $XX$ and $YY$ observables in the $N$-spin Wheel to have eigenvalue assignment $+1$ in a noncontextual hidden variable theory, and the Aharonov-Bergmann-Lebowitz rule shows that all of the $ZZ$ observables have assignment $-1$. This results in a violation of the classical pigeonhole principle as well as an apparent violation of the quantum prediction for the product in the $N$-spin pairwise $ZZ$-ring context. The joint eigenspaces of this context are the projectors $\Pi^{(N)}_j$ of the main text, which together form the composite observable $C^{(N)}$ for the ring. These observables witness contextuality when weakly measuring them reveals negative weak values.
\begin{figure}
\caption{Measured interferogram for one data set: Since the weak value's real part is zero, no phase shift is seen between the IN and the OUT curve. The intensities $\text{I}_{y\pm}$ are extracted from the fits at $\chi=\pi/2$ and $\chi=3\pi/2$.}
\label{fig:set16}
\end{figure}
\section{Experiment}\label{B:Experiment}
\begin{figure}
\caption{Experimentally determined weak values for 17 different data sets. We used the first $N$ spin weak values shown here for our analysis of $N$-qubit contextuality witnesses in the main text.}
\label{Z}
\end{figure}
\begin{figure}
\caption{Experimentally determined weak values for pairwise products, showing anticorrelations between each neighboring pair in closed rings of $N$ spins, for all odd $3 \leq N \leq 17$. The pairwise anticorrelations in these rings violate the classical pigeonhole principle.}
\label{ZZC}
\end{figure}
To determine the weak value of the Pauli spin operator $Z$ the spin degree of freedom is weakly coupled to the path degree of freedom~\cite{sponar2015}. As described in the main body of the paper the weak value's real part is then inferred from an interference fringe, while two single intensity measurements are necessary to determine the weak value's imaginary part. To determine $Z_w$ three interference fringes are recorded:
\begin{enumerate}
\item The OUT curve with no interaction, to evaluate the phase of the empty interferogram.
\item The IN curve with a path-dependent spin rotation of $\alpha=15^\circ$ and a (weak) interaction strength of $\sin^2(\alpha) = 0.067$ in each of the interferometer's arms, which yields $\text{I}_{y\pm}$.
\item One interference fringe with orthogonal preparation and postselection spin states, which is then subtracted from the IN/OUT curve as an effective background.
\end{enumerate}
Additionally two single intensities with one or the other beam blocked are recorded ($\text{I}_{z\pm}$), and again background measurements with orthogonal preparation and postselection states are performed and subtracted from the signal. Figure~\ref{fig:set16} shows a typical IN and OUT curve of one experimental run. The data for a complete phase-shifter scan of $\chi$ is fit to a sine function, which allows us to determine the intensity at the correct values of $\chi$, with statistical uncertainty. The measurement procedure was repeated until altogether 17 different data sets were recorded. For each data set the real and imaginary part of the Pauli spin operator's weak value is extracted. The results are listed in Fig. \ref{Z}, and the relevant pairwise correlations are listed in Fig. \ref{ZZC}. It is also noteworthy that the errors of sets 1 to 6 are smaller than the others due to a change in reactor power. While the first six interferograms were recorded at a power of $\sim58$~MW, for the last eleven the power was $\sim43$~MW. An increase in reactor power leads to an increase in neutron flux, and the higher count rate offers better statistics and reduces the uncertainty of the recorded values.
\end{document}
|
\begin{document}
\begin{abstract} We give a complete characterization of the graph products of cyclic groups admitting a Polish group topology, and show that they are all realizable as the group of automorphisms of a countable structure. In particular, we characterize the right-angled Coxeter groups (resp. Artin groups) admitting a Polish group topology. This generalizes results from \cite{shelah}, \cite{shelah_1} and \cite{paolini&shelah}.
\end{abstract}
\title{Polish Topologies for Graph Products of Cyclic Groups}
\thanks{Partially supported by European Research Council grant 338821. No. 1115 on Shelah's publication list.}
\author{Gianluca Paolini}
\address{Einstein Institute of Mathematics, The Hebrew University of Jerusalem, Israel}
\author{Saharon Shelah}
\address{Einstein Institute of Mathematics, The Hebrew University of Jerusalem, Israel \and Department of Mathematics, Rutgers University, U.S.A.}
\date{\today}
\maketitle
\section{Introduction}
\begin{definition}\label{def_cyclic_prod} Let $\Gamma = (V, E)$ be a graph and $\mathfrak{p}: V \rightarrow \{ p^n : p \text{ prime and } 1 \leqslant n \} \cup \{ \infty \}$ a graph colouring. We define a group $G(\Gamma, \mathfrak{p})$ with the following presentation:
$$ \langle V \mid a^{\mathfrak{p}(a)} = 1, \; bc = cb : \mathfrak{p}(a) \neq \infty \text{ and } b E c \rangle.$$
\end{definition}
We call the group $G(\Gamma, \mathfrak{p})$ the {\em $\Gamma$-product\footnote{Notice that this is consistent with the general definition of graph products of groups from \cite{green}. In fact every graph product of cyclic groups can be represented as $G(\Gamma, \mathfrak{p})$ for some $\Gamma$ and $\mathfrak{p}$ as above.} of the cyclic groups} $\{ C_{\mathfrak{p}(v)} : v \in \Gamma \}$, or simply the {\em graph product of} $(\Gamma, \mathfrak{p})$. The groups $G(\Gamma, \mathfrak{p})$ where $\mathfrak{p}$ is constant of value $\infty$ (resp. of value $2$) are known as {\em right-angled Artin groups} $A(\Gamma)$ (resp. {\em right-angled Coxeter groups} $C(\Gamma)$). These groups have received much attention in combinatorial and geometric group theory. In the present paper we tackle the following problem:
\begin{problem}\label{problem} Characterize the graph products of cyclic groups admitting a Polish group topology, and which among these are realizable as the group of automorphisms of a countable structure.
\end{problem}
This problem is motivated by the work of Shelah \cite{shelah} and Solecki \cite{solecki}, who showed that no uncountable Polish group can be free or free abelian (notice that for $\Gamma$ discrete (resp. complete) $A(\Gamma)$ is a free group (resp. a free abelian group)). These negative results have been later generalized by the authors to the class of uncountable right-angled Artin groups \cite{paolini&shelah}. In this paper we give a complete solution to Problem \ref{problem} proving the following theorem:
\begin{theorem}\label{main_th} Let $G = G(\Gamma, \mathfrak{p})$, and recall that $\mathfrak{p}$ is a graph colouring (cf. Definition \ref{def_cyclic_prod}), and so we refer to the elements in the range of $\mathfrak{p}$ as colors. Then $G$ admits a Polish group topology if and only if $(\Gamma, \mathfrak{p})$ satisfies the following four conditions:
\begin{enumerate}[(a)]
\item there exists a countable $A \subseteq \Gamma$ such that for every $a \in \Gamma$ and $a \neq b \in \Gamma - A$, $a$ is adjacent to $b$;
\item there are only finitely many colors $c$ such that the set of vertices of color $c$ is uncountable;
\item there are only countably many vertices of color $\infty$;
\item if there are uncountably many vertices of color $c$, then the set of vertices of color $c$ has the size of the continuum.
\end{enumerate}
Furthermore, if $(\Gamma, \mathfrak{p})$ satisfies conditions (a)-(d) above, then $G$ can be realized as the group of automorphisms of a countable structure.
\end{theorem}
Thus, the only graph products of cyclic groups admitting a Polish group topology are the direct sums $G_1 \oplus G_2$ with $G_1$ a countable graph product of cyclic groups and $G_2$ a direct sum of finitely many continuum sized vector spaces over a finite field. From our general result we deduce a solution to Problem \ref{problem} in the particular case of right-angled Artin groups (already proved in \cite{paolini&shelah}) \mbox{and right-angled Coxeter groups.}
\begin{corollary} No uncountable Polish group can be a right-angled Artin group.
\end{corollary}
\begin{corollary} An uncountable right-angled Coxeter group $C(\Gamma)$ admits a Polish group topology if and only if it is realizable as the group of automorphisms of a countable structure if and only if $|\Gamma| = 2^{\omega}$ and there exists a countable $A \subseteq \Gamma$ such that for every $a \in \Gamma$ and $a \neq b \in \Gamma - A$, $a$ is adjacent to $b$.
\end{corollary}
In works in preparation we deal with the characterization problem faced here in the more general setting of graph products of general groups \cite{paolini&shelah2}, and with questions of embeddability of graph products of groups into Polish groups \cite{paolini&shelah1}.
\section{Preliminaries}
We will make a crucial use of the following special case of \cite[3.1]{shelah_1}.
\begin{notation} By a group term $\sigma(\bar{x})$ we mean a word in the alphabet $\{ x : x \in \bar{x} \}$, i.e. an expression of the form $x_{1}^{\varepsilon_{1}} \cdots x_{n}^{\varepsilon_{n}}$,
where $x_1,..., x_n$ are from $\bar{x}$ and each $\varepsilon_i$ is either $1$ or $-1$. The number $n$ is known as the length of the group term $\sigma(\bar{x})$.
\end{notation}
\begin{fact}[\cite{shelah_1}]\label{771_fact} Let $G = (G, \mathfrak{d})$ be a Polish group and $\bar{g} = (\bar{g}_n : n < \omega)$, with $\bar{g}_n \in G^{\ell(n)}$ and $\ell(n) < \omega$.
\begin{enumerate}[(1)]
\item For every non-decreasing $f \in \omega^\omega$ with $f(n) \geqslant 1$ and $(\varepsilon_n)_{n < \omega} \in (0, 1)^{\omega}_{\mathbb{R}}$ there is a sequence $(\zeta_n)_{n < \omega}$ (which we call an $f$-continuity sequence for $(G, \mathfrak{d}, \bar{g})$, or simply an $f$-continuity sequence) satisfying the following conditions:
\begin{enumerate}[(A)]
\item for every $n < \omega$:
\begin{enumerate}[(a)]
\item $\zeta_n \in (0, 1)_{\mathbb{R}}$ and $\zeta_n < \varepsilon_n$;
\item $\zeta_{n+1} < \zeta_{n}/2$;
\end{enumerate}
\end{enumerate}
\begin{enumerate}[(B)]
\item for every $n < \omega$, group term $\sigma(x_0, ..., x_{m-1}, \bar{y}_n)$ and $(h_{(\ell, 1)})_{\ell < m}, (h_{(\ell, 2)})_{\ell < m} \in G^m$, the $\mathfrak{d}$-distance from $\sigma(h_{(0, 1)}, ..., h_{(m-1, 1)}, \bar{g}_n)$ to $\sigma(h_{(0, 2)}, ..., h_{(m-1, 2)}, \bar{g}_n)$ is $< \zeta_n$, when:
\begin{enumerate}[(a)]
\item $m \leqslant n+1$;
\item $\sigma(x_0, ..., x_{m-1}, \bar{y}_n)$ has length $\leqslant f(n)+1$;
\item $h_{(\ell, 1)}, h_{(\ell, 2)} \in Ball(e; \zeta_{n+1})$;
\item $G \models \sigma(e, ..., e, \bar{g}_n) = e$.
\end{enumerate}
\end{enumerate}
\item The set of equations $\Gamma = \{ x_n = (x_{n+1})^{k(n)} d_{n} : n < \omega \}$ is solvable in $G$ when for every $n < \omega$:
\begin{enumerate}[(a)]
\item $f \in \omega^\omega$ is non-decreasing and $f(n) \geqslant 1$;
\item $1 \leqslant k(n) < f(n)$;
\item $(\zeta_n)_{n < \omega}$ is an $f$-continuity sequence;
\item $\mathfrak{d}(d_{n}, e) < \zeta_{n+1}$.
\end{enumerate}
\end{enumerate}
\end{fact}
\begin{convention}\label{convention} If we apply Fact \ref{771_fact}(1) without mentioning $\bar{g}$ it means that we apply Fact \ref{771_fact}(1) for $\bar{g}_n = \emptyset$, for every $n < \omega$.
\end{convention}
We shall use the following observation freely throughout the paper.
\begin{observation}\label{observation_prelim} Suppose that $(G, \mathfrak{d})$ is Polish, $A \subseteq G$ is uncountable and $\zeta > 0$. Then for some $g_1 \neq g_2 \in A$ we have $\mathfrak{d}((g_1)^{-1}g_2, e) < \zeta$.
\end{observation}
\begin{proof} First of all, notice that we can find $g_1 \in A$ such that $g_1$ is an accumulation point of $A$, because otherwise we contradict the separability of $(G, \mathfrak{d})$. Furthermore, the function $(x, y) \mapsto x^{-1}y$ is continuous and so for every $(x_1, y_1) \in G^2$ and $\zeta > 0$ there is $\delta > 0$ such that, for every $(x_2, y_2) \in G^2$, if $\mathfrak{d}(x_1, x_2), \mathfrak{d}(y_1, y_2) < \delta$, then $\mathfrak{d}((x_1)^{-1}y_1, (x_2)^{-1}y_2) < \zeta$. Let now $g_2 \in Ball(g_1; \delta) \cap A - \{g_1\}$, then $\mathfrak{d}((g_1)^{-1}g_2, (g_1)^{-1}g_1) = \mathfrak{d}((g_1)^{-1}g_2, e) < \zeta$, and so we are done.
\end{proof}
Before proving Lemma \ref{lemma1} we need some preliminary work. Given $A \subseteq \Gamma$ we denote the induced subgraph of $\Gamma$ on vertex set $A$ as $\Gamma_A$.
\begin{fact}\label{fact} Let $G = G(\Gamma, \mathfrak{p})$, $A \subseteq \Gamma$ and $G_A = (\Gamma_A, \mathfrak{p} \restriction A)$. Then there exists a unique homomorphism $\mathbf{p} = \mathbf{p}_A: G \rightarrow G_A$ such that $\mathbf{p}(c) = c$ if $c \in A$, and $\mathbf{p}(c) = e$ if $c \notin A$.
\end{fact}
\begin{proof} For arbitrary $G = G(\Gamma, \mathfrak{p})$, let $\Omega_{(\Gamma, \mathfrak{p})}$ be the set of equations from Definition \ref{def_cyclic_prod} defining $G(\Gamma, \mathfrak{p})$. Then for the $\Omega_{(\Gamma, \mathfrak{p})}$ of the statement of the fact we have $\Omega_{(\Gamma, \mathfrak{p})} = \Omega_1 \cup \Omega_2 \cup \Omega_3$, where:
\begin{enumerate}[(a)]
\item $\Omega_1 = \Omega_{(\Gamma_A, \mathfrak{p} \restriction A)}$;
\item $\Omega_2 = \Omega_{(\Gamma_{\Gamma - A}, \mathfrak{p} \restriction \Gamma - A)}$;
\item $\Omega_3 = \{ bc = cb : b E_{\Gamma} c \text{ and } \{ b, c \} \not\subseteq A \}$.
\end{enumerate}
Notice now that $\mathbf{p}$ maps each equation in $\Omega_1$ to itself and each equation in $\Omega_2 \cup \Omega_3$ to a trivial equation, and so $\mathbf{p}$ is a homomorphism (clearly unique).
\end{proof}
\begin{definition} Let $(\Gamma, \mathfrak{p})$ be as usual and $G = G(\Gamma, \mathfrak{p})$.
\begin{enumerate}[(1)]
\item A word $w$ in the alphabet $\Gamma$ is a sequence $(a_1^{\alpha_1}, ..., a_k^{\alpha_k})$, with $a_1 \neq a_2 \neq \cdots \neq a_k \in \Gamma$ and $\alpha_1, ..., \alpha_k \in \mathbb{Z} - \{0 \}$.
\item We denote words simply as $a_1^{\alpha_1} \cdots a_k^{\alpha_k}$ instead of $(a_1^{\alpha_1}, ..., a_k^{\alpha_k})$.
\item We call each $a_i^{\alpha_i}$ a syllable of the word $a_1^{\alpha_1} \cdots a_k^{\alpha_k}$.
\item We say that the word $a_1^{\alpha_1} \cdots a_k^{\alpha_k}$ spells the element $g \in G$ if $ G \models g = a_1^{\alpha_1} \cdots a_k^{\alpha_k}$.
\item We say that the word $w$ is reduced if there is no word with fewer syllables which spells the same element of $G$.
\item We say that the consecutive syllables $a_i^{\alpha_i}$ and $a_{i+1}^{\alpha_{i+1}}$ are adjacent if $a_iE_{\Gamma}a_{i+1}$.
\item We say that the word $w$ is a normal form for $g$ if it spells $g$ and it is reduced.
\item We say that two normal forms are equivalent if they spell the same element $g \in G$.
\end{enumerate}
\end{definition}
As usual, when useful we identify words with the elements they spell.
\begin{fact}[{\cite[Lemmas 2.2 and 2.3]{gut}}]\label{fact_word_1} Let $G = G(\Gamma, \mathfrak{p})$.
\begin{enumerate}[(1)]
\item If the word $a_1^{\alpha_1} \cdots a_k^{\alpha_k}$ spelling the element $g \in G$ is not reduced, then there exist $1 \leqslant p < q \leqslant k$ such that $a_p = a_q$ and $a_p$ is adjacent to each vertex $a_{p + 1}, a_{p + 2}, ..., a_{q-1}$.
\item If $w_1 = a_1^{\alpha_1} \cdots a_k^{\alpha_k}$ and $w_2 = b_1^{\beta_1} \cdots b_k^{\beta_k}$ are normal forms for $g \in G$, then $w_1$ can be transformed into $w_2$ by repeatedly swapping the order of adjacent syllables.
\end{enumerate}
\end{fact}
\begin{definition} Let $g \in G(\Gamma, \mathfrak{p})$. We define:
\begin{enumerate}[(1)]
\item $sp(g) = \{ a_i \in \Gamma : a_1^{\alpha_1} \cdots a_i^{\alpha_i} \cdots a_k^{\alpha_k} \text{ is a normal form for } g \}$;
\item $F(g) = \{ a_1^{\alpha_1} : a_1^{\alpha_1} \cdots a_k^{\alpha_k} \text{ is a normal form for } g\}$;
\item $L(g) = \{ a_k^{\alpha_k} : a_1^{\alpha_1} \cdots a_k^{\alpha_k} \text{ is a normal form for } g\}$;
\item $\hat{L}(g) = \{ a_k^{-\alpha_k}: a_k^{\alpha_k} \in L(g) \}$.
\end{enumerate}
\end{definition}
\begin{definition} We say that the normal form $a_1^{\alpha_1} \cdots a_k^{\alpha_k}$ is cyclically normal if either $k = 1$ or there is no equivalent normal form $b_1^{\beta_1} \cdots b_k^{\beta_k}$ with $b_1 = b_k$.
\end{definition}
\begin{observation}
\begin{enumerate}[(1)]
\item Notice that if $g \in G(\Gamma, \mathfrak{p})$ is spelled by a cyclically normal form, then any of the normal forms spelling $g$ are cyclically normal.
\item We say that the group element $g \in G(\Gamma, \mathfrak{p})$ is cyclically normal if any of the normal forms (which are words) spelling $g$ are cyclically normal.
\end{enumerate}
\end{observation}
\begin{notation}\label{notation} Given a sequence of words $w_1, ..., w_k$ with some of them possibly empty, we say that the word $w_1 \cdots w_k$ is a normal form (resp. a cyclically normal form) if after deleting the empty words the resulting word is a normal form (resp. a cyclically normal form).
\end{notation}
Recall that given $A \subseteq \Gamma$ we denote the induced subgraph of $\Gamma$ on vertex set $A$ as $\Gamma_A$.
\begin{fact}[{\cite[Corollary 24]{bark}}]\label{bark_fact} Any element $g \in G(\Gamma, \mathfrak{p})$ can be written in the form $w_1 w_2 w_3 w'_2 w^{-1}_1$, where:
\begin{enumerate}[(1)]
\item $w_1 w_2 w_3 w'_2 w^{-1}_1$ is a normal form;
\item $w_3 w'_2 w_2$ is cyclically normal;
\item $sp(w_2) = sp(w'_2)$;
\item if $w_2 \neq e$, then $\Gamma_{sp(w_2)}$ is a complete graph;
\item $F(w_2) \cap \hat{L}(w'_2) = \emptyset$.
\end{enumerate}
\end{fact}
\begin{proposition}\label{fact_word} Let $G = G(\Gamma, \mathfrak{p})$, and assume that $\mathfrak{p}$ has finite range $\{ c_1, ..., c_t \}$. Let $p$ be a prime such that if $c_i \neq \infty$ then $p > c_i$, for $i =1, ..., t$. Then for every $g \in G$ we have $sp(g) \subseteq sp(g^{p})$.
\end{proposition}
\begin{proof} Let $g$ be written as $w_1 w_2 w_3 w'_2 w^{-1}_1$ as in Fact \ref{bark_fact}, and assume $g \neq e$. We make a case distinction.
\newline \underline{\em Case 1}. $w_3 = e$.
\newline Notice that $w_2 w'_2 \neq e$, because by assumption $g \neq e$, and that $w_2 w'_2$ is a normal form (recall Notation \ref{notation}). Let $a_1^{\alpha_1} \cdots a_k^{\alpha_k}$ be a normal form for $w_2 w'_2$. Then by items (3) and (4) of Fact \ref{bark_fact} we have:
$$g^{p} = w_1 (a_1^{\alpha_1} \cdots a_k^{\alpha_k})^{p} w^{-1}_1 = w_1 a_1^{p\alpha_{1}} \cdots a_k^{p\alpha_k} w^{-1}_1.$$
Now, necessarily, for every $\ell \in \{ 1, ..., k \}$, $a_{\ell}^{p \alpha_\ell} \neq e$, since the order of $a_{\ell}$ does not divide $\alpha_\ell$ and $p$ is a prime. Thus, we are done.
\newline \underline{\em Case 2}. $w_2 = e$.
\newline By item (3) of Fact \ref{bark_fact} also $w'_2 = e$, and so, by item (2) of Fact \ref{bark_fact}, $w_3 w'_2 w_2 = w_3 \neq e$ is cyclically normal. Let $a_1^{\alpha_1} \cdots a_k^{\alpha_k}$ be a normal form for $w_3$.
\newline \underline{\em Case 2.1}. $k = 1$.
\newline In this case, letting $a_k^{\alpha_k} = a^{\alpha}$, we have $g^{p} = w_1 a^{p\alpha} w^{-1}_1$, and so, arguing as in Case 1, we are done.
\newline \underline{\em Case 2.2}. $k > 1$.
\newline In this case $g^{p}$ is spelled by the following normal form:
$$w_1 \underbrace{w_3 \cdots w_3}_{p} w^{-1}_1,$$
and so, clearly, we are done.
\newline \underline{\em Case 3}. $w_3 \neq e$ and $w_2 \neq e$.
\newline In this case, letting $w'_0$ stand for a normal form for $w_3 w'_2 w_2$, $g^{p}$ is spelled by the following normal form:
$$g^{p} = w_1 w_2\underbrace{w'_0 \cdots w'_0}_{p-1}w_3w'_2 w^{-1}_1,$$
Furthermore, by item (3) and (5) of Fact \ref{bark_fact}, $sp(w'_0) = sp(w_3) \cup sp(w_2) = sp(w_3) \cup sp(w'_2) = sp(w_3) \cup sp(w_2) \cup sp(w'_2)$, and so we are done.
\end{proof}
\begin{proposition}\label{prop_for_first_nec} Let $G = G(\Gamma, \mathfrak{p})$ and $g \in G$.
\begin{enumerate}[(1)]
\item If $a_1, a_2, b_1, b_2 \in \Gamma - sp(g)$ are distinct and $a_i$ is not adjacent to $b_i$ ($i = 1, 2$), then for every $n \geqslant 2$ the element $ga_1^{-1}a_2b_1^{-1}b_2$ has no $n$-th root.
\item If $a, b_1, b_2, b_3, b_4 \in \Gamma$ are distinct, $a$ is not adjacent to $b_i$ ($i = 1, 2, 3, 4$), and $\{ b_1, b_2, b_3, b_4 \} \cap sp(g) = \emptyset$, then for every $n \geqslant 2$ the element $ga^{-1}b_1^{-1}b_2ab_3^{-1}b_4$ has no $n$-th root.
\end{enumerate}
\end{proposition}
\begin{proof} We prove (1). Let $g_* = ga_1^{-1}a_2b_1^{-1}b_2$, $A = \{ a_2, b_2 \}$ and $\mathbf{p} = \mathbf{p}_A$ the homomorphism from Fact \ref{fact}. Then $\mathbf{p}(g_*) = a_2b_2$. Since $a_2$ is not adjacent to $b_2$, for every $n \geqslant 2$ the element $a_2b_2$ does not have an $n$-th root. As $\mathbf{p}_A$ is a homomorphism, we are done.
\noindent We prove (2). Let $g_* = ga^{-1}b_1^{-1}b_2ab_3^{-1}b_4$, $A = \{ a, b_1, b_2, b_3, b_4 \}$ and $\mathbf{p} = \mathbf{p}_A$ the homomorphism from Fact \ref{fact}. There are two cases:
\newline \underline{\em Case 1}. $\mathbf{p}(g) = e$.
\newline Then $\mathbf{p}(g_*) = a^{-1}b_1^{-1}b_2ab_3^{-1}b_4$. Since $a$ is not adjacent to $b_i$ ($i = 1, 2, 3, 4)$, for every $n \geqslant 2$ the element $a^{-1}b_1^{-1}b_2ab_3^{-1}b_4$ does not have an $n$-th root.
\newline \underline{\em Case 2}. $\mathbf{p}(g) \neq e$.
\newline Since $sp(\mathbf{p}(g)) \subseteq sp(g) \cap \{ a, b_1, b_2, b_3, b_4 \} \subseteq \{ a \}$ and $\mathbf{p}(g) \neq e$, we must have $sp(\mathbf{p}(g)) = \{ a \}$. Hence, $\mathbf{p}(g_*) = a^{\alpha}b_1^{-1}b_2ab_3^{-1}b_4$, for $\alpha \in \mathbb{Z} - \{ 0 \}$. Since $a$ is not adjacent to $b_i$ ($i = 1, 2, 3, 4)$, for every $n \geqslant 2$ the element $a^{\alpha}b_1^{-1}b_2ab_3^{-1}b_4$ does not have an $n$-th root.
\end{proof}
\section{Negative Side}
In this section we show that conditions (a)-(d) of Theorem \ref{main_th} are necessary. Concerning conditions (a)-(c) we prove three separate lemmas: Lemmas \ref{lemma1}, \ref{lemma2} and \ref{lemma3}. Lemmas \ref{lemma2} and \ref{lemma3} are more general than needed for the proof of Theorem \ref{main_th}, and of independent interest. Concerning condition (d), it follows from Lemma \ref{lemma4} and Observation \ref{lemma5}, which are also more general than needed for our purposes.
We denote the cyclic groups by $C_n, C_{\infty}$ (or $\mathbb{Z}_n, \mathbb{Z}_{\infty} = \mathbb{Z}$ in additive notation).
\begin{lemma}\label{lemma1} Let $G = G(\Gamma, \mathfrak{p})$, with $|\Gamma| = 2^\omega$. Suppose that there does not exist a countable $A \subseteq \Gamma$ such that for every $a \in \Gamma$ and $a \neq b \in \Gamma - A$, $a$ is adjacent to $b$. Then $G$ does not admit a Polish group topology.
\end{lemma}
\begin{proof} Suppose that $G = G(\Gamma, \mathfrak{p})$ is as in the assumptions of the lemma, and that $G = (G, \mathfrak{d})$ is Polish. Then at least one of the following cases holds:
\begin{enumerate}[(i)]
\item in $\Gamma$ there are $\{ a_i : i < \omega_1 \}$ and $\{ b_i : i < \omega_1 \}$ such that if $i < j < \omega_1$, then $a_i \neq a_j$, $b_i \neq b_j$, $|\{ a_i, a_j, b_i, b_j \}| = 4$ and $a_i$ is not adjacent to $b_i$;
\item in $\Gamma$ there are $a_*$ and $\{ b_i : i < \omega_1 \}$ such that if $i < j < \omega_1$, then $|\{ a_*, b_i, b_j \}| = 3$ and $a_*$ is not adjacent to $b_i$.
\end{enumerate}
\underline{\em Case 1}. There are $\{ a_i : i < \omega_1 \}$ and $\{ b_i : i < \omega_1 \}$ as in (i) above.
\newline Without loss of generality we can assume that all the $\{ a_i : i < \omega_1 \}$ have fixed color $k^*_1$ and all the $\{ b_i : i < \omega_1 \}$ have fixed color $k^{*}_2$, for some $k^*_1, k^*_2 \in \{ p^n : p \text{ prime and } 1 \leqslant n \} \cup \{ \infty \}$. Let
$p$ be a prime such that if $k^*_\ell \neq \infty$ then $p > k^*_\ell$, for $\ell = 1, 2$. Recalling Convention \ref{convention}, let $(\zeta_n)_{n < \omega} \in (0, 1)_{\mathbb{R}}^\omega$ be as in Fact \ref{771_fact} for $f \in \omega^{\omega}$ constantly $p + 10$.
Using Observation \ref{observation_prelim}, by induction on $n < \omega$, choose $(i_n = i(n), j_n = j(n))$ such that:
\begin{enumerate}[(a)]
\item if $m < n$, then $j_m < i_n$;
\item $i_n < j_n < \omega_1$;
\item $\mathfrak{d}(a_{j(n)}^{-1}a_{i(n)}, e), \mathfrak{d}(b_{j(n)}^{-1}b_{i(n)}, e) < \zeta_{n+8}$.
\end{enumerate}
Consider now the following set of equations:
$$ \Delta = \{ x_n = (x_{n+1})^{p}h_n^{-1} : n < \omega \},$$
where $h_n = b_{i(n)}^{-1} b_{j(n)} a_{i(n)}^{-1} a_{j(n)}$. By (c) above and Fact \ref{771_fact}(1)(B) we have $\mathfrak{d}(h_n^{-1}, e) < \zeta_{n+1}$, and so by Fact \ref{771_fact}(2) the set $\Delta$ is solvable in $G$. Let $(g'_n)_{n < \omega}$ witness this. Let $A$ be the set of vertices of color $k^*_1$ or $k^*_2$, $\mathbf{p} = \mathbf{p}_A$ the homomorphism from Fact \ref{fact} and let $g_n = \mathbf{p}(g'_n)$. Then for every $n < \omega$ we have:
$$
G \models (g_{n+1})^{p} = g_nh_n,
$$
and so by Proposition \ref{fact_word} we have:
$$
sp(g_n) \subseteq sp(g_0) \cup \{ b_{i(\ell)}, b_{j(\ell)}, a_{i(\ell)}, a_{j(\ell)}: \ell < n \}.
$$
Let $n < \omega$ be such that $sp(g_0) \cap \{ b_{i(n)}, b_{j(n)}, a_{i(n)}, a_{j(n)} \} = \emptyset$. Then:
$$(g_{n+1})^{p} = g_nb_{i(n)}^{-1} b_{j(n)} a_{i(n)}^{-1} a_{j(n)} \text{ and } sp(g_n) \cap \{ b_{i(n)}, b_{j(n)}, a_{i(n)}, a_{j(n)} \} = \emptyset,$$
which contradicts Proposition \ref{prop_for_first_nec}(1).
\newline \underline{\em Case 2}. There are $a_*$ and $\{ b_i : i < \omega_1 \}$ as in (ii) above.
\newline Let $k^*_1 = \mathfrak{p}(a_*)$. Without loss of generality, we can assume that all the $\{ b_i : i < \omega_1 \}$ have fixed color $k^{*}_2$, for some $k^{*}_2 \in \{ p^n : p \text{ prime and } 1 \leqslant n \} \cup \{ \infty \}$. Let
$p$ be a prime such that if $k^*_\ell \neq \infty$ then $p > k^*_\ell$, for $\ell = 1, 2$. Let $(\zeta_n)_{n < \omega} \in (0, 1)_{\mathbb{R}}^\omega$ be as in Fact \ref{771_fact} for $\bar{g}_n = (a_*)$ (and so in particular $\ell(n) = 1$) and $f \in \omega^{\omega}$ constantly $p + 10$.
Using Observation \ref{observation_prelim}, by induction on $n < \omega$, choose $(i_n = i(n), j_n = j(n), i'_n = i'(n), j'_n = j'(n))$ such that:
\begin{enumerate}[(a)]
\item if $m < n$, then $j'_m < i_n$;
\item $i_n < j_n < i'_n < j'_n < \omega_1$;
\item $\mathfrak{d}(b_{j(n)}^{-1} b_{i(n)}, e), \mathfrak{d}(b_{j'(n)}^{-1} b_{i'(n)}, e) < \zeta_{n+8}$.
\end{enumerate}
Consider now the following set of equations:
$$ \Delta = \{ x_n = (x_{n+1})^{p}h_n^{-1} : n < \omega \},$$
where $h_n = a^{-1}_*b_{i(n)}^{-1} b_{j(n)} a_*b_{i'(n)}^{-1} b_{j'(n)}$. By (c) above and Fact \ref{771_fact}(1)(B) we have $\mathfrak{d}(h_n^{-1}, e) < \zeta_{n+1}$, and so by Fact \ref{771_fact}(2) the set $\Delta$ is solvable in $G$. Let $(g'_n)_{n < \omega}$ witness this. Let $A$ be the set of vertices of color $k^{*}_1$ or $k^{*}_2$, $\mathbf{p} = \mathbf{p}_A$ the homomorphism from Fact \ref{fact} and let $g_n = \mathbf{p}(g'_n)$. Then for every $n < \omega$ we have:
$$
G \models (g_{n+1})^{p} = g_nh_n,
$$
and so by Proposition \ref{fact_word} we have:
$$
sp(g_n) \subseteq sp(g_0) \cup \{ a_*\} \cup \{ b_{i(\ell)}, b_{j(\ell)}, b_{i'(\ell)}, b_{j'(\ell)}: \ell < n \}.
$$
Let $n < \omega$ be such that $sp(g_0) \cap \{ b_{i(n)}, b_{j(n)}, b_{i'(n)}, b_{j'(n)}\} = \emptyset$. Then:
$$(g_{n+1})^{p} = g_na^{-1}_*b_{i(n)}^{-1} b_{j(n)} a_*b_{i'(n)}^{-1} b_{j'(n)} \text{ and } sp(g_n) \cap \{ b_{i(n)}, b_{j(n)}, b_{i'(n)}, b_{j'(n)} \} = \emptyset,$$
which contradicts Proposition \ref{prop_for_first_nec}(2).
\end{proof}
Recall that we denote the cyclic group of order $n$ by $C_n$.
\begin{lemma}\label{lemma2} Let $G = G' \oplus G''$, with $G'' = \bigoplus_{n < \omega} G_n$, $G_n = \bigoplus_{\alpha < \lambda_n} C_{k(n)}$, $\aleph_0 < \lambda_n$, $k(n) = p_n^{t(n)}$, for $p_n$ prime and $1 \leqslant t(n)$, and the $k(n)$ pairwise distinct. Then $G$ does not admit a Polish group topology.
\end{lemma}
\begin{proof}
Suppose that $G = (G, \mathfrak{d})$ is Polish and let $(\zeta_n)_{n < \omega} \in (0, 1)_{\mathbb{R}}^\omega$ be as in Fact \ref{771_fact} for $f \in \omega^{\omega}$ such that $f(n) = k(n) + 2$.
Assume that $G = G' \oplus G''$ is as in the assumptions of the lemma.
Without loss of generality we can assume that either of the following cases happens:
\begin{enumerate}[(i)]
\item for every $n < m < \omega$, $p_n < p_m$;
\item for every $n < \omega$, $p_n = p$ and $\prod_{i < n} p^{t(i)}$ is not divisible by $p^{t(n)}$.
\end{enumerate}
Using Observation \ref{observation_prelim}, by induction on $n < \omega$, choose $g_n, h_n \in G_n$ such that $g_n, h_n$ and $h_n^{-1}g_n$ have order $k(n)$ and $\mathfrak{d}(h_n^{-1}g_n, e) < \zeta_{n+1}$. Consider now the following set of equations:
$$ \Gamma = \{ x_n = (x_{n+1})^{k(n)} h_n^{-1}g_n : n < \omega \}.$$
By Fact \ref{771_fact}(2) the set $\Gamma$ is solvable in $G$. Let $(d_n)_{n < \omega}$ witness this. Let then $n < \omega$ be such that $d_0 \in G' \oplus \bigoplus_{i < n} G_i$. Notice now that:
\[ \begin{array}{rcl}
d_0 & = & (d_1)^{k(0)}h^{-1}_0g_0 \\
& = & ((d_2)^{k(1)}h^{-1}_1g_1)^{k(0)}h^{-1}_0g_0 \\
& = & (...((d_{n_{}+1})^{k(n)}h^{-1}_{n}g_{n})^{k(n-1)} \cdots h^{-1}_0g_0.
\end{array} \]
Let $\mathbf{p} = \mathbf{p}_{n}$ be the projection of $G$ onto $G_{n}$. Then we have:
$$G_{n} \models e = d_0 = (\mathbf{p}(d_{n+1})^{k(n)}h^{-1}_{n}g_{n})^{\prod_{i < n} k(i)} = (h^{-1}_{n}g_{n})^{\prod_{i < n} k(i)},$$
which is absurd.
\end{proof}
When we write $G = \bigoplus_{\alpha < \lambda} \mathbb{Z}x_\alpha$ we mean that $x_{\alpha}$ is the generator of the $\alpha$-th copy of $\mathbb{Z}$. This convention is used in Lemmas \ref{lemma3} and \ref{lemma4}, and Observation \ref{lemma5}.
\begin{lemma}\label{lemma3} Let $G = G_1 \oplus G_2$, with $G_2 = \bigoplus_{\alpha < \lambda} \mathbb{Z}x_\alpha$ and $\lambda > \aleph_0$. Then $G$ does not admit a Polish group topology.
\end{lemma}
\begin{proof} Suppose that $G = (G, \mathfrak{d})$ is Polish and let $(\zeta_n)_{n < \omega} \in (0, 1)_{\mathbb{R}}^\omega$ be as in Fact \ref{771_fact} for $f \in \omega^{\omega}$ constantly $2 + 10$. Assume that $G = G_1 \oplus G_2$ is as in the assumptions of the lemma.
Using Observation \ref{observation_prelim}, by induction on $n < \omega$, choose $(i_n, j_n)$ such that:
\begin{enumerate}[(i)]
\item if $m < n$, then $j_m < i_n$;
\item $i_n < j_n < \omega_1 \leqslant \lambda$;
\item $\mathfrak{d}(x_{i_n}, x_{j_n}) < \zeta_{n+1}$.
\end{enumerate}
For every $n < \omega$ let:
\begin{enumerate}[(a)]
\item $x_{i_n} = h_n$;
\item $x_{j_n} = g_n$;
\item $\mathbb{Z}x_{i_n} \oplus \mathbb{Z}x_{j_n} = H_n$.
\end{enumerate}
Consider now the following set of equations:
$$ \Gamma = \{ x_n = (x_{n+1})^{2} h_n^{-1}g_n : n < \omega \}.$$
By Fact \ref{771_fact}(2) the set $\Gamma$ is solvable in $G$. Let $(d_n)_{n < \omega}$ witness this. Let then $n < \omega$ be such that $d_0 \in G_1 \oplus \bigoplus_{i < n} H_i$. Notice now that:
\[ \begin{array}{rcl}
d_0 & = & (d_1)^{2}h^{-1}_0g_0 \\
& = & ((d_2)^{2}h^{-1}_1g_1)^{2}h^{-1}_0g_0 \\
& = & (...((d_{n_{}+1})^{2}h^{-1}_{n}g_{n})^{2} \cdots h^{-1}_0g_0.
\end{array} \]
Let $\mathbf{p}$ be the projection of $G$ onto $H_n$. Then we have:
$$H_{n} \models e = d_0 = (\mathbf{p}(d_{n+1})^{2}h^{-1}_{n}g_{n})^{2^n} = (h^{-1}_{n}g_{n})^{2^n},$$
which is absurd, since $H_n = \mathbb{Z}x_{i_n} \oplus \mathbb{Z}x_{j_n}$ is torsion-free and $h^{-1}_{n}g_{n} \neq e$.
\end{proof}
In the rest of this section we use additive notation.
\begin{lemma}\label{lemma4} Let $G = (G, \mathfrak{d})$ be an uncountable Polish group, $p$ a prime and $1 \leqslant t < \omega$. Suppose that $G = G_1 \oplus G_2$, with $G_2 = \bigoplus_{\alpha < \lambda} \mathbb{Z}_{p^t}x_{\alpha}$. If $\lambda > \aleph_0$, then there is $\bar{y} \subseteq G$ such that:
\begin{enumerate}[(a)]
\item $\bar{y} = (y_{\alpha} : \alpha < 2^{\aleph_0})$;
\item $p^t y_{\alpha} = 0$ and, for $\ell < t$, $p^{\ell} y_{\alpha} \neq 0$;
\item if $\alpha < \beta$, then $p^{t} (y_{\alpha} - y_{\beta}) = 0$, and, for $\ell < t$, $p^{\ell} (y_{\alpha} - y_{\beta}) \neq 0$;
\item if $\alpha < \beta$, then $y_{\alpha} - y_{\beta}$ is not divisible by $p$ in $G$.
\end{enumerate}
\end{lemma}
\begin{proof}
By induction on $n < \omega$, choose $(i_n, j_n)$ such that:
\begin{enumerate}[(i)]
\item if $m < n$, then $j_m < i_n$;
\item $i_n < j_n < \omega_1$;
\item $\mathfrak{d}(x_{i_n}, x_{j_n}) < 2^{-2^n}$.
\end{enumerate}
For $A \subseteq \omega$ and $n < \omega$, let:
$$y_{A, n} = \sum \{ x_{i_k} - x_{j_k} : k \in A \text{, } k < n\}.$$
Then for every $A \subseteq \omega$, $(y_{A, n})_{n < \omega}$ is Cauchy. Let $y_A \in G$ be its limit. Then by continuity we have:
\begin{enumerate}[(a)]
\item $p^{t} y_A = 0$, and, for $\ell < t$, $p^{\ell} y_A \neq 0$;
\item if $A \neq B \subseteq \omega$, then $y_A$ and $y_B$ commute, $p^{t} (y_{A} - y_{B}) = 0$ and, for $\ell < t$, $p^{\ell} (y_{A} - y_{B}) \neq 0$.
\end{enumerate}
We define the following equivalence relation $E$ on $\mathcal{P}(\omega)$:
$$A_1EA_2 \; \Leftrightarrow \; \exists x \in G(y_{A_1} - y_{A_2} = px).$$
We then have:
\begin{enumerate}[(I)]
\item $E$ is analytic;
\item if $B \subseteq \omega$, $n \in B$ and $A = B - \{ n \}$, then $\neg(y_A E y_B)$;
\item by \cite[Lemma 13]{sh_for_CH} we have $|\mathcal{P}(\omega)/E| = 2^{\omega}$.
\end{enumerate}
Hence, we can find $( y_{\alpha} : \alpha < 2^{\aleph_0})$ as wanted.
\end{proof}
\begin{observation}\label{lemma5} Let $G = (G, \mathfrak{d})$ be an uncountable Polish group, $p$ a prime and $1 \leqslant t < \omega$. Suppose that $G = G_0 \oplus G_1 \oplus G_2$, with $G_0$ countable, $G_1$ abelian, $\lambda > \aleph_0$ and $G_2 = \bigoplus_{\alpha < \lambda} \mathbb{Z}_{p^t}x_{\alpha}$. Let $(y_{\alpha} : \alpha < 2^{\aleph_0})$ be as in Lemma \ref{lemma4} with respect to the decomposition $G'_1 \oplus G'_2$ for $G'_1 = G_0 \oplus G_1$ and $G'_2 = G_2$. Then there is a pure embedding of $H = \bigoplus_{\alpha < 2^{\aleph_0}} \mathbb{Z}_{p^t} y_{\alpha}$ into the abelian group $G_1 \oplus G_2$.
\end{observation}
\begin{proof} Define:
$$\mathcal{U}_1 = \{ \alpha < 2^{\aleph_0} : \text{ for no } \xi \in \bigoplus_{\beta < \alpha} \mathbb{Z}_{p^t} y_{\beta} \text{ we have } y_{\alpha} - \xi \text{ is divisible by } p \text{ in } G_1 \oplus G_2\},$$
$$\mathcal{U}_2 = \{ \alpha < 2^{\aleph_0} : \text{ for no } \xi \in \bigoplus_{\beta < \alpha} \mathbb{Z}_{p^t} y_{\beta} \text{ and } \ell < t \text{ we have } p^\ell(y_{\alpha} - \xi) = 0 \}.$$
Let $\mathcal{U} = \mathcal{U}_1 \cap \mathcal{U}_2$. For $\alpha \notin \mathcal{U}$, let $(\xi_{\alpha}, \ell_{\alpha})$ be witnesses of $\alpha \notin \mathcal{U}$, with $\ell_{\alpha} = t$ if $\alpha \notin \mathcal{U}_1$.
\begin{claim} $|\mathcal{U}| = 2^{\aleph_0}$.
\end{claim}
\begin{claimproof} Suppose that $|\mathcal{U}| < 2^{\aleph_0}$ and let $\mu = \aleph_0 + |\mathcal{U}|$. Hence $\mathcal{U} \cap \mu^+$ is bounded. Let $\alpha_* = sup(\mathcal{U} \cap \mu^+)$. By Fodor's lemma for some stationary set $S \subseteq \mu^+ - (\alpha_* + 1)$ we have $\alpha \in S$ implies $(\xi_{\alpha}, \ell_{\alpha}) = (\xi_{*}, \ell_{*})$. Let $\alpha_1 < \alpha_2 \in S$. Then if $\ell_* = t$ we have that $y_{\alpha_2} - y_{\alpha_1}$ is divisible by $p$ in $G_1 \oplus G_2$, and if $\ell_* < t$ we have that $p^{\ell_*}(y_{\alpha_2} - y_{\alpha_1}) = 0$. In both cases we reach a contradiction, and so $|\mathcal{U}| = 2^{\aleph_0}$.
\end{claimproof}
\noindent Let now $\mathbf{p}_{\ell}$ be the canonical projection of $G$ onto $G_{\ell}$ ($\ell = 1, 2$). Then, by the claim above, $\{ (\mathbf{p}_{1} + \mathbf{p}_{2}) (y_\alpha): \alpha \in \mathcal{U} \}$ is a basis of a pure subgroup of $G_1 \oplus G_2$ isomorphic to $H$, and so we are done.
\end{proof}
\section{Positive Side}
In this section we prove the sufficiency of conditions (a)-(d) of Theorem \ref{main_th}.
\begin{lemma}\label{positive} Suppose that $G = G(\Gamma, \mathfrak{p})$ satisfies conditions (a)-(d) of Theorem \ref{main_th} and $|\Gamma| = 2^{\omega}$. Then $G$ is realizable as the group of automorphisms of a countable structure.
\end{lemma}
\begin{proof} Let $G = G(\Gamma, \mathfrak{p})$ be as in the assumptions of the lemma. Then we have:
$$G \cong H \oplus \bigoplus_{p^n \mid n_*} \bigoplus_{\alpha < \lambda_{(p, n)}} \mathbb{Z}_{p^n},$$
for some countable group $H$, natural number $n_* < \omega$, and $\lambda_{(p, n)} \in \{ 0, 2^{\aleph_0} \}$ (here we are crucially using conditions (a)-(d) of the statement of the theorem, of course). Since finite sums of groups realizable as groups of automorphisms of countable structures are realizable as groups of automorphisms of countable structures, it suffices to show that for given $p^n$ the group:
$$H_1 = \bigoplus_{\alpha < 2^{\aleph_0}} \mathbb{Z}_{p^n} \cong \mathbb{Z}_{p^n}^{\omega}$$ is realizable as the group of automorphisms of a countable structure. To this end, let $A$ be a countable first-order structure such that $Aut(A) = \mathbb{Z}_{p^n}$. Let $B$ be the disjoint union of $\aleph_0$ copies of $A$, then $\mathbb{Z}_{p^n}^{\omega} \cong Aut(B)$, and so we are done.
\end{proof}
\end{document}
|
\begin{document}
\title{{\LARGE \textbf{Optimal Dynamic Formation Control of Multi-Agent Systems in
Environments with Obstacles}}}
\author{Xinmiao Sun and Christos G. Cassandras\thanks{{\footnotesize The authors' work
is supported in part by NSF under grants CNS-1239021 and IIP-1430145, by AFOSR
under grant FA9550-12-1-0113, and by ONR under grant N00014-09-1-1051.}
}\thanks{{\footnotesize Division of Systems Engineering and Center for
Information and Systems Engineering, Boston University; e-mail: \{xmsun,cgc\}
@bu.edu}}}
\maketitle
\begin{abstract}
We address the optimal dynamic formation problem in mobile leader-follower
networks where an optimal formation is generated to maximize a given objective
function while continuously preserving connectivity. We show that in a convex
mission space, the connectivity constraints can be satisfied by any feasible
solution to a mixed integer nonlinear optimization problem. When the optimal
formation objective is to maximize coverage in a mission space cluttered with
obstacles, we separate the process into intervals with no obstacles detected
and intervals where one or more obstacles are detected. In the latter case, we
propose a minimum-effort reconfiguration approach for the formation which
still optimizes the objective function while avoiding the obstacles and
ensuring connectivity. We include simulation results illustrating this dynamic
formation process.
\end{abstract}
\section{Introduction}
The multi-agent system framework consists of a team of autonomous agents
cooperating to carry out complex tasks within a given environment that is
potentially highly dynamic, hazardous, and even adversarial. The overall
objective of the system may be time-varying and combines exploration, data
collection, and tracking to define a \textquotedblleft
mission\textquotedblright. Related problems are often referred to as
multi-agent coordination
\cite{Cao2013TIICoordination,boutilier1999sequential,beard2001coordination} or
cooperative control \cite{Shamma:2008,Choi20092802,cgc2005}. In many cases,
mobile agents are required to establish and maintain a certain spatial
configuration, leading to a variety of \emph{formation control} problems.
These problems are generally approached in two ways: in the leader-follower
setting, an agent is designated as a team leader moving on some given
trajectory with the remaining agents tracking this trajectory while
maintaining the formation; in the leaderless setting the formation must be
maintained without any such benefit. Examples of formation control problems
may be found in \cite{cao2011maintaining},\cite{Kwang2014}
,\cite{Desai1999,Yamaguchi1994,jiannan2013,ji2007distributed} and references
therein. In robotics, this is a well-studied problem; for instance in
\cite{Yamaguchi1994}, a desired shape for a networked\ strongly connected
group of robots is achieved by designing a quadratic spread potential field on
a relative distance space. In \cite{Desai1999}, a leader and several followers
move in an area with obstacles which necessitate the transition from an
initial formation shape to a desired new shape; however, the actual choice of
formations for a particular mission is not addressed in \cite{Desai1999}, an
issue which is central to our approach in this paper. In
\cite{ji2007distributed} the authors consider the problem of preserving
connectivity when the nodes have limited sensing and communication ranges;
this is accomplished through a control law based on the gradient of an
edge-tension function. More recently, in \cite{jiannan2013}, the goal is to
integrate formation control with trajectory tracking and obstacle avoidance
using an optimal control framework.
In this paper, we take a different viewpoint of formations. Since agent teams
are typically assigned a mission, there is an objective (or cost) function
associated with the team's operation which depends on the spatial
configuration (formation) of the team. Therefore, we view a formation as the
result of an optimization problem which the agent team solves in either
centralized or distributed manner. We adopt a leader-follower approach,
whereby the leader moves according to a trajectory that only he/she controls.
During the mission, the formation is preserved or must adapt if the mission
(hence the objective function) changes or if the composition of the team is
altered (by additions or subtractions of agents) or if the team encounters
obstacles which must be avoided. In the latter case in particular, we expect
that the team adapts to a new formation which still seeks to optimize an
objective function so as to continue the team's mission by attaining the best
possible performance. The problem is complicated by the fact that such
adaptation must take place in real time. Thus, if the optimization problem
determining the optimal formation is computationally demanding, we must seek a
fast and efficient control approach which yields possibly suboptimal
formations, but guarantees that the initial connectivity attained is
preserved. Obviously, once obstacles are cleared, the team is expected to
return to its nominal optimal formation.
Although the optimal dynamic formation control framework proposed here is not
limited by the choice of tasks assigned to the team, we will focus on the
coverage control problem because it is well studied and amenable to efficient
distributed optimization methods
\cite{SM2011,CM2004,cgc2005,caicedo2008coverage,caicedo2008performing,breitenmoser2010voronoi,Minyi2011,Gusrialdi2011}
, while also presenting the challenge of being generally non-convex and
sensitive to the agent locations during the execution of a mission. The local
optimality issue, which depends on the choice of objective function, is
addressed in \cite{Sun2014,schwager2008,gusrialdi2013improved}, while the
problem of connectivity preservation in view of limited communication ranges
is considered in \cite{ji2007distributed,Minyi2011}.
The contribution of this paper is to formulate an optimization problem which
jointly seeks to position agents in a two-dimensional mission space so as to
optimize a given objective function while at the same time ensuring that the
leader and remaining agents maintain a connected graph dictated by minimum
distances between agents, thus resulting in an \emph{optimal} formation. The
minimum distances may capture limited communication ranges as well as any
other constraint imposed on the team. We show that the solution to this
problem guarantees this connectivity. The formation becomes \emph{dynamic} as
soon as the leader starts moving along a given trajectory which may either be
known to all agents in advance or determined only by the leader. Thus, it is
the team's responsibility to maintain an optimal formation. We show that this
is relatively simple as long as no obstacles are encountered. When one or more
obstacles are encountered (i.e., they come within the sensing range of one or
more agents), then we propose a scheme for adapting with minimal effort to a
new formation which maintains connectivity while still seeking to optimize the
original team objective.
The paper is organized as follows. In Sec. II, we formulate a general optimal
formation control problem. In Sec. III, we focus on a convex feasible space
and derive a mixed integer nonlinear problem whose solution is shown to ensure
connectivity while maintaining an optimal formation. In Sec. IV, we propose a
scheme to solve the optimal formation problem in a mission space with
obstacles. We propose an algorithm to first obtain a connected formation and
then optimize it while maintaining connectivity. Simulation results are
included in Sec. V.
\section{Optimal Formation Problem Formulation}
Consider a set of $N+1$ agents with a leader labeled $0$ and $N$ followers
labeled 1 through $N$ in a mission space $\Omega\in\mathbb{R}^{2}$. Agent $i$
is located at $s_{i}(t)\in\mathbb{R}^{2}$ and let $\mathbf{s}(t)=(s_{0}
(t),...,s_{N}(t))$ be the full agent location vector at $t$. The leader
follows a predefined trajectory $s_{0}(t)$ over $t\in\lbrack0,T]$ which is
generally not known in advance by the remaining agents. We model the agent
team as an undirected graph
$\mathscr{G(\mathbf{s})}=(\mathscr{N},\mathscr{E},\mathbf{s})$, where
$\mathscr{N}=\{0,1,...,N\}$ is the set of agent indices and let
$\mathscr{N}_{F}=\{1,\ldots,N\}\subset\mathscr{N}$ be the set of follower
indices. In this model, the set of edges $\mathscr{E}=\{(i,j):i,j\in
\mathscr{N}\}$ contains all possible agent pairs for which constraints may be imposed.
In performing a mission, let $H(\mathbf{s}(t))$ be an objective function
dependent on the agent locations $\mathbf{s}(t)$. If the locations are
unconstrained, the problem is posed as $\max_{\mathbf{s}(t)\in\Omega
}H(\mathbf{s}(t))$ subject to dynamics that may characterize the motion of
each agent. If $t$ is fixed, then this is a nonlinear parametric optimization
problem over the mission space $\Omega$ \cite{Minyi2011}. If, on the other
hand, agents are required to also satisfy some constraints relative to each
other's position, then a \emph{formation} is defined as a graph that satisfies
these constraints. We then introduce a Boolean variable $c(s_{i},s_{j})$ to
indicate whether two agents satisfy these constraints:
\begin{equation}
{\label{connnectedCon}}c(s_{i},s_{j})=\left\{
\begin{array}
[c]{cl}
1 & \text{all constraints are satisfied}\\
0 & \text{otherwise}
\end{array}
\right.
\end{equation}
and if $c(s_{i},s_{j})=1$ we say that agents $i$ and $j$ are \emph{connected}.
A loop-free path from $i$ to the leader, $\pi_{i}=\{0,\ldots,a,b,\ldots,i\}$,
is defined as an ordered set where neighboring agents are connected such that
$c(s_{a},s_{b})=1$. Let $\Pi_{i}$ be the set of all possible paths connected
to the leader. The graph $\mathscr{G(\textbf{s})}$ is connected if $\Pi
_{i}\neq\emptyset$ for all $i\in\mathscr{N}_{F}$. We can now formulate an
optimal formation problem with connectivity preservation as follows, for any
fixed $t\in\lbrack0,T]$:
\begin{equation}
\begin{split}
& \max_{\mathbf{s}(t)\in\Omega}\text{ }H(\mathbf{s}(t))\\
\text{s.t.}\quad s_{i}(t) & \in F\subseteq\Omega,\text{ \ }i\in
\mathscr{N}_{F}\\
s_{0}(t) & \text{ is given}\\
\mathscr{G}(\mathbf{s}(t)) & \text{ is connected}
\end{split}
\label{obj1}
\end{equation}
For the sake of generality, we impose the constraint $s_{i}(t)\in
F\subseteq\Omega$ for all follower agents to capture the possibility that a
formation is constrained. The \emph{feasible space} $F$ can be convex (e.g.,
followers may be required to be located on one side of the leader relative to
a line in $\Omega$ that goes through $s_{0}(t)$) or non-convex (e.g.,
followers may be forbidden to enter polygonal obstacles and $F$ is the set
$\Omega$ excluding all interior points of the obstacles). The solution to this
problem is an \emph{optimal formation} at time $t$ and is denoted by
$\mathscr{G}_{F}(\mathbf{s}(t))$. Given a time interval $[t_{1},t_{2}]$, the
formation is \emph{maintained} in $[t_{1},t_{2}]$ if $s_{i}(t)-s_{i}
(t_{1})=s_{0}(t)-s_{0}(t_{1})$ holds for all $t\in\lbrack t_{1},t_{2}]$,
$i\in\mathscr{N}_{F}$; otherwise, it is a new formation. Figure
\ref{fig:missionSpace} shows an example of optimal dynamic formation control
in a mission space with obstacles. \begin{figure}
\caption{A mission space example where the triangle is
the leader and the red line is a predefined trajectory in $[0,T]$. The circles
are followers and the rectangle is an obstacle. The formation is maintained in
$[0,t_{1}]$.}
\label{fig:missionSpace}
\end{figure}Clearly, this is a challenging problem. To begin with, the last
constraint in (\ref{obj1}) is imprecise and may be different in a convex or
non-convex feasible space. In addition, the computational complexity of
obtaining a solution may be manageable in determining an initial formation but
becomes infeasible if a new formation $\mathscr{G}_{F}(\mathbf{s}(t))$ is
required during the real-time execution of a mission. In the following two
sections, we first propose an approach to solve this problem in a convex
feasible space and then use this solution to enable the maintenance of a
formation in a non-convex case.
\section{Optimal Dynamic Formation Control in a Convex Feasible Space}
In a convex feasible space, the simplest connection constraints are of the
form $d_{ij}(t)\equiv\Vert s_{i}(t)-s_{j}(t)\Vert\leq C_{ij}$ for some pair
$(i,j)$, $i,j\in\{0,1,...,N\}$, where $C_{ij}>0$ is a given scalar. This may
be the minimum distance needed to establish communication or $d_{ij}$ may be
used to enforce a specific desired shape in the formation. Techniques based on
the graph Laplacian \cite{merris1994laplacian} are often used to solve this
kind of problem, e.g., \cite{R2004}. However, our goal is to determine a
formation which solves the optimization problem in (\ref{obj1}) for a given
$H(\mathbf{s}(t))$. Thus, we describe next an approach to transform the last
constraint in (\ref{obj1}) into a mixed integer nonlinear optimization problem
by introducing a set of \emph{flow variables} over $\mathscr{G(\textbf{s})}$.
The leader $0$ is assumed to be a source node which sends $N$ units of flow
through the graph $\mathscr{G(\textbf{s})}$ to all other agents. Let
$\rho_{ij}\in\mathbb{Z}^{+},$ $i\in\mathscr{N},$ $j\in\mathscr{N}_{F}$ be an
integer flow amount through link $(i,j)$. Note that, in general, $\rho
_{ij}\neq\rho_{ji}$ and that either $\rho_{ij}>0$ or $\rho_{ji}>0$ implies
that $c(s_{i},s_{j})=1$. We can then define a flow vector $\mathbf{\rho}
=(\rho_{01},\rho_{11},\ldots,\rho_{N1},\ldots,\rho_{0N},\ldots,\rho_{NN})$. Observe
that $\rho_{i0},i\in\mathscr{N}$ is not a flow variable in $\mathbf{\rho}$
since the leader is not allowed to receive any flows from the followers. For
each follower $j$, we define an auxiliary variable $N_{j}$ to be the net flow
at node $j$:
\begin{equation}
N_{j}=\sum_{i\in\mathscr{N}}\rho_{ij}-\sum_{i\in\mathscr{N}_{F}}\rho_{ji}
\end{equation}
Using this notation, we introduce next a number of linear constraints that
represent a connected graph. First, the leader provides $N$ units of flow:
\begin{equation}
\sum_{i\in\mathscr{N}_{F}}\rho_{0i}=N\label{leader_total}
\end{equation}
Next, each follower $j$ must receive a net flow $N_{j}=1$ in order to ensure
that there is one path from the leader to $j$:
\begin{equation}
N_{j}=\sum_{i\in\mathscr{N}}\rho_{ij}-\sum_{i\in\mathscr{N}_{F}}\rho
_{ji}=1,\text{ \ }j\in\mathscr{N}_{F}\label{follower_conservation}
\end{equation}
To prohibit self loops we require that
\begin{equation}
\rho_{ii}=0,\text{ \ }i\in\mathscr{N}\label{self_loop}
\end{equation}
Finally, the maximal flow capacity is upper bounded by the source amount $N$:
\begin{equation}
\rho_{ij}\leq N,\text{ \ }i\in\mathscr{N},\text{ \ }j\in\mathscr{N}_{F}
\label{flow_capacity}
\end{equation}
Observe that (\ref{leader_total}) and (\ref{follower_conservation}) are
linearly dependent since $\sum_{j}N_{j}=N$. Thus, the constraint
(\ref{leader_total}) is redundant and may be omitted.
\textbf{Theorem 1} If there exists a flow vector $\mathbf{\rho}$ such that
constraints (\ref{follower_conservation})-(\ref{flow_capacity}) hold, then
there exists a connected graph $\mathscr{G(\mathbf{s})}$. Moreover, the number
of possible graphs is finite.
\textbf{Proof:} We use a contradiction argument. Assume that at least one
follower agent is not connected to the leader while satisfying
(\ref{follower_conservation})-(\ref{flow_capacity}). We can separate the
follower agents into two sets: $N_{1}=\{k:\Pi_{k}\neq\emptyset\}$ and
$N_{2}=\{j:\Pi_{j}=\emptyset\}$. Then, $\rho_{kj}=0$ must be true for all
$k\in N_{1}$ and $j\in N_{2}$. This is because if $\rho_{kj}>0$, then there
exists a path $\pi_{j}=\{\pi_{k},j\}$ where $\pi_{k}\in\Pi_{k}$, which
contradicts the fact that $j\in N_{2}$. In addition, obviously $\rho_{0j}=0$
for $j\in N_{2}$. Summing the left-hand-sides of all constraints
(\ref{follower_conservation}) such that $j\in N_{2}$, we obtain
\begin{equation}
\begin{split}
& \sum_{j\in N_{2}}N_{j}=\sum_{j\in N_{2}}\left( \sum_{k\in\mathscr{N}}
\rho_{kj}-\sum_{k\in\mathscr{N}_{F}}\rho_{jk}\right) \\
= & \sum_{j\in N_{2}}\left[ \sum_{k\in N_{1}}\rho_{kj}+\sum_{k\in N_{2}}
\rho_{kj}+\rho_{0j}-\left( \sum_{k\in N_{1}}\rho_{jk}+\sum_{k\in N_{2}}
\rho_{jk}\right) \right] \\
= & \sum_{j\in N_{2}}\sum_{k\in N_{2}}\rho_{kj}-\sum_{j\in N_{2}}\sum_{k\in
N_{2}}\rho_{jk}-\sum_{j\in N_{2}}\sum_{k\in N_{1}}\rho_{jk}\\
= & -\sum_{j\in N_{2}}\sum_{k\in N_{1}}\rho_{jk}\leq0
\end{split}
\end{equation}
Next, summing the right-hand-sides of the constraints
(\ref{follower_conservation}) over $j\in N_{2}$ we get $\sum_{j\in N_{2}}
N_{j}=|N_{2}|>0$. This contradicts the constraint (\ref{follower_conservation})
leading to the conclusion that the graph $\mathscr{G(\mathbf{s})}$ is
connected. The additional constraints (\ref{self_loop})-(\ref{flow_capacity})
are necessary to ensure that the number of feasible flow vectors
$\mathbf{\rho}$ is finite. Clearly, (\ref{self_loop}) prohibits self-loops
while (\ref{flow_capacity}) prevents an infinite number of solutions where
edges $(i,j)$ in $\mathscr{G(\textbf{s})}$ may take any unbounded flow value
$\rho_{ij}>0$. $\blacksquare$
Observe that $\rho_{ij}>0$ indicates a connection between agents $i$ and $j$.
This can be combined with the constraint $d_{ij}(t)\leq C_{ij}$ to write
$\rho_{ij}(d_{ij}(t)-C_{ij})\leq0$ for all edges $(i,j)$ in
$\mathscr{G(\textbf{s})}$. Moreover, the convex set $F$ can be expressed
through linear constraints. Thus, the optimal formation problem with
connectivity preservation at any fixed $t\in\lbrack0,T]$ becomes a Mixed
Integer Nonlinear Problem (MINLP):
\begin{equation}
\begin{split}
& \min_{\mathbf{s}(t),\mathbf{\rho}}\text{ }-H(\mathbf{s}(t),\mathbf{\rho})\\
\text{s.t.}\quad s_{i}(t) & \in F\subseteq\Omega,\text{ \ }i=0,\ldots,N\\
\sum_{i\in\mathscr{N}}\rho_{ij} & -\sum_{i\in\mathscr{N}_{F}}\rho
_{ji}=1,\text{ \ }j\in\mathscr{N}_{F}\\
\rho_{ij}(d_{ij}(t)-C_{ij}) & \leq0,\text{ }i\in\mathscr{N},\text{ \ }
j\in\mathscr{N}_{F}\\
\rho_{ii} & =0,\text{ \ }i\in\mathscr{N}_{F}\\
\rho_{ij} & \leq N,\text{ \ }i\in\mathscr{N},\text{ \ }j\in\mathscr{N}_{F}
\end{split}
\label{objwconstraint}
\end{equation}
Note that any agent position vector $\mathbf{s}(t)$ specifies a graph at time
$t$. The role of $\mathbf{\rho}$ is in ensuring that this graph is connected
by satisfying the constraints in (\ref{objwconstraint}), thus creating an
optimal formation. However, there is no advance information regarding what the
optimal formation looks like and how the optimal formation changes over time
as the leader moves in a time interval $[0,T]$ unless $H(\mathbf{s}(t))$ is
given some specific structure.
For the remainder of this paper, we will consider the class of coverage
control problems
\cite{SM2011,CM2004,cgc2005,caicedo2008coverage,caicedo2008performing,breitenmoser2010voronoi,Minyi2011,Gusrialdi2011}
which impose a particular structure on $H(\mathbf{s}(t))$. Agents are assumed
to be equipped with some sensing and some communication capabilities. In
particular, we assume that agent $i$'s sensing is limited to a set $\Omega
_{i}(t)\subset\Omega$. For simplicity, we let $\Omega_{i}(t)$ be a circle
centered at $s_{i}(t)$ with radius $\delta_{i}$. Thus, $\Omega_{i}
(t)=\{x:d_{i}(x,t)\leq\delta_{i}\}$ where $d_{i}(x,t)=\Vert x-s_{i}(t)\Vert$,
the standard Euclidean norm. To further maintain simplicity without affecting
the generality of the analysis, we set $\delta_{i}=\delta$ for all agents. We
define $p_{i}(x,s_{i}(t))$ to be the probability that $i$ detects an event
occurring at point $x$. This function is defined to have the following
properties: $(i)$ $p_{i}(x,s_{i}(t))=0$ if $x\notin\Omega_{i}(t)$, and $(ii)$
$p_{i}(x,s_{i}(t))\geq0$ is a monotonically nonincreasing function of
$d_{i}(x,t)$. The overall \emph{sensing detection probability} is denoted by
$\hat{p}_{i}(x,s_{i}(t))$ and defined as
\begin{equation}
{\label{SensingModel}}\hat{p}_{i}(x,s_{i}(t))=
\begin{cases}
p_{i}(x,s_{i}(t)) & \text{if}\quad x\in\Omega_{i}(t)\\
0 & \text{if}\quad x\notin\Omega_{i}(t)
\end{cases}
\end{equation}
Note that $\hat{p}_{i}(x,s_{i}(t))$ may not be continuous in $s_{i}(t)$. The
joint detection probability, denoted by $P(x,\mathbf{s}(t))$, captures the
sensing ability of the entire agent team. That is, an event at $x\in\Omega$ is
detected by at least one of the cooperating agents with probability
$P(x,\mathbf{s}(t))$, which is given by
\begin{equation}
P(x,\mathbf{s}(t))=1-\prod_{i=0}^{N}[1-\hat{p}_{i}(x,s_{i}(t))]\label{jointP}
\end{equation}
where we assume that agents sense independently of each other. In addition to
sensing, the communication capabilities of agents are defined by their
relative distance: agents $i$ and $j$ can establish a communication link if
$\Vert s_{i}(t)-s_{j}(t)\Vert\leq C$. Thus, in this class of problems a
formation is required to maintain full communication among agents. Finally,
one of the agents, indexed by $0$, is designated as the leader whose position
$s_{0}(t)$ is given.
The objective function for optimal coverage is the same as in \cite{Minyi2011}
except for the presence of a leader whose position is predefined. For any
$x\in\Omega$, the function $R(x):\Omega\rightarrow\mathbb{R}$ captures an a
priori estimate of the frequency of event occurrences at $x$ and is referred
to as an \textquotedblleft event density\textquotedblright\ satisfying
$R(x)\geq0$ for all $x\in\Omega$ and $\int_{\Omega}R(x)dx<\infty$. In this
problem, we assume that the event density is a constant for any $x\in\Omega$.
We are interested in maximizing the total detection probability over the
mission space $\Omega$:
\begin{equation}
\max_{\mathbf{s}(t)}\text{ }H(\mathbf{s}(t))=\int_{\Omega}R(x)P(x,\mathbf{s}
(t))dx \label{covcontrolH}
\end{equation}
so that the objective in (\ref{objwconstraint}) is $H(\mathbf{s}
(t),\mathbf{\rho})=\int_{\Omega}R(x)P(x,\mathbf{s}(t))dx$. Figures
\ref{fig:ExampleMINLP} and \ref{fig:MINLPSolution} show optimal formation
examples obtained by solving (\ref{objwconstraint}) at time $t$ with
$s_{0}(t)$ located at the center of the mission space.
\begin{figure}
\caption{Optimal formation for 5 followers (numbers) and one leader (L) in a bounded mission space.}
\label{fig:ExampleMINLP}
\caption{Optimal formation for 11 followers and a leader. Followers are constrained to the left side of the leader.}
\label{fig:MINLPSolution}
\end{figure}
A solution of this MINLP is computationally costly so that it is not realistic
to expect re-solving it over the course of a mission $t\in\lbrack0,T]$ as the
leader moves. However, it is not always necessary to repeatedly solve this
problem over $[0,T]$. Theorem 2 presents a condition under which we only need
to solve the problem at $t=0$. This simply formalizes the rather obvious fact
that if no new constraints (e.g., obstacles) are encountered over $t\in(0,T]$,
then the optimal formation at $t=0$ can be preserved by maintaining fixed
relative positions for all agents.
\textbf{Theorem 2} Let $\mathbf{s}(0)$ be an optimal solution of problem
(\ref{objwconstraint}) at $t=0$ and assume that $\Omega_i(t) \subset F, i \in \mathscr{N}$ and that $s_{0}(t)$ is known to all
followers for all $t\in(0,T]$. If $s_{i}(t)=s_{i}(0)+s_{0}(t)-s_{0}(0)$,
$i\in\mathscr{N}_{F}$, then $\mathbf{s}(t)$ maximizes $H(\mathbf{s}(t))$ in
(\ref{covcontrolH}).
\textbf{Proof:} Let us introduce a local polar coordinate system for each
agent $i$, so that the origin of $i$'s local coordinate system is $s_{i}$ and
the axes are parallel to those in the mission Cartesian coordinate system.
Given any point $x=(x_{x},x_{y})\in F$, let $l=(r_{i},\theta_{i})$ be the
polar coordinates in $i$'s local coordinate system. Then, the transformation
that maps $(r_{i},\theta_{i})$ onto the global coordinate system is
$x=s_{i}(t)+[r_{i}\cos\theta_{i}\;\;r_{i}\sin\theta_{i}]^{T}$. Upon switching
to this local coordinate system, the sensing probability becomes
$p_{i}(x,s_{i}(t))=p_{i}(r_{i})$ if $r_{i}<\delta$. Since $\Omega_{i}(t)\subset F$
for all $t\in\lbrack0,T]$, the local sensing range of $s_{i}(t)$, which is
denoted by $\Omega_{i}^{L}=\{(r_{i},\theta_{i}):r_{i}\leq\delta,0\leq\theta
_{i}\leq2\pi\}$, is time-invariant. Therefore, recalling (\ref{jointP}), the
objective function in (\ref{covcontrolH}) is
\begin{equation}
\begin{split}
H(\mathbf{s}(t)) & =\int_{\Omega}R(x)P(x,\mathbf{s}(t))dx\\
& =\int_{\bigcup_{i=0}^{N}\Omega_{i}(t)}R(x)P(x,\mathbf{s}(t))dx\\
& =\int_{\bigcup_{i=0}^{N}\Omega_{i}(t)}R(x)\{1-\prod_{i=0}^{N}
[1-p_{i}(x,s_{i}(t))]\}dx\\
& =\int_{\bigcup_{i=0}^{N}\Omega_{i}^{L}}r_{i}R(r_{i},\theta_{i}
)\{1-\prod_{i=0}^{N}[1-p_{i}(r_{i})]\}dr_{i}d\theta_{i}
\end{split}
\label{covHLocalCoordinate}
\end{equation}
so that the objective function value remains fixed for any $t\in\lbrack0,T]$.
Since for any agents $i$ and $j$, by assumption, $s_{i}(t)-s_{j}
(t)=s_{i}(0)+s_{0}(t)-s_{0}(0)-\left( s_{j}(0)+s_{0}(t)-s_{0}(0)\right)
=s_{i}(0)-s_{j}(0)$, and $\mathbf{s}(0)$ is an optimal solution of
(\ref{objwconstraint}), it follows that $\mathscr{G}(\mathbf{s}(0))$ is
connected, therefore, $\mathscr{G}(\mathbf{s}(t))$ is also connected and we
conclude that $\mathbf{s}(t)$ maximizes $H(\mathbf{s}(t))$. $\blacksquare$
The implication of Theorem 2 is that when a mission space has no obstacles in
it or the leader follows a trajectory where no obstacles are encountered by
any agent, our problem is reduced to one of ensuring that all agents
accurately track the leader's trajectory. We may discretize time so that
agents update their locations at $0<t_{1}<\cdots<t_{K}=T$. Assuming that
problem (\ref{objwconstraint}) is solved at $t=0$, an optimal formation is
obtained and we subsequently strive to maintain this formation until a
significant \textquotedblleft event\textquotedblright\ occurs such as an agent
failure, a change in objective function $H(\mathbf{s}(t))$, or encountering
obstacles; at such a point, some amount of reconfiguration is required while
still aiming to maximize $H(\mathbf{s}(t))$.
\section{Optimal Dynamic Formation Control in a Mission Space with Obstacles}
We have thus far solved an optimal dynamic formation problem with connectivity
constraints in a convex feasible space $F$ by solving a MINLP. However, this
method may fail when $F$ is non-convex, e.g., when $F$ cannot be described
through linear or nonlinear constraints. In this section, we address the
optimal dynamic formation problem in a mission space with obstacles, thus
considering a non-convex feasible space.
We model the obstacles as $m$ non-self-intersecting polygons denoted by
$M_{j}$, $j=1,\ldots,m$. The interior of $M_{j}$ is denoted by $\mathring
{M_{j}}$, so that the overall feasible space is $F=\Omega\setminus
(\mathring{M_{1}}\cup\ldots\cup\mathring{M_{m}})$, i.e., the space $\Omega$
excluding all interior points of the obstacles. In this setting, we seek to
ensure the following two requirements. First, the distance between two
connected agents must be $\leq C$. We define $c_{1}(s_{i},s_{j})$ to indicate
whether this requirement is satisfied:
\begin{equation}
c_{1}(s_{i},s_{j})=\left\{
\begin{array}
[c]{cl}
1 & \Vert s_{i}-s_{j}\Vert\leq C\\
0 & \text{otherwise}
\end{array}
\right.
\end{equation}
Second, the connected agents are required to have a line of sight with respect
to each other. We define $c_{2}(s_{i},s_{j})$ to indicate this requirement:
\begin{equation}
c_{2}(s_{i},s_{j})=\left\{
\begin{array}
[c]{cl}
1 & \alpha s_{i}+(1-\alpha)s_{j}\in F\text{ for all }\alpha\in\lbrack0,1]\\
0 & \text{otherwise}
\end{array}
\right.
\end{equation}
Agents $i$ and $j$ satisfying $c_{1}(s_{i},s_{j})=1$ as well as $c_{2}
(s_{i},s_{j})=1$ are referred to as \emph{connected}. We also define
$c(s_{i},s_{j})=c_{1}(s_{i},s_{j})c_{2}(s_{i},s_{j})$.
A version of this connectivity preservation problem was addressed in
\cite{Minyi2011}, where agents are required to remain connected with a fixed
base while at the same time maximizing the objective function in
(\ref{covcontrolH}). A gradient-based algorithm, termed \emph{Connectivity
Preservation Algorithm} (CPA), was developed for agent position updating and
it was shown that, given an initially connected network and if only one agent
updates its position at any given time, the CPA preserves connectivity. The
algorithm is applied iteratively over one agent at a time and it converges to
a (generally local) optimum. The CPA exploits the existence of distributed
optimization algorithms for optimal coverage to attain optimal agent locations
while also preserving connectivity to a base (details on the CPA and its
complexity are provided in \cite{Minyi2011}).
Our approach here is to take advantage of the CPA. In our problem, however,
the conditions for applying the CPA do not generally hold; this is because the
leader's motion does not take connectivity with its neighbors into account and
the presence of an obstacle, for example, may cause it to disconnect from one
or more followers. This is illustrated in Fig. \ref{fig: Algorithm}: At time
$t$, the agent network shown (represented by three blue circles and a blue
triangle as the leader) is connected. At $t+\epsilon$, the leader (triangle)
moves to $s_{0}(t+\epsilon)$ and if agent 2 moves to the point shown in yellow
(as expected by Theorem 2), then it becomes disconnected from the leader
because of the obstacle present. \begin{figure}
\caption{An example of a connected network at $t$ and
constructed connected network by Algorithm 1 at $t+\epsilon$.}
\label{fig: Algorithm}
\end{figure}We propose an algorithm next to construct a connected graph, which
may no longer be optimal in the sense of problem (\ref{objwconstraint}) but it
does provide a valid initial condition for invoking the CPA described above
(this is illustrated in Fig. \ref{fig: Algorithm} as the solid red graph).
This immediately allows us to iteratively apply the CPA so as to obtain a new
(locally optimal) formation.
Clearly, it is also possible to invoke (\ref{objwconstraint}) as soon as a
formation reconfiguration is needed. However, the set $F$ is no longer convex
and the computational complexity of this problem makes it infeasible for the
on-line adaptation required, whereas the approach we propose and the use of
the CPA render this process computationally manageable. In particular, whereas
the MINLP is generally NP hard, in the CPA each agent $i$ determines its new
position through a gradient-based scheme using only its neighbor set and its downstream and upstream agent sets relative to the leader (formally defined in the next section). When the
number of agents increases, note that the number of neighbors of $i$ may not be affected. The overall increase in complexity is linear in the network size.
Before proceeding, we identify the precise instants when formation
reconfiguration is necessary due to obstacles encountered by agents as the
mission unfolds over $[0,T]$. We define two states that the agent team can be
in: $(i)$ The \emph{constrained} state occurs when the sensing capability of
an agent is hindered by an obstacle, captured by the condition $\left(
\bigcup_{i=0}^{N}\Omega_{i}\right) \bigcap\left( \bigcup_{i=1}^{m}
\mathring{M_{i}}\right) \neq\emptyset$, i.e., the intersection of the sensed
part of $\Omega$ and the set of interior points of any obstacle is not empty,
and $(ii)$ The \emph{free} state corresponding to $\left( \bigcup_{i=0}
^{N}\Omega_{i}\right) \bigcap\left( \bigcup_{i=1}^{m}\mathring{M_{i}
}\right) =\emptyset$. Thus, the interval $[0,T]$ is partitioned into free and
constrained intervals with transitions at times $t_{f}^{0}<t_{c}^{1}<t_{f}
^{1}<\cdots<t_{c}^{i}<t_{f}^{i}<\cdots<t_{f}^{z}<T$. This is described in Fig.
\ref{fig:stateTransition}. Next, we consider how to generate optimal
formations over different alternating intervals $[t_{f}^{k},t_{c}^{k+1})$ and
$[t_{c}^{k+1},t_{f}^{k+1})$. \begin{figure}
\caption{Two states of the agents network and the
transition time points between the two states.}
\label{fig:stateTransition}
\end{figure}
\subsection{Optimal formation control in free states}
When the agent network enters a free state at time $t_{f}^{k},$ $k=0,\ldots
,z$, since $\left( \bigcup_{i=0}^{N}\Omega_{i}(t)\right) \bigcap\left(
\bigcup_{i=1}^{m}\mathring{M_{i}}\right) =\emptyset$ for all $t \in [t_f^k,t_c^{k+1})$ and $F=\Omega
\setminus(\mathring{M_{1}}\cup\ldots\cup\mathring{M_{m}})$, so $\Omega_{i}(t)\subset F$ for any $i$ over $t \in [t_f^k,t_c^{k+1})$, the optimal formation is maintained based on Theorem 2.
\subsection{Optimal formation control in constrained states}
We begin this subsection with some additional notation and definitions. Given
a connected graph $\mathscr{G}(\mathbf{s})$, we have defined a loop-free path
connecting agent $i$ to the leader as $\pi_{i}=\{0,\ldots,a,b,\ldots,i\}$, an
ordered set where neighboring agents are connected; we have also defined
$\Pi_{i}$ to be the set of all possible paths connecting $i$ to the leader.
Let $\pi_{i,k}$ be the $k$th path in $\Pi_{i}$ and we use $\pi_{i,k}^{j}$ to
denote the $j$th element in $\pi_{i,k}$. Let $\mathscr{D}_{i}=\cup_{j,k}
w_{i}(\pi_{j,k})$ be the set of agents \emph{downstream} from $i$ (further
away from the leader 0) where
\begin{equation}
w_{i}(\pi_{j,k})=\left\{
\begin{array}
[c]{cl}
\pi_{j,k}^{l+1} & \text{ if }i\in\pi_{j,k},\text{ }i\neq j\text{ and }
i=\pi_{j,k}^{l}\\
\emptyset & \text{otherwise}
\end{array}
\right.
\end{equation}
We also define the set of \emph{upstream} agents from $i$ as $\mathscr{U}_{i}
=\{j:i\in\mathscr{D}_{j},\;j\in\{0,\dots,N\}\}$.
The length of a path $\pi_{i,k}$ is defined as $\Psi(\pi_{i,k})=\sum
_{l=1}^{|\pi_{i,k}|-1}\Vert s_{\pi_{i,k}^{l}}-s_{\pi_{i,k}^{l+1}}\Vert$, where
$|\pi_{i,k}|$ is the cardinality of $\pi_{i,k}$. For agent $i$, the shortest
path connected to the leader is
\[
\pi_{i}^{\ast}=\arg\min_{\pi_{i,k}\in\Pi_{i}}\Psi(\pi_{i,k})
\]
For example in Fig. \ref{fig: Algorithm}, in the path $\pi_{3,1}=\{0,2,3\}$,
we have 3 $\in\mathscr{D}_{2}$, 0 $\in\mathscr{U}_{2}$, $\Psi(\pi_{3,1})=\Vert
s_{0}-s_{2}\Vert+\Vert s_{2}-s_{3}\Vert$; for the path $\pi_{3,2}
=\{0,1,2,3\}$, we have $\Psi(\pi_{3,2})=\Vert s_{0}-s_{1}\Vert+\Vert
s_{1}-s_{2}\Vert+\Vert s_{2}-s_{3}\Vert$. Therefore, $\pi_{3}^{\ast}=\pi
_{3,1}$ is the shortest path from agent $3$ to the leader.
Let $\pi_{i}$ and $\pi_{j}$ be two paths. Then, we define $\pi_{i}+\pi
_{j}=\{\pi_{i},\pi_{k}\}$, where $\pi_{k}=\pi_{j}\setminus\pi_{i}$, as an
ordered set. Note that $\pi_{i}+\pi_{j}$ is generally different from $\pi
_{j}+\pi_{i}$ because of the order involved. Given a connected graph
$\mathscr{G}(\mathbf{s})$, we define
\begin{equation}
Q(\mathscr{G}(\mathbf{s}))=\pi_{1}^{\ast}+\ldots+\pi_{N}^{\ast}\label{Qdef}
\end{equation}
to be an ordered set containing a permutation of the agent set $\{0,1,\ldots,N\}$
constructed so as to start with the shortest path $\pi_{1}^{\ast}$ from $0$ to
agent $1$, followed by $\pi_{2}^{\ast}\setminus\pi_{1}^{\ast}$ and so on. It
immediately follows from this construction that the first element of
$Q(\mathscr{G}(\mathbf{s}))$ is $0$ and that $|Q(\mathscr{G}(\mathbf{s}
))|=N+1$. Therefore, we can rewrite $Q(\mathscr{G}(\mathbf{s}))$ as
\[
Q(\mathscr{G}(\mathbf{s}))=\{0,q_{2},\ldots,q_{N+1}\}
\]
where $q_{j}\in\mathscr{N}_{F},j=2,\ldots,N+1$. For example, in Fig.
\ref{fig: Algorithm}, at time $t$, $Q(\mathscr{G}(\mathbf{s}(t)))=\{0,1,2,3\}$.
We show next that $Q(\mathscr{G}(\mathbf{s}))$ has the following property
regarding the order of its elements.
\textbf{Lemma 1 } If $q_{i}$ is the $i$th element of $Q(\mathscr{G}(\mathbf{s}
))$ constructed from a connected graph $\mathscr{G}(\mathbf{s})$, then there
exists $q_{j}\in\mathscr{U}_{q_{i}}$ such that $q_{j}$ is the $j$th element of
$Q(\mathscr{G}(\mathbf{s}))$, and $j<i$ for all $q_{i}\in\mathscr{N}_{F}$.
\textbf{Proof:} If for all $q_{j}\in\mathscr{U}_{q_{i}}$, $j>i$, we cannot
find a subset of $Q(\mathscr{G}(\mathbf{s}))$ that includes $\{q_{j},q_{i}\}$,
$q_{j}\in\mathscr{U}_{q_{i}}$, then there is no path connected to $q_{i}$.
This contradicts the assumption that $Q(\mathscr{G}(\mathbf{s}))$ is
constructed from a connected graph. $\blacksquare$
We also define a projection of $x\in\mathbb{R}^{2}$ on a set $A\subset
\mathbb{R}^{2}$ as
\[
P_{A}(x)=\arg\min_{y\in A}\Vert x-y\Vert
\]
Next, let $\mathscr{Y}(s_{i})=\{y\in\mathbb{R}^{2}:c(s_{i},y)=1\}$.
Recalling the definition of $c(\cdot,\cdot)$, $\mathscr{Y}(s_{i})$ is the set
of points with which $s_{i}$ can establish a connection. For any subset of
agents $\mathscr{V}\subset\mathscr{N}$, let $\Sigma(\mathscr{V})=\bigcup
_{i\in\mathscr{V}}\mathscr{Y}(s_{i})$ be the union of all connection regions
for agents in $\mathscr{V}$. For example, in Fig. \ref{fig: Algorithm}, the
grey area is $\Sigma(\mathscr{V})$ for $\mathscr{V}=\{0,1\}$ at time
$t+\epsilon$.
We are now ready to deal with the situation where the formation is in a
constrained state and may lose connectivity at time $t+\epsilon$ given that
the graph $\mathscr{G}(\mathbf{s}(t))$ is connected. In particular, suppose
that when the leader is about to move to $s_{0}(t+\epsilon)$ and informs the
followers, at least one of the agents will lose connectivity with the
formation. Our task is to obtain an optimal formation at $t+\epsilon$ and this
is accomplished in two steps: $(i)$ Construct a connected graph
$\mathscr{G}(\mathbf{s}(t+\epsilon))$ for time $t+\epsilon$, and $(ii)$ Use
this connected graph $\mathscr{G}(\mathbf{s}(t+\epsilon))$ as an input to
invoke the CPA. Step $(i)$ is crucial because of the fact that the CPA relies
on an initially connected graph before it can be executed to seek (locally)
optimal agent locations which still preserve connectivity. This first step is
carried out by constructing a connected graph through Algorithm
\ref{alg: ConGraph}. \begin{algorithm}
\caption{ Connected Graph Construction Algorithm}
\label{alg: ConGraph}
\textbf{Input}: Graph $\mathscr{G}(\mathbf{s}(t))$, $s_0(t+\epsilon)$ \\
\textbf{Output}: Graph $\mathscr{G}(\mathbf{s}(t+\epsilon))$ \\
\textbf{Initialization:} $\mathscr{U}_i, \mathscr{D}_i$ for $i \in \mathscr{N}$, $\mathscr{V}=\{ 0 \}$, $Q(\mathscr{G}(\mathbf{s}(t)))=\{0, q_2,\ldots, q_{N+1} \}$ using (\ref{Qdef}) \\
\textbf{For} agent $i=q_j, j=2,\ldots, N+1$ \\
Do the following procedure:
\begin{algorithmic}[1]
\STATE{\label{Candidate}} Generate a candidate next location for $i$: $\hat{s}_i=s_i(t)+\Delta_L$.
\STATE{\label{UpstreamCheck}} For all agents $v \in \mathscr{U}_i \bigcap \mathscr{V}$, if $c(\hat{s}_i, s_v(t+\epsilon)) = 0$, go to Step \ref{UpstreamProj}; else, go to Step \ref{Update}.
\STATE{\label{UpstreamProj}} Project $s_i$ onto $\Sigma(\mathscr{U}_i \bigcap \mathscr{V})$. Set $\hat{s}_i = P_{\Sigma(\mathscr{U}_i \bigcap \mathscr{V})}(s_i)$.
\STATE{\label{Update}} Set $s_i(t+\epsilon) = \hat{s}_i$.
\STATE{\label{UpdateI}} Add $i$ to $\mathscr{V}$
\end{algorithmic}
\textbf{End}
\end{algorithm}We use $\Delta_{L}(t)=s_{0}(t+\epsilon)-s_{0}(t)$ to denote the
position change vector of the leader from $t$ to $t+\epsilon$, where we assume
that followers have the $\Delta_{L}(t)$ information available at $t$.
\textbf{Theorem 3 } $\mathscr{G}(\mathbf{s}(t+\epsilon))$ obtained by
Algorithm \ref{alg: ConGraph} is connected.
\textbf{Proof}: Since $\mathscr{G}(\mathbf{s}(t))$ is connected,
$\mathscr{U}_{i}\neq\emptyset$ for $i\in\mathscr{N}_{F}$. We then use
induction to prove that the graph constructed by agents in $\mathscr{V}$
remains connected at Step \ref{UpdateI} in every iteration. Initially,
$\mathscr{V}=\{0\}$ which is connected. Next, assuming there are $n$ agents in
$\mathscr{V}$ and the graph they form is connected, we will prove that after
adding the $(n+1)$th agent, say $i$, the graph remains connected.
The addition of $i$ to $\mathscr{V}$ occurs at Step \ref{UpdateI}. There are two possible
sequences for reaching this step: \ref{Candidate}-\ref{UpstreamCheck}
-\ref{Update} and \ref{Candidate}-\ref{UpstreamCheck}-\ref{UpstreamProj}
-\ref{Update}. At Step \ref{UpstreamCheck}, $\mathscr{U}_{i}\bigcap
\mathscr{V}\neq\emptyset$ because of the property of $Q(\mathscr{G}(\mathbf{s}
))$ in Lemma 1. It follows that before $i$ performs the procedure, there is at
least one upstream agent in $\mathscr{V}$. In the \ref{Candidate}
-\ref{UpstreamCheck}-\ref{Update} sequence, there exists some $m\in
\mathscr{V}\cap\mathscr{U}_{i}$ such that $c(\hat{s}_{i},s_{m}(t+\epsilon
))=1$. Therefore, all agents in $\mathscr{V}$ including $i$ will be connected.
In the \ref{Candidate}-\ref{UpstreamCheck}-\ref{UpstreamProj}-\ref{Update}
sequence, at Step \ref{UpstreamProj}, agent $i$'s position is projected onto
the connection ranges of all $v\in\mathscr{V}\cap\mathscr{U}_{i}$. It follows
that the graph formed by agents in $\{\mathscr{V},i\}$ is connected. Step \ref{UpdateI}
adds agents to $\mathscr{V}$ one by one until $\mathscr{V}=\mathscr{N}$,
therefore, the graph $\mathscr{G}(\mathbf{s}(t+\epsilon))$ is connected.
$\blacksquare$
Obviously, Algorithm \ref{alg: ConGraph} does not provide a unique way to
construct a connected graph. For example, the formation could be adjusted to a
line or a star configuration with $s_{0}(t+\epsilon)$ as the center of the
star. However, this would entail a major formation restructuring whereas in
Algorithm \ref{alg: ConGraph} we seek to retain the \emph{closest possible
formation} to the original (optimal) one by setting candidate locations as
seen in Step \ref{Candidate}. If such a candidate is not feasible, then the
agent will move a minimal distance (in the projection sense) to be connected.
Once step $(i)$ above is completed by obtaining this connected graph
$\mathscr{G}(\mathbf{s}(t))$, step $(ii)$ is performed by invoking the CPA to
optimize the agent locations within the new formation. Clearly, once obstacles
are cleared and the agent team re-enters a free state (see Fig.
\ref{fig:stateTransition}), we may revert to the original optimal formation.
\section{Simulation Results}
In this section, we provide a simulation example illustrating what the optimal
formation maximizing coverage in a mission space with obstacles looks like and
how it changes at some significant instants.
We choose the event density functions to be uniform, i.e., $R(x)=1$. The
mission space is a $60\times50$ rectangle. The distance constraint is $C=10$
and the sensing range of each agent is $\delta=8$. At every step, the leader
moves to the right one distance unit per unit of time. The mission space is
colored from dark to lighter as the joint detection probability decreases (the
joint detection probability is $\geq0.50$ for green areas, and near zero for
white areas). The leader (labeled \textquotedblleft L\textquotedblright) moves
along a predefined trajectory (the purple dashed line). There are 8 followers,
indicated by numbers, which are restricted to locations on the left
side of the leader during any movement.
Figures \ref{fig:MINLPStart}-\ref{fig:MissionEnd} show snapshots of the
process at selected events of interest over $[0,T]$. Figure
\ref{fig:MINLPStart} shows the initial configuration at $t=0$, where the agent
team is located in a convex feasible space. As shown in Sec. III, in this
case, the optimal formation can be obtained by solving a MINLP
\cite{Bussieck2014}. In the results shown, we have used TOMLAB, a MATLAB-based
optimization solver. For the non-convex objective function defined in
(\ref{covcontrolH}), the solution is usually a local maximum; we sought to
find the best local (possibly global) optimum possible by implementing a
multi-start algorithm on the solver. This is done at the start of the mission,
when an off-line computationally intensive procedure is possible. Moreover,
this local maximum can be improved by applying the CPA; in fact, in this
example the use of the CPA led to an improvement from $H(\mathbf{s})=741.5$ to
$H(\mathbf{s})=816.7$, as shown in Fig. \ref{fig:CPAFromMINLP}. Thus, in
general, supplying the CPA with an initial connected graph obtained by solving
the MINLP enables it to converge to a better value. For example, Fig.
\ref{fig:CPAStart} is a local maximum attained by starting with a star-like
connected graph shown in Fig. \ref{fig:starStart} with the objective function
value $H(\mathbf{s})=781.1$ (although this is still worse than the value in
Fig. \ref{fig:CPAFromMINLP}).
\begin{figure}
\caption{At $t=0$, optimal formation from MINLP, $H(\mathbf{s})=741.5$}
\label{fig:MINLPStart}
\caption{At $t=0$, optimal formation improved by CPA of Fig. \ref{fig:MINLPStart}, $H(\mathbf{s})=816.7$}
\label{fig:CPAFromMINLP}
\caption{At $t=5$, agent 5 needs projection in Step \ref{UpstreamProj} of Algorithm \ref{alg: ConGraph}}
\label{fig:firstProjection}
\caption{At $t=7$, apply CPA after projection in Fig. \ref{fig:firstProjection}}
\label{fig: CPAProjection}
\end{figure}
\begin{figure}
\caption{At $t=12$, the structure of the tree changes}
\label{fig:Reconfiguration}
\caption{At $t=35$, the end of the mission}
\label{fig:MissionEnd}
\caption{A star-like connected graph}
\label{fig:starStart}
\caption{Apply CPA from Fig. \ref{fig:starStart}}
\label{fig:CPAStart}
\end{figure}
In the time interval $[0,5]$, the formation is maintained. At $t=5$, agent 5
is located at a vertex of an obstacle and will therefore lose connectivity as
the leader moves to the next step at $t=6$. At this point, agent 5 will
determine its next position $s_{5}(6)$ by applying a projection at Step
\ref{UpstreamProj} of Algorithm \ref{alg: ConGraph}. Note that only agent 5
needs to perform this projection, rather than the whole team of agents, hence
the computational effort is minimal. Figure \ref{fig: CPAProjection} captures
the optimal formation following Fig. \ref{fig:firstProjection}.
Observe that over the period $[0,12)$, although the optimal formation remains
a tree, it is no longer the same as the original one. However, for each agent
$i$, its downstream node set $\mathscr{D}_{i}$ and upstream node set
$\mathscr{U}_{i}$ remain unchanged. At $t=12$, clearly, the structure of the
formation has been changed. This is a consequence of either the projection
step in Algorithm 1 or the CPA. At the end of the mission at $t=35$, the
formation is shown in Fig. \ref{fig:MissionEnd}. The agents seek to form a
line to go through the narrow region of the mission space while at the same
time maximizing coverage. During the remaining interval $[12,35]$, the process
is similar to what is seen over $[5,12]$.
As we pointed out in the last section, constructing a connected graph can be
accomplished in a variety of ways. As shown in Fig. \ref{fig:starStart}, a
star-like graph is an inferior formation to that of Fig.
\ref{fig:CPAFromMINLP}; this is expected since the latter was obtained
specifically to maximize the objective function in (\ref{covcontrolH}). In
addition, a reconfiguration process as shown in Fig. \ref{fig:CPAStart}
requires agents to move longer distances, hence consuming more energy.
\section{Conclusions and future work}
We have addressed the issue of optimal dynamic formation of multi-agent
systems in mission spaces with obstacles. When the agent team is in a free
state (no obstacles in the mission space affecting them), a locally optimal
solution of a MINLP can provide an initial formation that agents maintain or
it is a good initial point for using the CPA (developed in prior work
\cite{Minyi2011}) to obtain a better local optimum. When the feasible space is
non-convex and connectivity is lost, we have developed an algorithm to
construct a connected graph as an input for the CPA while seeking to maintain
the original formation with minimal effort.
Future work aims at investigating optimal dynamic formation control for more
general classes of objective functions, beyond the coverage control problem.
\end{document}
|
\begin{document}
\title{Sharp well-posedness and ill-posedness of the Navier-Stokes initial value problem in Besov-type spaces\thanks{This work is supported by the
China National Natural Science Foundation under the grant number 11571381.}}
\begin{abstract}
Let $B^{s,\sigma}_{pq}(\mathbb{R}^n)$ ($s\in\mathbb{R}$, $\sigma\geqslant 0$, $p,q\in [1,\infty]$) be the logarithmically refined Besov space, which
is defined by replacing $2^{js}$ in the definition of the Besov space $B^s_{pq}(\mathbb{R}^n)$ with $2^{js}j^{\sigma}$ for all $j\in\mathbb{N}$.
Let $B^{s,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$ ($s\in\mathbb{R}$, $\sigma\geqslant 0$, $q\in [1,\infty]$) be the closure of the Schwartz space
$S(\mathbb{R}^n)$ in $B^{s,\sigma}_{\infty q}(\mathbb{R}^n)$. We prove that the Navier-Stokes initial value problem is locally well-posed in
$B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$ for $1\leqslant q\leqslant\infty$ and $\sigma\geqslant\sigma_q:=1-\min\{1-\frac{1}{q},\frac{1}{q}\}$,
and ill-posed in $B^{-1,\sigma}_{\infty\,q}(\mathbb{R}^n)$ for $1\leqslant q\leqslant\infty$ and $0\leqslant\sigma<\sigma_q$. The well-posedness
result is proved by using some sharp bilinear estimates obtained from some Hardy-Littlewood type inequalities. The ill-posedness assertion is
proved by refining the arguments of Wang \cite{Wang} and Yoneda \cite{Yon10}.
\textbf{Keywords}: Navier-Stokes equations; initial value problem; well-posedness; ill-posedness; Besov type space.
\textbf{2000 AMS Subject Classification}: 35Q35, 76W05, 35B65
\end{abstract}
\section{Introduction}
\hskip 2em
This paper addresses the following question which has attracted much attention during the past two decades (cf. also Wang \cite{Wang}):
What is the largest Besov-type space in which the initial value problem of the Navier-Stokes equations is well-posed? The purpose of this
paper is to give an answer to this question
\footnotemark[1].
\footnotetext[1]{This is the second version of the paper with the same title posted on arXiv under the number 1505.00865. It remedies
the incorrect proof of Lemma 4.2 of the previous version and some other small mistakes. The main result of this paper has been
written in the book of Lemari\'{e}-Rieusset \cite{LEM16} as Theorem 9.6 (without proof). The author expresses his sincere
thanks to Weipeng Zhu for helping him find the mistakes in the previous version.}
Recall that the initial value problem of the Navier-Stokes equations reads as follows:
\begin{eqnarray}
\left\{
\begin{array}{l}
\partial_t{\mbox{\boldmath $u$}}-\Delta{\mbox{\boldmath $u$}}+({\mbox{\boldmath $u$}}\cdot\nabla){\mbox{\boldmath $u$}}+\nabla\pi=0\quad \mbox{in}\;\,\mathbb{R}^n\times\mathbb{R}_+,\\
\nabla\cdot{\mbox{\boldmath $u$}}=0\quad \mbox{in}\;\,\mathbb{R}^n\times\mathbb{R}_+,\\
{\mbox{\boldmath $u$}}(x,0)={\mbox{\boldmath $u$}}_0(x)\quad \mbox{for}\;\, x\in\mathbb{R}^n,
\end{array}
\right.
\end{eqnarray}
where $n\geq 2$, ${\mbox{\boldmath $u$}}={\mbox{\boldmath $u$}}(x,t)=(u_1(x,t),u_2(x,t),\cdots,u_n(x,t))$ is an unknown $n$-vector function in $(x,t)$ variables, $x\in\mathbb{R}^n$,
$t\geq 0$, $\pi=\pi(x,t)$ is an unknown scalar function, ${\mbox{\boldmath $u$}}_0={\mbox{\boldmath $u$}}_0(x)$ is a given $n$-vector function, $\Delta$ is the Laplacian in the $x$
variables, $\nabla=(\partial_{x_1},\partial_{x_2},\cdots,\partial_{x_n})$, and $\mathbb{R}_+=(0,\infty)$.
Let $\mathbb{P}=I+\nabla(-\Delta)^{-1}\nabla$ be the Helmholtz-Weyl projection operator, i.e., the $n\times n$ matrix pseudo-differential operator
in $\mathbb{R}^n$ with the matrix symbol $\Big(\delta_{ij}-\frac{\xi_i\xi_j}{|\xi|^2}\Big)_{i,j=1}^n$, where $\delta_{ij}$'s are the Kronecker symbols.
It is well-known that when only the $L^2_{uloc,x}L^2_t$-class solutions (see \cite{LEM02} for this notion) are considered, which is the case in this
paper, the problem (1.1) is equivalent to the following formally simpler problem:
\begin{eqnarray}
\left\{
\begin{array}{l}
\partial_t{\mbox{\boldmath $u$}}-\Delta{\mbox{\boldmath $u$}}+\mathbb{P}\nabla\cdot({\mbox{\boldmath $u$}}\otimes{\mbox{\boldmath $u$}})=0\quad \mbox{in}\;\,\mathbb{R}^n\times\mathbb{R}_+,\\
{\mbox{\boldmath $u$}}(x,0)={\mbox{\boldmath $u$}}_0(x)\quad \mbox{for}\;\, x\in\mathbb{R}^n.
\end{array}
\right.
\end{eqnarray}
Throughout this paper, for any scalar function space $\mathscr{X}$ we shall use the same notation $\mathscr{X}$ to denote its $n$-vector counterpart
to simplify the notation. Let $X$ be a function space continuously embedded in $S'(\mathbb{R}^n)$, the space of temperate distributions on
$\mathbb{R}^n$ endowed with the dual topology of the Schwartz space $S(\mathbb{R}^n)$. Recall that the initial value problem $(1.1)$ is said to be
{\em locally well-posed in $X$} if for any ${\mbox{\boldmath $u$}}_0\in X$ with ${\rm div}{\mbox{\boldmath $u$}}_0=0$ there exists corresponding $T>0$ and a continuously embedded subspace
$Y_T$ of $C([0,T],X)$ such that the problem $(1.2)$ has a unique solution ${\mbox{\boldmath $u$}}$ in $Y_T$, and the solution map ${\mbox{\boldmath $u$}}_0\mapsto{\mbox{\boldmath $u$}}$ is continuous
with respect to the norm topologies of $X$ and $C([0,T],X)$. If $(1.1)$ is not locally well-posed in a function space $X$, then it is called {\em
ill-posed in $X$}. Also recall that $(1.1)$ is said to be {\em semi-globally well-posed in $X$ for small initial data} if for any $T>0$ there exists
corresponding constant $\varepsilon>0$ and a continuously embedded subspace $Y_T$ of $C([0,T],X)$ such that for any ${\mbox{\boldmath $u$}}_0\in X$ with ${\rm div}{\mbox{\boldmath $u$}}_0=0$
and $\|{\mbox{\boldmath $u$}}_0\|_X<\varepsilon$ the problem $(1.2)$ has a unique solution ${\mbox{\boldmath $u$}}$ in $Y_T$, and the solution map ${\mbox{\boldmath $u$}}_0\mapsto{\mbox{\boldmath $u$}}$ is continuous with
respect to the norm topologies of $X$ and $C([0,T],X)$. If there exists constant $\varepsilon>0$ such that for any ${\mbox{\boldmath $u$}}_0\in X$ with ${\rm div}{\mbox{\boldmath $u$}}_0=0$
and $\|{\mbox{\boldmath $u$}}_0\|_X<\varepsilon$ the problem $(1.2)$ has a unique solution ${\mbox{\boldmath $u$}}$ in some subspace of $C([0,\infty),X)\cap L^{\infty}((0,\infty),X)$,
and the solution map ${\mbox{\boldmath $u$}}_0\mapsto{\mbox{\boldmath $u$}}$ is continuous with respect to the norm topologies of $X$ and $L^{\infty}((0,\infty),X)$, then $(1.1)$ is
said to be {\em globally well-posed in $X$ for small initial data}.
The topic of well-posedness of the problem (1.1) in various function spaces has been deeply investigated during the past 50 years. In 1964 Fujita and
Kato \cite{FUJK64} obtained the first result on this topic by proving that the problem $(1.1)$ is locally well-posed in $H^s(\mathbb{R}^n)$ for
$s\geq\frac{n}{2}-1$ and globally well-posed in $H^{\frac{n}{2}-1}(\mathbb{R}^n)$ for small initial data. These results were later extended to various
other function spaces, cf. \cite{BAR96, CAN97, FABJR72, GIG86, GM89, KAT84, KOCT01, LEM07, PLA96, TER99, WEI81} and references cited therein. Note
that the list of references given here is far from complete; we refer the reader to \cite{CAN04} and \cite{LEM02} for expositions and more
references. Here we particularly mention that by Cannone \cite{CAN97} and Planchon \cite{PLA96}, the problem (1.1) is well-posed in the Besov spaces
$B^{s}_{pq}(\mathbb{R}^n)$ for $s\geqslant-1+\frac{n}{p}$, $1\leqslant p<\infty$, $1\leqslant q\leqslant\infty$ (for $q=\infty$, this means that
it is well-posed in the closure of $S(\mathbb{R}^n)$ in $B^{s}_{p\infty}(\mathbb{R}^n)$, and in what follows, similar remark should be made in
any case where the $\infty$ index appears), and by Koch and Tataru \cite{KOCT01},
it is well-posed in $BMO^{-1}$. Note that the inhomogeneous version $bmo^{-1}$ of $BMO^{-1}$ is the largest initial value space in which the problem
(1.1) is known to be locally well-posed.
On the other hand, in 2008 Bourgain and Pavlovi\'{c} \cite{BP08} proved that the problem (1.1) is ill-posed in the Besov space
$\dot{B}^{-1}_{\infty\infty}(\mathbb{R}^n)$. Yoneda \cite{Yon10} further proved that (1.1) is ill-posed in the Besov spaces
$\dot{B}^{-1}_{\infty q}(\mathbb{R}^n)$ and the Triebel-Lizorkin spaces $\dot{F}^{-1}_{\infty q}(\mathbb{R}^n)$ for $2<q\leqslant\infty$. Recently,
Wang \cite{Wang} proved that the problem (1.1) is also ill-posed in the Besov spaces $\dot{B}^{-1}_{\infty q}(\mathbb{R}^n)$ for $1\leqslant
q\leqslant 2$, which is a remarkable result because previously it had been commonly conjectured that (1.1) is well-posed in
$\dot{B}^{-1}_{\infty q}(\mathbb{R}^n)$ for $1\leqslant q\leqslant 2$ due to the fact that they are smaller than $BMO^{-1}$. Note that all the
above-mentioned ill-posedness results also hold for the corresponding inhomogeneous spaces, because all the arguments used in \cite{BP08},
\cite{Yon10} and \cite{Wang} also work for the corresponding inhomogeneous spaces.
Recalling that $BMO^{-1}=\dot{F}^{-1}_{\infty 2}(\mathbb{R}^n)$ and $bmo^{-1}=F^{-1}_{\infty 2}(\mathbb{R}^n)$, we see that $BMO^{-1}$ and $bmo^{-1}$
are respectively the largest homogeneous and inhomogeneous Triebel-Lizorkin spaces in which the problem (1.1) is well-posed. Naturally, we want to
know what is the largest Besov-type space in which the problem (1.1) is well-posed. To give an answer to this question we need to refine the
classification of the Besov space and introduce the logarithmically refined Besov space $B^{s,\sigma}_{pq}(\mathbb{R}^n)$ as
follows (cf. \cite{Yon10}):
{\bf Definition 1.1} \ \ {\em $(1)$\ Let $s\in\mathbb{R}$, $\sigma\geqslant 0$ and $p,q\in[1,\infty]$. The function space
$B^{s,\sigma}_{pq}(\mathbb{R}^n)$ consists of all temperate distributions $u$ on $\mathbb{R}^n$ such that $S_0u\in L^p(\mathbb{R}^n)$, $\Delta_j u
\in L^p(\mathbb{R}^n)$, $j=1,2,\cdots$, and $\{2^{js}j^{\sigma}\|\Delta_j u\|_p\}_{j=1}^\infty\in l^q$, where $S_0$ and $\Delta_j$ are the
frequency-localizing operators appearing in the Littlewood-Paley decomposition $u=S_0u+\sum_{j=1}^{\infty}\Delta_j u$ $($see the next section$)$.
The norm of $u\in B^{s,\sigma}_{pq}(\mathbb{R}^n)$ is given by $\|u\|_{B^{s,\sigma}_{pq}}=\|S_0u\|_p+\|\{2^{js}j^{\sigma}
\|\Delta_j u\|_p\}_{j=1}^\infty\|_{l^q}$, i.e.,
$$
\|u\|_{B^{s,\sigma}_{pq}}=\left\{
\begin{array}{ll}
\displaystyle\|S_0u\|_p+\Big[\sum_{j=1}^\infty\Big(2^{js}j^{\sigma}\|\Delta_j u\|_p
\Big)^q\Big]^{\frac{1}{q}} \quad &\mbox{for}\;\; 1\leqslant q<\infty,\\ [0.3cm]
\displaystyle\|S_0u\|_p+\sup_{j\in\mathbb{N}}\Big(2^{js}j^{\sigma}\|\Delta_j u\|_p
\Big) \quad &\mbox{for}\;\; q=\infty.
\end{array}
\right.
$$
Here and throughout the paper $\|\cdot\|_p$ denotes the norm of $L^p(\mathbb{R}^n)$ $(1\leqslant p\leqslant\infty)$.
$(2)$\ For $s\in\mathbb{R}$, $\sigma\geqslant 0$ and $p,q\in[1,\infty]$, we denote by $B^{s,\sigma}_{pq0}(\mathbb{R}^n)$ the closure of
$S(\mathbb{R}^n)$ in $B^{s,\sigma}_{pq}(\mathbb{R}^n)$.}
It is easy to prove that $B^{s,\sigma}_{pq}(\mathbb{R}^n)$ is a Banach space, and clearly $B^{s,0}_{pq}(\mathbb{R}^n)=B^s_{pq}(\mathbb{R}^n)$, i.e.,
when $\sigma=0$, $B^{s,\sigma}_{pq}(\mathbb{R}^n)$ coincides with the usual Besov space $B^s_{pq}(\mathbb{R}^n)$. Moreover, it is also easy to prove
that the following embedding relations hold:
\begin{itemize}
\item For $t>s$, $\tau>\sigma>0$ and $p,q\in[1,\infty]$, we have
$$
B^{t}_{pq}(\mathbb{R}^n)\subseteq B^{s,\tau}_{pq}(\mathbb{R}^n)\subseteq B^{s,\sigma}_{pq}(\mathbb{R}^n)\subseteq
B^{s}_{pq}(\mathbb{R}^n)
$$
with continuous embedding.
\item For $s\in\mathbb{R}$, $\sigma_1\geqslant\sigma_2\geqslant 0$ and $p,q_1,q_2\in[1,\infty]$ such that $\sigma_1+1/q_1>\sigma_2+1/q_2$, we have
$$
B^{s,\sigma_1}_{pq_1}(\mathbb{R}^n)\subseteq B^{s,\sigma_2}_{pq_2}(\mathbb{R}^n)
$$
with continuous embedding.
\end{itemize}
In our previous work \cite{Cui}, we proved that the problem $(1.1)$ is locally well-posed in $B^{-1,1}_{\infty\infty 0}(\mathbb{R}^n)$ and
semi-globally well-posed in $B^{-1,1}_{\infty\infty}(\mathbb{R}^n)$ for small initial data (cf. Theorems 2.1 and 2.2 of \cite{Cui}). The arguments
used in \cite{Cui} can be easily extended to prove that $(1.1)$ is also locally well-posed in $B^{-1,\sigma}_{\infty\infty 0}(\mathbb{R}^n)$ and
semi-globally well-posed in $B^{-1,\sigma}_{\infty\infty}(\mathbb{R}^n)$ for small initial data for any $\sigma\geqslant 1$. Our first main result
of this paper extends these results to $B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$ and $B^{-1,\sigma}_{\infty\,q}(\mathbb{R}^n)$ for $1\leqslant q<
\infty$ and $\sigma\geqslant\sigma_q$, where
$$
\sigma_q=1-\min\Big\{1-\frac{1}{q},\frac{1}{q}\Big\} \quad \mbox{for}\;\; 1\leqslant q\leqslant\infty,
$$
i.e., we have the following result:
{\bf Theorem 1.2}\ \ {\em Let $1\leqslant q<\infty$ and assume that $\sigma\geqslant\sigma_q$. Then the following assertions hold:
$(1)$\ The problem $(1.1)$ is locally well-posed in $B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$. More precisely, for any ${\mbox{\boldmath $u$}}_0\in
B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$ with $\nabla\cdot{\mbox{\boldmath $u$}}_0=0$, there exists corresponding $T>0$ such that the problem $(1.2)$ has a unique
mild solution in the class
\begin{equation}
\left\{
\begin{array}{rcl}
&{\mbox{\boldmath $u$}}\in C([0,T],B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n))\cap L^\infty_{\rm loc}((0,T],L^\infty(\mathbb{R}^n)), \quad
\nabla\cdot{\mbox{\boldmath $u$}}=0,& \\
&\displaystyle\sup_{t\in (0,T)}\sqrt{t}\Big|\!\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}\|{\mbox{\boldmath $u$}}(t)\|_{\infty}<\infty, \quad
\sqrt{t}\Big|\!\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}\|{\mbox{\boldmath $u$}}(t)\|_{\infty}\in L^q\Big((0,T),\frac{\mbox{\rm d} t}{t}\Big),&
\end{array}
\right.
\end{equation}
and the solution map ${\mbox{\boldmath $u$}}_0\mapsto {\mbox{\boldmath $u$}}$ from a neighborhood of ${\mbox{\boldmath $u$}}_0$ in $B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$ to the Banach space of the
above class of functions on $\mathbb{R}^n\times (0,T)$ is Lipschitz continuous.
$(2)$\ The problem $(1.1)$ is semi-globally well-posed in $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ for small initial data. More precisely, for any
$T>0$ there exists corresponding constant $\varepsilon>0$ such that for any ${\mbox{\boldmath $u$}}_0\in B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ with $\nabla\cdot
{\mbox{\boldmath $u$}}_0=0$ and $\|{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\, q}}<\varepsilon$, the problem $(1.2)$ has a unique mild solution in the class
\begin{equation}
\left\{
\begin{array}{rcl}
&{\mbox{\boldmath $u$}}\in L^\infty((0,T),B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n))\cap L^\infty_{\rm loc}((0,T],L^\infty(\mathbb{R}^n)), \quad
\nabla\cdot{\mbox{\boldmath $u$}}=0,& \\
&\displaystyle\sup_{t\in (0,T)}\sqrt{t}\Big|\!\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}\|{\mbox{\boldmath $u$}}(t)\|_{\infty}<\infty, \quad
\sqrt{t}\Big|\!\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}\|{\mbox{\boldmath $u$}}(t)\|_{\infty}\in L^q\Big((0,T),\frac{\mbox{\rm d} t}{t}\Big),& \\
&\mbox{the map $t\mapsto{\mbox{\boldmath $u$}}(t)$ is continuous with respect to $S'(\mathbb{R}^n)$-weak topology for $0<t<T$},&
\end{array}
\right.
\end{equation}
and the solution map ${\mbox{\boldmath $u$}}_0\mapsto {\mbox{\boldmath $u$}}$ from a neighborhood of ${\mbox{\boldmath $u$}}_0$ in $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ to the Banach space of the
above class of functions on $\mathbb{R}^n\times (0,T)$ is Lipschitz continuous.}
In contrast to the above result, for the case $0\leqslant\sigma<\sigma_q$ we have the following result:
{\bf Theorem 1.3}\ \ {\em For $1\leqslant q\leqslant\infty$ and $0\leqslant\sigma<\sigma_q$, the problem $(1.1)$ is ill-posed in
$B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$. More precisely, for $0<\delta\ll1$ and $N\gg1$ there exists ${\mbox{\boldmath $u$}}_0\in S(\mathbb{R}^n)$ with
$\|{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}\lesssim 1$ such that if we denote by ${\mbox{\boldmath $u$}}={\mbox{\boldmath $u$}}(\delta,t)$ the solution of the problem $(1.1)$ with initial
data $\delta{\mbox{\boldmath $u$}}_0$ $($in case such a solution exists$)$, then
\begin{equation*}
\|{\mbox{\boldmath $u$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}}\gtrsim (\ln N)^{\sigma_q-\sigma}
\end{equation*}
for some $0<t\leqslant1/N$.}
From the above result and Theorems 2.1 and 2.2 of \cite{Cui} we see that the largest Besov-type spaces in which the initial value problem of the
Navier-Stokes equations is well-posed are the spaces $B^{-1,1-\frac{1}{q}}_{\infty\,q}(\mathbb{R}^n)$, $2\leqslant q\leqslant\infty$. This answers
the question mentioned at the beginning of this paper.
The rest of this paper is organized as follows. In the next section we make some preliminary preparations. Section 3 is devoted to the
proof of Theorem 1.2. The proof of Theorem 1.3 will be given in the last section.
\section{Preliminary preparations}
\setcounter{equation}{0}
\hskip 2em
In this section we make some preliminary preparations.
Choose and fix a nonnegative non-increasing function $\phi\in C^{\infty}[0,\infty)$ such that
$$
0\leqslant\phi\leqslant1, \quad \phi(t)=1 \;\; \mbox{for}\;\; 0\leqslant t\leqslant\frac{5}{4} \quad \mbox{and} \quad
\phi(t)=0 \;\; \mbox{for}\;\; t\geqslant\frac{3}{2},
$$
and set
$$
\varphi(\xi)=\phi(|\xi|), \quad \psi(\xi)=\phi(|\xi|)-\phi(2|\xi|), \quad \psi_j(\xi)=\psi(2^{-j}\xi)\;\,(j=0,1,2,\cdots)
\quad \mbox{for}\;\; \xi\in\mathbb{R}^n.
$$
It is easy to see that $\varphi=1$ on $\bar{B}(0,5/4)$ and ${\rm supp}\varphi\subseteq\bar{B}(0,3/2)$, $\psi=1$ on $\bar{B}(0,5/4)\backslash B(0,3/4)$
and ${\rm supp}\psi\subseteq\bar{B}(0,3/2)\backslash B(0,5/8)$. Here $B(a,r)$ and $\bar{B}(a,r)$ ($a\in\mathbb{R}^n$, $r>0$) respectively represent the
open and closed balls in $\mathbb{R}^n$ with center $a$ and radius $r$. We also note that
$$
\psi_j=1 \quad \mbox{on}\;\; \bar{B}(0,5\cdot 2^{j-2})\backslash B(0,3\cdot 2^{j-2}) \quad \mbox{and} \quad
{\rm supp}\psi_j\subseteq B(0, 2^{j+1})\backslash\bar{B}(0, 2^{j-1})
$$
$(j=0,1,2,\cdots)$. Moreover we have
$$
\varphi(\xi)+\sum_{j=1}^{\infty}\psi_j(\xi)=1 \quad \mbox{for}\;\; \xi\in\mathbb{R}^n.
$$
We denote by $\;\hat{}\;$ and $\mathscr{F}$ the Fourier transform, and by $\;\check{}\;$ and $\mathscr{F}^{-1}$ the inverse Fourier transform. The
notation $O_M(\mathbb{R}^n)$ denotes the topological vector space of temperate smooth functions on $\mathbb{R}^n$, i.e. $u\in O_M(\mathbb{R}^n)$
if and only if $u\in C^{\infty}(\mathbb{R}^n)$ and for any $\alpha\in\mathbb{Z}_+^n$, there exists corresponding $r\in\mathbb{R}$ such that
$|\partial^{\,\alpha}u(x)|\lesssim (1+|x|)^r$ for $x\in\mathbb{R}^n$. Then we define $S_0:S'(\mathbb{R}^n)\to O_M(\mathbb{R}^n)$ and $\Delta_j:
S'(\mathbb{R}^n)\to O_M(\mathbb{R}^n)$ $(j=0,1,2,\cdots)$ to be the following operators:
$$
S_0(u)=\mathscr{F}^{-1}(\varphi\hat{u}), \quad \Delta_j(u)=\mathscr{F}^{-1}(\psi_j\hat{u}) \quad \mbox{for}\;\; u\in S'(\mathbb{R}^n)
$$
$(j=0,1,2,\cdots)$. It is well-known that for any $u\in S'(\mathbb{R}^n)$ there holds the relation
$$
S_0(u)+\sum_{j=1}^{\infty}\Delta_j(u)=u
$$
in $S'(\mathbb{R}^n)$-weak topology.
As usual for $t\geqslant 0$ we denote by ${\rm e}^{t\Delta}$ the pseudo-differential operator on $\mathbb{R}^n$ with symbol ${\rm e}^{-t|\xi|^2}$, i.e.,
${\rm e}^{t\Delta}$ is the continuous linear operator in $S'(\mathbb{R}^n)$ defined by
$$
{\rm e}^{t\Delta}u=\mathscr{F}^{-1}({\rm e}^{-t|\xi|^2}\hat{u}(\xi)) \quad \mbox{for}\;\; u\in S'(\mathbb{R}^n).
$$
It is well-known that when restricted to a shift-invariant Banach space of test functions (see \cite{LEM02} for this concept), the family of operators
$\{{\rm e}^{t\Delta}\}_{t\geqslant 0}$ forms a $C_0$-semigroup of contractions (i.e. $\|{\rm e}^{t\Delta}u\|\leqslant\|u\|$ for all $t\geqslant 0$), and
when restricted to a shift-invariant Banach space of distributions (also see \cite{LEM02} for this concept), $\{{\rm e}^{t\Delta}\}_{t\geqslant 0}$ is a
semigroup of contractions, but it is not necessarily strongly continuous at $t=0$ (it is strongly continuous for $t>0$).
In the proofs of Theorems 1.2 and 1.3 we shall use the following characterization of the space $B^{s,\sigma}_{pq}(\mathbb{R}^n)$:
{\bf Lemma 2.1}\ \ {\em Let $s\in\mathbb{R}$, $\sigma\geqslant 0$ and $p,q\in[1,\infty]$. Let $u\in S'(\mathbb{R}^n)$. Let $t_0>0$ be
given. Let $\gamma\geqslant 0$ and $\gamma>s$. Then $u\in B^{s,\sigma}_{pq}(\mathbb{R}^n)$ if and only if for any $t>0$ we have $e^{t\Delta}u\in
L^p(\mathbb{R}^n)$ and $t^{-\frac{s}{2}}|\ln(\frac{t}{{\rm e} t_0})|^{\sigma}\|(\sqrt{-t\Delta})^{\gamma}e^{t\Delta}u\|_p\in L^q((0,t_0),\frac{dt}{t})$.
Moreover, the norms $\|u\|_{B^{s,\sigma}_{pq}}$ and
$$
\|u\|_{B^{s,\sigma}_{pq,t_0}}=\left\{
\begin{array}{ll}
\displaystyle\|e^{t_0\Delta}u\|_p+\Big[\int_0^{t_0}\!\Big(t^{-\frac{s}{2}}\Big|\ln\Big(\frac{t}{{\rm e} t_0}\Big)\Big|^{\sigma}
\|(\sqrt{-t\Delta})^{\gamma}e^{t\Delta}u\|_p\Big)^q\frac{dt}{t}\Big]^{\frac{1}{q}} \quad &\mbox{for}\;\; 1\leqslant q<\infty\\ [0.3cm]
\displaystyle\|e^{t_0\Delta}u\|_p+
\sup_{0<t<t_0}t^{-\frac{s}{2}}\Big|\ln\Big(\frac{t}{{\rm e} t_0}\Big)\Big|^{\sigma}\|(\sqrt{-t\Delta})^{\gamma}e^{t\Delta}u\|_p \quad
&\mbox{for}\;\; q=\infty
\end{array}
\right.
$$
are equivalent.}
The proof is not hard; one needs only to slightly modify the arguments used in the proof of Theorem 5.3 in \cite{LEM02} to fit the present
situation. We omit it here. $\quad\Box$
Note that if $s<0$ and $\gamma=0$ then for $0<t_1<t_2$ we have
$$
\|u\|_{B^{s,\sigma}_{pq,t_1}}\leqslant\|u\|_{B^{s,\sigma}_{pq,t_2}}\leqslant
\Big(\frac{t_2}{t_1}\Big)^{\frac{|s|}{2}}\|u\|_{B^{s,\sigma}_{pq,t_1}} \quad \mbox{for}\;\; u\in B^{s,\sigma}_{pq}(\mathbb{R}^n).
$$
It is well-known that the problem (1.2) is equivalent to the following integral equation:
$$
{\mbox{\boldmath $u$}}(t)={\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0+\int_0^t{\rm e}^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes{\mbox{\boldmath $u$}}(\tau)]\mbox{\rm d}\tau.
$$
Given $T>0$, let $B$ be the following bilinear form:
$$
B({\mbox{\boldmath $u$}},{\mbox{\boldmath $v$}})(t)=\int_0^t{\rm e}^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes{\mbox{\boldmath $v$}}(\tau)]\mbox{\rm d}\tau.
$$
The following very useful preliminary result is well-known (cf. Chapter 15 of \cite{LEM02}):
{\bf Lemma 2.2}\ \ {\em For any ${\mbox{\boldmath $u$}},{\mbox{\boldmath $v$}}\in L^1_{loc}((0,T),[L^{\infty}(\mathbb{R}^n)]^n)$ such that the right-hand side makes sense for
a. e. $t\in (0,T)$, the following estimate holds:}
$$
\;\;\qquad\qquad\|B({\mbox{\boldmath $u$}},{\mbox{\boldmath $v$}})(t)\|_{\infty}\lesssim\int_0^t(t-\tau)^{-\frac{1}{2}}\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|{\mbox{\boldmath $v$}}(\tau)\|_{\infty}\mbox{\rm d}\tau \quad
\mbox{for a. e. }\; t\in (0,T). \qquad\qquad\;\; \Box
$$
To estimate the right-hand side of the above inequality, we need some Hardy-Littlewood type inequalities, which are given in the following two
lemmas:
{\bf Lemma 2.3}\ \ {\em Let $T>0$ and $F(t)=\displaystyle\int_0^t\!f(\tau)\frac{\mbox{\rm d}\tau}{\tau}$, $0<t<T$, where $f$ is a measurable function
defined in $(0,T)$. Then for any $2\leqslant q\leqslant\infty$ the following estimate holds:
\begin{equation}
\Big[\int_0^T\!\!\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{q-1}|F(t)|^q\frac{\mbox{\rm d} t}{t}\Big]^{\frac{1}{q}}\lesssim_q
\Big[\int_0^T\!\!\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{q-1}|f(t)|^{\frac{q}{2}}\frac{\mbox{\rm d} t}{t}\Big]^{\frac{2}{q}},
\end{equation}
where for $q=\infty$ the integration is understood in the conventional way.}
{\em Proof}:\ \ First, by using the Minkowski inequality we have
\begin{eqnarray*}
\Big[\int_0^T\!\!\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big||F(t)|^2\frac{\mbox{\rm d} t}{t}\Big]^{\frac{1}{2}}
&\;\leqslant\;&\Big[\int_0^T\!\!\Big(\int_0^t\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\frac{1}{2}}|f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}\Big)^2
\frac{\mbox{\rm d} t}{t}\Big]^{\frac{1}{2}}
\\
&\leqslant&\int_0^T\!\!\Big(\int_{\tau}^T\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|\frac{\mbox{\rm d} t}{t}\Big)^{\frac{1}{2}}|f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}
\\
&\lesssim&\int_0^T\!\!\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big||f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}.
\end{eqnarray*}
This proves that (2.1) holds for $q=2$. Next we have
\begin{eqnarray*}
|F(t)|&\;\leqslant\;&\int_0^t\!|f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}=\int_0^t\!\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^2|f(\tau)|\cdot
\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2}\frac{\mbox{\rm d}\tau}{\tau}
\\
&\leqslant&\sup_{0<\tau<T}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^2|f(\tau)|\cdot
\int_0^t\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2}\,\frac{\mbox{\rm d}\tau}{\tau}
\\
&\leqslant&\sup_{0<\tau<T}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^2|f(\tau)|\cdot\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{-1}, \quad
\forall t\in (0,T),
\end{eqnarray*}
so that
$$
\sup_{0<t<T}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big||F(t)|\lesssim
\sup_{0<t<T}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^2|f(t)|,
$$
showing that (2.1) also holds for $q=\infty$. Hence, by interpolation we see that (2.1) holds for all $2\leqslant q\leqslant\infty$. $\quad\Box$
{\bf Lemma 2.4}\ \ {\em Let $T>0$ and $F(t)=\displaystyle\int_0^t\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-1}\!f(\tau)\frac{\mbox{\rm d}\tau}{\tau}$,
$0<t<T$, where $f$ is a measurable function defined in $(0,T)$. Then for any $1\leqslant q\leqslant\infty$ the following estimate holds:
\begin{equation}
\Big\{\int_0^T\!\!\Big[\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big||F(t)|\Big]^q\frac{\mbox{\rm d} t}{t}\Big\}^{\frac{1}{q}}\lesssim
\Big\{\int_0^T\!\!\Big[\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big||f(t)|\Big]^q\frac{\mbox{\rm d} t}{t}\Big\}^{\frac{1}{q}},
\end{equation}
where for $q=\infty$ the integration is understood in the conventional way.}
{\em Proof}:\ \ First we have
\begin{eqnarray*}
\int_0^T\!\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big||F(t)|\frac{\mbox{\rm d} t}{t}&\;\leqslant\;&
\int_0^T\!\!\Big[\int_0^t\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-1}\!|f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}
\Big]\frac{\mbox{\rm d} t}{t}
\\
&=&\int_0^T\!\!\Big[\int_{\tau}^T\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|\frac{\mbox{\rm d} t}{t}\Big]
\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-1}\!|f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}
\\
&\leqslant&\frac{1}{2}\int_0^T\!\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^2\cdot\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-1}\!
|f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}
\\
&=&\frac{1}{2}\int_0^T\!\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big||f(\tau)|\frac{\mbox{\rm d}\tau}{\tau},
\end{eqnarray*}
showing that (2.2) holds for $q=1$. Next we have
\begin{eqnarray*}
\sup_{0<t<T}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big||F(t)|&\;\leqslant\;&
\sup_{0<t<T}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|\int_0^t\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-1}\!|f(\tau)|\frac{\mbox{\rm d}\tau}{\tau}
\\
&\leqslant&\sup_{0<t<T}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|\int_0^t\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2}\,\frac{\mbox{\rm d}\tau}{\tau}
\cdot\sup_{0<\tau<T}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big||f(\tau)|
\\
&\leqslant&\sup_{0<t<T}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big||f(t)|,
\end{eqnarray*}
showing that (2.2) also holds for $q=\infty$. Hence, by interpolation we see that (2.2) holds for all $1\leqslant q\leqslant\infty$. $\quad\Box$
Now let $\mathcal{B}$ be the following bilinear operator:
$$
\mathcal{B}(f,g)(t)=\int_0^t(t-\tau)^{-\frac{1}{2}}f(\tau)g(\tau)\mbox{\rm d}\tau, \quad t>0.
$$
For $1\leqslant q\leqslant\infty$, $\sigma\geqslant 0$ and $T>0$, we denote by $\dot{\mathscr{K}}^{\sigma}_q(T)$ the following function space on
$(0,T)$:
$$
\dot{\mathscr{K}}^{\sigma}_q(T)=\Big\{f\in L^1_{\rm loc}(0,T]:\sqrt{t}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}f(t)\in
L^q\Big((0,T);\frac{\mbox{\rm d} t}{t}\Big)\Big\},
$$
with norm
$$
\|f\|_{\dot{\mathscr{K}}^{\sigma}_q}=\Big\|\sqrt{t}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}f(t)\Big\|_{L^q((0,T);\frac{dt}{t})},
$$
and by $\mathscr{K}^{\sigma}_q(T)$ the following function space on $(0,T)$: $\mathscr{K}^{\sigma}_{\infty}(T)=\dot{\mathscr{K}}^{\sigma}_{\infty}(T)$
and for $1\leqslant q<\infty$,
$$
\mathscr{K}^{\sigma}_q(T)=\dot{\mathscr{K}}^{\sigma}_q(T)\cap\dot{\mathscr{K}}^{\sigma}_{\infty}(T),
$$
with norm
$$
\|f\|_{\mathscr{K}^{\sigma}_q}=\|f\|_{\dot{\mathscr{K}}^{\sigma}_q}+\|f\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}.
$$
It is easy to prove that both $\dot{\mathscr{K}}^{\sigma}_q(T)$ and $\mathscr{K}^{\sigma}_q(T)$ ($1\leqslant q\leqslant\infty$) are Banach spaces.
We use the notation $\mathscr{K}^{\sigma}_{\infty\,0}(T)$ to denote the closure of the subspace of simple functions in $[0,T]$ in
$\mathscr{K}^{\sigma}_{\infty}(T)$. From the interpolation theory for $L^p$-spaces with change of measures (cf. Sections 5.4 and 5.5 of
\cite{BerLof}) we see that the following interpolation relations hold: For $1\leqslant q_0,q_1<\infty$,
$$
[\dot{\mathscr{K}}^{\sigma_0}_{q_0}(T),\dot{\mathscr{K}}^{\sigma_1}_{q_1}(T)]_{[\theta]}=\dot{\mathscr{K}}^{\sigma}_q(T), \qquad
[\mathscr{K}^{\sigma_0}_{q_0}(T),\mathscr{K}^{\sigma_1}_{q_1}(T)]_{[\theta]}=\mathscr{K}^{\sigma}_q(T),
$$
and for $1\leqslant q_0<\infty$, $q_1=\infty$,
$$
[\dot{\mathscr{K}}^{\sigma_0}_{q_0}(T),\dot{\mathscr{K}}^{\sigma_1}_{\infty\,0}(T)]_{[\theta]}=\dot{\mathscr{K}}^{\sigma}_q(T), \qquad
[\mathscr{K}^{\sigma_0}_{q_0}(T),\mathscr{K}^{\sigma_1}_{\infty\,0}(T)]_{[\theta]}=\mathscr{K}^{\sigma}_q(T),
$$
where $0<\theta<1$, $\sigma=(1\!-\!\theta)\sigma_0+\theta\sigma_1$, $1/q=(1\!-\!\theta)/q_0+\theta/q_1$. Besides, it is clear that
$$
\mathscr{K}^{\sigma}_{1}(T)\subseteq\mathscr{K}^{\sigma}_{q_1}(T)\subseteq\mathscr{K}^{\sigma}_{q_2}(T)\subseteq\mathscr{K}^{\sigma}_{\infty\,0}(T)
\quad \mbox{for}\;\; 1\leqslant q_1\leqslant q_2<\infty, \quad \sigma\geqslant 0,
$$
and
$$
\dot{\mathscr{K}}^{\sigma_1}_q(T)\subseteq\dot{\mathscr{K}}^{\sigma_2}_q(T), \qquad
\mathscr{K}^{\sigma_1}_q(T)\subseteq\mathscr{K}^{\sigma_2}_q(T)
\quad \mbox{for}\;\; \sigma_1\geqslant\sigma_2\geqslant 0, \quad 1\leqslant q\leqslant\infty.
$$
The following bilinear estimate will play a fundamental role in the proof of Theorem 1.2:
{\bf Lemma 2.5}\ \ {\em Let $T>0$ be given and assume that $(q,\sigma)$ satisfies one of the following two conditions: $(a)$ $\sigma\geqslant 1$ and
$1\leqslant q\leqslant\infty$; $(b)$ $1/2\leqslant\sigma<1$ and $1/\sigma\leqslant q\leqslant 1/(1\!-\!\sigma)$. Then the following estimate
holds:}
\begin{equation}
\|\mathcal{B}(f,g)\|_{\mathscr{K}^{\sigma}_q}\lesssim_{q,\sigma}\|f\|_{\mathscr{K}^{\sigma}_q}\|g\|_{\mathscr{K}^{\sigma}_q}, \quad
\forall f,g\in\mathscr{K}^{\sigma}_q(T).
\end{equation}
{\em Proof}:\ \ First we have
\begin{eqnarray*}
|\mathcal{B}(f,g)(t)|&\;\leqslant\;&\int_0^t(t-\tau)^{-\frac{1}{2}}|f(\tau)||g(\tau)|\mbox{\rm d}\tau
\\
&\leqslant&\sqrt{\frac{2}{t}}\int_0^t|f(\tau)||g(\tau)|\mbox{\rm d}\tau+\int_{\frac{t}{2}}^t(t-\tau)^{-\frac{1}{2}}|f(\tau)||g(\tau)|\mbox{\rm d}\tau
:=J_1(t)+J_2(t).
\end{eqnarray*}
For $J_1(t)$ we have
$$
\sqrt{t}J_1(t)=\sqrt{2}\int_0^t\sqrt{\tau}|f(\tau)|\cdot\sqrt{\tau}|g(\tau)|\frac{\mbox{\rm d}\tau}{\tau}, \quad 0<t<T.
$$
Applying (2.1) to the cases $q=\infty$ and $q=2$ we respectively get
\begin{eqnarray}
&\|J_1\|_{\dot{\mathscr{K}}^1_{\infty}}\lesssim\|f\|_{\dot{\mathscr{K}}^1_{\infty}}\|g\|_{\dot{\mathscr{K}}^1_{\infty}}, \quad
\forall f,g\in\dot{\mathscr{K}}^1_{\infty}(T),&
\\
&\|J_1\|_{\dot{\mathscr{K}}^{\frac{1}{2}}_2}\lesssim\|f\|_{\dot{\mathscr{K}}^{\frac{1}{2}}_2}\|g\|_{\dot{\mathscr{K}}^{\frac{1}{2}}_2}, \quad
\forall f,g\in\dot{\mathscr{K}}^{\frac{1}{2}}_2(T).&
\end{eqnarray}
Next we note that
$$
\sqrt{t}J_1(t)\leqslant\sqrt{2}\sup_{0<\tau<T}\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big||f(\tau)|\cdot
\int_0^t\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-1}\!\sqrt{\tau}|g(\tau)|\frac{\mbox{\rm d}\tau}{\tau}, \quad 0<t<T,
$$
so that by applying (2.2) to the case $q=1$ we obtain
\begin{equation}
\|J_1\|_{\dot{\mathscr{K}}^1_1}\lesssim\|f\|_{\dot{\mathscr{K}}^1_{\infty}}\|g\|_{\dot{\mathscr{K}}^1_1}, \quad
\forall f\in\dot{\mathscr{K}}^1_{\infty}(T),\;\; \forall g\in\dot{\mathscr{K}}^1_1(T).
\end{equation}
By using bilinear interpolation (cf. Theorem 4.4.1 of \cite{BerLof}), from (2.4) $\sim$ (2.6) we easily get the following estimate provided that
$1/2\leqslant\sigma\leqslant 1$ and $1/\sigma\leqslant q\leqslant 1/(1\!-\!\sigma)$:
$$
\|J_1\|_{\dot{\mathscr{K}}^{\sigma}_q}\lesssim\|f\|_{\dot{\mathscr{K}}^{\sigma}_{r_{\sigma}}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_q}, \quad
\forall f\in\dot{\mathscr{K}}^{\sigma}_{r_{\sigma}}(T),\;\; \forall g\in\dot{\mathscr{K}}^{\sigma}_q(T),
$$
where $r_{\sigma}=1/(1\!-\!\sigma)$, which immediately implies that
\begin{equation}
\|J_1\|_{\mathscr{K}^{\sigma}_q}\lesssim\|f\|_{\mathscr{K}^{\sigma}_q}\|g\|_{\mathscr{K}^{\sigma}_q}, \quad
\forall f,g\in\mathscr{K}^{\sigma}_q(T),
\end{equation}
provided that $1/2\leqslant\sigma\leqslant 1$ and $1/\sigma\leqslant q\leqslant 1/(1\!-\!\sigma)$. For $J_2(t)$, since $\tau\sim t$, we have
\begin{eqnarray*}
\sup_{0<t<T}\sqrt{t}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}J_2(t)
&\;\leqslant\;&\sup_{0<\tau<T}\sqrt{\tau}|f(\tau)|\cdot\sup_{0<\tau<T}\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}|g(\tau)|\cdot
\int_{\frac{t}{2}}^t(t-\tau)^{-\frac{1}{2}}\tau^{-\frac{1}{2}}\mbox{\rm d}\tau
\\
&\;\lesssim\;&\sup_{0<\tau<T}\sqrt{\tau}|f(\tau)|\cdot\sup_{0<\tau<T}\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}|g(\tau)|,
\quad \forall\sigma\geqslant 0.
\end{eqnarray*}
Hence
\begin{equation}
\|J_2\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}\lesssim\|f\|_{\dot{\mathscr{K}}^{0}_{\infty}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}
\lesssim\|f\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}},
\quad \forall f,g\in\dot{\mathscr{K}}^{\sigma}_{\infty}(T), \quad \forall\sigma\geqslant 0.
\end{equation}
Besides, since $\tau\sim t$ also implies that
$$
\sqrt{t}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}J_2(t)\leqslant\sup_{0<\tau<T}\sqrt{\tau}|f(\tau)|\cdot
\int_{\frac{t}{2}}^t(t-\tau)^{-\frac{1}{2}}\tau^{-\frac{1}{2}}\cdot\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}|g(\tau)|\mbox{\rm d}\tau,
$$
we further have
\begin{eqnarray*}
\int_0^T\!\!\sqrt{t}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}J_2(t)\frac{\mbox{\rm d} t}{t}
&\;\leqslant\;&\sup_{0<\tau<T}\sqrt{\tau}|f(\tau)|\cdot\int_0^T\!\Big(\int_{\tau}^{2\tau}\!(t-\tau)^{-\frac{1}{2}}t^{-1}\mbox{\rm d} t\Big)
\tau^{-\frac{1}{2}}\cdot\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}|g(\tau)|\mbox{\rm d}\tau
\\
&\;\lesssim\;&\sup_{0<\tau<T}\sqrt{\tau}|f(\tau)|\cdot\int_0^T\!\!\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}|g(\tau)|
\frac{\mbox{\rm d}\tau}{\tau}, \quad \forall\sigma\geqslant 0,
\end{eqnarray*}
i.e.,
\begin{equation}
\|J_2\|_{\dot{\mathscr{K}}^{\sigma}_1}\lesssim\|f\|_{\dot{\mathscr{K}}^{0}_{\infty}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_1}
\lesssim\|f\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_1},
\quad \forall f\in\dot{\mathscr{K}}^{\sigma}_{\infty}(T),\;\; \forall g\in\dot{\mathscr{K}}^{\sigma}_1(T),
\quad \forall\sigma\geqslant 0.
\end{equation}
By interpolation, from (2.8) and (2.9) we get
$$
\|J_2\|_{\dot{\mathscr{K}}^{\sigma}_q}\lesssim\|f\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_q},
\quad \forall f\in\dot{\mathscr{K}}^{\sigma}_{\infty}(T),\;\; \forall g\in\dot{\mathscr{K}}^{\sigma}_q(T),
\quad \forall q\in [1,\infty],\;\; \forall \sigma\geqslant 0,
$$
which implies that
\begin{equation}
\|J_2\|_{\mathscr{K}^{\sigma}_q}\lesssim\|f\|_{\mathscr{K}^{\sigma}_q}\|g\|_{\mathscr{K}^{\sigma}_q},
\quad \forall f,g\in\mathscr{K}^{\sigma}_q(T), \quad \forall q\in [1,\infty],\;\; \forall \sigma\geqslant 0.
\end{equation}
Combining (2.7) and (2.10), we obtain (2.3) in the case $1/2\leqslant\sigma\leqslant 1$ and $1/\sigma\leqslant q\leqslant 1/(1\!-\!\sigma)$.
The proof of (2.3) in the remaining case $\sigma>1$ and $1\leqslant q\leqslant\infty$ is much easier. Indeed, since for any $1<q\leqslant\infty$, the
condition $\sigma>1$ implies that $2q'\sigma>1$, we see that for $\sigma>1$ and $1<q\leqslant\infty$,
\begin{eqnarray*}
\sqrt{t}J_1(t)&\;\leqslant\;&\sqrt{2}\sup_{0<\tau<T}\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}|f(\tau)|\cdot
\int_0^t\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2\sigma}\cdot\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}
|g(\tau)|\frac{\mbox{\rm d}\tau}{\tau}
\\
&\;\lesssim\;&\|f\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}\cdot\|g\|_{\dot{\mathscr{K}}^{\sigma}_q}
\Big(\int_0^t\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2q'\sigma}\frac{\mbox{\rm d}\tau}{\tau}\Big)^{\frac{1}{q'}}
\lesssim\|f\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_q}
\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{1-\frac{1}{q}-2\sigma}.
\end{eqnarray*}
Since $\sigma>1$ implies that $\displaystyle\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{1-\frac{1}{q}-\sigma}\in L^q\Big((0,T),\frac{dt}{t}\Big)$,
we see that $J_1\in\dot{\mathscr{K}}^{\sigma}_q(T)$, and
\begin{equation}
\|J_1\|_{\dot{\mathscr{K}}^{\sigma}_q}\lesssim\|f\|_{\dot{\mathscr{K}}^{\sigma}_{\infty}}\|g\|_{\dot{\mathscr{K}}^{\sigma}_q},
\quad \forall f\in\dot{\mathscr{K}}^{\sigma}_{\infty}(T),\;\; \forall g\in\dot{\mathscr{K}}^{\sigma}_q(T).
\end{equation}
It is easy to see that the above estimate also holds for the case $q=1$ and $\sigma>1$. From (2.10) and (2.11) we obtain (2.3) in the case $\sigma>1$
and $1\leqslant q\leqslant\infty$. This completes the proof of Lemma 2.6. $\quad\Box$
\section{The proof of Theorem 1.2}
\setcounter{equation}{0}
\hskip 2em
In this section we give the proof of Theorem 1.2. We shall first derive some linear and bilinear estimates, and next use these estimates to prove
Theorem 1.2.
Let $1\leqslant q<\infty$ and $\sigma\geqslant 0$. Given $T>0$, we introduce a path space $\mathscr{X}_T$ as follows:
\begin{eqnarray*}
\mathscr{X}_T &=&\{{\mbox{\boldmath $u$}}\in L^{\infty}_{\rm loc}((0,T],L^\infty(\mathbb{R}^n)): \nabla\cdot{\mbox{\boldmath $u$}}=0,\;\;
\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}<\infty\},
\end{eqnarray*}
where
\begin{eqnarray*}
\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}&=&\sup_{0<t<T}\sqrt{t}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}\|{\mbox{\boldmath $u$}}(t)\|_{\infty}
+\Big[\int_0^T\!\!\Big(\sqrt{t}\Big|\ln\Big(\frac{t}{{\rm e} T}\Big)\Big|^{\sigma}\|{\mbox{\boldmath $u$}}(t)\|_{\infty}\Big)^q\frac{\mbox{\rm d} t}{t}\,\Big]^{\frac{1}{q}}.
\end{eqnarray*}
It is clear that $(\mathscr{X}_T,\|\cdot\|_{\mathscr{X}_T})$ is a Banach space. We shall also consider the following path spaces:
\begin{eqnarray*}
&\mathscr{Y}_T=L^{\infty}((0,T),B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n))\cap\mathscr{X}_T,&
\\
&\mathscr{Y}_T^0=C([0,T],B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n))\cap\mathscr{X}_T.&
\end{eqnarray*}
{\bf Lemma 3.1}\ \ {\em Let $1\leqslant q\leqslant\infty$ and $\sigma\geqslant 0$. If ${\mbox{\boldmath $u$}}_0\in B^{-1,\sigma}_{\infty\,q}(\mathbb{R}^n)$ then
$e^{t\Delta}{\mbox{\boldmath $u$}}_0\in\mathscr{Y}_T$ for any finite $T>0$, and
$$
\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{\mathscr{X}_T}+\sup_{t\in (0,T)}\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}\lesssim_T
\|{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}.
$$
If furthermore ${\mbox{\boldmath $u$}}_0\in B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$ then in addition to the above estimate we also have $e^{t\Delta}{\mbox{\boldmath $u$}}_0\in
\mathscr{Y}_T^0$, and
$$
\lim_{T\to 0^+}\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{\mathscr{X}_T}=0.
$$}
{\em Proof}:\ \ It is easy to see that $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ is a shift-invariant Banach space of distributions. Hence by
Propositions 4.1 and 4.4 of \cite{LEM02} we see that ${\mbox{\boldmath $u$}}_0\in B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ implies that $e^{t\Delta}{\mbox{\boldmath $u$}}_0\in
C_\ast([0,\infty),B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n))$, i.e., for any $t\geqslant 0$ we have $e^{t\Delta}{\mbox{\boldmath $u$}}_0\in
B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$, and the map $t\mapsto e^{t\Delta}{\mbox{\boldmath $u$}}_0$ from $[0,\infty)$ to $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$
is continuous for $t>0$ with respect to the norm topology of $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ and continuous at $t=0$ with respect to the
$\ast$-weak topology of $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$, and
$$
\sup_{t>0}\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}\leq\|{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}.
$$
Moreover, from Lemma 2.1 (choosing $p=\infty$, $s=-1$, $\gamma=0$ and $t_0=T$) and the embedding $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)
\subseteq B^{-1,\sigma}_{\infty\infty}(\mathbb{R}^n)$ (for $1\leqslant q<\infty$ and $\sigma\geqslant 0$) we see that ${\mbox{\boldmath $u$}}_0\in
B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ implies that $e^{t\Delta}{\mbox{\boldmath $u$}}_0\in\mathscr{X}_T$, and
$$
\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{\mathscr{X}_T}\lesssim_T\|{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}.
$$
Hence the first part of the lemma follows. The second part of the lemma follows from a standard density argument, cf. the proof of Lemma 2.5 in
\cite{Cui}; we omit the details. $\quad\Box$
{\bf Lemma 3.2} \ \ {\em Let $T>0$ be given and assume that $(q,\sigma)$ satisfies one of the following two conditions: $(a)$ $\sigma\geqslant 1$
and $1\leqslant q\leqslant\infty$; $(b)$ $1/2\leqslant\sigma<1$ and $1/\sigma\leqslant q\leqslant 1/(1\!-\!\sigma)$. Then for any ${\mbox{\boldmath $u$}},\mathbf{v}\in
{\mathscr{X}_T}$ we have $B({\mbox{\boldmath $u$}},\mathbf{v})\in{\mathscr{X}_T}$, and}
\begin{equation}
\|B({\mbox{\boldmath $u$}},\mathbf{v})\|_{\mathscr{X}_T}\lesssim\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}.
\end{equation}
{\em Proof}:\ \ This is an immediate consequence of Lemma 2.2 and Lemma 2.5. $\quad\Box$
{\bf Lemma 3.3} \ \ {\em Let $T>0$ be given and assume that $(q,\sigma)$ satisfies one of the following two conditions: $(a)$ $\sigma\geqslant 1$
and $1\leqslant q\leqslant\infty$; $(b)$ $1/2\leqslant\sigma<1$ and $1/\sigma\leqslant q\leqslant 1/(1\!-\!\sigma)$. Then for any ${\mbox{\boldmath $u$}},\mathbf{v}
\in {\mathscr{X}_T}$ we have $B({\mbox{\boldmath $u$}},\mathbf{v})\in L^{\infty}((0,T),B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n))$, and}
\begin{equation}
\sup_{0<t<T}\|B({\mbox{\boldmath $u$}},\mathbf{v})(t)\|_{B^{-1,\sigma}_{\infty\, q}}\lesssim_T\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}.
\end{equation}
{\em Proof}:\ \ We first assume that $1\leqslant q<\infty$. By Lemma 2.2, for any $s>0$ and $t\in (0,T)$ we have
\begin{eqnarray*}
\|e^{s\Delta}B({\mbox{\boldmath $u$}},\mathbf{v})(t)\|_{\infty}
&=&\Big\|\int_0^t\! e^{(t+s-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau\Big\|_{\infty}
\\
&\lesssim &\int_0^{t}\!(t+s-\tau)^{-\frac{1}{2}}\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\mbox{\rm d}\tau.
\end{eqnarray*}
Hence
\begin{eqnarray*}
&& \Big[\int_0^T\!\!\Big(\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}\|e^{s\Delta}B({\mbox{\boldmath $u$}},\mathbf{v})(t)\|_{\infty}
\Big)^q\frac{\mbox{\rm d} s}{s}\Big]^{\frac{1}{q}}
\nonumber\\
&\;\lesssim\; &\Big[\int_0^t\!\!\Big(\int_0^s\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}(t+s-\tau)^{-\frac{1}{2}}
\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\mbox{\rm d}\tau\Big)^q\frac{\mbox{\rm d} s}{s}\Big]^{\frac{1}{q}}
\nonumber\\
&& \quad +\Big[\int_0^t\!\!\Big(\int_s^t\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}(t+s-\tau)^{-\frac{1}{2}}
\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\mbox{\rm d}\tau\Big)^q\frac{\mbox{\rm d} s}{s}\Big]^{\frac{1}{q}}
\nonumber\\
&& \quad +\Big[\int_t^T\!\!\Big(\int_0^t\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}(t+s-\tau)^{-\frac{1}{2}}
\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\mbox{\rm d}\tau\Big)^q\frac{\mbox{\rm d} s}{s}\Big]^{\frac{1}{q}}
\nonumber\\ [0.3cm]
&\;:=\; & K_1(t)+ K_2(t)+ K_3(t).
\end{eqnarray*}
In $K_1(t)$ and $K_3(t)$ we have the relation $\tau<s$, so that
$$
K_1(t)+K_3(t)\lesssim\Big[\int_0^T\!\!\Big(\int_0^s\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}(s-\tau)^{-\frac{1}{2}}
\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\mbox{\rm d}\tau\Big)^q\frac{\mbox{\rm d} s}{s}\Big]^{\frac{1}{q}}, \quad \forall t\in (0,T).
$$
By Lemma 2.5, the right-hand side is bounded by $\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}$. Hence
\begin{equation}
K_1(t)+K_3(t)\lesssim\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}, \quad \forall t\in (0,T).
\end{equation}
The estimate of $K_2(t)$ is easy. Indeed, by applying the Minkowski inequality we have
\begin{eqnarray*}
K_2(t)&\;\leqslant\;& \int_0^t\!\Big(\int_0^{\tau}s^{\frac{q}{2}-1}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma q}\mbox{\rm d} s\Big)^{\frac{1}{q}}
(t-\tau)^{-\frac{1}{2}}\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\mbox{\rm d}\tau
\nonumber\\
&\lesssim &\int_0^t\!\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}(t-\tau)^{-\frac{1}{2}}
\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\mbox{\rm d}\tau
\nonumber\\
&\lesssim &\sup_{0<\tau<T}\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\cdot
\sup_{0<\tau<T}\sqrt{\tau}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}\|\mathbf{v}(\tau)\|_{\infty}
\nonumber\\
&& \quad \times\int_0^t\!(t-\tau)^{-\frac{1}{2}}\tau^{-\frac{1}{2}}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-\sigma}\mbox{\rm d}\tau
\lesssim\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}, \quad \forall t\in (0,T).
\end{eqnarray*}
Combining this estimate with (3.3) we see that
\begin{equation}
\Big[\int_0^T\!\!\Big(\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}\|e^{s\Delta}B({\mbox{\boldmath $u$}},\mathbf{v})(t)\|_{\infty}
\Big)^q\frac{\mbox{\rm d} s}{s}\Big]^{\frac{1}{q}}\lesssim\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}, \quad \forall t\in (0,T).
\end{equation}
Similarly we can prove that
\begin{equation}
\sup_{0<s<T}\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}\|e^{s\Delta}B({\mbox{\boldmath $u$}},\mathbf{v})(t)\|_{\infty}
\lesssim\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}, \quad \forall t\in (0,T).
\end{equation}
Having proved (3.4) and (3.5), we now apply Lemma 2.1 to conclude that $B({\mbox{\boldmath $u$}},\mathbf{v})(t)\in B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ for all
$0<t<T$, and moreover,
\begin{eqnarray*}
\|B({\mbox{\boldmath $u$}},\mathbf{v})(t)\|_{B^{-1,\sigma}_{\infty\, q}}\lesssim_T\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T},
\quad \forall t\in (0,T),
\end{eqnarray*}
which proves (3.2). $\quad\Box$
{\bf Lemma 3.4} \ \ {\em Let $1\leqslant q<\infty$ and $\sigma\geqslant\sigma_q$. Let $T>0$ be given and assume that ${\mbox{\boldmath $u$}},\mathbf{v}\in
{\mathscr{X}_T}$. Then $B({\mbox{\boldmath $u$}},\mathbf{v})\in C_w([0,T],B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n))$, i.e., the map $t\mapsto B({\mbox{\boldmath $u$}},\mathbf{v})(t)$
from $[0,T]$ to $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ is continuous with respect to $S'(\mathbb{R}^n)$-weak topology. If furthermore
either ${\mbox{\boldmath $u$}}\in\mathscr{Y}_T^0$ or $\mathbf{v}\in\mathscr{Y}_T^0$ then also $B({\mbox{\boldmath $u$}},\mathbf{v})\in\mathscr{Y}_T^0$, and $B({\mbox{\boldmath $u$}},\mathbf{v})\in
C([0,T],B^{-1,\sigma}_{\infty\, q0}(\mathbb{R}^n))$, i.e., $B({\mbox{\boldmath $u$}},\mathbf{v})(t)$ is continuous with respect to
$B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$-norm, and moreover,}
\begin{equation}
\lim_{t\to 0^+}\|B(\mathbf{u},\mathbf{v})(t)\|_{B^{-1,\sigma}_{\infty\,q}}=0.
\end{equation}
{\em Proof}:\ \ We first prove that $B({\mbox{\boldmath $u$}},\mathbf{v})(t)$ is continuous at $t=0$ with respect to $S'(\mathbb{R}^n)$-weak topology. Indeed, since
the condition $\sigma\geqslant\sigma_q$ implies that $\displaystyle\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2\sigma}\in L^{q'}\Big((0,T),
\frac{\mbox{\rm d}\tau}{\tau}\Big)$, it follows that for any $\mathbf{w}\in S(\mathbb{R}^n)$ we have
\begin{eqnarray*}
|\langle B({\mbox{\boldmath $u$}},\mathbf{v})(t),\mathbf{w}\rangle|
&=&|\int_0^t\langle e^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)],\mathbf{w}\rangle \mbox{\rm d}\tau|
\\
&\leqslant&\int_0^t\|{\mbox{\boldmath $u$}}(\tau)\|_{\infty}\|\mathbf{v}(\tau)\|_{\infty}\|\mathbb{P}\nabla\mathbf{w}\|_{1} \mbox{\rm d}\tau
\\
&\leqslant&\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbb{P}\nabla\mathbf{w}\|_{1}\Big[\int_0^t\!\!\Big(\sqrt{\tau}
\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}\|\mathbf{v}(\tau)\|_{\infty}\Big)^q\frac{\mbox{\rm d}\tau}{\tau}\Big]^{\frac{1}{q}}
\cdot\Big\|\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2\sigma}\Big\|_{L^{q'}((0,T),\frac{d\tau}{\tau})}
\\
&\lesssim _T&\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbb{P}\nabla\mathbf{w}\|_{1}\Big[\int_0^t\!\!\Big(\sqrt{\tau}
\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{\sigma}\|\mathbf{v}(\tau)\|_{\infty}\Big)^q\frac{\mbox{\rm d}\tau}{\tau}\Big]^{\frac{1}{q}}\to 0 \;\;
(\mbox{as}\;\; t\to 0^+),
\end{eqnarray*}
which proves the desired assertion. Next, let $0<t_0\leq T$. If $t_0<t<T$ then we write
\begin{eqnarray}
&&B({\mbox{\boldmath $u$}},\mathbf{v})(t)-B({\mbox{\boldmath $u$}},\mathbf{v})(t_0)
\nonumber\\
&=&\int_0^t e^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
-\int_0^{t_0}e^{(t_0-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
\nonumber\\
&=&\int_{t_0}^t e^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
+[e^{(t-t_0)\Delta}-I]\int_0^{t_0}e^{(t_0-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
\nonumber\\
&=:& A(t)+B(t),
\end{eqnarray}
and if $0<t_0-\delta<t<t_0$ then we write
\begin{eqnarray}
&&B({\mbox{\boldmath $u$}},\mathbf{v})(t_0)-B({\mbox{\boldmath $u$}},\mathbf{v})(t)
\nonumber\\
&=&\int_0^{t_0}e^{(t_0-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
-\int_0^t e^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
\nonumber\\
&=&\int_t^{t_0} e^{(t_0-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
+[e^{(t_0-t)\Delta}-I]\int_0^t e^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
\nonumber\\
&=&\int_t^{t_0} e^{(t_0-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
+[e^{(t_0-t)\Delta}-I]\int_{t_0-\delta}^t e^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
\nonumber\\
&&+e^{(t-t_0+\delta)\Delta}[e^{(t_0-t)\Delta}-I]\int_0^{t_0-\delta}e^{(t_0-\delta-\tau)\Delta}\mathbb{P}\nabla\cdot
[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau
\nonumber\\
&=:& A_1(t)+B_1(t)+B_2(t).
\end{eqnarray}
For $A(t)$ we have (see the proof of (2.7))
\begin{eqnarray*}
\|A(t)\|_{B^{-1,\sigma}_{\infty\, q}}&\leqslant&\sup_{0<s<T}\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}
\Big\|\int_{t_0}^t e^{(t+s-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau\Big\|_{\infty}
\\
&&+\Big[\int_0^T\!\!\Big(\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}
\Big\|\int_{t_0}^t e^{(t+s-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\tau)\otimes\mathbf{v}(\tau)]\mbox{\rm d}\tau\Big\|_{\infty}
\Big)^q\frac{\mbox{\rm d} s}{s}\Big]^{\frac{1}{q}}
\\
&\lesssim &
\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}\Big\{\sup_{0<s<T}\sqrt{s}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma}
\int_{t_0}^t(t+s-\tau)^{-\frac{1}{2}}\tau^{-1}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2\sigma}\mbox{\rm d}\tau
\\
&&+\Big[\int_0^T\!\!s^{\frac{q}{2}-1}\Big|\ln\Big(\frac{s}{{\rm e} T}\Big)\Big|^{\sigma q}\Big(
\Big\|\int_{t_0}^t(t+s-\tau)^{-\frac{1}{2}}\tau^{-1}\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2\sigma}\mbox{\rm d}\tau
\Big)^q\mbox{\rm d} s\Big]^{\frac{1}{q}}\Big\}
\\
&\lesssim_T&
\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}\|\mathbf{v}\|_{\mathscr{X}_T}\int_{t_0}^t(t-\tau)^{-\frac{1}{2}}\tau^{-1}
\Big|\ln\Big(\frac{\tau}{{\rm e} T}\Big)\Big|^{-2\sigma}\mbox{\rm d}\tau.
\end{eqnarray*}
Since $t_0>0$, we see that $\displaystyle\lim_{t\to t_0^+}\|A(t)\|_{B^{-1,\sigma}_{\infty\, q}}=0$. Moreover, since $B(t)=[e^{(t-t_0)\Delta}-I]
B({\mbox{\boldmath $u$}},\mathbf{v})(t_0)$ and $B({\mbox{\boldmath $u$}},\mathbf{v})(t_0)\in B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$, by the assertion proved before we see that
$\displaystyle\lim_{t\to t_0^+}B(t)=0$ in $S'(\mathbb{R}^n)$-weak topology. Hence
$$
\lim_{t\to t_0^+}B({\mbox{\boldmath $u$}},\mathbf{v})(t)=B({\mbox{\boldmath $u$}},\mathbf{v})(t_0) \quad \mbox{in $S'(\mathbb{R}^n)$-weak topology}.
$$
Next, similarly as for $A(t)$ we have $\displaystyle\lim_{t\to t_0^-}\|A_1(t)\|_{B^{-1,\sigma}_{\infty\, q}}=0$. Moreover, similarly as for the
treatment of $A(t)$ we have that by choosing $\delta$ sufficiently small, $\|B_1(t)\|_{B^{-1,\sigma}_{\infty\, q}}$ can be as small as we expect,
and when $\delta$ is chosen and fixed, $B_2(t)$ can be treated similarly as for $B(t)$ to get that for any $\mathbf{w}\in (S'(\mathbb{R}^n))^n$,
$\displaystyle\lim_{t\to t_0^-}\langle B_2(t),\mathbf{w}\rangle=0$. Hence
$$
\lim_{t\to t_0^-}B({\mbox{\boldmath $u$}},\mathbf{v})(t)=B({\mbox{\boldmath $u$}},\mathbf{v})(t_0) \quad \mbox{in $S'(\mathbb{R}^n)$-weak topology}.
$$
This proves $B({\mbox{\boldmath $u$}},\mathbf{v})\in C_w([0,T],B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n))$. Finally, since $B({\mbox{\boldmath $u$}},\mathbf{v})\in
L^{\infty}((0,T),B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n))$ for ${\mbox{\boldmath $u$}},\mathbf{v}\in\mathscr{Y}_T$, it follows that if either ${\mbox{\boldmath $u$}}\in\mathscr{Y}_T^0$ or
$\mathbf{v}\in\mathscr{Y}_T^0$ then by a standard density argument we see that $B({\mbox{\boldmath $u$}},\mathbf{v})\in C([0,T],B^{-1,\sigma}_{\infty\, q0}
(\mathbb{R}^n))$, cf. the proof of the last assertion in Lemma 2.5 of \cite{Cui}. We omit the details here. $\quad\Box$
We are now ready to give the proof of Theorem 1.2.
{\bf Proof of Theorem 1.2}:\ \ Let $1\leqslant q<\infty$ and $\sigma\geqslant\sigma_q$ be given. We rewrite the problem (1.2) into the following
equivalent integral equation:
$$
{\mbox{\boldmath $u$}}(t)=e^{t\Delta}{\mbox{\boldmath $u$}}_0+B({\mbox{\boldmath $u$}},{\mbox{\boldmath $u$}})(t).
$$
Given ${\mbox{\boldmath $u$}}_0\in B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ with ${\rm div}{\mbox{\boldmath $u$}}_0=0$ and $T>0$, we define a map $\mathscr{J}:\mathscr{X}_T\to\mathscr{X}_T$ as
follows: For any ${\mbox{\boldmath $u$}}\in\mathscr{X}_T$, $\mathscr{J}({\mbox{\boldmath $u$}})$ equals the right-hand side of the above equation. By Lemmas 3.1 and 3.2, $\mathscr{J}$
is a self-mapping in $\mathscr{X}_T$ and the following estimates hold:
\begin{eqnarray*}
\|\mathscr{J}({\mbox{\boldmath $u$}})\|_{\mathscr{X}_T}&\,\leq\,&
\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{\mathscr{X}_T}+C\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}^2,
\\
\|\mathscr{J}({\mbox{\boldmath $u$}})-\mathscr{J}(\mathbf{v})\|_{\mathscr{X}_T}&\,\leq\,&
C(\|{\mbox{\boldmath $u$}}\|_{\mathscr{X}_T}+\|\mathbf{v}\|_{\mathscr{X}_T})
\|{\mbox{\boldmath $u$}}-\mathbf{v}\|_{\mathscr{X}_T}.
\end{eqnarray*}
Choose a number $\varepsilon>0$ sufficiently small such that $4C\varepsilon<1$, where $C$ is the larger constant appearing in the above estimates. To prove
the assertion $(1)$ of Theorem 1.2, for any ${\mbox{\boldmath $u$}}_0\in B^{-1,\sigma}_{\infty\,q\,0}(\mathbb{R}^n)$ with ${\rm div}{\mbox{\boldmath $u$}}_0=0$ we choose $T>0$ so small that
$\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{\mathscr{X}_T}\leq\varepsilon$. By Lemma 3.1, such $T$ exists. Then from Lemma 3.4 and the first inequality in the above we
easily see that $\mathscr{J}$ maps the closed ball $\overline{B}(0,2\varepsilon)$ in $\mathscr{X}_T$ into itself, and the second inequality ensures that
$\mathscr{J}$ is a contraction mapping when restricted to this ball. Hence, by the fixed point theorem of Banach, $\mathscr{J}$ has a unique fixed
point in this ball. Since $e^{t\Delta}{\mbox{\boldmath $u$}}_0\in\mathscr{Y}_T^0$ and $B({\mbox{\boldmath $u$}},{\mbox{\boldmath $u$}})\in\mathscr{Y}_T^0$ for ${\mbox{\boldmath $u$}}\in\mathscr{Y}_T^0$, from the iteration
procedure we see that this fixed point lies in $\mathscr{Y}_T^0$. Hence we obtain a mild solution of the problem (1.1) in the path space
$\mathscr{Y}_T^0$. This proves the assertion $(1)$. To prove the assertion $(2)$, for given $T>0$ we let ${\mbox{\boldmath $u$}}_0\in B^{-1,\sigma}_{\infty\,q}
(\mathbb{R}^n)$ (with ${\rm div}{\mbox{\boldmath $u$}}_0=0$) be so small that $\|e^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{\mathscr{X}_T}\leq\varepsilon$. Then from the first inequality in the above
we easily see that $\mathscr{J}$ maps the closed ball $\overline{B}(0,2\varepsilon)$ in $\mathscr{X}_T$ into itself, and the second inequality ensures that
$\mathscr{J}$ is a contraction mapping when restricted to this ball. Hence, again by the fixed point theorem of Banach, $\mathscr{J}$ has a unique
fixed point in this ball. Since $e^{t\Delta}{\mbox{\boldmath $u$}}_0\in\mathscr{Y}_T$ and $B({\mbox{\boldmath $u$}},{\mbox{\boldmath $u$}})\in\mathscr{Y}_T$ for ${\mbox{\boldmath $u$}}\in \mathscr{Y}_T$, by a similar
argument as above we get a mild solution of the problem (1.1) which lies in the path space $\mathscr{Y}_T$. This proves the assertion $(2)$. The
proof of Theorem 1.2 is complete. $\quad\Box$
\section{The proof of Theorem 1.3}
\setcounter{equation}{0}
\hskip 2em
In this section we give the proof of Theorem 1.3. We shall mainly consider the case $2<q\leqslant\infty$ and $1-2/q\leqslant\sigma<1-1/q$, because
the remaining cases $1\leqslant q\leqslant 2$, $0\leqslant\sigma<1/q$ and $2<q\leqslant \infty$, $0\leqslant\sigma<1-2/q$ are easier to treat. At the
end of this section we shall explain how to modify the arguments given below to get proofs for these two cases. Besides, we only give the proof
for the case $n\geqslant 3$; the proof for the two-dimensional case is omitted.
For a sufficiently large positive integer $m$, we denote
$$
\mathscr{A}_m=\{4k:\;k\in\mathbb{N},\; 4m+1\leqslant k\leqslant 5m\}, \qquad
\mathscr{B}_m=\{4k:\;k\in\mathbb{N},\; m+1\leqslant k\leqslant 2m\}.
$$
Clearly $|\mathscr{A}_m|=|\mathscr{B}_m|=m$. Let $\varepsilon$ be a sufficiently small positive number. For every positive integer $k$, we introduce
three $n$-dimensional vectors $a_k$, $b_k$ and $c_k$ as follows:
$$
a_k=2^kn^{-\frac{1}{2}}(1,1,\cdots,1), \quad b_k=2^{k-1}(\varepsilon,2\varepsilon,\sqrt{1-5\varepsilon^2},0,\cdots,0), \quad
c_k=2^k(1,0,\cdots,0).
$$
Note that $|a_k|=2|b_k|=|c_k|=2^k$, $k=1,2,\cdots$. Let $\phi$ be as in Section 2 and set $\rho(\xi)=\phi(8|\xi|)$, $\xi\in\mathbb{R}^n$. It is clear
that $\rho(\xi)=1$ for $|\xi|\leqslant 5/32$ and ${\rm supp}\rho\subseteq\bar{B}(0,3/16)$. We denote
\begin{eqnarray*}
\Phi_{kl}^{++}(\xi)={\rm e}^{{\mbox{\boldmath $i$}} c_l\xi}\rho(\xi-a_k-b_l), & \quad & \Phi_{kl}^{+-}(\xi)={\rm e}^{{\mbox{\boldmath $i$}} c_l\xi}\rho(\xi-a_k+b_l),
\\
\Phi_{kl}^{-+}(\xi)={\rm e}^{{\mbox{\boldmath $i$}} c_l\xi}\rho(\xi+a_k-b_l), & \quad & \Phi_{kl}^{--}(\xi)={\rm e}^{{\mbox{\boldmath $i$}} c_l\xi}\rho(\xi+a_k+b_l),
\end{eqnarray*}
$$
\Psi_{kl}(\xi)=\Phi_{kl}^{++}(\xi)+\Phi_{kl}^{+-}(\xi)+\Phi_{kl}^{-+}(\xi)+\Phi_{kl}^{--}(\xi)
$$
($k,l=1,2,\cdots$). We now consider the initial value problem
\begin{eqnarray}
\left\{
\begin{array}{l}
\partial_t{\mbox{\boldmath $u$}}-\Delta{\mbox{\boldmath $u$}}+\mathbb{P}\nabla\cdot({\mbox{\boldmath $u$}}\otimes{\mbox{\boldmath $u$}})=0\quad \mbox{in}\;\,\mathbb{R}^n\times\mathbb{R}_+,\\
{\mbox{\boldmath $u$}}(x,t)=\delta{\mbox{\boldmath $u$}}_0(x)\quad \mbox{for}\;\, x\in\mathbb{R}^n,
\end{array}
\right.
\end{eqnarray}
with a sufficiently small $\delta>0$ and ${\mbox{\boldmath $u$}}_0=(u_1^0,u_2^0,\cdots,u_n^0)$, where
\begin{equation}
\left\{
\begin{array}{rcl}
u_1^0(x)&=&\displaystyle m^{-\sigma-\frac{1}{q}}\sum_{k\in\mathscr{A}_m}\sum_{l\in\mathscr{B}_m}2^k\mathscr{F}^{-1}[\Psi_{kl}(\xi)],
\\ [0.3cm]
u_2^0(x)&=&\displaystyle -m^{-\sigma-\frac{1}{q}}\sum_{k\in\mathscr{A}_m}\sum_{l\in\mathscr{B}_m}
2^k\mathscr{F}^{-1}\Big[\frac{\xi_1}{\xi_2}\Psi_{kl}(\xi)\Big],
\\ [0.3cm]
u_3^0(x)&=&\cdots=u_n^0(x)=0
\end{array}
\right.
\end{equation}
(for $x\in\mathbb{R}^n$). Here we follow the convention that $1/\infty=0$. Note that
$$
u_1^0(x)= m^{-\sigma-\frac{1}{q}}\sum_{k\in\mathscr{A}_m}\sum_{l\in\mathscr{B}_m}2^k[\cos(a_k+b_l)(x+c_l)+\cos(a_k-b_l)(x+c_l)]
\check{\rho}(x+c_l)
$$
and
$$
u_2^0(x)=-u_1^0(x)+m^{-\sigma-\frac{1}{q}}\sum_{k\in\mathscr{A}_m}\sum_{l\in\mathscr{B}_m}
2^k\mathscr{F}^{-1}\Big[\frac{\xi_2-\xi_1}{\xi_2}\Psi_{kl}(\xi)\Big].
$$
Note also that $u_i^0\in S'(\mathbb{R}^n)$ ($i=1,2,\cdots,n$) and ${\rm div}{\mbox{\boldmath $u$}}_0=0$. Besides, it is clear that
\begin{equation}
{\rm supp}\Psi_{kl}\subseteq \{\xi\in\mathbb{R}^n:2^{k-1}<|\xi|<2^{k+1}\} \quad \mbox{for}\;\; k\in\mathscr{A}_m, \;\; l\in\mathscr{B}_m.
\end{equation}
{\bf Lemma 4.1} \ \ {\em Let $m\gg-\ln\varepsilon$. Then for any $1\leqslant q\leqslant\infty$ we have}
\begin{equation}
\|u_i^0\|_{B^{-1,\sigma}_{\infty\,q}}\lesssim 1, \quad i=1,2.
\end{equation}
{\em Proof}:\ \ By (4.3), it is clear that ${\rm supp}\,\widehat{u_1^0}$ does not intersect $\bar{B}(0,3/2)$. Hence
\begin{eqnarray*}
\|u_1^0\|_{B^{-1,\sigma}_{\infty\,q}}&=&\Big[\sum_{j=1}^{\infty}\Big(2^{-j}j^{\sigma}\|\check{\psi}_j\ast u_1^0\|_{\infty}\Big)^q\,\Big]^{\frac{1}{q}}
\lesssim m^{-\sigma-\frac{1}{q}}\Big[\sum_{j\in\mathscr{A}_m}
\Big(j^{\sigma}\Big\|\sum_{l\in\mathscr{B}_m}|\check{\rho}(x+c_l)|\Big\|_{\infty}\Big)^q\,\Big]^{\frac{1}{q}}
\\
&\lesssim\; & m^{-\sigma-\frac{1}{q}}\cdot m^{\sigma+\frac{1}{q}}\Big\|\sum_{l\in\mathscr{B}_m}|\check{\rho}(x+c_l)|\Big\|_{\infty}
\lesssim 1 \quad \mbox{for}\;\; m\gg-\ln\varepsilon.
\end{eqnarray*}
Next, choose a function $\chi\in C^{\infty}(\mathbb{R}^n\backslash\{0\})$ such that it is homogeneous of degree zero, $\chi(\xi)=1$ for
$|\xi/|\xi|-e|\leqslant 1/32$, where $e=(1,1,\cdots,1)/\sqrt{n}$, and $\chi(\xi)=0$ for $|\xi/|\xi|-e|\geqslant 1/16$, and set $\psi'(\xi)=
(\xi_1/\xi_2)\chi(\xi)\psi(\xi)$, $\psi'_j(\xi)=\psi'(2^{-j}\xi)$, $j=1,2,\cdots$. Then since $\psi_j(\xi)\widehat{u_2^0}(\xi)=\psi'_j(\xi)
\widehat{u_1^0}(\xi)$, $j=1,2,\cdots$, we have
\begin{eqnarray*}
\|u_2^0\|_{B^{-1,\sigma}_{\infty\,q}}&=&\Big[\sum_{j=1}^{\infty}\Big(2^{-j}j^{\sigma}\|\check{\psi}_j\ast u_2^0\|_{\infty}\Big)^q\,\Big]^{\frac{1}{q}}
=\Big[\sum_{j=1}^{\infty}\Big(2^{-j}j^{\sigma}\|\check{\psi}_j'\ast u_1^0\|_{\infty}\Big)^q\,\Big]^{\frac{1}{q}}
\lesssim 1.
\end{eqnarray*}
The last inequality follows from a similar argument as above. This proves the lemma. $\quad\Box$
In what follows, for $s\in\mathbb{R}$, $\sigma\geqslant 0$, $p,q\in[1,\infty]$ and nonempty subset $A$ of $\mathbb{N}$, we denote
$$
\|u\|_{B^{s,\sigma}_{p\,q}(A)}=\left\{
\begin{array}{ll}
\displaystyle\Big[\sum_{j\in A}\Big(2^{js}j^{\sigma}\|\Delta_j u\|_p\Big)^q\Big]^{\frac{1}{q}} \quad &\mbox{for}\;\; 1\leqslant q<\infty,
\\ [0.3cm]
\displaystyle\sup_{j\in A}\Big(2^{js}j^{\sigma}\|\Delta_j u\|_p\Big) \quad &\mbox{for}\;\; q=\infty.
\end{array}
\right.
$$
{\bf Lemma 4.2} \ \ {\em Let $m\gg-\ln\varepsilon$ and $t=\varepsilon2^{-32m}$. Then for any $1\leqslant q\leqslant\infty$ we have}
\begin{equation}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}(\partial_1-\partial_2)({\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0)\mbox{\rm d}\tau
\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}\gtrsim\varepsilon^2 m^{1-\sigma-\frac{1}{q}}.
\end{equation}
{\em Proof}:\ \ First we note that for any $f\in S'(\mathbb{R}^n)$, if we denote $u={\rm e}^{t\Delta}f$ then ${\rm supp}\hat{u}(\cdot,t)=
{\rm supp}\hat{f}$ for all $t>0$. Hence, if the frequency support of $f$ satisfies certain property, then the same property is also
satisfied by $u={\rm e}^{t\Delta}f$ for all $t>0$.
We shall use the following principle to prove the above result: If $f\in C^1(\mathbb{R}^n)$ is a real valued function and
$\nabla f\in L^{\infty}(\mathbb{R}^n)$, then for any $\nu,x_0,x_0'\in\mathbb{R}^n$ with $|\nu|=1$, $x_0\neq x_0'$ and
$x_0-x_0'\parallel\nu$, there holds
$$
\|\partial_{\nu}f\|_{L^{\infty}(\mathbb{R}^n)}\geqslant\frac{|f(x_0)-f(x_0')|}{|x_0-x_0'|}.
$$
In what follows, we shall apply this principle to the case $\nu=(1/\sqrt{2})(1,-1,0,\cdots,0)$ (so that $\partial_{\nu}
=(1/\sqrt{2})(\partial_1-\partial_2)$) and
\begin{equation}
f(x)=\int_0^t{\rm e}^{(t-\tau)\Delta}U_{1j}\mbox{\rm d}\tau
\end{equation}
(see (4.7) for the expression of the function $U_{1j}$).
Since ${\rm supp}\rho(\cdot-a)\ast\rho(\cdot-b)\subseteq B(a+b,1/2)$, we see that for any $k,k'\in\mathscr{A}_m$ and
$l,l'\in\mathscr{B}_m$,
$$
{\rm supp}(\Phi_{kl}^{+\mu}\ast\Phi_{kl'}^{+\nu})\subseteq B(2a_k,2^{\frac{k}{2}+1}), \quad \mu,\nu\in\{0,1\},
$$
$$
{\rm supp}(\Phi_{kl}^{-\mu}\ast\Phi_{kl'}^{-\nu})\subseteq B(-2a_k,2^{\frac{k}{2}+1}), \quad \mu,\nu\in\{0,1\},
$$
$$
{\rm supp}(\Phi_{kl}^{++}\ast\Phi_{kl}^{--}),\;{\rm supp}(\Phi_{kl}^{+-}\ast\Phi_{kl}^{-+})\subseteq B(0,1),
$$
$$
{\rm supp}(\Phi_{kl}^{++}\ast\Phi_{kl'}^{--}),\;{\rm supp}(\Phi_{kl}^{+-}\ast\Phi_{kl'}^{-+})\subseteq
\{\xi\in\mathbb{R}^n:\;2^{l\wedge l'-2}\leqslant|\xi|<2^{l\wedge l'-1}\} \;\; (l\neq l'),
$$
$$
{\rm supp}(\Phi_{kl}^{\mu\nu}\ast\Phi_{k'l'}^{\mu'\nu'})\subseteq \{\xi\in\mathbb{R}^n:\;2^{k\wedge k'-1}<|\xi|<2^{k\wedge k'+1}\}
\;\; (k\neq k'), \quad \mu,\nu\in\{0,1\},
$$
where we use the notation $k\wedge k'=\max\{k,k'\}$. Moreover, it is easy to see that for any $j,l,l'\in\mathscr{B}_m$, if $l\wedge l'\neq j$ then
$$
\Delta_j[\mathscr{F}^{-1}(\Phi_{kl}^{++}\ast\Phi_{kl'}^{-+})]=\Delta_j[\mathscr{F}^{-1}(\Phi_{kl}^{+-}\ast\Phi_{kl'}^{--})]=0,
$$
and if $l\wedge l'=j$ then
$$
\Delta_j[\mathscr{F}^{-1}(\Phi_{kl}^{++}\ast\Phi_{kl'}^{-+})]=
\left\{
\begin{array}{l}
\mathscr{F}^{-1}(\Phi_{kj}^{++}\ast\Phi_{kj'}^{-+}) \quad \mbox{if}\;\, j=l,\\
e^{2{{\mbox{\boldmath $i$}}}a_k(c_{j'}-c_j)}\mathscr{F}^{-1}(\Phi_{kj}^{++}\ast\Phi_{kj'}^{-+}) \quad \mbox{if}\;\, j=l',
\end{array}
\right.
$$
$$
\Delta_j[\mathscr{F}^{-1}(\Phi_{kl}^{+-}\ast\Phi_{kl'}^{--})]=
\left\{
\begin{array}{l}
\mathscr{F}^{-1}(\Phi_{kj}^{+-}\ast\Phi_{kj'}^{--}) \quad \mbox{if}\;\, j=l,\\
e^{2{{\mbox{\boldmath $i$}}}a_k(c_{j'}-c_j)}\mathscr{F}^{-1}(\Phi_{kj}^{+-}\ast\Phi_{kj'}^{--}) \quad \mbox{if}\;\, j=l',
\end{array}
\right.
$$
where $j'=l\vee l'=\min\{l,l'\}$. It follows that for any $j\in\mathscr{B}_m$,
\begin{eqnarray}
&&\Delta_j({\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0)
\nonumber\\
&= & m^{-2\sigma-\frac{2}{q}}\sum_{k\in\mathscr{A}_m}\sum_{l,l'\in\mathscr{B}_m}2^{2k+1}
\Delta_j[\mathscr{F}^{-1}({\rm e}^{-\tau|\xi|^2}\Phi_{kl}^{++}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kl'}^{-+}
+{\rm e}^{-\tau|\xi|^2}\Phi_{kl}^{+-}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kl'}^{--})]
\nonumber\\
&= & m^{-2\sigma-\frac{2}{q}}\sum_{k\in\mathscr{A}_m}2^{2k+1}\mathscr{F}^{-1}
({\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{++}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{-+}
+{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{+-}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{--})
\nonumber\\
&& +m^{-2\sigma-\frac{2}{q}}\sum_{k\in\mathscr{A}_m}\sum_{{j'\in\mathscr{B}_m\atop j'<j}}\theta_{kjj'}2^{2k+1}
\Delta_j[\mathscr{F}^{-1}({\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{++}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj'}^{-+}
+{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{+-}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj'}^{--})]
\nonumber\\
&\;:=\; & m^{-2\sigma-\frac{2}{q}}U_{1j}+m^{-2\sigma-\frac{2}{q}}U_{2j},
\end{eqnarray}
where $\theta_{kjj'}=1+e^{2{{\mbox{\boldmath $i$}}}a_k(c_{j'}-c_j)}$. Hence
\begin{eqnarray}
&&\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}(\partial_1-\partial_2)({\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0)\mbox{\rm d}\tau
\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\nonumber\\
&\gtrsim\; &m^{-2\sigma-\frac{2}{q}}\Big[\sum_{j\in\mathscr{B}_m}\Big(2^{-j}j^{\sigma}
\Big\|(\partial_1-\partial_2)\int_0^t{\rm e}^{(t-\tau)\Delta}U_{1j}\mbox{\rm d}\tau\Big\|_{\infty}\Big)^q\;\Big]^{\frac{1}{q}}
\nonumber\\
&&\quad -m^{-2\sigma-\frac{2}{q}}\Big[\sum_{j\in\mathscr{B}_m}\Big(2^{-j}j^{\sigma}
\Big\|(\partial_1-\partial_2)\int_0^t{\rm e}^{(t-\tau)\Delta}U_{2j}\mbox{\rm d}\tau\Big\|_{\infty}\Big)^q\;\Big]^{\frac{1}{q}}
\nonumber\\ [0.2cm]
&:=\; &I+I\!\!I.
\end{eqnarray}
Let $f$ be the function as defined in (4.6). We have
\begin{eqnarray*}
f(x)&\;=\;&\int_0^t{\rm e}^{(t-\tau)\Delta}U_{1j}\mbox{\rm d}\tau
\\
&\;=\;&\mathscr{F}^{-1}\Big[\sum_{k\in\mathscr{A}_m}2^{2k+1}\int_0^t{\rm e}^{-(t-\tau)|\xi|^2}\Big({\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{++}
\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{-+}+{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{+-}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{--}\Big)\mbox{\rm d}\tau\Big]
\\
&\;=\;&\mathscr{F}^{-1}\Big[\sum_{k\in\mathscr{A}_m}2^{2k+1}{\rm e}^{{\mbox{\boldmath $i$}} c_j\xi}\!\int_0^t{\rm e}^{-(t-\tau)|\xi|^2}
\int_{\mathbb{R}^n}{\rm e}^{-\tau(|\xi-\eta|^2+|\eta|^2)}
\\
&&\quad \times\Big(\rho(\xi-\eta-a_k-b_j)\rho(\eta+a_k-b_j)+\rho(\xi-\eta-a_k+b_j)\rho(\eta+a_k+b_j)\Big)\mbox{\rm d}\eta\mbox{\rm d}\tau\Big]
\\
&\;=\;&\mathscr{F}^{-1}\{{\rm e}^{{\mbox{\boldmath $i$}} c_j\xi}[g(t,\xi)+g(t,-\xi)]\}
\\
&\;=\;&\frac{2}{(2\pi)^n}\int_{\mathbb{R}^n}\cos[(x+c_j)\xi]g(t,\xi)\mbox{\rm d}\xi,
\end{eqnarray*}
where
$$
g(t,\xi)={\rm e}^{-t|\xi|^2}\int_{\mathbb{R}^n}G_{j}(t,\xi,\eta)\rho(\xi-\eta-2b_j)\rho(\eta)\mbox{\rm d}\eta,
$$
and
$$
G_{j}(t,\xi,\eta)=\sum_{k\in\mathscr{A}_m}2^{2k+1}\int_0^t{\rm e}^{-\tau(|\xi-\eta+a_k-b_j|^2+|\eta-a_k+b_j|^2-|\xi|^2)}\mbox{\rm d}\tau.
$$
Let $x_0=-c_j$, $x_0'=-c_j+2^{-j-1}\pi\varepsilon^{-1}(1,-1,0,\cdots,0)$. Then since $x_0-x_0'\parallel (1,-1,0,\cdots,0)$ and
$|x_0-x_0'|=2^{-j-\frac{1}{2}}\pi\varepsilon^{-1}\sim 2^{-j}\varepsilon^{-1}$, we have
\begin{eqnarray*}
\Big\|(\partial_1-\partial_2)\int_0^t{\rm e}^{(t-\tau)\Delta}U_{1j}\mbox{\rm d}\tau\Big\|_{\infty}
&\;\gtrsim\;&\frac{1}{2^{-j}\varepsilon^{-1}}\Big|\int_{\mathbb{R}^n}\{1-\cos[2^{-j-1}\pi\varepsilon^{-1}(\xi_1-\xi_2)]\}
g(t,\xi)\mbox{\rm d}\xi\Big|.
\end{eqnarray*}
From the expression of $g(t,\xi)$ we see that on the support of $g$ we have $\xi\sim 2b_j$, which implies that
$2^{-j-1}\pi\varepsilon^{-1}(\xi_1-\xi_2)\sim \pi/2$, and, consequently,
$$
1-\cos[2^{-j-1}\pi\varepsilon^{-1}(\xi_1-\xi_2)]\sim 1.
$$
Hence
\begin{eqnarray*}
\Big\|(\partial_1-\partial_2)\int_0^t{\rm e}^{(t-\tau)\Delta}U_{1j}\mbox{\rm d}\tau\Big\|_{\infty}
&\;\gtrsim\;&\frac{1}{2^{-j}\varepsilon^{-1}}\int_{\mathbb{R}^n}g(t,\xi)\mbox{\rm d}\xi.
\end{eqnarray*}
Since ${\rm supp}\rho\subseteq\bar{B}(0,3/4)$, and
$$
|a_k|=2^k\geqslant 2^{16m+4}, \quad |b_j|=2^{j-1}\leqslant 2^{8m}, \quad \forall k\in\mathscr{A}_m,\;\; \forall j\in\mathscr{B}_m,
$$
we see that on the support of $\rho(\xi-\eta-2b_j)\rho(\eta)$ there holds
$$
|\xi-\eta+a_k-b_j|^2+|\eta-a_k+b_j|^2-|\xi|^2\sim 2|a_k|^2=2^{2k+1},
$$
so that
$$
G_{j}(t,\xi,\eta)\sim\sum_{k\in\mathscr{A}_m}2^{2k+1}\frac{1-{\rm e}^{-t2^{2k+1}}}{2^{2k+1}}\gtrsim m(1-{\rm e}^{-t2^{32m}}).
$$
Hence,
\begin{eqnarray*}
\int_{\mathbb{R}^n}g(t,\xi)\mbox{\rm d}\xi &\;\gtrsim\;&{\rm e}^{-t2^{2j}}\cdot m(1-{\rm e}^{-t2^{32m}}),
\end{eqnarray*}
and, consequently,
\begin{eqnarray*}
\Big\|(\partial_1-\partial_2)\int_0^t{\rm e}^{(t-\tau)\Delta}U_{1j}\mbox{\rm d}\tau\Big\|_{\infty}
&\;\gtrsim\;&2^j\varepsilon{\rm e}^{-t2^{2j}}\cdot m(1-{\rm e}^{-t2^{32m}}).
\end{eqnarray*}
In getting the last inequality we have used the assumption that $t=\varepsilon 2^{-32m}$ and $0<\varepsilon\ll 1$. It follows that
\begin{equation}
I\gtrsim m^{-2\sigma-\frac{2}{q}}\cdot\varepsilon^2 m^{1+\sigma+\frac{1}{q}}\gtrsim\varepsilon^2 m^{1-\sigma-\frac{1}{q}}.
\end{equation}
Next, for any $f\in L^1(\mathbb{R}^n)$, by writing
$$
{\rm e}^{t\Delta}f(x)=\pi^{-\frac{n}{2}}\int_{\mathbb{R}^n}{\rm e}^{-|y|^2}f(x-2\sqrt{t}y)\mbox{\rm d} y, \quad x\in\mathbb{R}^n, \;\; t>0,
$$
we see that if $f$ satisfies the property
$$
|f(x)|\leqslant C_N(1+|x|)^{-N}, \quad \forall x\in\mathbb{R}^n, \;\; \forall N>0,
$$
then there also holds
$$
|{\rm e}^{t\Delta}f(x+a)|\leqslant C_N(1+|x+a|)^{-N}, \quad \forall x\in\mathbb{R}^n, \;\; \forall t\in (0,1),
\;\; \forall N>0, \;\; \forall a\in\mathbb{R}^n.
$$
It follows that for any $k\in\mathscr{A}_m$ and $j,j'\in\mathscr{B}_m$ with $j'<j$ we have
\begin{eqnarray*}
\|\partial_i\Delta_j({\rm e}^{\tau\Delta}\check{\Phi}_{kj}^{+\mu}\cdot{\rm e}^{\tau\Delta}\check{\Phi}_{kj'}^{-\mu})\|_{\infty}
&\;\lesssim\;& 2^j\|{\rm e}^{\tau\Delta}\check{\Phi}_{kj}^{+\mu}\cdot{\rm e}^{\tau\Delta}\check{\Phi}_{kj'}^{-\mu}\|_{\infty}
\nonumber\\
&\;\lesssim_N\;& 2^j\|(1+|x+c_j|)^{-N}(1+|x+c_{j'}|)^{-N}\|_{\infty}
\nonumber\\
&\;\lesssim_N\;& 2^{-(N-1)j}, \quad \forall N>0, \;\; \mu\in\{+,-\}, \;\; i=1,2.
\end{eqnarray*}
The last estimate follows from a similar argument as in the proof of (2.45) of \cite{Wang}. Hence
\begin{eqnarray*}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}(\partial_1-\partial_2)U_{2j}\mbox{\rm d}\tau\Big\|_{\infty}
&\;\lesssim\;& \Big(\sum_{k\in\mathscr{A}_m}2^{2k+1}\Big)\cdot tm2^{-(N-1)j}
\\
&\lesssim & 2^{40m}\cdot tm2^{-4m(N-1)}\lesssim\varepsilon m,
\end{eqnarray*}
where we have put $N=3$. It follows that
\begin{equation}
I\!\!I\lesssim m^{-2\sigma-\frac{2}{q}}\cdot 2^{-4m}m^{\sigma}\cdot\varepsilon m\lesssim\varepsilon m^{1-\sigma-\frac{2}{q}}2^{-4m}.
\end{equation}
Substituting (4.9) and (4.10) into (4.8), and assuming that $m$ is so large that $2^{-4m}\leqslant\varepsilon^2$, we obtain (4.5). $\quad\Box$
{\bf Lemma 4.3} \ \ {\em Let $m\gg-\ln\varepsilon$. Then for any $1\leqslant q\leqslant\infty$ and $t>0$ we have}
\begin{equation}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_2[{\rm e}^{\tau\Delta}(u_1^0+u_2^0){\rm e}^{\tau\Delta}u_1^0]\mbox{\rm d}\tau
\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}\lesssim \varepsilon^2 2^{-8m}m^{-\sigma-\frac{2}{q}}.
\end{equation}
{\em Proof}:\ \ Similarly as in the proof of the above lemma, for any $j\in\mathscr{B}_m$ we have
\begin{eqnarray*}
&&\Delta_j[{\rm e}^{\tau\Delta}(u_1^0+u_2^0){\rm e}^{\tau\Delta}u_1^0]
\\
&= & m^{-2\sigma-\frac{2}{q}}\sum_{k\in\mathscr{A}_m}\sum_{l,l'\in\mathscr{B}_m}2^{2k+1}
\Delta_j[\mathscr{F}^{-1}({\rm e}^{-\tau|\xi|^2}\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kl}^{++}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kl'}^{-+}
\\
&& \qquad\qquad\qquad\qquad\qquad\qquad\quad +{\rm e}^{-\tau|\xi|^2}\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kl}^{+-}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kl'}^{--})]
\\
&= & m^{-2\sigma-\frac{2}{q}}\sum_{k\in\mathscr{A}_m}2^{2k+1}\mathscr{F}^{-1}
({\rm e}^{-\tau|\xi|^2}\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kj}^{++}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{-+}
+{\rm e}^{-\tau|\xi|^2}\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kj}^{+-}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{--})
\\
&& +m^{-2\sigma-\frac{2}{q}}\sum_{k\in\mathscr{A}_m}\sum_{{j'\in\mathscr{B}_m\atop j'<j}}\theta_{kjj'}2^{2k+1}
\mathscr{F}^{-1}({\rm e}^{-\tau|\xi|^2}\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kj}^{++}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj'}^{-+}
\\
&& \qquad\qquad\qquad\qquad\qquad\qquad\qquad +{\rm e}^{-\tau|\xi|^2}\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kj}^{+-}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj'}^{--})
\\
&\;:=\; & m^{-2\sigma-\frac{2}{q}}V_{1j}+m^{-2\sigma-\frac{2}{q}}V_{2j}.
\end{eqnarray*}
Hence
\begin{eqnarray}
&&\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_2[{\rm e}^{\tau\Delta}(u_1^0+u_2^0){\rm e}^{\tau\Delta}u_1^0]\mbox{\rm d}\tau
\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\nonumber\\
&\lesssim\; &m^{-2\sigma-\frac{2}{q}}\Big[\sum_{j\in\mathscr{B}_m}\Big(2^{-j}j^{\sigma}\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}
\partial_2V_{1j}\mbox{\rm d}\tau\Big\|_{\infty}\Big)^q\;\Big]^{\frac{1}{q}}
\nonumber\\
&&\quad +m^{-2\sigma-\frac{2}{q}}\Big[\sum_{j\in\mathscr{B}_m}\Big(2^{-j}j^{\sigma}\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}
\partial_2V_{2j}\mbox{\rm d}\tau\Big\|_{\infty}\Big)^q\;\Big]^{\frac{1}{q}}
\nonumber\\ [0.2cm]
&:=\; &I\!\!I\!\!I+I\!V.
\end{eqnarray}
As for $\displaystyle\int_0^t{\rm e}^{(t-\tau)\Delta}(\partial_1-\partial_2)U_{1j}\mbox{\rm d}\tau$ we have
\begin{eqnarray*}
\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_2V_{1j}\mbox{\rm d}\tau&\;=\;&
{\mbox{\boldmath $i$}}\mathscr{F}^{-1}\Big[\sum_{k\in\mathscr{A}_m}2^{2k+1}\int_0^t{\rm e}^{-(t-\tau)|\xi|^2}\xi_2\Big({\rm e}^{-\tau|\xi|^2}
\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kj}^{++}\ast{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{-+}
\\
&&\quad +{\rm e}^{-\tau|\xi|^2}\frac{\xi_2-\xi_1}{\xi_2}\Phi_{kj}^{+-}\ast
{\rm e}^{-\tau|\xi|^2}\Phi_{kj}^{--}\Big)\mbox{\rm d}\tau\Big]
\\
&\;=\;&{\mbox{\boldmath $i$}}\mathscr{F}^{-1}\Big[{\rm e}^{{\mbox{\boldmath $i$}} c_j\xi}{\rm e}^{-t|\xi|^2}\xi_2\Big(\int_{\mathbb{R}^n}H^+_{j}(t,\xi,\eta)
\rho(\xi-\eta-2b_j)\rho(\eta)\mbox{\rm d}\eta
\\
&&\quad +\int_{\mathbb{R}^n}H^-_{j}(t,\xi,\eta)\rho(\xi-\eta+2b_j)\rho(\eta)\mbox{\rm d}\eta\Big)\Big],
\end{eqnarray*}
where
\begin{eqnarray*}
H^+_{j}(t,\xi,\eta)&\;:=\;&\sum_{k\in\mathscr{A}_m}2^{2k+1}
\frac{\xi_2-\eta_2-\xi_1+\eta_1-2^{j-1}\varepsilon}{\xi_2-\eta_2+2^kn^{-\frac{1}{2}}-2^j\varepsilon}
\int_0^t{\rm e}^{-\tau(|\xi-\eta+a_k-b_j|^2+|\eta-a_k+b_j|^2-|\xi|^2)}\mbox{\rm d}\tau,
\\
H^-_{j}(t,\xi,\eta)&\;:=\;&\sum_{k\in\mathscr{A}_m}2^{2k+1}
\frac{\xi_2-\eta_2-\xi_1+\eta_1+2^{j-1}\varepsilon}{\xi_2-\eta_2+2^kn^{-\frac{1}{2}}+2^j\varepsilon}
\int_0^t{\rm e}^{-\tau(|\xi-\eta+a_k+b_j|^2+|\eta-a_k-b_j|^2-|\xi|^2)}\mbox{\rm d}\tau.
\end{eqnarray*}
It is easy to see that on the supports of $\rho(\xi-\eta\mp2b_j)\rho(\eta)$ there respectively hold
$$
\Big|\frac{\xi_2-\eta_2-\xi_1+\eta_1\mp2^{j-1}\varepsilon}{\xi_2-\eta_2+2^kn^{-\frac{1}{2}}\mp2^j\varepsilon}\Big|
\lesssim 2^{j-k}\varepsilon,
$$
so that similarly as before we have
$$
|H^{\pm}_{j}(t,\xi,\eta)|\lesssim \sum_{k\in\mathscr{A}_m}2^{2k+1}\cdot 2^{j-k}\varepsilon\cdot
\frac{1-{\rm e}^{-t2^{2k+1}}}{2^{2k+1}}\lesssim 2^{j-16m}\varepsilon.
$$
Hence
\begin{eqnarray*}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_2V_{1j}\mbox{\rm d}\tau\Big\|_{\infty}
&\;\lesssim\;&\int_{\mathbb{R}^n}{\rm e}^{-t|\xi|^2}\xi_2\Big(\int_{\mathbb{R}^n}|H^+_{j}(t,\xi,\eta)|
\rho(\xi\!-\!\eta\!-\!2b_j)\rho(\eta)\mbox{\rm d}\eta
\\
&&\quad +\int_{\mathbb{R}^n}|H^-_{j}(t,\xi,\eta)|\rho(\xi\!-\!\eta\!+\!2b_j)\rho(\eta)\mbox{\rm d}\eta\Big)\mbox{\rm d}\xi
\\
&\;\lesssim\;& 2^{2j-16m}\varepsilon^2, \quad \forall j\in\mathscr{B}_m.
\end{eqnarray*}
It follows that
\begin{equation}
I\!\!I\!\!I\lesssim m^{-2\sigma-\frac{2}{q}}\cdot m^{\sigma}2^{-16m}\varepsilon^2\cdot\Big(\sum_{j\in\mathscr{B}_m}2^{jq}\Big)^{\frac{1}{q}}
\lesssim \varepsilon^22^{-8m}m^{-\sigma-\frac{2}{q}}.
\end{equation}
For $\displaystyle\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_2V_{2j}\mbox{\rm d}\tau$ we have
\begin{eqnarray*}
\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_2V_{2j}\mbox{\rm d}\tau&\;=&\sum_{{j'\in\mathscr{B}_m\atop j'<j}}\!\theta_{kjj'}
{\mbox{\boldmath $i$}}\mathscr{F}^{-1}\Big[{\rm e}^{{\mbox{\boldmath $i$}} c_j\xi}{\rm e}^{-t|\xi|^2}\xi_2\Big(\int_{\mathbb{R}^n}\!{\rm e}^{{\mbox{\boldmath $i$}}(c_j-c_{j'})\eta}H^+_{j'}(t,\xi,\eta)
\rho(\xi\!-\!\eta\!-\!b_j\!-\!b_{j'})\rho(\eta)\mbox{\rm d}\eta
\\
&&\quad +\int_{\mathbb{R}^n}\!{\rm e}^{{\mbox{\boldmath $i$}}(c_j-c_{j'})\eta}H^-_{j'}(t,\xi,\eta)\rho(\xi\!-\!\eta\!+\!b_j\!+\!b_{j'})\rho(\eta)\mbox{\rm d}\eta\Big)\Big].
\end{eqnarray*}
It follows that
\begin{eqnarray*}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_2V_{2j}\mbox{\rm d}\tau\Big\|_{\infty}
&\;\lesssim\;&\sum_{{j'\in\mathscr{B}_m\atop j'<j}}\!2^j\varepsilon\cdot 2^{j'-16m}\varepsilon
\lesssim 2^{2j-16m}\varepsilon^2, \quad \forall j\in\mathscr{B}_m.
\end{eqnarray*}
Consequently,
\begin{equation}
I\!V\lesssim m^{-2\sigma-\frac{2}{q}}\cdot m^{\sigma}2^{-16m}\varepsilon^2\cdot\Big(\sum_{j\in\mathscr{B}_m}2^{jq}\Big)^{\frac{1}{q}}
\lesssim \varepsilon^22^{-8m}m^{-\sigma-\frac{2}{q}}.
\end{equation}
Substituting (4.13) and (4.14) into (4.12), we get (4.11). $\quad\Box$
{\bf Lemma 4.4} \ \ {\em Let $m\gg-\ln\varepsilon$ and $t=\varepsilon 2^{-32m}$. Then for any $1\leqslant q\leqslant\infty$ we have}
\begin{equation}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\!\sum_{\alpha,\beta=1}^2\!\frac{\partial_{\alpha}\partial_{\beta}}{\Delta}
({\rm e}^{\tau\Delta}u_{\alpha}^0{\rm e}^{\tau\Delta}u_{\beta}^0)
\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}\lesssim\varepsilon^3 m^{1-\sigma-\frac{1}{q}}.
\end{equation}
{\em Proof}:\ \ A basic observation in getting (4.15) is that on the supports of $\rho(\xi-\eta\mp 2b_j)\rho(\eta)$ and
$\rho(\xi-\eta\mp b_j\mp b_{j'})\rho(\eta)$, the symbol of the pseudo-differential operator $\displaystyle
\frac{\partial_{\alpha}\partial_{\beta}}{\Delta}$ is bounded by $C\varepsilon^2$, where $C$ represents positive constant. Indeed, on these
supports we have
$$
\xi_\alpha\sim 2^j\varepsilon \; (\alpha=1,2)\;\; \mbox{and} \;\; |\xi|\sim 2^j\;\; (\mbox{assuming $j'<j$}), \quad \mbox{so that} \;\;
\Big|\frac{\xi_{\alpha}\xi_{\beta}}{|\xi|^2}\Big|\lesssim\varepsilon^2 \; (\alpha,\beta=1,2).
$$
There are four terms in the sum $\displaystyle\sum_{\alpha,\beta=1}^2$, and consequently the left-hand side of (4.15) can be bounded by a sum of
four terms. We estimate each term separately.
(1)\ \ Estimate of the term with $\alpha=\beta=1$. This term can be estimated as in the proof of (4.5). Indeed, similarly as in (4.6) we have
$$
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\frac{\partial_1\partial_1}{\Delta}
({\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0)\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\lesssim V+V\!I,
$$
where $V$ and $V\!I$ are expressions obtained from modifying $I$ and $I\!\!I$, respectively, by replacing $\partial_1-\partial_2$ with
$\displaystyle\partial_1\frac{\partial_1\partial_1}{\Delta}$. By using a similar argument as in the proof of (4.9) (and using
the inequalities $1-{\rm e}^{-t2^{32m}}\leqslant 1$ and ${\rm e}^{-t2^{2j}}\leqslant 1$) we have
$$
V\lesssim m^{-2\sigma-\frac{2}{q}}\cdot\varepsilon^3 m^{1+\sigma+\frac{1}{q}}\lesssim\varepsilon^3m^{1-\sigma-\frac{1}{q}},
$$
and using some similar argument as in the proof of (4.10) we have
$$
V\!I\lesssim m^{-2\sigma-\frac{2}{q}}\cdot 2^{-4m}m^{\sigma}\cdot\varepsilon^3 m
\lesssim\varepsilon^3 m^{1-\sigma-\frac{2}{q}}2^{-4m}.
$$
Hence
\begin{equation}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\frac{\partial_1\partial_1}{\Delta}
({\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0)\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\lesssim\varepsilon^3 m^{1-\sigma-\frac{1}{q}}.
\end{equation}
(2)\ \ Estimate of the term with $\alpha=1$, $\beta=2$ or $\alpha=2$, $\beta=1$. We have
$$
{\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_2^0={\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}(u_1^0+u_2^0)-{\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0.
$$
By using some similar argument as in the proof of (4.11) we have
$$
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\frac{\partial_1\partial_2}{\Delta}
[{\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}(u_1^0+u_2^0)]\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\lesssim\varepsilon^4 2^{-8m}m^{-\sigma-\frac{2}{q}},
$$
and similar to (4.16) we have
$$
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\frac{\partial_1\partial_2}{\Delta}
({\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0)\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\lesssim\varepsilon^3 m^{1-\sigma-\frac{1}{q}}.
$$
Hence
\begin{equation}
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\frac{\partial_1\partial_2}{\Delta}
({\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_2^0)\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\lesssim\varepsilon^3 m^{1-\sigma-\frac{1}{q}}.
\end{equation}
(3)\ \ Estimate of the term with $\alpha=\beta=2$. We have
$$
{\rm e}^{\tau\Delta}u_2^0{\rm e}^{\tau\Delta}u_2^0={\rm e}^{\tau\Delta}(u_1^0+u_2^0){\rm e}^{\tau\Delta}(u_1^0+u_2^0)
-2{\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}(u_1^0+u_2^0)+{\rm e}^{\tau\Delta}u_1^0{\rm e}^{\tau\Delta}u_1^0.
$$
By using some similar argument as in the proof of (4.11) we have
$$
\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\frac{\partial_2\partial_2}{\Delta}
[{\rm e}^{\tau\Delta}(u_1^0+u_2^0){\rm e}^{\tau\Delta}(u_1^0+u_2^0)]\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\lesssim\varepsilon^5 2^{-16m}m^{-\sigma-\frac{2}{q}}.
$$
Hence
\begin{eqnarray}
&&\Big\|\int_0^t{\rm e}^{(t-\tau)\Delta}\partial_1\frac{\partial_2\partial_2}{\Delta}
({\rm e}^{\tau\Delta}u_2^0{\rm e}^{\tau\Delta}u_2^0)\mbox{\rm d}\tau\Big\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\nonumber\\
&\;\lesssim\;&\varepsilon^5 2^{-16m}m^{-\sigma-\frac{2}{q}}+\varepsilon^4 2^{-8m}m^{-\sigma-\frac{2}{q}}
+\varepsilon^3 m^{1-\sigma-\frac{1}{q}}
\lesssim\varepsilon^3 m^{1-\sigma-\frac{1}{q}}.
\end{eqnarray}
Now, summing up (4.16), (4.17) and (4.18) (with (4.17) twice), we obtain (4.15). $\quad\Box$
We are now ready to give the proof of Theorem 1.3.
{\em Proof of Theorem 1.3}:\ \ As we mentioned in the beginning of this section, here we only consider the case $2<q\leqslant\infty$,
$1-2/q\leqslant\sigma<1-1/q$. Hence, in what follows we assume that these conditions are satisfied.
Given $T>0$, let $X$ be the following function space on $\mathbb{R}^n\times [0,T]$:
$$
X=\{u\in L^{\infty}_{\rm loc}((0,T],L^{\infty}(\mathbb{R}^n)):\; \|u\|_X<\infty\},
$$
where
$$
\|u\|_X:=\sup_{0<t<T}\sqrt{t}\|u(\cdot,t)\|_{L^{\infty}(\mathbb{R}^n)}+\sup_{{x\in\mathbb{R}^n\atop 0<R<\sqrt{T}}}
\Big(\frac{1}{|B(x,R)|}\int_0^{R^2}\!\!\!\int_{B(x,R)}|u(y,t)|^2\mbox{\rm d} y\mbox{\rm d} t\Big)^{\frac{1}{2}},
$$
and let $Y=X\cap L^{\infty}((0,T),bmo^{-1})$ with standard norm for joint space. It is well-known that there exists $\epsilon_T>0$ such that for
${\mbox{\boldmath $u$}}_0\in bmo^{-1}$ with $\|\delta{\mbox{\boldmath $u$}}_0\|_{bmo^{-1}}\leqslant \epsilon_T$, the problem (4.1) has a unique solution ${\mbox{\boldmath $u$}}={\mbox{\boldmath $u$}}(\delta,t)\in Y$, and
\begin{equation}
\|{\mbox{\boldmath $u$}}(\delta,\cdot)\|_Y\lesssim\delta\|{\mbox{\boldmath $u$}}_0\|_{bmo^{-1}}, \quad \forall\delta\in (0,\delta_0).
\end{equation}
Moreover, there holds the following estimate:
\begin{equation}
\|B({\mbox{\boldmath $u$}},{\mbox{\boldmath $v$}})\|_Y\lesssim\|{\mbox{\boldmath $u$}}\|_X\|{\mbox{\boldmath $v$}}\|_X, \quad \forall{\mbox{\boldmath $u$}},{\mbox{\boldmath $v$}}\in X.
\end{equation}
We refer the reader to see Theorem 16.1, Lemma 16.3 and the proof of the corollary following Theorem 16.2 of \cite{LEM02} for proofs of these
assertions. Since ${\mbox{\boldmath $u$}}=\delta{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0+B({\mbox{\boldmath $u$}},{\mbox{\boldmath $u$}})$, from (4.19) and (4.20) it follows that
\begin{equation}
\|{\mbox{\boldmath $u$}}(\delta,\cdot)-\delta{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0\|_Y\lesssim\|{\mbox{\boldmath $u$}}(\delta,\cdot)\|_X^2\lesssim\delta^2\|{\mbox{\boldmath $u$}}_0\|_{bmo^{-1}}^2,
\quad \forall\delta\in (0,\delta_0).
\end{equation}
Let ${\mbox{\boldmath $v$}}(\delta,t)=B({\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0,{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0)$ and set
\begin{eqnarray*}
{\mbox{\boldmath $w$}}(\delta,t)&\;=\;&{\mbox{\boldmath $u$}}(\delta,t)-\delta{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0-\delta^2{\mbox{\boldmath $v$}}(\delta,t)
\\
&\;=\;&\int_0^t{\rm e}^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot[{\mbox{\boldmath $u$}}(\delta,\tau)\otimes{\mbox{\boldmath $u$}}(\delta,\tau)
-\delta^2{\rm e}^{\tau\Delta}{\mbox{\boldmath $u$}}_0\otimes{\rm e}^{\tau\Delta}{\mbox{\boldmath $u$}}_0]\mbox{\rm d}\tau.
\end{eqnarray*}
Since
$$
{\mbox{\boldmath $w$}}(\delta,t)=\int_0^t{\rm e}^{(t-\tau)\Delta}\mathbb{P}\nabla\cdot\{[{\mbox{\boldmath $u$}}(\delta,\tau)-\delta{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0]\otimes{\mbox{\boldmath $u$}}(\delta,\tau)
+\delta{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0\otimes[{\mbox{\boldmath $u$}}(\delta,\tau)-\delta{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0]\}\mbox{\rm d}\tau,
$$
by (4.19) $\sim$ (4.21) it follows that
\begin{equation}
\|{\mbox{\boldmath $w$}}(\delta,\cdot)\|_Y\lesssim(\|{\mbox{\boldmath $u$}}(\delta,\cdot)\|_X+\delta\|{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0\|_X)
\|{\mbox{\boldmath $u$}}(\delta,\cdot)-\delta{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0\|_X\lesssim\delta^3\|{\mbox{\boldmath $u$}}_0\|_{bmo^{-1}}^3.
\end{equation}
Now let ${\mbox{\boldmath $u$}}_0$ be the vector function given by (4.2). Since ${\mbox{\boldmath $u$}}_0\in S(\mathbb{R}^n)$, the above results apply to it. By applying Lemma 4.1 we
have
\begin{equation}
\|{\mbox{\boldmath $u$}}_0\|_{bmo^{-1}}\lesssim\|{\mbox{\boldmath $u$}}_0\|_{B^{-1}_{\infty\,2}}\lesssim m^{\frac{1}{2}-\frac{1}{q}-\sigma}\|{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}
\lesssim m^{\frac{1}{2}-\frac{1}{q}-\sigma}.
\end{equation}
Thus by (4.22) we have
\begin{equation}
\|{\mbox{\boldmath $w$}}(\delta,\cdot)\|_Y\lesssim\delta^3 m^{\frac{3}{2}-\frac{3}{q}-3\sigma},
\end{equation}
which implies that
\begin{equation}
\|{\mbox{\boldmath $w$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}\lesssim m^{\sigma+\frac{1}{q}}
\|{\mbox{\boldmath $w$}}(\delta,t)\|_{B^{-1}_{\infty\infty}(\mathscr{B}_m)}\lesssim m^{\sigma+\frac{1}{q}}\|{\mbox{\boldmath $w$}}(\delta,\cdot)\|_Y
\lesssim\delta^3 m^{\frac{3}{2}-\frac{2}{q}-2\sigma}, \quad \forall t\in (0,T).
\end{equation}
Let $v_1(\delta,t)$ be the first component of ${\mbox{\boldmath $v$}}(\delta,t)$. From Lemmas 4.2 $\sim$ 4.4 we see that with $t=\varepsilon 2^{-32m}$ and
$m\gg-\ln\varepsilon$,
$$
\|{\mbox{\boldmath $v$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}\gtrsim\|v_1(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
\gtrsim\varepsilon^2 m^{1-\sigma-\frac{1}{q}}.
$$
Hence
\begin{eqnarray}
\|{\mbox{\boldmath $u$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}}&\;\gtrsim\;&\|{\mbox{\boldmath $w$}}(\delta,t)-\delta^2{\mbox{\boldmath $v$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
-\delta\|{\rm e}^{t\Delta}{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}
\nonumber\\
&\;\gtrsim\;&\delta^2\|{\mbox{\boldmath $v$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}-\|{\mbox{\boldmath $w$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}(\mathscr{B}_m)}
-\delta\|{\mbox{\boldmath $u$}}_0\|_{B^{-1,\sigma}_{\infty\,q}}
\nonumber\\
&\;\geqslant\;& C_0\delta^2\varepsilon^2 m^{1-\sigma-\frac{1}{q}}-C_1\delta^3 m^{\frac{3}{2}-\frac{2}{q}-2\sigma}-C_2\delta
\quad \mbox{for}\;\; t=\varepsilon 2^{-32m}.
\end{eqnarray}
Since the conditions $1-2/q\leqslant\sigma<1-1/q$ and $q>2$ imply that $1-\sigma-1/q>0$ and $1-\sigma-1/q>3/2-2/q-2\sigma$, it follows that for any
given $0<\delta<\delta_0$ and $0<\varepsilon\ll 1$, by choosing $m$ sufficiently large (depending on $\delta$ and $\varepsilon$), we have
$$
\|{\mbox{\boldmath $u$}}(\delta,t)\|_{B^{-1,\sigma}_{\infty\,q}}\geqslant\frac{1}{2}C_0\delta^2\varepsilon^2 m^{1-\sigma-\frac{1}{q}}
\quad \mbox{for}\;\; t=\varepsilon 2^{-32m}.
$$
This completes the proof. $\quad\Box$
{\bf Remark 4.5} \ \ Note that the above proof actually works under the weaker conditions $2<q\leqslant\infty$ and $1/2-1/q\leqslant\sigma<1-1/q$.
{\bf Remark 4.6} \ \ To treat the case $1\leqslant q\leqslant 2$ and $0\leqslant\sigma<1/q$, we need to use a different class of initial values
${\mbox{\boldmath $u$}}_0$ which are obtained by modifying the definition of (4.2) as follows: In the first two lines of (4.2), remove the first sum $\displaystyle
\sum_{k\in\mathscr{A}_m}$, replace $m^{-\sigma-\frac{1}{q}}$ with $m^{-\sigma}$, and put $k=16m$. The arguments in the statements and proofs of
Lemmas 4.1 $\sim$ 4.4 must be correspondingly modified: The right-hand sides of (4.5), (4.11) and (4.15) need be replaced with $\varepsilon^2
m^{\frac{1}{q}-\sigma}$, $\varepsilon^2 2^{-8m}m^{-\sigma}$ and $\varepsilon^4 m^{\frac{1}{q}-\sigma}$, respectively. We omit the details here.
Note that this can also be regarded as a modification to the argument of \cite{Wang}.
{\bf Remark 4.7} \ \ To treat the case $2<q\leqslant\infty$ and $0\leqslant\sigma<1-2/q$, another different class of initial values ${\mbox{\boldmath $u$}}_0$ have
to be employed, which are obtained by modifying the definition of (4.2) in another way as follows: In the first two lines of (4.2), remove the
second sum $\displaystyle\sum_{l\in\mathscr{B}_m}$, and put $l=4m$. The corresponding modifications in the statements of Lemmas 4.2
$\sim$ 4.4 are as follows: All $\mathscr{B}_m$ in these lemmas need be replaced with the single-point set $\{4m\}$, and the right-hand sides of
(4.5), (4.11) and (4.15) need be replaced with $\varepsilon^2m^{1-\sigma-\frac{2}{q}}$, $\varepsilon^2 2^{-12m}m^{-\sigma-\frac{2}{q}}$ and
$\varepsilon^3 m^{1-\sigma-\frac{2}{q}}$, respectively. In this case, the estimates in (4.23) $\sim$ (4.26) need also be correspondingly modified:
Not only the set $\mathscr{B}_m$ need be replaced with the single-point set $\{4m\}$, but also the final bounds in these estimates need be replaced
with $m^{-\sigma}$, $\delta^3m^{-3\sigma}$, $\delta^3m^{-2\sigma}$ and
$$
C_0\delta^2\varepsilon^2 m^{1-\sigma-\frac{2}{q}}-C_1\delta^3 m^{-2\sigma}-C_2\delta,
$$
respectively. We omit the details here.
{\bf Remark 4.8} \ \ For the case $2<q\leqslant\infty$ and $0\leqslant\sigma<1-2/q$, an alternative proof is to modify the argument of
\cite{Yon10}. The modification is as follows: Let
\begin{eqnarray*}
& v^0=(0,0,1,0,\cdots,0), \quad v^1=(0,1,0,0,\cdots,0), \quad w^0=(1,0,0,0,\cdots,0), \quad w^1=(0,0,1,0,\cdots,0), &
\\
& a_k^0=2^{4(m+k)}w^0, \quad a_k^1=2^{4(m+k)}w^0+2^{m}w^1, \quad k=1,2,\cdots,m.
\end{eqnarray*}
Then set
$$
{\mbox{\boldmath $u$}}_0(x)=m^{-\sigma-\frac{1}{q}}\sum_{k=1}^m|a_k^0|[v^0\cos(a_k^0x)+v^1\cos(a_k^1x)].
$$
Using this function as the initial value and correspondingly making necessary modifications to the argument of \cite{Yon10}, we obtain a different
proof to ill-posedness of the problem (1.2) in $B^{-1,\sigma}_{\infty\, q}(\mathbb{R}^n)$ for the case $2<q\leqslant\infty$ and
$0\leqslant\sigma<1-2/q$.
{\bf Remark 4.9} \ \ Here we only treated the case $n\geqslant 3$. After making some modifications to the argument given above as in Section 3 of
\cite{Wang}, we see that Theorem 1.3 also holds for the case $n=2$.
{\small
}
\end{document}
|
\begin{document}
\Large
\title{Asymptotics of summands I: square integrable independent random variables}
\author{Aladji Babacar Niang}
\author{Gane Samb Lo}
\author{Moumouni Diallo}
\begin{abstract} This paper is part of series on self-contained papers in which a large part, if not the full extent, of the asymptotic limit theory of summands of
independent random variables is exposed. Each paper of the series may be taken as a review exposition but especially as a complete exposition except for a few external resources. For graduate students and for researchers (beginners or advanced), any paper of the series should be considered as a basis for constructing new results. The contents are taken from advanced books but the organization and the proofs use more recent tools, are given in more detail and do not systematically follow previous ones. Sometimes, theorems are completed and innovated.\\
\noindent $^{\dag}$ Aladji Babacar Niang\\
LERSTAD, Gaston Berger University, Saint-Louis, S\'en\'egal.\\
Email: [email protected], [email protected]\\
\noindent $^{\dag \dag}$ Gane Samb Lo.\\
LERSTAD, Gaston Berger University, Saint-Louis, S\'en\'egal (main affiliation).\newline
LSTA, Pierre and Marie Curie University, Paris VI, France.\newline
AUST - African University of Sciences and Technology, Abuja, Nigeria\\
[email protected], [email protected], [email protected]\\
Permanent address : 1178 Evanston Dr NW T3P 0J9,Calgary, Alberta, Canada.\\
\noindent $^{\dag \dag \dag}$ Dr Moumouni Diallo\\
Université des Sciences Sociale et de Gestion de Bamako ( USSGB)\\
Faculté des Sciences Économiques et de Gestion (FSEG)\\
Email: [email protected]\\
\noindent\textbf{Keywords}. central limit theorem; pre-weak and weak limits; summands of arrays of real-valued random variables; class of admissible weak limits
in the CLT for independent random variables; infinitely divisible or decomposable laws; L\'evy, Lindeberg and Lyapounov criteria; Lindeberg-type condition; product of Poisson type characteristic functions.\\
\textbf{AMS 2010 Mathematics Subject Classification:} 60F05; 60E07
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
\noindent The largest part of the asymptotic theory of partial sums of random variables has concentrated on independent random variables over at least two centuries. Almost all the greatest scientists in probability theory (L\'evy, Kolmogorov, Lyapounov, Lindeberg, Gnedenko, Feller, etc.) engaged themselves in such an enterprise. Besides, a very large part of the current theory on dependent sequences of random variables is based on transformations of independence structures, for example on notions of \textit{nearness} of the dependence to independence (weak dependence, $\phi$-mixing, associated sequences, independent increments, etc.).\\
\noindent So it is important to have the deepest knowledge of that past. In \cite{ips-mfpt-ang}, we introduced some important elements of that theory (Central limit theorems, laws of the large numbers, law of the iterated logarithm, zero-one laws, etc.).\\
\noindent We are beginning a series of self-contained papers in which a large part of the \textit{central limit theorem} theory, if not the full extent, of the asymptotics of summands of independent random variables will be exposed. Each paper of the series may be taken as a review exposition but especially as a complete exposition except for a few external resources. For graduate students and for researchers (beginners or advanced), any paper of the series should be considered as a basis for constructing new results. The contents are taken from advanced books but the organization and the proofs use more recent tools, are given in more detail and do not systematically follow previous ones. Sometimes, theorems are completed and innovated.\\
\noindent In this first paper of the series, we focus on the full characterization of the \textit{CLT} problem of independent summands for square integrable random variables. The main material is extracted from \cite{loeve} as a general guide. But we use arguments from our previous works (\cite{ips-mestuto-ang}, \cite{ips-probelem-ang}, \cite{ips-mfpt-ang}, \cite{ips-wcia-ang}, etc.) to have a unified and self-contained text. In particular, the text on the weak convergence of bounded measures and its expression on $\mathbb{R}^k$ ($k\geq 1$) provides tools to make the conclusions in \cite{loeve} more clear, as we expect at least.\\
\noindent Papers of this series focus on complete mathematical texts rather than on a historical review of contributions of different authors. We refer to \cite{loeve} for that aspect.\\
\noindent Since the set of weak limits of independent summands for square integrable random variables coincides with the set of infinitely decomposable laws, this paper will deal with the most important properties of such probability laws to the extent needed here. More developments, including the study of stable laws, will be given in the other papers of the series.\\
\noindent Let us introduce the problem, after we provide some notations. Below, all sequences and all arrays of random variables have all their elements defined on a same probability space. So, we set a generic probability space
$(\Omega, \mathcal{A}, \mathbb{P})$.\\
\noindent Following \cite{loeve}, we transform the study of sums of independent and centered random variables
$$
S_n=X_1+...+X_n, \ n\geq 1,
$$
\noindent (with the notation $\sigma_i^2=\mathbb{E}X_i^2$, $i\geq 1$, if they exist), by summands
$$
S_n=\sum_{1\leq k \leq k_n} X_{k,n}, \ n\geq 1,
$$
\noindent where for each $n\geq 1$, the family $\{X_{k,n}, \ 1 \leq k \leq k_n=k(n)\}$ is a family of independent and centered random variables such that $F_{k,n}$ stands for the \textit{cdf} of $X_{k,n}$ and $\sigma_{k,n}^2=\mathbb{E}X_{k,n}^2$, $1\leq k \leq k_n$. We suppose also that $k_n \rightarrow +\infty$ as $n\rightarrow +\infty$.\\
\noindent \textbf{Notations}. The notation already given and completed by the notation $f_{kn}$ for the characteristic function of $X_{k,n}$, are fixed for once.\\
\noindent In the case of simple summands, we have for each $n\geq 1$, $k_n=n$ and $X_{k,n}=X_k$ for $k \in [1,n]$. Here, the rows $(X_{k,n})_{1\leq k \leq k_n}$ are such that each of them is obtained by adding one element to the predecessor. But, in the general case, no relation between families
$\mathcal{E}_n=\{X_{k,n}, \ 1 \leq k \leq k(n)\}$ is required. Also, in the case of the simple sequence $(X_k)_{k\geq 1}$, the studied array for each $n\geq 1$, is
$\{X_1/s_n, \cdots, X_n/s_n\}$ where $s_n^2=\mathbb{V}ar(X_1+\cdots+X_n)$.
\noindent Here, we are going to investigate the general problem of finding all the possible weak limits of $S_n$. Without restrictions, this may lead to trivial results. So we have to fix a general frame in which the study will be done. In doing so, the best way seems to go back to the complete theory of Feller-Levy-Lynderberg and there, discover the following two fundamental hypotheses.\\
\section{The Bounded Variance Hypothesis (\textit{BVH}) and the Uniformly Asymptotic Negligibility (\textit{UAN})} \label{03_id_sec_01}
\noindent Given a sequence $(X_k)_{k\geq 1}$ of independent, centered and square-integrable random variables, we set $\sigma_k^2=\mathbb{E}X_k^2$, $k\geq 1$, for $n\geq 1$
$$
s^2_n=\sum_{k=1}^{n} \sigma_k^2, \ \ t_n^2=\max \{\sigma_k^2, \ \ 1\leq k \leq n\} \ \ and \ \ B_n=t_n^2/s_n^2,
$$
\noindent $k(n)=n$ for each $n\geq 1$ and for each $n\geq 1$
$$
\{X_{k,n}, \ 1\leq k\leq k(n)\}=:\left\{\frac{X_k}{s_n}, \ 1\leq k\leq k(n)\right\}
$$
\noindent and
$$
S_n=\sum_{k=1}^{k(n)} X_{k,n}.
$$
\noindent The Feller-Levy-Lynderberg (\textit{F2L}) theorem (see \cite{loeve}, or \cite{ips-mfpt-ang}, Chapter 7, Section 2, Part B) ensures that: \label{HVB-UAN}\\
\begin{equation}
S_n \rightsquigarrow \mathcal{N}(0,1) \ \ [WC] \ \ and \ \ B_n\rightarrow 0 \ \ [NG] \label{Ng}
\end{equation}
\noindent if and only if, for any $\varepsilon>0$,
\begin{equation}
L_n(\varepsilon)=\sum_{1\leq k \leq k(n)} \int_{|X_{k,n}|\geq \varepsilon} X_{k,n}^2 \ d\mathbb{P} \rightarrow 0. \label{lyndeberg}
\end{equation}
\noindent Let us see how the two following important quantities behave in that frame:
\begin{equation}
U(n,\varepsilon)= \sup_{1\leq k \leq n} \mathbb{P}(|X_{k,n}|\geq \varepsilon) \ \ and \ \ MV(n)=\sum_{1\leq k \leq k(n)} \mathbb{V}ar(X_{k,n}).
\end{equation}
\noindent We have, by Markov inequality,
\begin{equation}
U(n,\varepsilon)\leq \sup_{1\leq k \leq n} \frac{\mathbb{E} X_{k,n}^2}{\varepsilon^2 s_n^2}=\varepsilon^{-2} B_n, \label{UAN2}
\end{equation}
\noindent and $MV(n)=1$ for all $n\geq 1$ and for $c=1$
$$
\sup_{n\geq 1} MV(n)=c<+\infty.
$$
\noindent The theory we are going to develop in a more general case needs the properties we just introduced with specific names.
\noindent \textbf{Definition}. Under the notation given above, we say that :\\
\noindent (i) the \textbf{Bounded Variance Hypothesis (\textit{BVH})} holds if and only if
$$
\sup_{n\geq 1} MV(n)=c<+\infty;
$$
\noindent (ii) the \textbf{Variance Convergence Hypothesis (\textit{VCH})} holds if and only if
$$
MV(n) \rightarrow c \in ]0,+\infty[;
$$
\noindent (iii) the \textbf{Uniformly Asymptotic Negligibility (\textit{UAN})} holds if and only if, for any $\varepsilon>0$,
$$
U(n,\varepsilon) \rightarrow 0.
$$
\noindent We express the \textit{F2L} theorem as follows: If \\
\noindent (a) the \textit{(UAN)} condition holds;\\
\noindent (b) the \textit{(BVH)} holds, then
$$
S_n \rightsquigarrow \mathcal{N}(0,1) \ \ \text{\textit{if \ and \ only \ if}} \ \ L_n(\varepsilon)\rightarrow 0, \ \text{\textit{for any}} \ \varepsilon>0.
$$
\noindent In this particular case, the characteristic function of the weak law $\mathcal{N}(0,1)$
$$
\psi_{\infty}(u)= \exp(-u^2/2)
$$
\noindent is such that, for any $p \in \mathbb{N}$, $p>0$, $\psi_{\infty}^{1/p}$ defined by
$$
\psi_{\infty}^{1/p}(u)= \exp(-p^{-1}u^2/2)
$$
\noindent is still a characteristic function, actually of a $\mathcal{N}(0, \ p^{-1})$ law. Let us denote by $\mathcal{C}_f$ the class of all characteristic functions
$f : \mathbb{R}\rightarrow \mathbb{C}$ on $\mathbb{R}$.\\
\noindent Now we may set our general task to be done: (\textit{Task}). Given the \textit{UAN} and the \textit{BVH} conditions, what is the class of all possible limits
$Z_{\infty}$ of characteristic function $\psi_{\infty}$. By the particular case of \textit{F2L} theorem, we may think that the searched class can be
$$
\mathcal{C}_{fid}=\{\psi \in \mathcal{C}_f: \ \ (\forall p>1), \ \psi^{1/p} \in \mathcal{C}_f\}.
$$
\noindent We define $\mathcal{C}_{fid}$ as the class of infinitely divisible characteristic functions. At least, in the current version of the central limit theorem, the Gaussian weak limit is in $\mathcal{C}_{fid}$.\\
\noindent We are going to see that the suggestion in the description of the \textit{task} is effectively the global solution.\\
\noindent In the sequel, we will devote Section \ref{03_id_sec_02} on infinitely divisible (or decomposable) laws. In Section \ref{03_stable_GEN_03}, we finish the
\textit{task} we have given to ourselves under the \textit{UAN} Condition and the \textit{BVH}. Finally in Section \ref{03_Gauss-Poisson}, we rediscover the characterization of the \textit{CLT} to a Gaussian law and that of the \textit{CLT} to a Poisson law. In the next element of the series, we proceed to a general theory with non-necessarily square-integrable random variables.
\section{Class of infinitely divisible (or decomposable) laws on $\mathbb{R}$} \label{03_id_sec_02}
\subsection{Definitions and examples} $ $\\
\noindent The basic definition is the following.
\begin{definition} \label{idf_def} A characteristic function $\psi \in \mathcal{C}_{f}$ is infinitely decomposable (\textit{idecomp}), denoted by $\psi \in \mathcal{C}_{fid}$ if and only if for all positive integer $p$, $\psi^{1/p}$ is still a characteristic function.
\end{definition}
\noindent Let us explain the notion of \textit{idecomp} in terms of random variables. Suppose that $\psi \in \mathcal{C}_{f}$, $p\geq 1$ and $\psi_p=\psi^{1/p}$. Suppose that $\psi_p$ is the characteristic function (\textit{ch.f}) of a probability measure $\mathbb{P}_{p}$. By the Kolmogorov theorem, it is possible to construct a probability space $(\Omega_p, \mathcal{A}_p,\mathbb{P}_p)$ holding independent real-valued random variables $Z_{p}$, $Z_{1,p}$, $\cdots$, $Z_{p,p}$ having all the \textit{cha.f} $\psi_p$, i.e,
$$
\psi_p(u)= \mathbb{E}_{\mathbb{P}_{p}} \exp(iuZ_{p}) = \int \exp(iux) \ d\mathbb{P}_{p}.
$$
\noindent It is clear that $\psi_p^p$ is the \textit{cha.f} of $S_p=Z_{1,p}+\cdots+Z_{p,p}$. As well, $\psi$ is the \textit{cha.f} of a probability measure $\mathbb{P}_{\psi}$ on $\mathbb{R}$ and let us denote by $Z_{\psi}$ a random variable with $\mathbb{P}_{\psi}$ as probability law.\\
\noindent We easily see that we may extend the definition as follows. In the definition below, we use the notion of \textit{idecomp} probability law at the place of \textit{idecomp characteristic function} or \textit{idecom random variable}.
\begin{definition} \label{idf_defExt} Let $Z$ be a real-valued random variable with probability law $\mathbb{P}_Z$ and \textit{ch.f} $\psi_Z$. $\mathbb{P}_Z$ is \textit{idecom} (equivalently $\psi_Z$ is idecomp or $Z$ is idecomp) if and only if one of the following assertions holds:\\
\noindent (i) For all $p \in \mathbb{N}\setminus \{0\}$, $\psi_Z^{1/p}$ is a \textit{cha.f}.\\
\noindent (ii) For all $p \in \mathbb{N}\setminus \{0\}$, there exists a \textit{cha.f} $\psi_p$ such that $\psi_Z=\psi_{p}^{p}$.\\
\noindent (iii) For all $p \in \mathbb{N}\setminus \{0\}$, there exists a probability $\mathbb{P}_{p}$ on $\mathbb{R}$ such that
$$
\mathbb{P}_Z=\mathbb{P}^{\otimes p}_{p},
$$
\noindent that is, $\mathbb{P}_{Z}$ is the convolution product of $\mathbb{P}_{p}$ by itself $p$ times.\\
\noindent (iv) For all $p \in \mathbb{N}\setminus \{0\}$, there exists a sequence $Z_{1,p}$, $\cdots$, $Z_{p,p}$ of independent and identically distributed real-valued random variables such that
$$
Z =_{d} Z_{1,p}+\cdots+Z_{p,p}.
$$
\end{definition}
\noindent \textbf{Examples}. Let us give some quick examples.\\
\noindent \textbf{Example 1}. (Degenerate random variable). Let $Z=a$, \textit{p.s} of \textit{cha.f}
$$
\psi_Z(t)=e^{iat}, \ t \in \mathbb{R}.
$$
\noindent For $p\geq 1$, $\psi_Z(t)^{1/p}=e^{i(a/p)t}$, which is the \textit{cha.f} of the degenerate \textit{r.v} $Z_{p}=a/p$.\\
\noindent \textbf{Example 2}. (Gaussian random variables). Let $Z \sim \mathcal{N}(m,\sigma^2)$, $m \in \mathbb{R}$, $\sigma \in \mathbb{R}_{+}\setminus \{0\}$ with \textit{cha.f}
$$
\psi_Z(t)=\exp (imt - \sigma^{2}t^2/2), \ t \in \mathbb{R}.
$$
\noindent For $p\geq 1$, we have
$$
\psi_Z(t)^{1/p}=\exp (i(m/p)t - (\sigma/\sqrt{p})^2t^2/2), \ t \in \mathbb{R},
$$
\noindent which is the \textit{cha.f} of a $\mathcal{N}(m/p,\sigma^2/p)$ \textit{r.v}.\\
\noindent \textbf{Example 3}. (Translated Poisson random variables). Let $Z \sim \mathcal{P}(a,\lambda)\equiv a + \mathcal{P}(\lambda)$, $a \in \mathbb{R}$, $\lambda \in \mathbb{R}_{+}\setminus \{0\}$ with \textit{cha.f}
$$
\psi_Z(t)=\exp \left(iat + \lambda(e^{it}-1) \right).
$$
\noindent For $p\geq 1$, we have
$$
\psi_Z(t)^{1/p}=\exp \left(i(a/p)t + (\lambda/p)(e^{it}-1) \right), \ t \in \mathbb{R},
$$
\noindent which is the \textit{cha.f} of a $\mathcal{P}(a/p,\lambda/p)$ \textit{r.v}.\\
\noindent \textbf{Example 4}. (Gamma random variables). Let $Z \sim \gamma(a,b)$, $a>0$, $b>0$, with \textit{cha.f}
$$
\psi_Z(t)=\left(1 - it/b\right)^{-a}, \ t \in \mathbb{R}.
$$
\noindent For $p\geq 1$, we have
$$
\psi_Z(t)^{1/p}=\left(1 - it/b\right)^{-(a/p)}, \ t \in \mathbb{R},
$$
\noindent which is the \textit{cha.f} of a $\gamma(a/p, b)$ \textit{r.v}.\\
\noindent \textbf{Example 5}. (Cauchy random variables). Let $Z \sim Ca(a,b)$, $a\in \mathbb{R}$, $b>0$ with \textit{cha.f}
$$
\psi_Z(t)=\exp(iat -b|t|), \ t \in \mathbb{R}.
$$
\noindent For $p\geq 1$, we have
$$
\psi_Z(t)^{1/p}=\exp(i(a/p)t -(b/p)|t|), \ t \in \mathbb{R},
$$
\noindent which is the \textit{cha.f} of a $Ca(a/p, b/p)$ \textit{r.v}.\\
\noindent Now, let us focus on properties of such laws.\\
\subsection{Properties} $ $\\
\noindent \textbf{Property 1}. \label{property_01} If $\psi_1$ and $\psi_2$ are two \textit{idecomp} \textit{cha.f}, then $\psi=\psi_1 \psi_2$ is an \textit{idecomp} \textit{cha.f}.\\
\noindent \textbf{Proof}. Suppose that $\psi_1$ and $\psi_2$ are two \textit{idecomp} \textit{cha.f} and let $p\geq 1$. Thus $\psi^{1/p}=\psi_1^{1/p} \psi_2^{1/p}$ is the \textit{cha.f} of the convolution product of the probability measures associated to the \textit{cha.f} $\psi_i^{1/p}$ ($i \in \{1,2\}$).\\
\noindent \textbf{Property 2}. \label{property_02} If $\psi$ is an \textit{idecomp} \textit{cha.f}, the conjugate $\overline{\psi}$ is also an \textit{idecomp} \textit{cha.f} and the complex square norm $\|\psi\|^2$ is an \textit{idecomp} \textit{cha.f}.\\
\noindent \textbf{Proof}. Let $\psi$ be the \textit{cha.f} of $X$, i.e., $\psi(t)=\psi_{X}(t)=\mathbb{E}(e^{itX})$, it is clear that
$$
\mathbb{E}(e^{-itX})=\mathbb{E}(\overline{e^{itX}})=\overline{\mathbb{E}(e^{itX})}=\overline{\psi}(t).
$$
\noindent This and $\mathbb{E}(e^{-itX})=\psi_{-X}(t)$ for $t\in \mathbb{R}$ show that $\overline{\psi}$ is a \textit{cha.f}. It is also direct to see that $X$ and $-X$ are \textit{idecomp} or non-\textit{idecomp} at the same time. Finally by Property 1, $\|\psi\|^2=\psi \overline{\psi}$ is \textit{idecomp} if $\psi$ is.\\
\noindent \textbf{Property 3}. \label{property_03} If $\psi$ is an \textbf{idecomp} \textbf{cha.f}, then $\psi^{1/n}$ converges to 1 everywhere, as \ $n\rightarrow +\infty$.\\
\noindent \textbf{Proof}. Suppose that $\psi$ is an \textit{idecomp} \textit{cha.f}. Let us denote, for all $n\geq 1$, $\psi_n=\psi^{1/n}$, that is a \textit{cha.f}. But
$\|\psi\|\leq 1$ and $\|\psi_n\|^2=\|\psi\|^{2/n}$ converges to $g$ with $g=0$ on $\psi=0$ and $g=1$ on $\psi\neq 0$. Let us show that $\psi$ cannot take the null value. Indeed $\psi$ is continuous (at zero in particular) and $\psi(0)=1$. So $\psi>1/2$ on an interval $]-r,r[$, $r>0$ and next $g=1$ on $]-r,r[$. But the function
$h\equiv 1$ is the \textit{cha.f} of the random variable $Z=0$. By Proposition in \cite{billingsley} (see page 388), we get that $g=h$ and then $g=1$ everywhere, so $\|\psi_n\|^2 \rightarrow 1$. This ensures that $\psi$ does not take the null value. Finally, we get rid of the norm by
$$
\psi^{1/n}=\exp\left(\frac{1}{n} \log \psi \right) \rightarrow 1 \ as \ n\rightarrow +\infty.
$$
\noindent \textbf{Property 4}. \label{property_04} Let $(\psi_n)_{n\geq 1}$ be a sequence of \textit{idecomp} \textit{cha.f}'s such that $\psi_n \rightarrow \psi$ and $\psi$ is continuous at zero. Then $\psi$ is an \textit{idecomp cha.f}.\\
\noindent \textbf{Proof}. Let $\mathcal{C}_{fid} \ni \psi_p \rightarrow \psi$ and $\psi$ is continuous at zero. For any fixed $q\geq 1$, $|\psi_p|^{2/q} \rightarrow |\psi|^{2/q}$. Since the $|\psi_p|^{2/q}$ are \textit{cha.f} and $|\psi|^{2/q}$ is continuous at zero, it comes that $|\psi|^{2/q}$ is a \textit{cha.f} for any $q\geq 1$. So $|\psi|^2$ is an \textit{idecomp} \textit{cha.f} and by Property 3, $\psi$ is nowhere zero and next
$$
\psi_p^{1/q}=\exp\left(\frac{1}{q} \log \psi_p \right) \rightarrow \exp\left(\frac{1}{q} \log \psi \right)=\psi^{1/q}
$$
\noindent is a \textit{cha.f} by the Levy continuity theorem.\\
\noindent \textbf{Property 5}. \label{property_05} A \textit{cha.f} $\psi$ is \textit{idecomp} if and only if it is limit of a sequence of products of Poisson type \textit{cha.f}.\\
\noindent \textbf{Proof}. If $\psi$ is a limit of a sequence of products of Poisson type \textit{cha.f}, it is \textit{idecomp} by Property 4, since products of Poisson type \textit{cha.f} are \textit{idecomp} \textit{cha.f}.\\
\noindent Conversely, let us be given an \textit{idecomp} \textit{cha.f} $\psi$. Since $\psi$ is nowhere equal to zero (Property 3), we have
$$
\log \psi = \lim_{p\rightarrow +\infty} p(\psi^{1/p}-1).
$$
\noindent For $p\geq 1$, let us denote by $F_{p}$ the \textit{cdf} associated with the \textit{cha.f} $\psi_p=\psi^{1/p}$. So we have
$$
\Psi_p(t)=p(\psi^{1/p}-1) =\int p\left(e^{itx}-1\right) \ dF_p(x).
$$
\noindent Since the function $x \mapsto p\left(e^{itx}-1\right)$ is bounded on $\mathbb{R}$, it is locally integrable and $\lambda_{F_p}$ is a finite measure, we may apply the Lebesgue dominated convergence theorem and we can conclude that for any fixed $p\geq 1$,
$$
\Psi_p(t)=p(\psi^{1/p}-1) =\lim_{0<a\rightarrow +\infty} \int_{-a}^{a} p\left(e^{itx}-1\right) \ dF_p(x)=:\lim_{0<a\rightarrow +\infty} \Psi_{p,a}.
$$
\noindent By continuity of the integrand, the integral $\Psi_{p,a}$ is a limit of Riemann-Stieltjes sums, which are of the form
$$
\sum_{1\leq j\leq k(p,a)} pb_{j,p} \left( e^{ic_{j,p}u}-1 \right),
$$
\noindent which are sums of logarithms of Poisson type \textit{cha.f}. Hence $\exp(\Psi_{p,a})$ are \textit{cha.f} and next $\exp(\Psi_{p})$ is a \textit{cha.f} as limit of the sequence $\exp(\Psi_{p,a})$.\\
\noindent Finally $\psi$ is limit of \textit{cha.f} of the form $\exp(\Psi_{p})$, which is a sequence of products of Poisson type \textit{cha.f}.\\
\noindent \textbf{Property 6}. \label{pageprop4} A \textit{cha.f} is \textit{idecomp} if and only if it is limit of a sequence of products of \textit{cha.f} of Poisson type laws.\\
\noindent \textbf{Proof}. Let $\psi$ be a \textit{cha.f}. Let $\psi_p$ be a product of \textit{cha.f} of Poisson type laws
$$
\psi_p(t)=\prod_{j=1}^{k(p)} \exp\left(ia_{j,p}t + b_{j,p} \left(e^{ic_{j,p}t}-1\right) \right), \ t \in \mathbb{R},
$$
\noindent where the $a_{j,p}$'s and $c_{j,p}$'s are real numbers and the $b_{j,p}$'s positive numbers. We have, for $p$ fixed and for $q\geq 1$
$$
\psi_p(t)^{1/q}=\exp\left(i \left\{\frac{1}{q}\sum_{j=1}^{k(p)} a_{j,p}\right\} t + \frac{1}{q} \sum_{j=1}^{k(p)} b_{j,p} \left( e^{i c_{j,p} t}-1\right) \right), \ \ t \in \mathbb{R}.
$$
\noindent This is still a product of \textit{cha.f}'s of type Poisson type laws and is a \textit{cha.f}. If $\psi_p \rightarrow \psi$, thus by Property 4, $\psi$ is \textit{idecomp}.\\
\noindent We will need more facts on \textit{cha.f}'s that we will introduce when needed.\\
\noindent We begin by studying the case of bounded variances. First, we deal with three important results that constitute the pillars of the current theory. \\
\subsection{The three pillars of that theory} \label{03_03_ssec_lem_com}
\noindent In this subsection, we assume that both the \textit{UAN} and the \textit{BVH} hold.
\begin{lemma} \label{03_03_lem_01} (Comparison Lemma) The complex function $\log f_{k,n}$ is well-defined and for any $u\in \mathbb{R}$
$$
\sum_{k=1}^{k(n)}\left\{\log f_{k,n}(u) -(f_{k,n}(u)-1)\right\} \rightarrow 0,
$$
\noindent as $n\rightarrow +\infty$.
\end{lemma}
\noindent \textbf{Proof}. Let $u\in \mathbb{R}$ fixed and $n\geq 1$. Then for any $k\in \{1,\cdots,k(n)\}$, we have the one order expansion
\begin{equation*}
f_{k,n}(u)=1+ \theta_{k,n} u^2 \sigma_{k,n}^2/2,
\end{equation*}
\noindent with $|\theta_{k,n}|<1$ and $|\circ|$ stands for the norm in $\mathbb{C}$ or the absolute value when applied to real numbers. In all this chapter, numbers of the form $\theta_{\circ}$, possibly written with primes or double primes, are only required to have norms less than one and their values are not important. So, we get
\begin{equation*}
\max_{1\leq k \leq k(n)} |f_{k,n}(u)-1|\leq \frac{u^2 B_n}{2} \rightarrow 0 \ as \ \ n\rightarrow +\infty.
\end{equation*}
\noindent Next for $v_{k,n}=\theta_{k,n} u^2 \sigma_{k,n}^2/2$, we surely have that $\max_{1\leq k \leq k(n)} |v_{k,n}| \leq (u^2 B_n)/2$ goes to zero. We also have for all $u\in \mathbb{R}$,
\begin{eqnarray*}
\log f_{k,n}(u)&=& \log (1 + (f_{k,n}(u)-1))=\log (1 + v_{k,n})=v_{k,n} + \theta_{k,n}^{\prime} v_{k,n}^2\\
&=& (f_{k,n}(u)-1) + \theta_{k,n}^{\prime} v_{k,n}^2,
\end{eqnarray*}
\noindent which leads to, as $n\rightarrow +\infty$,
\begin{eqnarray}
\left| \sum_{k=1}^{k(n)}\left\{\log f_{k,n}(u) -(f_{k,n}(u)-1)\right\} \right| &\leq& \sum_{1}^{k(n)} |\theta_{k,n}^{\prime}| v_{k,n}^2 \label{boundX_01}\\
&\leq& \sum_{k=1}^{k(n)} \frac{u^4}{4} |\theta_{k,n}|^2 \sigma_{k,n}^4 \ \ \ \ (L3)\notag\\
&\leq& \frac{u^4 B_n}{4} \sum_{k=1}^{k(n)} \sigma_{k,n}^2 \ \ \ \ (L4)\notag \\
&\leq& \frac{c u^4 B_n}{4} \rightarrow 0. \notag
\end{eqnarray}
\noindent The proof of Lemma \ref{03_03_lem_01} is over. $\square$\\
\noindent Now, let us use new expressions of the results in Lemma \ref{03_03_lem_01}. Since the variables $X_{k,n}$ are centered, we have
\begin{equation*}
\forall n\geq 1, \ \forall 1\leq k\leq k(n), \ \int X_{k,n} \ d\mathbb{P}=\int x \ dF_{k,n}(x)=0 \ and \ \ \int x^2 \ \ dF_{k,n}(x)=\sigma^{2}_{k,n}.
\end{equation*}
\noindent Let us set, for $n\geq 1$,
\begin{equation*}
\psi_n(u)\equiv \sum_{k=1}^{k(n)}(f_{k,n}(u)-1)=\sum_{k=1}^{k(n)} \int \left(e^{iux}-1\right) \ dF_{k,n}(x), \ u\in \mathbb{R}.
\end{equation*}
\noindent By using the remark that $\mathbb{E}X_{k,n}=0$, i.e. $\int x \ dF_{k,n}(x)=0$, we get
\begin{eqnarray*}
\psi_n(u)&=&\sum_{k=1}^{k(n)} \int \left(e^{iux}-1-iux\right) \ dF_{k,n}(x)\\
&=& \int \left(e^{iux}-1-iux\right) \ \sum_{k=1}^{k(n)} dF_{k,n}(x)\\
&=& \int \frac{1}{x^2} \left(e^{iux}-1-iux\right) \ x^2 \sum_{k=1}^{k(n)} dF_{k,n}(x), \ u \in \mathbb{R}.
\end{eqnarray*}
\noindent But, by putting
$$
dK_n(x)=x^2 \sum_{k=1}^{k(n)} dF_{k,n}(x),
$$
\noindent we get
\begin{equation*}
\psi_n(u)= \int \frac{1}{x^2} \left(e^{iux}-1-iux\right) \ dK_n(x).
\end{equation*}
\noindent Finally, Lemma \ref{03_03_lem_01} can be expressed as \\
\begin{lemma} \label{03_03_lem_02}
\begin{equation*}
\forall u \in \mathbb{R}, \log\left(\prod_{k=1}^{k(n)} f_{k,n}(u)\right) - \psi_n(u)\rightarrow 0, \ as \ n\rightarrow +\infty,
\end{equation*}
\noindent where
$$
dK_n(x)=x^2 \sum_{k=1}^{k(n)} dF_{k,n}(x)
$$
\noindent and
\begin{equation*}
\psi_n(u)= \int \frac{1}{x^2} \left(e^{iux}-1-iux\right) \ dK_n(x).
\end{equation*}
\end{lemma}
\noindent This lemma becomes the second pillar. The third is the following
\begin{lemma} \label{03_03_lem_03}
For any $n\geq 1$, $\exp(\Psi_n)$ is an \textit{idecomp} \textit{cha.f} and is the \textit{cha.f} of a centered random variable of variance
$$
\int \ dK_n(x)=s_n^2.
$$
\end{lemma}
\noindent \textbf{Proof}. Let $n\geq 1$ be fixed. We have
$$
\Psi_n(u)=\int \ g(u,x) \ dK_n(x) \ with \ g(u,x)=\frac{e^{iux}-1-iux}{x^2}, \ x \in \mathbb{R}.
$$
\noindent Clearly $g$ is continuous on $\mathbb{R} \times \mathbb{R}^{\ast}$ (with $\mathbb{R}^{\ast}=\mathbb{R}\setminus \{0\}$) and for $u$ fixed, $g(u,0)$ is the extension of $g(u,x)$ by limit, since an expansion at zero gives
$$
g(u,x)=\frac{1+iux-u^2x^2/2-1-iux+O(x^3)}{x^2} \rightarrow -u^2/2 \ as \ x\rightarrow 0.
$$
\noindent So, for $u$ fixed, $x\mapsto g(u,x)$ is continuous everywhere. Moreover we have
\begin{equation}
\forall u\in \mathbb{R}, \ \forall x\in \mathbb{R}^{\ast}, \ |g(u,x)|\leq \frac{2}{x^2} + \frac{|u|}{|x|} \label{bound_01}
\end{equation}
\noindent and
$$
\int x^{-2} \ dK_n(x)=\sum_{k=1}^{k(n)} \int dF_{k,n}(x)=k(n),
$$
\noindent and, by using $(|x|\leq 1 +x^2)$
\begin{eqnarray*}
\int |x|^{-1} \ dK_n(x)&=&\sum_{k=1}^{k(n)} \int \frac{x^2}{|x|} \ dF_{k,n}(x)\\
&=&\sum_{k=1}^{k(n)} \int |x| \ dF_{k,n}(x)\\
&\leq&\sum_{k=1}^{k(n)} \int (1+x^2)\ dF_{k,n}(x)\\
&=&k(n)+s_n^2.
\end{eqnarray*}
\noindent We conclude that $g(u,x)$ is bounded by $g_0(x)=2x^{-2}+|ux^{-1}|$ which is $K_n$-integrable. So by the dominated convergence theorem, $\Psi_n$ is continuous at zero. Also, as an improper Riemann-Stieltjes integral, for $\varepsilon>0$ fixed, we can find a number $A>0$ such that for $a\geq A$
$$
\left| \Psi_n(u) - \Psi_{n,a}(u)\right|<\varepsilon \ with \ \Psi_{n,a}(u)=\int_{-a}^{a} g(u,x) \ dK_n(x).
$$
\noindent Now, since $\Psi_{n,a}(u)$ is continuous, it is a limit of a sequence of Riemann-Stieltjes sums: there exists a partition of $[-a,a]$
$$
-a=x_{0,p}<\cdots <x_{j-1,p}<x_{j,p}<\cdots<x_{\ell(p),p}=a
$$
\noindent and a sequence of points $c_{j,p} \in (x_{j,p}, x_{j+1,p})$, $0\leq j\leq \ell(p)-1$,
$$
S_p(u)=\sum_{j=0}^{\ell(p)-1} \{K_n(x_{j+1,p})-K_n(x_{j,p})\} g(u,c_{j,p}) \rightarrow \Psi_{n,a}(u),
$$
\noindent as $\max\{x_{j+1,p}-x_{j,p}, \ 1\leq j\leq \ell(p)-1\} \rightarrow 0$ as $p\rightarrow +\infty$. We may choose all the $c_{j,p}$ not null from the interior of
$(x_{j,p}, x_{j+1,p})$ ($x_{j,p}<x_{j+1,p}$). We have
\begin{eqnarray*}
S_p(u)&=&\sum_{j=0}^{\ell(p)-1} \frac{\lambda_{K_n}(]x_{j,p}, x_{j+1,p}])}{c_{j,p}^2} \biggr(e^{ic_{j,p}u}-1-ic_{j,p}u\biggr)\\
&=&\sum_{j=0}^{\ell(p)-1} -i \frac{\lambda_{K_n}(]x_{j,p}, x_{j+1,p}])}{c_{j,p}^2}c_{j,p}u + \frac{\lambda_{K_n}(]x_{j,p}, x_{j+1,p}])}{c_{j,p}^2}
\biggr(e^{ic_{j,p}u}-1\biggr)\\
&=:&\sum_{j=0}^{\ell(p)-1} -i \mu_{j,p} u + \lambda_{j,p} \biggr(e^{ic_{j,p}u}-1\biggr),
\end{eqnarray*}
\noindent with
$$
\lambda_{j,p}=\frac{\lambda_{K_n}(]x_{j,p}, x_{j+1,p}])}{c_{j,p}^2} \ and \ \mu_{j,p}=\frac{\lambda_{K_n}(]x_{j,p}, x_{j+1,p}])}{c_{j,p}^2}c_{j,p}.
$$
\noindent We clearly see that $\exp(S_p)$ is the product of Poisson type \textit{cha.f} converging to $\exp(\Psi_n)$ as $p\rightarrow +\infty$ and $a\rightarrow +\infty$. But we also have that $\exp(\Psi_n)$ is continuous. So by the L\'evy continuity theorem, $\exp(\Psi_n)$ is a \textit{cha.f} and it is \textit{idecomp} by
Property 4 (see page \pageref{pageprop4}).\\
\noindent Let us study the differentiability of $\Psi_n$. We have
$$
\left|\frac{\partial g(u,x)}{\partial u}\right|=\left|\frac{ix(e^{iux}-1)}{x^2}\right|\leq \frac{2}{|x|} \in \mathcal{L}^1({K_n}),
$$
\noindent and hence
$$
\Psi_n^{\prime}(u)=\int \frac{ix(e^{iux}-1)}{x^2} \ dK_n(x) \ and \ \Psi_n^{\prime}(0)=0.
$$
\noindent Also
$$
\left| \frac{\partial^2 g(u,x)}{\partial u^2}\right|=\left|\frac{-x^2e^{iux}}{x^2}\right|=1 \in \mathcal{L}^1(K_n),
$$
\noindent and hence
$$
\Psi_n^{\prime\prime}(u)=- \int e^{iux} \ dK_n(x) \ and \ \Psi_n^{\prime\prime}(0)=-s_n^2.
$$
\noindent Finally, let $Z_n$ be a \textit{r.v.} with \textit{cha.f} $\exp(\Psi_n)$. The first and second derivatives of $\exp(\Psi_n)$ are
$$
\Psi_n^{\prime}(u) \exp(\Psi_n(u)) \ and \ \{\Psi_n^{\prime\prime}(u) \exp(\Psi_n(u)) + \left(\Psi_n^{\prime}(u)\right)^2 \exp(\Psi_n(u))\}
$$
\noindent taking the values
$$
\Psi_n^{\prime}(0) \exp(\Psi_n(0))=0 \ and \ \{\Psi_n^{\prime\prime}(0) \exp(\Psi_n(0)) + \left(\Psi_n^{\prime}(0)\right)^2 \exp(\Psi_n(0))\}=-s_n^2.
$$
\noindent We conclude that $\mathbb{E}Z_n=0$ and $\mathbb{V}ar(Z_n)=s_n^2$. The relation
$$
\Psi_n^{\prime\prime}(u)=- \int e^{iux} \ dK_n(x) \ \ \ (C)
$$
\noindent shows that $\Psi_n^{\prime\prime}(u)$ characterizes $K_n$ and vice-versa. Now, for two functions $\Psi_n$ and $\Phi_n$ such that $\Psi_n^{\prime \prime}=\Phi_n^{\prime \prime}$ with
$\Psi_n(0)=\Phi_n(0)=0$ and $\Psi_n^{\prime}(0)=\Phi_n^{\prime}(0)=0$, we have
$$
\forall u \in \mathbb{R}, \ \ \Psi_n^{\prime}(u)=\Phi_n^{\prime}(u) + d_1,
$$
\noindent and by applying this for $u=0$, we get $d_1=0$. Next, we have
$$
\forall u \in \mathbb{R}, \ \ \Psi_n(u)=\Phi_n(u) + d_2,
$$
\noindent and by applying this for $u=0$, we get $d_2=0$. So $\Psi=\Phi$ and we have the following fact.
\begin{fact} \ \label{charact_01}
$K_n$ characterizes $\Psi_n$ and vice-versa. $\blacksquare$\\
\end{fact}
\section{The weak convergence theorem of summands under the \textit{BVH} and the \textit{UAN} Condition} \label{03_stable_GEN_03}
\subsection{The Central limit theorem for centered, independent and square integrable random variables} \label{clt_subsec_01}$ $\\
\noindent We are going to conclude the discussion above to find solutions of the \textit{CLT} problem under the \textit{BVH} and the \textit{UAN} Condition. We will have two studies from which we draw a final conclusion.\\
\noindent \textbf{Study (A)}. From Lemma \ref{03_03_lem_02} and from the notations above, we have
\begin{equation*}
\forall \ t \in \mathbb{R}, \ f_{S_n}(t)-\exp(\Psi_n(t)) \rightarrow 0 \ as \ n\rightarrow +\infty.
\end{equation*}
\noindent But $\exp(\Psi_n(\circ))$ is an \textit{idecomp} \textit{cha.f} for any $n\geq 1$ and is linked to
\begin{equation}
\Psi_n(u)=\int \ g(u,x) \ d\lambda_{K_n}(x) \ with \ g(u,x)=\frac{e^{iux}-1-iux}{x^2}, \ x \in \mathbb{R}, \label{HB}
\end{equation}
\noindent where $\lambda_{K_n}$ is the Lebesgue-Stieltjes measure associated with the \textit{df} $K_n$. Now, we are using the weak convergence theory of bounded measures on $\mathbb{R}$ as exposed Chapter 6 in \cite{ips-wcia-ang}.\\
\noindent \textbf{Direct part}. Let us suppose that $\lambda_{K_n}$ pre-weakly converges to some \textit{df} $\lambda_{K}$, i.e.,
(for $C(K)$ standing for the set of continuity points of $K$),
$$
\forall x \in C(K), \ K_n(x) \rightarrow K(x) \ as \ n\rightarrow +\infty.
$$
\noindent By Part (i) of Proposition 37 in Chapter 6 in \cite{ips-wcia-ang}, we have
$$
\lambda_K(\mathbb{R}) \leq \liminf_{n\rightarrow +\infty} \lambda_{K_n}(\mathbb{R})\leq c,
$$
\noindent since, for any $n\geq 1$,
$$
\lambda_{K_n}(\mathbb{R})=\sum_{k=1}^{k(n)} \int x^2 dF_{k,n}(x)=\sum_{k=1}^{k(n)} \mathbb{V}ar(X_{k,n})\leq c
$$
\noindent from the \textit{BVH}. Hence the pre-weak limit $\lambda_K$ is a bounded measure. Now we apply the integral Helly-Bray theorem as in Theorem 30 in Chapter 6 in \cite{ips-wcia-ang} to \eqref{HB} (See above). By \eqref{bound_01}, for any fixed real number $u$, the function $g(u,x)$ (in $x$) in \eqref{HB} is continuous and satisfies $g(u,\pm \infty)=0$. So by the cited Helly-Bray integral theorem, we have
$$
\forall u\in \mathbb{R}, \ \Psi_n(u) \rightarrow \Psi_K(u)=\int \ g(u,x) \ d\lambda_{K}(x)=:\int \frac{e^{iux}-1-iux}{x^2} \ d\lambda_{K}(x).
$$
\noindent Now, from the expression of $\Psi_K(u)$ and from \eqref{bound_01}, we see that $\Psi_K(u)$ is a parametrized (in $u$) integral and by the dominated convergence theorem, $\Psi_K(u)$ is continuous and $\Psi_K(0)=0$. Therefore,
$$
\forall u\in \mathbb{R}, \ \exp(\Psi_n(u)) \rightarrow \exp(\Psi_K(u))=:f_{K}(u).
$$
\noindent Since $f_K(\circ)$ is continuous at zero and $f_K(0)=1$, by the L\'evy continuity theorem (See Theorem 11 in Chapter 3 in \cite{ips-wcia-ang}), we conclude that $f_K$ is a \textit{cha.f} and by designating by $\mathcal{K}_K$ the probability law associated to the \textit{cha.f} $f_K$, we have
$$
S_n \rightsquigarrow \mathcal{K}_K.
$$
\noindent By \textit{Property 4} (see page \pageref{property_04} above), $\mathcal{K}_K$ is an \textit{idecomp} probability law, following the fact that each $\exp(\Psi_n(\circ))$, $n\geq 1$, is an \textit{idecomp} \textit{cha.f}. \\
\noindent \textbf{Indirect Part}. Suppose that for some \textit{df} $K_0$,
$$
S_n \rightsquigarrow \mathcal{K}_{K_0},
$$
\noindent where $\mathcal{K}_{K_0}$ is the probability law associated to $K_0$. We are going to use a Prohorov's type argument. By the asymptotic
tightness theorem (See Theorem 29 in \cite{ips-wcia-ang}), any sub-sequence $(\lambda_{K_{n_j}})_{j\geq 1}$ of $(\lambda_{K_{n}})_{n\geq 1}$ contains a sub-sequence
$(\lambda_{K_{n_{j_{\ell}}}})_{\ell\geq 1}$ pre-weakly converging to some $\lambda_{K^\ast}$. By the direct part,
$$
S_{n_{j_\ell}} \rightsquigarrow \mathcal{K}_{K^\ast},
$$
\noindent where $\mathcal{K}_{K^\ast}$ is associated to a \textit{cha.f} $f_{K^{\ast}}=\exp(\Psi_{K^\ast})$, with
$$
\forall u\in \mathbb{R}, \ \exp(\Psi_{n_{j_\ell}}(u)) \rightarrow \exp(\Psi_{K^\ast}(u))=:f_{K}(u)
$$
\noindent and
$$
\forall u \in \mathbb{R}, \ \Psi_{K^\ast}(u)=\int \frac{e^{iux}-1-iux}{x^2} \ d\lambda_{K^\ast}(x).
$$
\noindent By uniqueness of the weak limit, $\mathcal{K}_{K^\ast}=_d\mathcal{K}_{K_0}$. Then each sub-sequence of $(K_n)_{n\geq 1}$ contains a sub-sequence converging to $K_0$. We conclude that by Prohorov theorem
$$
K_n \ \rightsquigarrow_{pre} K_0.
$$
\noindent In both parts, $\mathbb{V}ar(K)<+\infty$ and by Fact \ref{charact_01} applied to $K$, we may conclude that $K$ and $\Psi_K$ characterize each other.\\
\noindent We conclude as follows.
\begin{theorem} \label{CLTF_01} Under the \textit{BVH} and the \textit{UAN} Condition for summands of independent, centered and square integrable real valued random variables, we have:\\
\noindent (a) If
$$
S_n \rightsquigarrow \mathcal{K},
$$
\noindent where $\mathcal{K}$ is a probability law, then $\mathcal{K}$ is \textit{idecomp}.\\
\noindent (b) For any \textit{idecomp} probability law $\mathcal{K}$ of a centered and square integrable random variable $Z$, for which for any $n\geq 1$, there exists
$X_{1,n}$, $\cdots$, $X_{n,n}$ independent and of same law (they are necessarily centered and square integrable) such that
$$
Z=X_{1,n}+\cdots+X_{n,n}=:S_n.
$$
\noindent Then clearly, $\mathcal{K}$ is a weak limit of summands of independent, centered and square integrable real valued random variables under the \textit{BVH} and the \textit{UAN} Condition.\\
\noindent (c) We have, under the \textit{BVH} and the \textit{UAN} Condition,
$$
S_n \rightsquigarrow \mathcal{K}_K,
$$
\noindent for some \textit{df} $K$, if and only if (using the notation stated above)
$$
K_n \rightsquigarrow_{pre} K.
$$
\noindent Moreover
$$
\Psi_K(u)=\int \frac{e^{iux}-1-iux}{x^2} \ dK
$$
\noindent and $K$ characterize each other, and $\exp(\Psi_K(\circ))$ is the characteristic function of $\mathcal{K}_K$.
\end{theorem}
\noindent \textbf{Study (B)}. Here, we suppose that the \textit{VCH} and the \textit{UAN} Condition hold. We begin by remarking that the Comparison Lemma \ref{03_03_lem_01} holds since formula \eqref{boundX_01} (page \pageref{boundX_01}) holds with the use of the \textit{VCH} in Line (L4).\\
\noindent \textbf{Direct part}. Let $K_n \rightsquigarrow K$. In particular $K_n \ \rightsquigarrow_{pre} K$. By the direct part of \textit{Study (A)}, we still have
\begin{equation} \label{w00}
\lambda_K(\mathbb{R}) \leq \liminf_{n\rightarrow +\infty} \lambda_{K_n}(\mathbb{R})=c,
\end{equation}
\noindent and
$$
S_n \rightsquigarrow \mathcal{K}_K.
$$
\noindent Actually, by weak convergence, we exactly have
$$
\lambda_K(\mathbb{R})=\lim_{n\rightarrow +\infty} \lambda_{K_n}(\mathbb{R})=c,
$$
\noindent but this does not play any role for the direct part.\\
\noindent \textbf{Indirect part}. Let
$$
S_n \rightsquigarrow \mathcal{K}_{K},
$$
\noindent for some \textit{df} $K$. By the indirect part of \textit{Study (A)}, we still have
$$
K_n \ \rightsquigarrow_{pre} K.
$$
\noindent Now, if $Z\sim \mathcal{K}_{K}$ is such that $\mathbb{V}ar(Z)=c$, we have that $\lambda_K(\mathbb{R})=\mathbb{V}ar(Z)$ and then
$$
\lim_{n\rightarrow +\infty} \lambda_{K_n}(\mathbb{R})=\lambda_{K}(\mathbb{R}) \ \ and \ \ K_n \ \rightsquigarrow_{pre} K.
$$
\noindent By Proposition 37 in Chapter 6 in \cite{ips-wcia-ang}, we conclude that $K_n \rightsquigarrow K$ \ as $n\rightarrow+\infty$. \\
\noindent We conclude as follows.\\
\begin{theorem} \label{CLTF_02} Under the \textit{VCH} and the \textit{UAN} Condition for summands of independent, centered and square integrable real valued random variables, we have the following characterization. If $\mathcal{K}_K$ is associated with a random variable $Z$ such that $\mathbb{V}ar(Z)=c$, where $c$ is the limit in the \textit{VCH}, then we have
$$
S_n \rightsquigarrow \mathcal{K}_K,
$$
\noindent if and only if
$$
K_n \rightsquigarrow K.
$$
\end{theorem}
\subsection{The Central limit theorem for non-centered, independent and square integrable random variables} \label{clt_subsec_02}$ $\\
\noindent Let us re-conduct all the notations in Subsection \ref{clt_subsec_01}. Let us denote
$$
\biggl(\forall n\geq 1, \ \forall 1\leq k\leq k(n), \ \mathbb{E}X_{k,n}=a_{k,n}\biggr) \ and \ \biggl(\forall n\geq 1, \ \sum_{k=1}^{k(n)} a_{k,n}=a_n\biggr)
$$
\noindent Let us write
$$
\forall n\geq 1, \ \ S_n=(S_n-a_n) + a_n=\sum_{k=1}^{k(n)}(X_{k,n}-a_{k,n}) + a_n =:S_n^{\ast} + a_n.
$$
\noindent Let us denote by $F_{k,n}^{\ast}$ the \textit{cdf} of $(X_{k,n}-a_{k,n})$ for $n\geq 1$ and $1\leq k \leq k(n)$,
$$
\forall u\in \mathbb{R}, \Psi_{K^\ast}(u)=\int \frac{e^{iux}-1-iux}{x^2} \ dK^{\ast}(x)
$$
\noindent and
$$
\forall n\geq 1, \ \forall u\in \mathbb{R}, \ \ \Psi_{K_n^{\ast}}(u)=\int \frac{e^{iux}-1-iux}{x^2} \ dK_n^{\ast}(x)
$$
\noindent with
$$
\forall n\geq 1, \ \forall x\in \mathbb{R}, \ \ K^{\ast}_n(x)=\int_{-\infty}^{x} y^2 \sum_{k=1}^{k(n)} \ dF_{k,n}^{\ast}(y).
$$
\noindent \textbf{Direct part}. If $K^{\ast}_n \rightsquigarrow_{pre} K^\ast$ and $a_n \rightarrow a$, then
$$
S_n \rightsquigarrow \mathcal{K}_{K^\ast} + a=\mathcal{K}_{0}.
$$
\noindent Moreover, the \textit{cha.f} of $\mathcal{K}_{K^\ast}$ is $\exp(\Psi_{K^\ast}(\circ))$ and next the \textit{cha.f} of $\mathcal{K}_{0}$ is
$$
\forall u\in \mathbb{R}, \ \Psi(u)=\exp\biggl(iau + \Psi_{K^\ast}(u)\biggr).
$$
\noindent \textbf{Indirect part}. Suppose that
$$
S_n \rightsquigarrow \mathcal{K}_{0},
$$
\noindent where $\mathcal{K}_{0}$ is associated with an \textit{a.s} finite random variable $Z$. Then $b=\limsup_{n\rightarrow+\infty} a_n$ is finite. Otherwise, consider a sub-sequence
$a_{n_\ell}\rightarrow +\infty$ as $\ell\rightarrow +\infty$. So $S_{n_\ell}=S_{n_\ell}^{\ast} + a_{n_\ell}$ necessarily weakly converges to $Z$, where by Theorem \ref{CLTF_01}, $S_{n_\ell}^{\ast} \rightsquigarrow Z^{\ast}$, of law $\mathcal{K}_{K^{\ast}}$ and $Z^{\ast}$ finite \textit{a.s}, and hence $Z$ would be \textit{a.s} infinite, a contradiction. Hence $b=\limsup_{n\rightarrow+\infty} a_n$ is finite. Now, each sub-sequence of $(a_{n})_{n\geq 1}$ contains a sub-sequence $(a_{n^\prime})_{n^\prime\geq 1}$ converging to some finite $a$. By the argument given above, $S_{n^\prime}^\ast$ weakly converges to some $\mathcal{K}_{K^\ast}$. By Prohorov's criterion,
$S_{n}^{\ast}$ weakly converges to $\mathcal{K}_{K^\ast}$ and $S_{n}$ weakly converges to $\mathcal{K}_{K^\ast}+a=_d\mathcal{K}_{0}$. The latter equality shows that all converging subsequences of $(a_{n})_{n\geq 1}$ converge to the same number $a$. Finally
$$
S_n \rightsquigarrow \mathcal{K}_{K^\ast}+a,
$$
\noindent with $a_n\rightarrow a$. Let us summarize the discussions as follows.\\
\begin{theorem} \label{CLTF_03} Under the \textit{BVH} and the \textit{UAN} Condition for summands of independent and square integrable real valued random variables, we have the following characterization. Let us denote
$$
\biggl(\forall n\geq 1, \ \forall 1\leq k\leq k(n), \ \mathbb{E}X_{k,n}=a_{k,n}\biggr) \ and \ \biggl(\forall n\geq 1, \ \sum_{k=1}^{k(n)} a_{k,n}=a_n\biggr);
$$
$$
\forall n\geq 1, \ \ S_n=(S_n-a_n) + a_n=\sum_{k=1}^{k(n)}(X_{k,n}-a_{k,n}) + a_n =:S_n^{\ast} + a_n;
$$
\noindent $F_{k,n}^{\ast}$ the \textit{cdf} of $(X_{k,n}-a_{k,n})$ for $n\geq 1$ and $1\leq k \leq k(n)$;
$$
\forall u\in \mathbb{R}, \Psi_{K^\ast}(u)=\int \frac{e^{iux}-1-iux}{x^2} \ dK^{\ast}(x)
$$
\noindent and, finally,
$$
\forall n\geq 1, \ \forall u\in \mathbb{R}, \ \ \Psi_{K^\ast_n}(u)=\int \frac{e^{iux}-1-iux}{x^2} \ dK_n^{\ast}(x)
$$
\noindent with
$$
\forall n\geq 1, \ \forall x\in \mathbb{R}, \ \ K^{\ast}_n(x)=\int_{-\infty}^{x} y^2 \sum_{k=1}^{k(n)} \ dF_{k,n}^{\ast}(y).
$$
\noindent We have the following facts.\\
\noindent (i) If $K_n^\ast \rightsquigarrow_{pre} K^{\ast}$ and $a_n\rightarrow a$, then
$$
S_n \rightsquigarrow \mathcal{K}_{K^{\ast}} + a.
$$
\noindent (ii) If
$$
S_n \rightsquigarrow \mathcal{K}_{0}
$$
\noindent where $\mathcal{K}_{0}$ is associated to an \textit{a.s} finite random variable $Z$, then the sequence $(a_n)_{n\geq 1}$ converges to a real number $a$ and
$$
\mathcal{K}_0 =_d \mathcal{K}_{K^{\ast}} + a
$$
\noindent and
$$
K_n^\ast \rightsquigarrow_{pre} K^{\ast}.
$$
\noindent Moreover if the \textit{VCH} holds at the place of the \textit{BVH} and the variance of $\mathcal{K}_{K^\ast}$ is equal to $c$, we have
$$
K_n^\ast \rightsquigarrow K^{\ast}
$$
\noindent in both parts (i) and (ii).
\end{theorem}
\section{Characterizations of two important examples} \label{03_Gauss-Poisson}
\noindent The Gaussian and Poisson limit laws are particularly important. In stochastic analysis, these laws allow to represent some stochastic process into a discontinuous process (Poisson component) and a continuous process (Gaussian part).
\subsection{Gaussian limit} \label{GC}$ $\\
\noindent Let us suppose that the weak limit of the summands $(S_n)_{n\geq 1}$ is the standard Gaussian law
$$
\exp(\Psi_K(u))=\exp(-u^2/2), \ u\in \mathbb{R},
$$
\noindent i.e.
\begin{equation}
\Psi_K(u)=\int \frac{e^{iux}-1-iux}{x^2} \ dK(x)=-\frac{u^2}{2}, \ u\in \mathbb{R}.
\end{equation}
\noindent But, for $\lambda_{K}=\delta_{0}$, that is, $K=1_{[0,+\infty[}$, we have
$$
\int \frac{e^{iux}-1-iux}{x^2} \ d\delta_0(x)=\left[\frac{e^{iux}-1-iux}{x^2}\right]_{x=0}=-\frac{u^2}{2}.
$$
\noindent We are going to rediscover the L\'evy-Lynderberg-Feller (L2F) theorem as stated in \cite{ips-mfpt-ang} (see Theorem 20 therein).
\begin{theorem} \label{L2F-Gauss}
Let $S_n=X_{1,n}+\cdots+X_{k(n),n}$ summands of centered and square integrable random variables as denoted above such that
\begin{equation}
\sum_{1\leq k \leq k(n)} \mathbb{V}ar(X_{k,n})= \sum_{1\leq k \leq k(n)} \sigma^2_{k,n}=1. \label{GC0}
\end{equation}
\noindent For $\varepsilon>0$ and $n\geq 1$, let us denote the Lynderberg function as
\begin{equation}
g_n(\varepsilon)=\sum_{k=1}^{k(n)}\int_{(|x|\geq \varepsilon)} x^2 \ dF_{k,n}(x). \label{lyndG}
\end{equation}
\noindent We have the following characterization:
\begin{equation}
S_n \rightsquigarrow \mathcal{N}(0,1) \ as \ n\rightarrow+\infty \label{GC1a}
\end{equation}
\noindent and
\begin{equation}
\max_{1\leq k\leq k(n)} \mathbb{V}ar(X_{k,n})\rightarrow 0 \ as \ n\rightarrow+\infty \label{GC1b}
\end{equation}
\noindent if and only if, for any $\varepsilon>0$, the following Lynderberg criterion holds:
\begin{equation}
g_n(\varepsilon)\rightarrow 0 \ as \ n\rightarrow+\infty \label{GC2}.
\end{equation}
\end{theorem}
\noindent \textbf{Proof}. Let us begin by linking the Lynderberg function in \eqref{lyndG} with \eqref{GC1b}. For each $1\leq k\leq k(n)$, we have
\begin{eqnarray*}
\mathbb{V}ar(X_{k,n})&=& \int x^2 dF_{k,n}(x) \notag\\
&=& \int_{(|x|\leq \varepsilon)} x^2 dF_{k,n}(x) + \int_{(|x|> \varepsilon)} x^2 dF_{k,n}(x) \ \ (L3)\notag \\
&\leq& \varepsilon^2 + g_n(\varepsilon),
\end{eqnarray*}
\noindent where, in Line (L3), the first integral is bounded by $\varepsilon^2$ (since each $F_{k,n}$ is a probability \textit{df}) and the second integral is bounded by the full sum $g_n(\varepsilon)$. Taking the maximum over $1\leq k\leq k(n)$ yields
\begin{equation}
\max_{1\leq k\leq k(n)} \mathbb{V}ar(X_{k,n}) \leq \varepsilon^2 + g_n(\varepsilon). \label{GC3}
\end{equation}
\noindent By letting $n\rightarrow +\infty$ first and next, by letting
$\varepsilon \rightarrow 0$, we get that the Lynderberg criterion implies \eqref{GC1b}. We have: \\
\begin{fact} \label{fact_GC1}
The Lynderberg criterion \eqref{GC2} implies \eqref{GC1b}, which in turn implies the \textit{UAN} hypothesis.
\end{fact}
\noindent Now, let us prove both implications.\\
\noindent \textbf{Direct implication}. Suppose that \eqref{GC1a} and \eqref{GC1b} hold. So the \textit{BVH} (by \eqref{GC0}) and the \textit{UAN} Condition hold
(by Fact \ref{fact_GC1}). Actually the \textit{BVH} \eqref{GC0} is also a \textit{VCH} Condition. So we may apply both Theorems \ref{CLTF_01} and \ref{CLTF_02}. By applying Theorem \ref{CLTF_01}, we have
$$
\forall x\in C(K), \ K_n(x)=\sum_{1\leq k\leq k(n)} \int_{-\infty}^{x} y^2 \ dF_{k,n}(y) \rightarrow 1_{(x\geq 0)}
$$
\noindent since $1_{(x\geq 0)}$ is the \textit{df} associated with $\delta_{0}$. Any $x>0$ is in $C(K)$ and then
\begin{eqnarray}
&&K_n(x)=\sum_{1\leq k\leq k(n)} \int_{-\infty}^{x} y^2 \ dF_{k,n}(y) \rightarrow 1 \ \notag\\
&& \Leftrightarrow \sum_{1\leq k\leq k(n)} \int y^2 \ dF_{k,n}(y)-\sum_{1\leq k\leq k(n)} \int_{(y>x)} y^2 \ dF_{k,n}(y) \rightarrow 1 \notag\\
&& \Leftrightarrow 1- \sum_{1\leq k\leq k(n)} \int_{(y>x)} y^2 \ dF_{k,n}(y) \rightarrow 1. \notag
\end{eqnarray}
\noindent Hence
$$
\forall x>0, \ g_{n,1}(x):=\sum_{1\leq k\leq k(n)} \int_{(y>x)} y^2 \ dF_{k,n}(y) \rightarrow 0.
$$
\noindent Next, any $x<0$ is in $C(K)$ and then
\begin{eqnarray}
K_n(x)&=&\sum_{1\leq k\leq k(n)} \int_{-\infty}^{x} y^2 \ dF_{k,n}(y) \rightarrow 0 \ \notag\\
&\Leftrightarrow& \sum_{1\leq k\leq k(n)} x^2 \lambda_{F_{k,n}}(\{x\}) + \sum_{1\leq k\leq k(n)} \int_{(y<x)} y^2 \ dF_{k,n}(y) \rightarrow 0 \notag \\
&\Leftrightarrow& \lambda_{K_n}(\{x\})+ \sum_{1\leq k\leq k(n)} \int_{(y<x)} y^2 \ dF_{k,n}(y) \rightarrow 0. \label{discountX}
\end{eqnarray}
\noindent But, by Portmanteau Theorem (see Criterion (vi) of Theorem 2, page 47 in \cite{ips-wcia-ang}),
$\lambda_{K_{n}}(\{x\})\rightarrow \lambda_{K}(\{x\})$ since $\partial \{x\}=\{x\}$ and hence $\lambda_{K}(\{x\})=K(x)-K(x-0)=0$ since $x \in C(K)$.\\
\noindent Hence
$$
\forall x<0, \ g_{n,2}(x):=\sum_{1\leq k\leq k(n)} \int_{(y<x)} y^2 \ dF_{k,n}(y) \rightarrow 0.
$$
\noindent By putting together the two last results, for any $\varepsilon>0$
\begin{eqnarray*}
g_n(\varepsilon)&=&\sum_{1\leq k\leq k(n)} \int_{(|y|>\varepsilon)} y^2 \ dF_{k,n}(y)\\
&=&\sum_{1\leq k\leq k(n)} \int_{(y>\varepsilon)} y^2 \ dF_{k,n}(y)+\sum_{1\leq k\leq k(n)} \int_{(y<-\varepsilon)} y^2 \ dF_{k,n}(y)\\
&=&g_{n,1}(\varepsilon) + g_{n,2}(-\varepsilon)\\
&\rightarrow& 0 \ as \ n \rightarrow +\infty. \ \ \square
\end{eqnarray*}
\noindent \textbf{Proof of the indirect implication}. Let \eqref{GC2} hold. So, by Fact \ref{fact_GC1}, \eqref{GC1b} holds and then the \textit{UAN} is satisfied and the \textit{BVH} is already satisfied as a hypothesis of the theorem. Still by Theorem \ref{CLTF_01}, \eqref{GC1a} holds whenever
\begin{equation}
\forall x\in C(K), \ K_n(x)=\sum_{1\leq k\leq k(n)} \int_{-\infty}^{x} y^2 \ dF_{k,n}(y) \rightarrow 1_{(x\geq 0)}. \label{GC4}
\end{equation}
\noindent Let us prove \eqref{GC4}, by exploiting \eqref{GC2}. We have $C(K)=(x<0)+(x>0)$. For $x>0$, we have
\begin{eqnarray*}
K_n(x)&=&\sum_{1\leq k\leq k(n)} \int_{(y\leq x)} y^2 \ dF_{k,n}(y) \\
&=&1 - \sum_{1\leq k\leq k(n)} \int_{(y>x)} y^2 \ dF_{k,n}(y) \\
&=&1 - \sum_{1\leq k\leq k(n)} \int_{(|y|>x)} y^2 \ dF_{k,n}(y) \\
&=&1 - g_n(x) \\
&\rightarrow& 1 \ as \ n \rightarrow +\infty. \ \ \square
\end{eqnarray*}
\noindent For $x<0$, we have
\begin{eqnarray}
K_n(x)&=&\sum_{1\leq k\leq k(n)} \int_{(y\leq x)} y^2 \ dF_{k,n}(y) \notag\\
&=&\sum_{1\leq k\leq k(n)} x^2 \lambda_{F_{k,n}}(\{x\})+\sum_{1\leq k\leq k(n)} \int_{(y<x)} y^2 \ dF_{k,n}(y) \notag\\
&=&\sum_{1\leq k\leq k(n)} x^2 \lambda_{F_{k,n}}(\{x\})+\sum_{1\leq k\leq k(n)} \int_{(-y>-x)} y^2 \ dF_{k,n}(y) \notag\\
&=&\sum_{1\leq k\leq k(n)} x^2 \lambda_{F_{k,n}}(\{x\})+\sum_{1\leq k\leq k(n)} \int_{(|y|> -x)} y^2 \ dF_{k,n}(y) \notag\\
&=& \lambda_{K_n}(\{x\})+ \sum_{1\leq k\leq k(n)} \int_{(|y|> -x)} y^2 \ dF_{k,n}(y) \notag\\
&=& \lambda_{K_n}(\{x\}) + g_{n}(-x). \notag
\end{eqnarray}
\noindent Now, by \eqref{GC2}, $g_{n}(-x)\rightarrow 0$ and by using a similar technique as in line \eqref{discountX}, $\lambda_{K_n}(\{x\})\rightarrow 0$. \\
\noindent So \eqref{GC4} holds and we have proved \eqref{GC1a} and \eqref{GC1b}. $\blacksquare$
\subsection{Poisson limit} \label{PC}$ $\\
\noindent The searched limit here is a translated Poisson law $\mathcal{P}(b,\lambda)=b+\mathbb{P}(\lambda)$, with $b\in \mathbb{R}$ and $\lambda>0$ of characteristic function
$$
\exp(\Psi_K(u))=\exp(ibu+\lambda (e^{iu}-1))=\exp(i(b+\lambda)u + \lambda (e^{iu}-1-iu)), \ u\in \mathbb{R},
$$
\noindent with
$$
\Psi_K(u)=i(b+\lambda)u + \Psi_{K^\ast}(u), \ \Psi_{K^\ast}(u)=\lambda (e^{iu}-1-iu),
$$
\noindent where $\exp(\Psi_{K^\ast}(\circ))$ is the \textit{cha.f} of the centered Poisson law $\mathcal{P}^\ast(\lambda)=(\mathcal{P}(\lambda)-\lambda)$.\\
\noindent Let us state the characterization theorem.\\
\begin{theorem} \label{L2F-Poiss}
Let $S_n=X_{1,n}+\cdots+X_{k(n),n}$ be summands of independent and square integrable random variables. As above, let $a_{k,n}=\mathbb{E}X_{k,n}$ and let
$F^{\ast}_{k,n}$ be the \textit{cdf} of $X_{k,n}-a_{k,n}$. Let us introduce Lynderberg-type functions, for $\varepsilon>0$ and $n\geq 1$, as
\begin{equation}
g_{n,pois}(\varepsilon)=\sum_{k=1}^{k(n)}\int_{|x-1|> \varepsilon} x^2 \ dF^{\ast}_{k,n}(x). \label{lyndP}
\end{equation}
\noindent Suppose that, as $n\rightarrow +\infty$ with $\biggl(\text{\textit{MVP(n)}}=\sum_{1\leq k \leq k(n)} \sigma^2_{k,n}\biggr)$,
\begin{equation}
B_n=\max_{1\leq k \leq k(n)} \sigma^2_{k,n} \rightarrow 0 \ and \ \text{\textit{MVP(n)}} \rightarrow \lambda. \label{HPOIS}
\end{equation}
\noindent Let $b\in \mathbb{R}$. We have the following characterization.\\
\begin{equation}
S_n \rightsquigarrow \mathcal{P}(b,\lambda) \ as \ n\rightarrow+\infty \label{PC1}
\end{equation}
\noindent if and only if,
\begin{equation}
\sum_{k=1}^{k(n)} \mathbb{E}(X_{k,n})=a_n \rightarrow a=b+\lambda \ as \ n\rightarrow+\infty \label{PC2a}
\end{equation}
\noindent and for any $\varepsilon>0$, the following Lynderberg Poisson-type criterion holds:
\begin{equation}
g_{n,pois}(\varepsilon)\rightarrow 0 \ as \ n\rightarrow+\infty. \label{PC2b}
\end{equation}
\end{theorem}
\noindent \textbf{Proof}. Based on \eqref{HPOIS}, the \textit{VCH} and the \textit{UAN} condition hold. We can apply Theorem \ref{CLTF_03}. We study the limit of $\Psi_{K_n^\ast}(u)$, for any $u \in \mathbb{R}$ to
$$
\Psi_{K^\ast}(u)=\int \frac{e^{iux}-1-iux}{x^2} \ dK^\ast(x)=\lambda (e^{iu}-1-iu).
$$
\noindent Let $\lambda_{K^\ast}=\lambda\delta_{1}$, i.e., $K^\ast(x)=\lambda 1_{(x\geq 1)}$. Thus
$$
\int \frac{e^{iux}-1-iux}{x^2} \ dK^\ast(x)=\lambda\left[\frac{e^{iux}-1-iux}{x^2} \right]_{x=1}=\lambda(e^{iu}-1-iu), \ u\in \mathbb{R}.
$$
\noindent \textbf{Proof of the direct part}. Suppose that \eqref{PC1} holds. Applying Theorem \ref{CLTF_03}, where the probability law limit is associated with an \textit{a.s} finite random variable, leads to
$$
a_n \rightarrow b+\lambda \ \ and \ \ K_n^\ast \rightsquigarrow K^\ast.
$$
\noindent So \eqref{PC2a} holds. We also have that $K_n^\ast \rightsquigarrow K^\ast$ means:
$$
\forall x\in C(K^\ast), \ \ K_n^{\ast}(x) \rightarrow \lambda 1_{(x\geq 1)}, \ as \ n\rightarrow +\infty,
$$
\noindent since $C(K^\ast)=(x<1)+(x>1)$ and $\lambda_{K_n^\ast}(\mathbb{R}) \rightarrow \lambda_{K^\ast}(\mathbb{R})=\lambda$. For $x>1$, we have
\begin{eqnarray*}
&&\sum_{k=1}^{k(n)} \int_{y\leq x} y^2 \ dF_{k,n}^\ast(y) \rightarrow \lambda\\
&&\Leftrightarrow \sum_{k=1}^{k(n)} \int y^2 \ \ dF_{k,n}^\ast(y) - \sum_{k=1}^{k(n)} \int_{(y>x)} y^2 \ dF_{k,n}^\ast(y) \rightarrow \lambda\\
&&\Leftrightarrow \sum_{k=1}^{k(n)} \sigma^2_{k,n} - \sum_{k=1}^{k(n)} \int_{y> x} y^2 \ dF_{k,n}^\ast(y) \rightarrow \lambda,
\end{eqnarray*}
\noindent where we use that $\int y^2 \ dF_{k,n}^\ast(y)=\mathbb{V}ar(X_{k,n}-a_{k,n})=\sigma^2_{k,n}$ in the last line. Hence
\begin{eqnarray*}
&&\sum_{k=1}^{k(n)} \int_{y\leq x} y^2 \ dF_{k,n}^\ast(y) \rightarrow \lambda\\
&&\Leftrightarrow \sum_{k=1}^{k(n)} \int_{y> x} y^2 \ dF_{k,n}^\ast(y) \rightarrow 0. \ \ (L22)
\end{eqnarray*}
\noindent Let $\varepsilon=x-1>0$; then (L22) is equivalent to
$$
\sum_{k=1}^{k(n)} \int_{(y-1> x-1)} y^2 \ dF_{k,n}^\ast(y) \rightarrow 0,
$$
\noindent which is
$$
\sum_{k=1}^{k(n)} \int_{(|y-1|> x-1)} y^2 \ dF_{k,n}^\ast(y) \rightarrow 0,
$$
\noindent that is
$$
g_{n,pois}(x-1) \rightarrow 0.
$$
\noindent Next, for $x<1$, we have
\begin{eqnarray*}
&&\sum_{k=1}^{k(n)} \int_{y\leq x} y^2 \ dF_{k,n}^\ast(y) \rightarrow 0\\
&&\Leftrightarrow \sum_{1\leq k\leq k(n)} x^2 \lambda_{F_{k,n}}(\{x\}) + \sum_{k=1}^{k(n)} \int_{(y<x)} y^2 \ dF_{k,n}^\ast(y) \rightarrow 0\\
&&\Leftrightarrow \lambda_{K_n^\ast}(\{x\}) + \sum_{k=1}^{k(n)} \int_{(1-y>1-x)} y^2 \ dF_{k,n}^\ast(y) \rightarrow 0\\
&&\Leftrightarrow \sum_{k=1}^{k(n)} \int_{(|1-y|>1-x)} y^2 \ dF_{k,n}^\ast(y) \rightarrow 0,
\end{eqnarray*}
\noindent where we use that $\lambda_{K_n^\ast}(\{x\})\rightarrow 0$ (as shown in line \eqref{discountX} above) in the last line. Hence
$$
g_{n,pois}(1-x) \rightarrow 0.
$$
\noindent By combining these results, we have for any $\varepsilon>0$, by taking either $x-1=\varepsilon$ (for $x>1$) or $1-x=\varepsilon$ (for $x<1$), we arrive at \eqref{PC2b}.\\
\noindent \textbf{Proof of the indirect implication}. Suppose that \eqref{PC2a} and \eqref{PC2b} are satisfied. Let us exploit \eqref{PC2b}. For $x>1$,
\begin{eqnarray*}
K_n(x)&=&\sum_{k=1}^{k(n)} \int_{(y\leq x)} y^2 \ dF_{k,n}^\ast(y)\\
&=&\sum_{k=1}^{k(n)} \sigma^2_{k,n}-\sum_{k=1}^{k(n)} \int_{(y-1>x-1)} y^2 \ dF_{k,n}^\ast(y)\\
&=&\sum_{k=1}^{k(n)} \sigma^2_{k,n}- g_{n,pois}(x-1) \rightarrow \lambda.
\end{eqnarray*}
\noindent For $x<1$
\begin{eqnarray*}
K_n(x)&=&\sum_{k=1}^{k(n)} \int_{(y\leq x)} y^2 \ dF_{k,n}^\ast(y)\\
&=&\lambda_{K_n}(\{x\}) + \sum_{k=1}^{k(n)} \int_{(|y-1|>1-x)} y^2 \ dF_{k,n}^\ast(y)\\
&=&\lambda_{K_n}(\{x\}) + g_{n,pois}(1-x).
\end{eqnarray*}
\noindent So, $\lambda_{K_n}(\{x\})\rightarrow 0$ is shown exactly as in the lines \eqref{discountX} above. Next $g_{n,pois}(1-x)\rightarrow 0$ holds by Assumption \eqref{PC2b}. Hence
$K_n \rightsquigarrow_{pre} \lambda 1_{(\circ \geq 1)}$. To complete the proof, we remark that for any $n\geq 1$, $K_n(-\infty)=0$ and
$K_n(+\infty)=\sum_{k=1}^{k(n)} \sigma^2_{k,n}$. So
$$
\lambda_{K_n}(\mathbb{R})=K_n(+\infty)=\sum_{k=1}^{k(n)} \sigma^2_{k,n} \rightarrow \lambda=\lambda_{K}(\mathbb{R}).
$$
\noindent By Theorem \ref{CLTF_03}, we conclude that \eqref{PC1} holds. $\blacksquare$\\
\section{Conclusion}
\noindent We hope that we have given a complete exposition of the theory of the weak limits of independent summands of square integrable random variables on $\mathbb{R}$. The next step will be a re-do of the same theory when the existence of the moments, even the first moment, is not required.
\end{document}
|
\begin{document}
\title{On $q$-Quasiadditive and $q$-Quasimultiplicative Functions}
\begin{abstract}
In this paper, we introduce the notion of \begin{math}q\end{math}-quasiadditivity of
arithmetic functions, as well as the related concept of
\begin{math}q\end{math}-quasimultiplicativity, which generalise strong \begin{math}q\end{math}-additivity
and -multiplicativity, respectively. We show that there are many
natural examples for these concepts, which are characterised by
functional equations of the form \begin{math}f(q^{k+r}a + b) = f(a) + f(b)\end{math} or
\begin{math}f(q^{k+r}a + b) = f(a) f(b)\end{math} for all \begin{math}b < q^k\end{math} and a fixed parameter \begin{math}r\end{math}.
In addition to some elementary properties of \begin{math}q\end{math}-quasiadditive and \begin{math}q\end{math}-quasimultiplicative functions, we prove characterisations of \begin{math}q\end{math}-quasiadditivity and \begin{math}q\end{math}-quasimultiplicativity for the special class of \begin{math}q\end{math}-regular functions. The final main result provides a general central limit theorem that includes both classical and new examples as corollaries.
\end{abstract}
\section{Introduction}
Arithmetic functions based on the digital expansion in some base \begin{math}q\end{math}
have a long history (see, e.g., \cite{Bellman-Shapiro:1948,Gelfond:1968:sur,Delange:1972:q-add-q-mult,Delange:1975:chiffres,Cateland:digital-seq,Bassily-Katai:1995:distr,Drmota:2000}).
The notion of a \begin{math}q\end{math}-\emph{additive} function is due to \cite{Gelfond:1968:sur}: an arithmetic function (defined on nonnegative integers) is called \begin{math}q\end{math}-additive if
\begin{equation*}f(q^k a + b) = f(q^k a) + f(b)\end{equation*}
whenever \begin{math}0 \leq b < q^k\end{math}. A stronger version of this concept is \emph{strong} (or \emph{complete}) \begin{math}q\end{math}-additivity: a function \begin{math}f\end{math} is said to be strongly \begin{math}q\end{math}-additive if we even have
\begin{equation*}f(q^k a + b) = f(a) + f(b)\end{equation*}
whenever \begin{math}0 \leq b < q^k\end{math}. The class of (strongly) \begin{math}q\end{math}-\emph{multiplicative} functions is defined in an analogous fashion.
Loosely speaking, (strong) \begin{math}q\end{math}-additivity of a function means that
it can be evaluated by breaking up the base-\begin{math}q\end{math} expansion. Typical
examples of strongly \begin{math}q\end{math}-additive functions are the \begin{math}q\end{math}-ary sum of
digits and the number of occurrences of a specified nonzero digit.
There are, however, many simple and natural functions based on the \begin{math}q\end{math}-ary expansion that are not \begin{math}q\end{math}-additive. A very basic example of this kind are \emph{block counts}: the number of occurrences of a certain block of digits in the \begin{math}q\end{math}-ary expansion. This and other examples provide the motivation for the present paper, in which we define and study a larger class of functions with comparable properties.
\begin{defi}
An arithmetic function (a function defined on the set of nonnegative integers) is called \begin{math}q\end{math}-\emph{quasiadditive} if there exists some nonnegative integer \begin{math}r\end{math} such that
\begin{equation}\label{eq:q-add}
f(q^{k+r}a + b) = f(a) + f(b)
\end{equation}
whenever \begin{math}0 \leq b < q^k\end{math}. Likewise, \begin{math}f\end{math} is said to be \begin{math}q\end{math}-\emph{quasimultiplicative} if it satisfies the identity
\begin{equation}\label{eq:q-mult}
f(q^{k+r}a + b) = f(a)f(b)
\end{equation}
for some fixed nonnegative integer \begin{math}r\end{math} whenever \begin{math}0 \leq b < q^k\end{math}.
\end{defi}
We remark that the special case \begin{math}r = 0\end{math} is exactly strong
\begin{math}q\end{math}-additivity, so strictly speaking the term ``strongly
\begin{math}q\end{math}-quasiadditive function'' might be more appropriate. However, since
we are not considering a weaker version (for which natural examples
seem to be much harder to find), we do not make a distinction. As a further caveat, we remark that the term ``quasiadditivity'' has also been used in \cite{allouche:1993} for a related, but slightly weaker condition.
In the
following section, we present a variety of examples of
\begin{math}q\end{math}-quasiadditive and \begin{math}q\end{math}-quasimultipli\-cative functions.
In Section~\ref{sec:elem-properties}, we give some general properties of
such functions. Since most of our examples also belong to the related class of \begin{math}q\end{math}-regular
functions, we discuss the connection in Section~\ref{sec:q-regular}.
Finally, we prove a general central limit theorem for \begin{math}q\end{math}-quasiadditive and -multiplicative functions that contains both old
and new examples as special cases.
\section{Examples of $q$-quasiadditive and $q$-quasimultiplicative functions}
\label{sec:exampl-q-quasiadd}
Let us now back up the abstract concept of \begin{math}q\end{math}-quasiadditivity by some concrete examples.
\subsection*{Block counts}
As mentioned in the introduction, the number of occurrences of a fixed
digit is a typical example of a \begin{math}q\end{math}-additive function. However, the
number of occurrences of a given block \begin{math}B = \varepsilon_1\varepsilon_2
\cdots \varepsilon_{\ell}\end{math} of digits in the expansion of a nonnegative
integer \begin{math}n\end{math}, which we denote by \begin{math}c_B(n)\end{math}, does not represent a
\begin{math}q\end{math}-additive function. The reason is simple: the \begin{math}q\end{math}-ary expansion of \begin{math}q^ka + b\end{math} is obtained by joining the expansions of \begin{math}a\end{math} and \begin{math}b\end{math}, so occurrences of \begin{math}B\end{math} in \begin{math}a\end{math} and occurrences of \begin{math}B\end{math} in \begin{math}b\end{math} are counted by \begin{math}c_B(a) + c_{B}(b)\end{math}, but occurrences that involve digits of both \begin{math}a\end{math} and \begin{math}b\end{math} are not.
However, if \begin{math}B\end{math} is a block different from \begin{math}00\cdots0\end{math}, then \begin{math}c_B\end{math} is \begin{math}q\end{math}-quasiadditive: note that the representation of \begin{math}q^{k+\ell} a + b\end{math} is of the form
\begin{equation*}\underbrace{a_1 a_2 \cdots a_{\mu}}_{\text{expansion of } a} \underbrace{0 0 \cdots 0_{\vphantom{\mu}}}_{\ell \text{ zeros}} \underbrace{b_1 b_2 \cdots {b_{\nu}}_{\vphantom{\mu}}}_{\text{expansion of } b}\end{equation*}
whenever \begin{math}0 \leq b < q^k\end{math}, so occurrences of the block \begin{math}B\end{math} have to belong to either \begin{math}a\end{math} or \begin{math}b\end{math} only, implying that
\begin{math}c_B(q^{k+\ell} a + b) = c_B(a) + c_B(b)\end{math},
with one small caveat: if the block starts and/or ends with a sequence
of zeros, then the count needs to be adjusted by assuming the digital
expansion of a nonnegative integer to be padded with zeros on the left
and on the right.
For example, let \begin{math}B\end{math} be the block \begin{math}0101\end{math} in base \begin{math}2\end{math}. The binary representations of \begin{math}469\end{math} and \begin{math}22\end{math} are \begin{math}111010101\end{math} and \begin{math}10110\end{math}, respectively, so we have \begin{math}c_B(469) = 2\end{math} and \begin{math}c_B(22) = 1\end{math} (note the occurrence of \begin{math}0101\end{math} at the beginning of \begin{math}10110\end{math} if we assume the expansion to be padded with zeros), as well as
\begin{equation*}c_B(240150) = c_B(2^9 \cdot 469 + 22) = c_B(469) + c_B(22) = 3.\end{equation*}
Indeed, the block \begin{math}B\end{math} occurs three times in the expansion of \begin{math}240150\end{math}, which is \begin{math}111010101000010110\end{math}.
\subsection*{The number of runs and the Gray code}
The number of ones in the Gray code of a nonnegative integer \begin{math}n\end{math},
which we denote by \begin{math}h_{\mathsf{GRAY}}(n)\end{math}, is also equal to the number of runs
(maximal sequences of consecutive identical digits) in the binary
representations of \begin{math}n\end{math} (counting the number of runs in the
representation of \begin{math}0\end{math} as \begin{math}0\end{math}); the sequence defined by \begin{math}h_{\mathsf{GRAY}}(n)\end{math} is
\href{http://oeis.org/A005811}{A005811} in Sloane's On-Line Encyclopedia of Integer Sequences
\cite{OEIS:2016}. An analysis of its expected value is performed in \cite{Flajolet-Ramshaw:1980:gray}. The function \begin{math}h_{\mathsf{GRAY}}\end{math} is \begin{math}2\end{math}-quasiadditive up to some minor
modification: set \begin{math}f(n) = h_{\mathsf{GRAY}}(n)\end{math} if \begin{math}n\end{math} is even and \begin{math}f(n) = h_{\mathsf{GRAY}}(n)
+ 1\end{math} if \begin{math}n\end{math} is odd. The new function \begin{math}f\end{math} can be interpreted as the
total number of occurrences of the two blocks \begin{math}01\end{math} and \begin{math}10\end{math} in the
binary expansion (considering binary expansions to be padded with zeros at both ends), so the argument of the previous example applies again and shows that \begin{math}f\end{math} is \begin{math}2\end{math}-quasiadditive.
\subsection*{The nonadjacent form and its Hamming weight}
The nonadjacent form (NAF) of a nonnegative integer is the unique
base-\begin{math}2\end{math} representation with digits \begin{math}0,1,-1\end{math} (\begin{math}-1\end{math} is usually
represented as \begin{math}\overline{1}\end{math} in this context) and the additional
requirement that there may not be two adjacent nonzero digits, see
\cite{Reitwiesner:1960}. For example, the NAF of \begin{math}27\end{math} is
\begin{math}100\overline{1}0\overline{1}\end{math}. It is well known that the NAF always
has minimum Hamming weight (i.e., the number of nonzero digits) among all
possible binary representations with this particular digit set,
although it may not be unique with this property (compare, e.g.,
\cite{Reitwiesner:1960} with \cite{Joye-Yen:2000:optim-left}).
The Hamming weight \begin{math}h_{\mathsf{NAF}}\end{math} of the nonadjacent form has been analysed in
some detail \cite{Thuswaldner:1999,Heuberger-Kropf:2013:analy}, and it is also an example of a \begin{math}2\end{math}-quasiadditive function. It is not difficult to see that \begin{math}h_{\mathsf{NAF}}\end{math} is characterised by the recursions
\begin{equation*}h_{\mathsf{NAF}}(2n) = h_{\mathsf{NAF}}(n), \qquad h_{\mathsf{NAF}}(4n+1) = h_{\mathsf{NAF}}(n) + 1, \qquad h_{\mathsf{NAF}}(4n-1) = h_{\mathsf{NAF}}(n) + 1\end{equation*}
together with the initial value \begin{math}h_{\mathsf{NAF}}(0) = 0\end{math}. The identity
\begin{equation*}h_{\mathsf{NAF}}(2^{k+2}a + b) = h_{\mathsf{NAF}}(a) + h_{\mathsf{NAF}}(b)\end{equation*}
can be proved by induction. In Section~\ref{sec:q-regular}, this example will be generalised and put into a larger context.
\subsection*{The number of optimal $\{0,1,-1\}$-representations}
As mentioned above, the NAF may not be the only representation with minimum Hamming weight among all possible binary representations with digits \begin{math}0,1,-1\end{math}. The number of optimal representations of a given nonnegative integer \begin{math}n\end{math} is therefore a quantity of interest in its own right. Its average over intervals of the form \begin{math}[0,N)\end{math} was studied by Grabner and Heuberger \cite{Grabner-Heuberger:2006:Number-Optimal}, who also proved that the number \begin{math}r_{\mathsf{OPT}}(n)\end{math} of optimal representations of \begin{math}n\end{math} can be obtained in the following way:
\begin{lemma}[Grabner--Heuberger \cite{Grabner-Heuberger:2006:Number-Optimal}]\label{lemma:opt-representations-recursion}
Let sequences \begin{math}u_i\end{math} (\begin{math}i=1,2,\ldots,5\end{math}) be given recursively by
\begin{equation*}u_1(0) = u_2(0) = \cdots = u_5(0) = 1, \qquad u_1(1) = u_2(1) = 1,\ u_3(1) = u_4(1) = u_5(1) = 0,\end{equation*}
and
\begin{align*}
u_1(2n) = u_1(n), \qquad & u_1(2n+1) = u_2(n) + u_4(n+1), \\
u_2(2n) = u_1(n), \qquad & u_2(2n+1) = u_3(n), \\
u_3(2n) = u_2(n), \qquad & u_3(2n+1) = 0, \\
u_4(2n) = u_1(n), \qquad & u_4(2n+1) = u_5(n+1), \\
u_5(2n) = u_4(n), \qquad & u_5(2n+1) = 0.
\end{align*}
The number \begin{math}r_{\mathsf{OPT}}(n)\end{math} of optimal representations of \begin{math}n\end{math} is equal to \begin{math}u_1(n)\end{math}.
\end{lemma}
A straightforward calculation shows that
\begin{equation}\label{eq:8n_a}
\begin{aligned}
&u_1(8n) = u_2(8n) = \cdots = u_5(8n) = u_1(8n+1) = u_2(8n+1) = u_1(n),\\
&u_3(8n+1) = u_4(8n+1) = u_5(8n+1) = 0.
\end{aligned}
\end{equation}
This gives us the following result:
\begin{lemma}\label{lem:optrep}
The number of optimal \begin{math}\{0,1,-1\}\end{math}-representations of a nonnegative integer is a \begin{math}2\end{math}-quasimulti\-plicative function. Specifically, for any three nonnegative integers \begin{math}a,b,k\end{math} with \begin{math}b < 2^k\end{math}, we have
\begin{equation*}r_{\mathsf{OPT}}(2^{k+3}a + b) = r_{\mathsf{OPT}}(a)r_{\mathsf{OPT}}(b).\end{equation*}
\end{lemma}
\begin{proof} We will prove a somewhat stronger statement by induction on \begin{math}k\end{math}: write
\begin{equation*}\mathbf{u}(n) = (u_1(n),u_2(n),u_3(n),u_4(n),u_5(n))^t.\end{equation*}
We show that
\begin{equation*}\mathbf{u}(2^{k+3}a + b) = r_{\mathsf{OPT}}(a) \mathbf{u}(b)\end{equation*}
and
\begin{equation*}\mathbf{u}(2^{k+3}a + b+1) = r_{\mathsf{OPT}}(a) \mathbf{u}(b+1)\end{equation*}
for all \begin{math}a,b,k\end{math} satisfying the conditions of the lemma, from which the desired result follows by considering the first entry of the vector \begin{math}\mathbf{u}(2^{k+3}a+b)\end{math}. Note first that both identities are clearly true for \begin{math}k=0\end{math} in view of~\eqref{eq:8n_a}. For the induction step, we distinguish two cases: if \begin{math}b\end{math} is even, we have
\begin{align*}
\mathbf{u}(2^{k+3}a + b) &=
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0
\end{pmatrix} \cdot \mathbf{u}(2^{k+2}a + b/2) \\
&= \begin{pmatrix}
1 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0
\end{pmatrix} \cdot r_{\mathsf{OPT}}(a) \mathbf{u}(b/2) \\
&= r_{\mathsf{OPT}}(a) \mathbf{u}(b)
\end{align*}
by the induction hypothesis, as well as
\begin{align*}
\mathbf{u}(2^{k+3}a + b+1) &=
\begin{pmatrix}
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0
\end{pmatrix} \cdot \mathbf{u}(2^{k+2}a + b/2) + \begin{pmatrix}
0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 \\
0 & 0 & 0 & 0 & 0
\end{pmatrix} \cdot \mathbf{u}(2^{k+2}a + b/2+1) \\
&= \begin{pmatrix}
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0
\end{pmatrix} \cdot r_{\mathsf{OPT}}(a)\mathbf{u}(b/2) + \begin{pmatrix}
0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 \\
0 & 0 & 0 & 0 & 0
\end{pmatrix} \cdot r_{\mathsf{OPT}}(a)\mathbf{u}(b/2+1) \\
&= r_{\mathsf{OPT}}(a)\mathbf{u}(b+1).
\end{align*}
The case that \begin{math}b\end{math} is odd is treated in an analogous fashion.
\end{proof}
In Section~\ref{sec:q-regular}, we will show that this is also an instance of a more general phenomenon.
\subsection*{The run length transform and cellular automata}
The \emph{run length transform} of a sequence is defined in a recent paper of Sloane \cite{Sloane:number-on}: it is based on the binary representation, but could in principle also be generalised to other bases. Given a sequence \begin{math}s_1,s_2,\ldots\end{math}, its run length transform is obtained by the rule
\begin{equation*}t(n) = \prod_{i \in \mathcal{L}(n)} s_i,\end{equation*}
where \begin{math}\mathcal{L}(n)\end{math} is the multiset of run lengths of \begin{math}n\end{math} (lengths
of blocks of consecutive ones in the binary representation). For
example, the binary expansion of \begin{math}1910\end{math} is \begin{math}11101110110\end{math}, so the
multiset \begin{math}\mathcal{L}(n)\end{math} of run lengths would be \begin{math}\{3,3,2\}\end{math}, giving
\begin{math}t(1910) = s_2 s_3^2\end{math}.
A typical example is obtained for the sequence of Jacobsthal numbers given by the formula \begin{math}s_n = \frac13 (2^{n+2} - (-1)^n)\end{math}. The associated run length transform \begin{math}t_n\end{math} (sequence \href{http://oeis.org/A071053}{A071053} in the OEIS \cite{OEIS:2016}) counts the number of odd coefficients in the expansion of \begin{math}(1+x+x^2)^n\end{math}, and it can also be interpreted as the number of active cells at the \begin{math}n\end{math}-th generation of a certain cellular automaton. Further examples stemming from cellular automata can be found in Sloane's paper \cite{Sloane:number-on}.
The argument that proved \begin{math}q\end{math}-quasiadditivity of block counts also applies here, and indeed it is easy to see that the identity
\begin{equation*}t(2^{k+1}a + b) = t(a)t(b),\end{equation*}
where \begin{math}0 \leq b < 2^k\end{math}, holds for the run length transform of any sequence, meaning that any such transform is \begin{math}2\end{math}-quasimultiplicative. In fact, it is not difficult to show that every \begin{math}2\end{math}-quasimultiplicative function with parameter \begin{math}r=1\end{math} is the run length transform of some sequence.
\section{Elementary properties}
\label{sec:elem-properties}
Now that we have gathered some motivating examples for the concepts of \begin{math}q\end{math}-quasiadditivity and \begin{math}q\end{math}-quasi\-multiplicativity, let us present some simple results about functions with these properties. First of all, let us state an obvious relation between \begin{math}q\end{math}-quasiadditive and \begin{math}q\end{math}-quasimultiplicative functions:
\begin{prop}\label{prop:trivial}
If a function \begin{math}f\end{math} is \begin{math}q\end{math}-quasiadditive, then the function defined by \begin{math}g(n) = c^{f(n)}\end{math} for some positive constant \begin{math}c\end{math} is \begin{math}q\end{math}-quasimultiplicative. Conversely, if \begin{math}f\end{math} is a \begin{math}q\end{math}-quasimultiplicative function that only takes positive values, then the function defined by \begin{math}g(n) = \log_c f(n)\end{math} for some positive constant \begin{math}c \neq 1\end{math} is \begin{math}q\end{math}-quasiadditive.
\end{prop}
The next proposition deals with the parameter \begin{math}r\end{math} in the definition of a \begin{math}q\end{math}-quasiadditive function:
\begin{prop}
If the arithmetic function \begin{math}f\end{math} satisfies
\begin{equation*}f(q^{k+r}a + b) = f(a) + f(b)\end{equation*}
for some fixed nonnegative integer \begin{math}r\end{math} whenever \begin{math}0 \leq b < q^k\end{math}, then it also satisfies
\begin{equation*}f(q^{k+s}a + b) = f(a) + f(b)\end{equation*}
for all nonnegative integers \begin{math}s \geq r\end{math} whenever \begin{math}0 \leq b < q^k\end{math}.
\end{prop}
\begin{proof}
If \begin{math}a,b\end{math} are nonnegative integers with \begin{math}0 \leq b < q^k\end{math}, then clearly also \begin{math}0 \leq b < q^{k+s-r}\end{math} if \begin{math}s \geq r\end{math}, and thus
\begin{equation*}f(q^{k+s}a + b) = f(q^{(k+s-r)+r}a + b) = f(a) + f(b).\end{equation*}
\end{proof}
\begin{cor}\label{cor:lin_comb}
If two arithmetic functions \begin{math}f\end{math} and \begin{math}g\end{math} are \begin{math}q\end{math}-quasiadditive functions, then so is any linear combination \begin{math}\alpha f + \beta g\end{math} of the two.
\end{cor}
\begin{proof}
In view of the previous proposition, we may assume the parameter \begin{math}r\end{math} in~\eqref{eq:q-add} to be the same for both functions. The statement follows immediately.
\end{proof}
Finally, we observe that \begin{math}q\end{math}-quasiadditive and \begin{math}q\end{math}-quasimultiplicative functions can be computed by breaking the \begin{math}q\end{math}-ary expansion into pieces.
\begin{lemma}\label{lem:simplefacts}
If \begin{math}f\end{math} is a \begin{math}q\end{math}-quasiadditive (\begin{math}q\end{math}-quasimultiplicative) function, then
\begin{itemize}
\item \begin{math}f(0) = 0\end{math} (\begin{math}f(0) = 1\end{math}, respectively, unless \begin{math}f\end{math} is identically \begin{math}0\end{math}),
\item \begin{math}f(qa) = f(a)\end{math} for all nonnegative integers \begin{math}a\end{math}.
\end{itemize}
\end{lemma}
\begin{proof}
Assume first that \begin{math}f\end{math} is \begin{math}q\end{math}-quasiadditive. Setting \begin{math}a = b = 0\end{math} in the defining functional equation~\eqref{eq:q-add}, we obtain
\begin{equation*}f(0) = f(0) + f(0),\end{equation*}
and the first statement follows. Setting \begin{math}b = 0\end{math} while \begin{math}a\end{math} is arbitrary, we now find that
\begin{equation*}f(q^{k+r}a) = f(a)\end{equation*}
for all \begin{math}k \geq 0\end{math}. In particular, this also means that
\begin{equation*}f(a) = f(q^{r+1}a) = f(q^r \cdot qa) = f(qa),\end{equation*}
which proves the second statement. For \begin{math}q\end{math}-quasimultiplicative
functions, the proof is analogous (and one can also use
Proposition~\ref{prop:trivial} for positive functions).
\end{proof}
\begin{prop}\label{prop:split}
Suppose that the function \begin{math}f\end{math} is \begin{math}q\end{math}-quasiadditive with parameter \begin{math}r\end{math}, i.e., \begin{math}f(q^{k+r}a + b) = f(a) + f(b)\end{math} whenever \begin{math}0 \leq b < q^k\end{math}. Going from left to right, split the $q$-ary expansion of \begin{math}n\end{math} into blocks by inserting breaks after each run of \begin{math}r\end{math} or more zeros. If these blocks are the $q$-ary representations of \begin{math}n_1,n_2,\ldots,n_{\ell}\end{math}, then we have
\begin{equation*}f(n) = f(n_1) + f(n_2) + \cdots + f(n_{\ell}).\end{equation*}
Moreover, if \begin{math}m_i\end{math} is
the greatest divisor
of \begin{math}n_i\end{math} which
is not divisible by \begin{math}q\end{math} for $i=1,\ldots,\ell$, then
\begin{equation*}f(n) = f(m_1) + f(m_2) + \cdots + f(m_{\ell}).\end{equation*}
Analogous statements hold for \begin{math}q\end{math}-quasimultiplicative functions, with sums replaced by products.
\end{prop}
\begin{proof}
This is obtained by a straightforward induction on \begin{math}\ell\end{math} together with the fact that \begin{math}f(q^{h} a) = f(a)\end{math}, which follows from the previous lemma.
\end{proof}
\begin{example}
Recall that the Hamming weight of the NAF (which is the minimum Hamming weight of a \begin{math}\{0,1,-1\}\end{math}-representation) is \begin{math}2\end{math}-quasiadditive with parameter \begin{math}r=2\end{math}. To determine \begin{math}h_{\mathsf{NAF}}(314\,159\,265)\end{math}, we split the binary representation, which is
\begin{math}10010101110011011000010100001,\end{math}
into blocks by inserting breaks after each run of at least two zeros:
\begin{equation*}100|101011100|110110000|1010000|1.\end{equation*}
The numbers \begin{math}n_1,n_2,\ldots,n_{\ell}\end{math} in the statement of the proposition are now \begin{math}4,348,432,80,1\end{math} respectively, and the numbers \begin{math}m_1,m_2,\ldots,m_{\ell}\end{math} are therefore \begin{math}1,87,27,5,1\end{math}. Now we use the values \begin{math}h_{\mathsf{NAF}}(1) = 1\end{math}, \begin{math}h_{\mathsf{NAF}}(5) = 2\end{math}, \begin{math}h_{\mathsf{NAF}}(27) = 3\end{math} and \begin{math}h_{\mathsf{NAF}}(87) = 4\end{math} to obtain
\begin{equation*}h_{\mathsf{NAF}}(314\,159\,265) = 2h_{\mathsf{NAF}}(1) + h_{\mathsf{NAF}}(5) + h_{\mathsf{NAF}}(27) + h_{\mathsf{NAF}}(87) = 11.\end{equation*}
\end{example}
\begin{example}
In the same way, we consider the number of optimal representations \begin{math}r_{\mathsf{OPT}}\end{math}, which is \begin{math}2\end{math}-quasimultiplicative with parameter \begin{math}r=3\end{math}. Consider for instance the binary representation of \begin{math}204\,280\,974\end{math}, namely
\begin{math}1100001011010001010010001110\end{math}.
We split into blocks:
\begin{equation*}110000|101101000|101001000|1110.\end{equation*}
The four blocks correspond to the numbers \begin{math}48 = 16 \cdot 3\end{math}, \begin{math}360 = 8 \cdot 45\end{math}, \begin{math}328 = 8 \cdot 41\end{math} and \begin{math}14 = 2 \cdot 7\end{math}. Since \begin{math}r_{\mathsf{OPT}}(3) = 2\end{math}, \begin{math}r_{\mathsf{OPT}}(45) = 5\end{math}, \begin{math}r_{\mathsf{OPT}}(41) = 1\end{math} and \begin{math}r_{\mathsf{OPT}}(7) = 1\end{math}, we obtain
\begin{math}r_{\mathsf{OPT}}(204\,280\,974) = 10\end{math}.
\end{example}
\section{$q$-Regular functions}\label{sec:q-regular}
In this section, we introduce \begin{math}q\end{math}-regular functions and examine the
connection to our concepts. See~\cite{Allouche-Shallit:2003:autom} for
more background on \begin{math}q\end{math}-regular functions and sequences.
A function \begin{math}f\end{math} is
\emph{\begin{math}q\end{math}-regular} if it can be expressed as \begin{math}f=\boldsymbol{u}^{t}\boldsymbol{f}\end{math} for a vector \begin{math}\boldsymbol{u}\end{math}
and a vector-valued function \begin{math}\boldsymbol{f}\end{math}, and there are matrices \begin{math}M_{i}\end{math}, \begin{math}0\leq i<q\end{math}, satisfying
\begin{equation}\label{eq:q-regular-recursive}
\boldsymbol{f}(qn+i)=M_{i}\boldsymbol{f}(n)
\end{equation}
for \begin{math}0\leq i<q\end{math}, \begin{math}qn+i>0\end{math}. We set \begin{math}\boldsymbol{v}=\boldsymbol{f}(0)\end{math}.
Equivalently, a function \begin{math}f\end{math} is \begin{math}q\end{math}-regular if and only if \begin{math}f\end{math} can be written as
\begin{equation}
\label{eq:q-regular}
f(n)=\boldsymbol{u}^{t} \prod_{i=0}^{L} M_{n_{i}}\boldsymbol{v}
\end{equation}
where \begin{math}n_{L}\cdots n_{0}\end{math} is the \begin{math}q\end{math}-ary expansion of \begin{math}n\end{math}.
The notion of \begin{math}q\end{math}-regular functions is a generalisation of
\begin{math}q\end{math}-additive and \begin{math}q\end{math}-multiplicative functions. However, we emphasise that \begin{math}q\end{math}-quasiadditive and \begin{math}q\end{math}-quasimultiplicative functions are not
necessarily \begin{math}q\end{math}-regular: a \begin{math}q\end{math}-regular sequence can always be bounded
by \begin{math}O(n^{c})\end{math} for a constant \begin{math}c\end{math}, see~\cite[Thm.\
16.3.1]{Allouche-Shallit:2003:autom}. In our setting however, the values of \begin{math}f(n)\end{math} can be chosen arbitrarily for those \begin{math}n\end{math} whose \begin{math}q\end{math}-ary expansion does not contain \begin{math}0^{r}\end{math}. Therefore a \begin{math}q\end{math}-quasiadditive or -multiplicative function can grow arbitrarily fast.
We call \begin{math}(\boldsymbol{u}, (M_{i})_{0\leq i<q}, \boldsymbol{v})\end{math} a \emph{linear representation} of the
\begin{math}q\end{math}-regular function \begin{math}f\end{math}. Such a linear representation is called
\emph{zero-insensitive} if \begin{math}M_{0}\boldsymbol{v}=\boldsymbol{v}\end{math}, meaning that in
\eqref{eq:q-regular}, leading zeros in the \begin{math}q\end{math}-ary expansion of \begin{math}n\end{math} do
not change anything. We call a linear representation \emph{minimal} if the dimension
of the matrices \begin{math}M_{i}\end{math} is minimal among all linear representations of \begin{math}f\end{math}.
Following \cite{Dumas:2014:asymp}, every \begin{math}q\end{math}-regular function has a
zero-insensitive minimal linear representation.
\subsection{When is a $q$-regular function $q$-quasimultiplicative?}
We now give a characterisation of \begin{math}q\end{math}-regular functions that are \begin{math}q\end{math}-quasimultiplicative.
\begin{theorem}\label{theorem:reg-mult}
Let \begin{math}f\end{math} be a \begin{math}q\end{math}-regular sequence with zero-insensitive minimal linear representation~\eqref{eq:q-regular}. Then the following
two assertions are equivalent:
\begin{itemize}
\item The sequence \begin{math}f\end{math} is \begin{math}q\end{math}-quasimultiplicative with parameter
\begin{math}r\end{math}.
\item \begin{math}M_{0}^{r}=\boldsymbol{v}\boldsymbol{u}^{t}\end{math}.
\end{itemize}
\end{theorem}
\begin{proof} Let \begin{math}d\end{math} be the dimension of the vectors. We first prove that the set of vectors
\begin{equation*}\Big\{\boldsymbol{u}^{t}\prod_{i\in I}
M_{n_{i}}\mid n_{i}\in \{0,\ldots,q-1\}, I \text{ finite}\Big\}\end{equation*}
is a generating
system of the whole \begin{math}d\end{math}-dimensional vector space. This is done by contradiction: assume that there is a coordinate
transformation such that the first \begin{math}d_{0}<d\end{math} unit vectors form a
basis of the transformed space spanned by \begin{math}\{\boldsymbol{u}^{t}\prod_{i\in I}
M_{n_{i}}\mid n_{i}\in \{0,\ldots,q-1\}, I \text{ finite}\}\end{math}. This
coordinate transform defines a different linear representation of \begin{math}f\end{math}
with matrices \begin{math}\hat{M_{i}}\end{math} and vectors \begin{math}\hat{\boldsymbol{u}}\end{math} and
\begin{math}\hat{\boldsymbol{v}}\end{math}. However, only the first \begin{math}d_{0}\end{math} coordinates of any
vector \begin{math}\boldsymbol{u}^{t}\prod_{i\in I}M_{n_{i}}\end{math} can be nonzero. Thus we can reduce
the dimension of the matrices and vectors from \begin{math}d\end{math} to \begin{math}d_{0}\end{math} to obtain a new
linear representation of \begin{math}f\end{math}. This contradicts the minimality of the
original linear representation.
Analogously,
\begin{math}\{\prod_{j\in J} M_{n_{j}}\boldsymbol{v}\mid n_{j}\in \{0,\ldots,q-1\}, J
\text{ finite}\}\end{math} is also a generating system for the whole vector space.
The \begin{math}q\end{math}-quasimultiplicativity of \begin{math}f(n)\end{math} with parameter \begin{math}r\end{math} is equivalent to the identity
\begin{equation*}
\boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}}(M_{0}^{r}-\boldsymbol{v}\boldsymbol{u}^{t})\prod_{j\in J}M_{n_{j}}\boldsymbol{v}=0
\end{equation*}
for all finite tuples \begin{math}(n_{i})_{i\in I}\end{math} and \begin{math}(n_{j})_{j\in
J}\end{math}. Since both \begin{math}\{\boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} \}\end{math} and \begin{math}\{\prod_{j\in J}M_{n_{j}}\boldsymbol{v}\}\end{math} are generating systems of the entire vector space, this is equivalent to
\begin{math}\boldsymbol{x}^{t}(M_{0}^{r}-\boldsymbol{v}\boldsymbol{u}^{t})\boldsymbol{y}=0\end{math} for all vectors \begin{math}\boldsymbol{x}\end{math} and \begin{math}\boldsymbol{y}\end{math}, which in turn is
equivalent to \begin{math}M_{0}^{r}=\boldsymbol{v}\boldsymbol{u}^{t}\end{math}.
\end{proof}
\begin{example}[The number of optimal \begin{math}\{0,1,-1\}\end{math}-representations]
The number of optimal \begin{math}\{0,1,-1\}\end{math}-repre\-sentations as described in
Section~\ref{sec:exampl-q-quasiadd} is a \begin{math}2\end{math}-regular sequence by
Lemma~\ref{lemma:opt-representations-recursion}. A
minimal zero-insensitive linear representation for the vector \begin{math}(u_{1}(n),
u_{2}(n), u_{3}(n), u_{1}(n+1), u_{4}(n+1),
u_{5}(n+1))^{t}\end{math} is given by
\begin{equation*}
M_{0}=
\begin{pmatrix}
1&0&0&0&0&0\\
1&0&0&0&0&0\\
0&1&0&0&0&0\\
0&1&0&0&1&0\\
0&0&0&0&0&1\\
0&0&0&0&0&0
\end{pmatrix},\quad
M_{1}=
\begin{pmatrix}
0&1&0&0&1&0\\
0&0&1&0&0&0\\
0&0&0&0&0&0\\
0&0&0&1&0&0\\
0&0&0&1&0&0\\
0&0&0&0&1&0
\end{pmatrix},
\end{equation*}
\begin{math}\boldsymbol{u}^{t}=(1,0,0,0,0,0)\end{math} and \begin{math}\boldsymbol{v}=(1,1,1,1,0,0)^{t}\end{math}.
As \begin{math}M_{0}^{3}=\boldsymbol{v}\boldsymbol{u}^{t}\end{math}, this sequence is \begin{math}2\end{math}-quasimultiplicative with
parameter \begin{math}3\end{math}, which is the same result as in Lemma~\ref{lem:optrep}.
\end{example}
\begin{remark}
The condition on the minimality of the linear representation in
Theorem~\ref{theorem:reg-mult} is necessary as illustrated by the
following example:
Consider the sequence \begin{math}f(n)=2^{s_{2}(n)}\end{math}, where \begin{math}s_{2}(n)\end{math} is the binary sum of digits
function. This sequence is \begin{math}2\end{math}-regular and
\begin{math}2\end{math}-(quasi-)multiplicative with parameter \begin{math}r=0\end{math}. A ($1$-dimensional) minimal
linear representation is given by \begin{math}M_{0}=1\end{math}, \begin{math}M_{1}=2\end{math}, \begin{math}v=1\end{math} and \begin{math}u=1\end{math}. As stated
in Theorem~\ref{theorem:reg-mult}, we have \begin{math}M_{0}^{0}=vu^{t}=1\end{math}.
If we use the zero-insensitive non-minimal linear representation defined by \begin{math}M_{0}=\big(
\begin{smallmatrix}
1&13\\0&2
\end{smallmatrix}\big)
\end{math}, \begin{math}M_{1}=\big(
\begin{smallmatrix}
2&27\\0&5
\end{smallmatrix}
\big)\end{math}, \begin{math}\boldsymbol{v}=(1, 0)^{t}\end{math} and \begin{math}\boldsymbol{u}^{t}=(1, 0)\end{math} instead, we have \begin{math}\rank M_{0}^{r}=2\end{math}
for all \begin{math}r\geq 0\end{math}. Thus \begin{math}M_{0}^{r}\neq \boldsymbol{v}\boldsymbol{u}^{t}\end{math}.
\end{remark}
\subsection{When is a $q$-regular function $q$-quasiadditive?}
The characterisation of \begin{math}q\end{math}-regular functions that are also
\begin{math}q\end{math}-quasiadditive is somewhat more complicated. Again, we consider a
zero-insensitive (but not necessarily minimal) linear representation. We let \begin{math}U\end{math} be the smallest
vector space such that all vectors of the form \begin{math}\boldsymbol{u}^{t}\prod_{i\in I}
M_{n_{i}}\end{math} lie in the affine subspace \begin{math}\boldsymbol{u}^{t} + U^t\end{math} (\begin{math}U^t\end{math} is used
as a shorthand for \begin{math}\{\boldsymbol{x}^{t} \,:\, \boldsymbol{x} \in U\}\end{math}). Such a vector
space must exist, since \begin{math}\boldsymbol{u}^{t}\end{math} is a vector of this form
(corresponding to the empty product, where \begin{math}I = \emptyset\end{math}). Likewise,
let \begin{math}V\end{math} be the smallest vector space such that all vectors of the form
\begin{math}\prod_{j\in J}M_{n_{j}}\boldsymbol{v}\end{math} lie in the affine subspace \begin{math}\boldsymbol{v} +
V\end{math}.
\begin{theorem}\label{thm:q-reg-q-quasiadd}
Let \begin{math}f\end{math} be a \begin{math}q\end{math}-regular sequence with zero-insensitive linear representation
\eqref{eq:q-regular}. The sequence \begin{math}f\end{math} is \begin{math}q\end{math}-quasiadditive with parameter \begin{math}r\end{math} if and only if all of the following statements hold:
\begin{itemize}
\item \begin{math}\boldsymbol{u}^t \boldsymbol{v} = 0\end{math},
\item \begin{math}U^t\end{math} is orthogonal to \begin{math}(M_0^r - I)\boldsymbol{v}\end{math}, i.e., \begin{math}\boldsymbol{x}^t(M_0^r - I)\boldsymbol{v} = \boldsymbol{x}^tM_0^r\boldsymbol{v} - \boldsymbol{x}^t\boldsymbol{v} = 0\end{math} for all \begin{math}\boldsymbol{x} \in U\end{math},
\item \begin{math}V\end{math} is orthogonal to \begin{math}\boldsymbol{u}^t(M_0^r - I)\end{math}, i.e., \begin{math}\boldsymbol{u}^t(M_0^r - I)\boldsymbol{y} = \boldsymbol{u}^tM_0^r\boldsymbol{y} - \boldsymbol{u}^t\boldsymbol{y} = 0\end{math} for all \begin{math}\boldsymbol{y} \in V\end{math},
\item \begin{math}U^t M_0^r V = 0\end{math}, i.e., \begin{math}\boldsymbol{x}^t M_0^r \boldsymbol{y} = 0\end{math} for all \begin{math}\boldsymbol{x} \in U\end{math} and \begin{math}\boldsymbol{y} \in V\end{math}.
\end{itemize}
\end{theorem}
\begin{proof}
The first statement \begin{math}\boldsymbol{u}^t \boldsymbol{v} =0\end{math} is equivalent to \begin{math}f(0)=0\end{math}, which we already know to be a necessary condition by Lemma~\ref{lem:simplefacts}. Note also that \begin{math}\boldsymbol{u}^t M_0^r \boldsymbol{v} = \boldsymbol{u}^t \boldsymbol{v} = 0\end{math} by the assumption that the linear representation is zero-insensitive.
For the remaining statements, we write the quasiadditivity condition in terms of our matrix representation as we did in the quasimultiplicative case:
\begin{equation*}
\boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} M_{0}^{r} \prod_{j\in J}M_{n_{j}} \boldsymbol{v} = \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} \boldsymbol{v} + \boldsymbol{u}^{t} \prod_{j\in J}M_{n_{j}} \boldsymbol{v}.
\end{equation*}
Specifically, when \begin{math}J = \emptyset\end{math}, we get
\begin{equation*}
\boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} \big( M_{0}^{r} - I \big) \boldsymbol{v} = \boldsymbol{u}^t \boldsymbol{v} = 0.\end{equation*}
Setting also \begin{math}I = \emptyset\end{math} gives us \begin{math}\boldsymbol{u}^{t} (M_0^r-I) \boldsymbol{v} = 0\end{math},
so together we obtain
\begin{equation*}\big( \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} - \boldsymbol{u}^t \big) \big( M_{0}^{r} - I \big) \boldsymbol{v} = 0.\end{equation*}
Since \begin{math}U^t\end{math} is spanned by all vectors of the form \begin{math}\boldsymbol{u}^{t}\prod_{i\in
I} M_{n_{i}} - \boldsymbol{u}^t\end{math}, the second statement follows. The proof of the third statement is analogous. Finally, if we assume that the first three statements hold, then we find that
\begin{align*}
&\boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} M_{0}^{r} \prod_{j\in J}M_{n_{j}} \boldsymbol{v} \\
&=
\big( \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} - \boldsymbol{u}^t \big) M_{0}^{r} \big( \prod_{j\in J}M_{n_{j}} \boldsymbol{v} - \boldsymbol{v} \big) +
\big( \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} - \boldsymbol{u}^t \big) M_{0}^{r} \boldsymbol{v} +
\boldsymbol{u}^t M_{0}^{r} \big( \prod_{j\in J}M_{n_{j}} \boldsymbol{v} - \boldsymbol{v} \big)\\&\quad +
\boldsymbol{u}^t M_{0}^{r} \boldsymbol{v} \\
&= \big( \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} - \boldsymbol{u}^t \big) M_{0}^{r} \big( \prod_{j\in J}M_{n_{j}} \boldsymbol{v} - \boldsymbol{v} \big) +
\big( \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} - \boldsymbol{u}^t \big) \boldsymbol{v} +
\boldsymbol{u}^t \big( \prod_{j\in J}M_{n_{j}} \boldsymbol{v} - \boldsymbol{v} \big) \\
&= \big( \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} - \boldsymbol{u}^t \big) M_{0}^{r} \big( \prod_{j\in J}M_{n_{j}} \boldsymbol{v} - \boldsymbol{v} \big)
+ \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} \boldsymbol{v} + \boldsymbol{u}^{t}\prod_{j\in J} M_{n_{j}} \boldsymbol{v}.
\end{align*}
Thus \begin{math}q\end{math}-quasiadditivity is equivalent to
\begin{equation*}\big( \boldsymbol{u}^{t}\prod_{i\in I} M_{n_{i}} - \boldsymbol{u}^t \big) M_{0}^{r} \big( \prod_{j\in J}M_{n_{j}} \boldsymbol{v} - \boldsymbol{v} \big) = 0\end{equation*}
being valid for all choices of \begin{math}I, J\end{math}, \begin{math}n_i\end{math} and \begin{math}n_j\end{math}. The desired fourth condition is clearly equivalent by definition of \begin{math}U\end{math} and \begin{math}V\end{math}.
\end{proof}
\begin{example}
For the Hamming weight of the nonadjacent form, a zero-insensitive (and also minimal) linear representation for the vector \begin{math}(h_{\mathsf{NAF}}(n),h_{\mathsf{NAF}}(n+1),h_{\mathsf{NAF}}(2n+1),1)^{t}\end{math} is
\begin{equation*}
M_{0}=
\begin{pmatrix}
1&0&0&0\\0&0&1&0\\1&0&0&1\\0&0&0&1
\end{pmatrix},\quad
M_{1}=
\begin{pmatrix}
0&0&1&0\\0&1&0&0\\0&1&0&1\\0&0&0&1
\end{pmatrix},
\end{equation*}
\begin{math}\boldsymbol{u}^{t}=(1,0,0,0)\end{math} and \begin{math}\boldsymbol{v}=(0,1,1,1)^{t}\end{math}.
The three vectors \begin{math}\mathbf{w}_1 = \boldsymbol{u}^{t}M_{1}-\boldsymbol{u}^{t}\end{math},
\begin{math}\mathbf{w}_2 = \boldsymbol{u}^{t}M_{1}^{2}-\boldsymbol{u}^{t}\end{math} and
\begin{math}\mathbf{w}_3 = \boldsymbol{u}^{t}M_{1}M_{0}M_{1}-\boldsymbol{u}^{t}\end{math} are linearly
independent. If we let \begin{math}W\end{math} be the vector space spanned by those three, it is easily verified that \begin{math}M_{0}\end{math} and \begin{math}M_{1}\end{math}
map the affine subspace \begin{math}\boldsymbol{u}^{t}+ W^t\end{math} to itself, so \begin{math}U=W\end{math} is spanned by these vectors.
Similarly, the three vectors \begin{math}M_{1}\boldsymbol{v}-\boldsymbol{v}\end{math},
\begin{math}M_{1}^{2}\boldsymbol{v}-\boldsymbol{v}\end{math} and
\begin{math}M_{1}M_{0}M_{1}\boldsymbol{v}-\boldsymbol{v}\end{math} span \begin{math}V\end{math}.
The first condition of Theorem~\ref{thm:q-reg-q-quasiadd} is obviously
true. We only have to verify the other three conditions with \begin{math}r=2\end{math} for the base vectors
of \begin{math}U\end{math} and \begin{math}V\end{math}, which is done easily. Thus \begin{math}h_{\mathsf{NAF}}\end{math} is a \begin{math}2\end{math}-regular
sequence that is also \begin{math}2\end{math}-quasiadditive, as was also proved in Section~\ref{sec:exampl-q-quasiadd}.
\end{example}
Finding the vector spaces \begin{math}U\end{math}
and \begin{math}V\end{math} is not trivial. But in a certain special
case of \begin{math}q\end{math}-regular functions, we can give a sufficient condition for
\begin{math}q\end{math}-additivity, which is easier to check. These \begin{math}q\end{math}-regular functions are output sums of
transducers as defined
in~\cite{Heuberger-Kropf-Prodinger:2015:output}: a transducer
transforms the \begin{math}q\end{math}-ary expansion of an integer \begin{math}n\end{math} deterministically into an output
sequence. We are
interested in the sum of this output sequence. Before we can state our
condition, we introduce our notation more precisely.
A transducer consists of a finite number of states, an
initial state, the
input alphabet $\{0,\ldots,q-1\}$, an output alphabet, which is a
subset of the real numbers, and transitions between two states with labels
$\varepsilon\mid\delta$ for $\varepsilon$ an input letter and $\delta$ an output
letter. We assume that the transducer is complete and deterministic,
that is for every state $s$ and input letter $\varepsilon$, there exists
exactly one transition leaving state $s$ with input label
$\varepsilon$. Additionally every state has a final output.
The transducer reads the $q$-ary expansion of an integer $n$, starting from the
least significant digit, as input, which defines a unique path starting at the initial state with the given
input as input label. The output of the transducer is the sequence of
output labels along this path together with the final output of the
final state of this path. The output sum is then the sum of this
output sequence.
The function \begin{math}h_{\mathsf{NAF}}\end{math}, see
Example~\ref{ex:hnaf-trans}, as well as many other examples, can be
represented in this way.
This output sum of a transducer is a $q$-regular sequence
\cite{Heuberger-Kropf-Prodinger:2015:output}. To obtain a linear representation, we define the matrix $N_{\varepsilon}$ for $\varepsilon\in\{0,\ldots,q-1\}$
to be the adjacency matrix of the transducer where we only take into
account transitions with input label $\varepsilon$. Note that because our
transducer is complete and deterministic, there is exactly one entry
$1$ in every row. Without loss of
generality, we say that the initial state corresponds to the first row
and column. Furthermore, the $i$-th entry of the vector
$\boldsymbol{\delta}_{\varepsilon}$ is the output label of the transition starting in
state $i$ with input label $\varepsilon$. We define the matrices
\begin{equation*}
M_{\varepsilon}=\begin{pmatrix}
N_{\varepsilon}&\boldsymbol{\delta}_{\varepsilon}&[\varepsilon=0]I\\
\boldsymbol{0}&1&\boldsymbol{0}\\
0&0&[\varepsilon=0]I
\end{pmatrix},
\end{equation*}
where $I$ is an identity matrix of the correct size, and we set \begin{math}\boldsymbol{u}^{t}=(1,0,\ldots,0)\end{math} and
\begin{equation*}
\boldsymbol{v}=
\begin{pmatrix}
\boldsymbol{b}(0)\\1\\\boldsymbol{b}(0)-N_{0}\boldsymbol{b}(0)-\boldsymbol{\delta}_{0}
\end{pmatrix},
\end{equation*}
where the entries of $\boldsymbol{b}(0)$ are the final outputs of the states.
Following \cite[Remark~3.10]{Heuberger-Kropf-Prodinger:2015:output},
the output sum of a transducer is $q$-regular with the linear
representation $(\boldsymbol{u}, (M_{\varepsilon})_{0\leq\varepsilon<q}, \boldsymbol{v})$.
\begin{example}\label{ex:hnaf-trans}
\begin{figure}
\caption{Transducer to compute the Hamming weight of the nonadjacent form.}
\label{fig:NAF}
\end{figure}
The output sum of the transducer in Figure~\ref{fig:NAF} is exactly
the Hamming weight of the nonadjacent form $h_{\mathsf{NAF}}(n)$ (see, e.g., \cite{Heuberger-Kropf-Prodinger:2015:output}). The matrices
and vectors corresponding to this transducer are
\begin{align*}
N_{0}=
\begin{pmatrix}
1&0&0\\
1&0&0\\
0&1&0
\end{pmatrix},\qquad N_{1}=
\begin{pmatrix}
0&1&0\\
0&0&1\\
0&0&1
\end{pmatrix},
\end{align*}
$\boldsymbol{\delta}_{0}^{t}=(0,0,1)$, $\boldsymbol{\delta}_{1}^{t}=(1,0,0)$ and $\boldsymbol{b}(0)^{t}=(0,0,1)$.
\end{example}
To state our condition, we also introduce the notion of a reset
sequence: a reset sequence is an input sequence which always leads to
the same state no matter in which state of the transducer we start.
Not every transducer has a reset sequence, not even every strongly
connected transducer has one. In many cases arising from combinatorics
and digit expansions
the reset sequence consists only of zeros.
\begin{prop}\label{proposition:q-add-transducer}
The output sum of a connected transducer is \begin{math}q\end{math}-quasiadditive with parameter \begin{math}r\end{math} if the following
conditions are satisfied:
\begin{itemize}
\item The transducer has the reset sequence \begin{math}0^{r}\end{math} leading to the
initial state.
\item For every state, the output sum along the path of the reset
sequence \begin{math}0^{r}\end{math} equals the final output of
this state.
\item Additional zeros at the end of the input sequence do not
change the output sum.
\end{itemize}
\end{prop}
\begin{proof}
Let $\boldsymbol{f}(n)$ be the vector corresponding to the linear representation
$(\boldsymbol{u}, (M_{\varepsilon})_{0\leq\varepsilon<q}, \boldsymbol{v})$ as defined
in~\eqref{eq:q-regular-recursive}. By induction, we obtain that the
middle coordinate of $\boldsymbol{f}(n)$ is always $1$ and the coordinates below
are always $0$ if $n\geq1$. We denote the coordinates above by $\boldsymbol{b}(n)$.
The output sum of the transducer is
the first coordinate of $\boldsymbol{b}(n)$. By~\eqref{eq:q-regular-recursive},
we obtain the recursion
\begin{equation}\label{eq:output-sum-recursive}
\boldsymbol{b}(qn+\varepsilon)=N_{\varepsilon}\boldsymbol{b}(n)+\boldsymbol{\delta}_{\varepsilon}
\end{equation}
if \begin{math}qn+\varepsilon>0\end{math}.
The third condition ensures that leading zeros do not
change anything. Thus the connectivity of the underlying graph
implies that \eqref{eq:output-sum-recursive} also holds
for \begin{math}qn+\varepsilon=0\end{math}. Thus, the last
coordinates of \begin{math}\boldsymbol{v}\end{math} are zero and we could reduce the dimension of the
linear representation.
Let \begin{math}J\end{math} be finite and \begin{math}n_{j}\in\{0,\ldots,q-1\}\end{math} for \begin{math}j\in J\end{math}. The first condition implies that
\begin{equation*}
\prod_{j\in J}N_{n_{j}}N_{0}^{r}=
\begin{pmatrix}
1&0&\cdots&0\\
\vdots&\vdots&\ddots&\vdots\\
1&0&\cdots&0
\end{pmatrix},
\end{equation*}
and the second condition implies that
\begin{equation*}
\prod_{j\in J}N_{n_{j}}\boldsymbol{b}(0)=\prod_{j\in J}N_{n_{j}}(I+\cdots+N_{0}^{r-1})\boldsymbol{\delta}_{0}.
\end{equation*}
Using \eqref{eq:output-sum-recursive} recursively together with these two conditions gives
\begin{align*}
\boldsymbol{b}(q^{k+r}m+n)&=\prod_{j=0}^{k-1}N_{n_{j}}\boldsymbol{b}(q^{r}m)+\sum_{j=0}^{k-1}\prod_{i=0}^{j-1}N_{n_{i}}\boldsymbol{\delta}_{n_{j}}\\
&=\prod_{j=0}^{k-1}N_{n_{j}}N_{0}^{r}\boldsymbol{b}(m)+\prod_{j=0}^{k-1}N_{n_{j}}(I+\cdots+N_{0}^{r-1})\boldsymbol{\delta}_{0}+\boldsymbol{b}(n)-\prod_{j=0}^{k-1}N_{n_{j}}\boldsymbol{b}(0)\\
&=\begin{pmatrix}
1&0&\cdots&0\\
\vdots&\vdots&\ddots&\vdots\\
1&0&\cdots&0
\end{pmatrix}\boldsymbol{b}(m)+\boldsymbol{b}(n)
\end{align*}
for all \begin{math}n\end{math} with $q$-ary digit expansion \begin{math}(n_{k-1}\cdots n_{0})\end{math} and all
\begin{math}m\end{math}. This implies that the first coordinate of \begin{math}\boldsymbol{b}(n)\end{math} is \begin{math}q\end{math}-quasiadditive.
\end{proof}
\begin{example}
We now continue Example~\ref{ex:hnaf-trans} and check whether the
conditions of Proposition~\ref{proposition:q-add-transducer} are
satisfied for the transducer given in Figure~\ref{fig:NAF}. First, a
reset sequence is $00$ (i.e., $r=2$) and leads to the initial
state. Second, the output sum along the path of the reset sequence
is $0$, $0$ and $1$ for the left, the middle and the right state,
respectively, which is exactly the final output of the corresponding
state. Furthermore, leading zeros do not change the output sum. Thus we have another proof that $h_{\mathsf{NAF}}(n)$ is a 2-quasiadditive function with parameter
$r=2$.
\end{example}
\section{A central limit theorem for $q$-quasiadditive and -multiplicative functions}
In this section, we prove a central limit theorem for
\begin{math}q\end{math}-quasimultiplicative functions taking only positive values.
By Proposition~\ref{prop:trivial}, this also implies a central
limit theorem for \begin{math}q\end{math}-quasiadditive functions.
To this end, we define a generating function: let \begin{math}f\end{math} be a \begin{math}q\end{math}-quasimultiplicative function with positive values, let \begin{math}\mathcal{M}_k\end{math} be the set of all nonnegative integers less than \begin{math}q^k\end{math} (i.e., those positive integers whose \begin{math}q\end{math}-ary expansion needs at most \begin{math}k\end{math} digits), and set
\begin{equation*}F(x,t) = \sum_{k \geq 0} x^k \sum_{n \in \mathcal{M}_k} f(n)^t.\end{equation*}
The decomposition of Proposition~\ref{prop:split} now translates
directly to an alternative representation for \begin{math}F(x,t)\end{math}: let \begin{math}\mathcal{B}\end{math} be
the set of all positive integers not divisible by \begin{math}q\end{math} whose \begin{math}q\end{math}-ary representation does not contain the block \begin{math}0^{r}\end{math}, let \begin{math}\ell(n)\end{math} denote the length of the \begin{math}q\end{math}-ary representation of \begin{math}n\end{math}, and define the function \begin{math}B(x,t)\end{math} by
\begin{equation*}B(x,t) = \sum_{n \in \mathcal{B}} x^{\ell(n)} f(n)^t.\end{equation*}
We remark that in the special case where \begin{math}q=2\end{math} and \begin{math}r=1\end{math}, this simplifies greatly to
\begin{equation}\label{eq:q2_r1}
B(x,t) = \sum_{k \geq 1} x^{k} f(2^k-1)^t.
\end{equation}
\begin{prop}\label{prop:gf}
The generating function \begin{math}F(x,t)\end{math} can be expressed as
\begin{equation*}F(x,t) = \frac{1}{1-x} \cdot \frac{1}{1 - \frac{x^r}{1-x} B(x,t)} \Big( 1 + (1+x+\cdots+x^{r-1})B(x,t) \Big) = \frac{1+(1+x+\cdots+x^{r-1})B(x,t)}{1-x-x^rB(x,t)}.\end{equation*}
\end{prop}
\begin{proof}
The first factor stands for the initial sequence of leading zeros, the
second factor for a (possibly empty) sequence of blocks consisting of
an element of \begin{math}\mathcal{B}\end{math} and \begin{math}r\end{math} or more zeros, and the last factor for the
final part, which may be empty or an element of \begin{math}\mathcal{B}\end{math} with up to \begin{math}r-1\end{math} zeros (possibly none) added at the end.
\end{proof}
Under suitable assumptions on the growth of a \begin{math}q\end{math}-quasiadditive or \begin{math}q\end{math}-quasimultiplicative function, we can exploit the expression of Proposition~\ref{prop:gf} to prove a central limit theorem.
\begin{defi}
We say that a function \begin{math}f\end{math} has \emph{at most polynomial growth} if
\begin{math}f(n)=O(n^{c})\end{math} and \begin{math}f(n) = \Omega(n^{-c})\end{math} for a fixed \begin{math}c\geq
0\end{math}. We say that \begin{math}f\end{math} has \emph{at most logarithmic growth} if
\begin{math}f(n)=O(\log n)\end{math}.
\end{defi}
Note that our definition of at most polynomial growth is slightly
different from the usual one: the extra condition \begin{math}f(n) =
\Omega(n^{-c})\end{math} ensures that the absolute value
of \begin{math}\log f(n)\end{math} does not grow too fast.
\begin{lemma}\label{lemma:singularity} Assume that the positive, \begin{math}q\end{math}-quasimultiplicative function \begin{math}f\end{math} has at most polynomial growth.
There exist positive constants \begin{math}\delta\end{math} and \begin{math}\varepsilon\end{math} such that
\begin{itemize}
\item \begin{math}B(x,t)\end{math} has radius of convergence \begin{math}\rho(t) > \frac1q\end{math} whenever \begin{math}|t| \leq \delta\end{math}.
\item For \begin{math}|t| \leq \delta\end{math}, the equation \begin{math}x + x^r B(x,t) = 1\end{math} has a complex solution \begin{math}\alpha(t)\end{math} with \begin{math}|\alpha(t)| < \rho(t)\end{math} and no other solutions with modulus \begin{math}\leq (1+\varepsilon)|\alpha(t)|\end{math}.
\item Thus the generating function \begin{math}F(x,t)\end{math} has a simple pole at \begin{math}\alpha(t)\end{math} and no further singularities of modulus \begin{math}\leq (1+ \varepsilon)|\alpha(t)|\end{math}.
\item Finally, \begin{math}\alpha\end{math} is an analytic function of \begin{math}t\end{math} for \begin{math}|t| \leq \delta\end{math}.
\end{itemize}
\end{lemma}
\begin{proof}
The polynomial growth of \begin{math}f\end{math} implies that \begin{math}C^{-1}\phi^{-\ell(n)} \leq f(n)\leq C
\phi^{\ell(n)}\end{math} for some positive constants \begin{math}C\end{math} and \begin{math}\phi\end{math}.
Moreover, \begin{math}\mathcal{B}\end{math} contains \begin{math}O(\beta^{\ell})\end{math} elements whose \begin{math}q\end{math}-ary
expansion has length at most \begin{math}\ell\end{math}, where \begin{math}\beta < q\end{math} is a root of
the polynomial \begin{math}x^r-(q-1)x^{r-1}-\cdots-(q-1)x-(q-1)\end{math}. This implies
that \begin{math}B(x,t)\end{math} is indeed an analytic function of \begin{math}x\end{math} for \begin{math}|x| < \beta^{-1}
\phi^{\delta}\end{math} whenever \begin{math}|t| \leq \delta\end{math}. For suitably small
\begin{math}\delta\end{math}, \begin{math}\beta^{-1} \phi^{\delta}\end{math} is greater than \begin{math}\frac1q\end{math}, which proves the first part of
our statement. Next note that
\begin{equation*}
B(x,0)=\frac{(q-1)x}{1-(q-1)x-\cdots-(q-1)x^{r}},
\end{equation*}
and it follows by an easy calculation that
$$1 - x - x^r B(x,0) = \frac{(1-x)(1-qx)}{1 - q x + (q-1)x^{r+1}}.$$
Hence \begin{math}\alpha(0) = \frac1q\end{math} is the only solution of the equation \begin{math}x + x^r B(x,0) = 1\end{math}, and it is a simple root.
All remaining statements are therefore simple consequences of the implicit function theorem.
\end{proof}
\begin{lemma}\label{lem:sing_anal}
Assume that the positive, \begin{math}q\end{math}-quasimultiplicative function \begin{math}f\end{math} has at most polynomial growth.
With \begin{math}\delta\end{math} and \begin{math}\varepsilon\end{math} as in the previous lemma, we have, uniformly in \begin{math}t\end{math},
\begin{equation*}[x^k] F(x,t) = \kappa(t) \cdot \alpha(t)^{-k} \big(1 + O((1+\varepsilon)^{-k})\big)\end{equation*}
for some function \begin{math}\kappa\end{math}. Both \begin{math}\alpha\end{math} and \begin{math}\kappa\end{math} are analytic functions of \begin{math}t\end{math} for \begin{math}|t| \leq \delta\end{math}, and \begin{math}\kappa(t) \neq 0\end{math} in this region.
\end{lemma}
\begin{proof}
This follows from the previous lemma by means of singularity analysis,
see \cite[Chapter VI]{Flajolet-Sedgewick:ta:analy}.
\end{proof}
\begin{theorem}\label{thm:clt-mult}
Assume that the positive, \begin{math}q\end{math}-quasimultiplicative function \begin{math}f\end{math} has at most polynomial growth.
Let \begin{math}N_k\end{math} be a randomly chosen integer in \begin{math}\{0,1,\ldots,q^k-1\}\end{math}. The
random variable \begin{math}L_k = \log f(N_k)\end{math} has mean \begin{math}\mu k + O(1)\end{math} and
variance \begin{math}\sigma^2 k + O(1)\end{math}, where the two constants are given by
\begin{equation*}\mu = \frac{B_t(1/q,0)}{q^{2r}}\end{equation*}
and
\begin{multline}
\sigma^2= -B_{t}(1/q,0)^{2} {q}^{-4r+1}(q-1)^{-1} + 2B_{t}(1/q,0)^{2} {q}^{-3r+1}(q-1)^{-1} -B_{t}(1/q,0)^{2}{q}^{-4r}(q-1)^{-1} \\-
4rB_{t}(1/q,0)^{2} {q}^{-4r} + B_{tt}(1/q,0){q}^{-2r}
- 2B_{t}(1/q,0)
B_{tx}(1/q,0)
{q}^{-4r-1} .
\end{multline}
If \begin{math}f\end{math} is not the constant function \begin{math}f \equiv 1\end{math}, then \begin{math}\sigma^2 \neq 0\end{math} and the normalised random variable \begin{math}(L_k - \mu k)/(\sigma \sqrt{k})\end{math} converges weakly to a standard Gaussian distribution.
\end{theorem}
\begin{proof}
The moment
generating function of \begin{math}L_{k}\end{math} is \begin{math}[x^{k}]F(x,t)/q^{k}\end{math}.
Hence the statement follows from Lemma~\ref{lem:sing_anal} by means of the Quasi-power theorem, see
\cite{Hwang:1998} or \cite[Chapter IX.5]{Flajolet-Sedgewick:ta:analy}. The only part that we actually have to verify is
that \begin{math}\sigma^2 \neq 0\end{math} unless \begin{math}f\end{math} is constant.
Assume that \begin{math}\sigma^{2}=0\end{math}. We first consider the case that
\begin{math}\log\alpha(t)\end{math} is not a linear function. Let \begin{math}s\end{math} be the least integer
greater than \begin{math}1\end{math} such that \begin{math}t^s\end{math} occurs with a nonzero coefficient in the Taylor expansion of \begin{math}\log \alpha(t)\end{math} at \begin{math}t=0\end{math}, i.e.,
\begin{equation*}\log \alpha(t) = \log \alpha(0) + a t + b t^s + O(t^{s+1}).\end{equation*}
Note that $a = -\mu$. Moreover, by the assumption that $\sigma^2 = 0$, we must have \begin{math}s\geq 3\end{math}. Since \begin{math}\alpha(0) = \frac{1}{q}\end{math} and \begin{math}\kappa(0)=1\end{math}, it follows that
\begin{align*}
\mathbb{E}(\exp(tL_k)) &= \frac{[x^k] F(x,t)}{q^k} = \exp \Big( \log \kappa(t) - k\log \alpha(t) - k \log q + O\big((1+\varepsilon)^{-k} \big) \Big) \\
&= \exp\Big( -akt -bkt^s +O \big(kt^{s+1}+t+(1+\varepsilon)^{-k} \big) \Big).
\end{align*}
Considering the normalised version \begin{math}R_k = \frac{L_k - \mu k}{k^{1/s}}\end{math} of the random variable \begin{math}L_k\end{math}, we get
\begin{equation*}\mathbb{E} \Big( \exp\big( \tau R_k \big) \Big) = \exp \Big( -b\tau^s + O \big(k^{-1/s} + (1+\varepsilon)^{-k} \big)\Big)\end{equation*}
for fixed \begin{math}\tau\end{math}.
So for every complex $\tau$, we have \begin{math}\lim_{k \to \infty} \mathbb{E} ( \exp\big( \tau R_k \big) ) =
\exp(-b\tau^s)\end{math}, which is a continuous
function. By L\'evy's continuity theorem, this would imply convergence
in distribution of \begin{math}R_k\end{math} to a random variable with moment generating
function \begin{math}M(\tau) = \exp(-b\tau^s)\end{math}. However, there is no such random
variable: all derivatives at \begin{math}\tau=0\end{math} are finite and the second
derivative of \begin{math}\exp(-b\tau^s)\end{math} at \begin{math}\tau=0\end{math} is 0, thus the second
moment is \begin{math}0\end{math}. A random variable whose second moment is \begin{math}0\end{math} is almost surely
equal to \begin{math}0\end{math} and would thus have moment generating function \begin{math}1\end{math}.
The only remaining possibility is that \begin{math}\log \alpha(t)\end{math} is linear:
\begin{math}\log \alpha(t) = \log \alpha(0) + a t\end{math}, thus \begin{math}\alpha(t) = \alpha(0)
e^{at} = e^{at}/q\end{math}. If we plug this into the defining equation of \begin{math}\alpha(t)\end{math}, we obtain
\begin{equation*}1 = \frac{e^{at}}{q} + \frac{e^{art}}{q^r} \sum_{n \in \mathcal{B}} q^{-\ell(n)} e^{a \ell(n)t} f(n)^t\end{equation*}
identically for \begin{math}|t| \leq \delta\end{math}. However, the right side of this identity has strictly positive second derivative for real \begin{math}t\end{math} unless \begin{math}a = 0\end{math} and \begin{math}f(n) = 1\end{math} for all \begin{math}n \in \mathcal{B}\end{math} (in which case \begin{math}f(n) = 1\end{math} for all \begin{math}n\end{math}). Thus \begin{math}\sigma^2 \neq 0\end{math} unless \begin{math}f \equiv 1\end{math}.
\end{proof}
\begin{cor}\label{cor:clt-add}
Assume that the \begin{math}q\end{math}-quasiadditive function \begin{math}f\end{math} has at most logarithmic growth.
Let \begin{math}N_k\end{math} be a randomly chosen integer in \begin{math}\{0,1,\ldots,q^k-1\}\end{math}. The
random variable \begin{math}L_k = f(N_k)\end{math} has mean \begin{math}\hat\mu k + O(1)\end{math} and
variance \begin{math}\hat\sigma^2 k + O(1)\end{math}, where the two constants \begin{math}\hat\mu\end{math} and \begin{math}\hat\sigma^2\end{math} are given by
the same formulas as in Theorem~\ref{thm:clt-mult}, with \begin{math}B(x,t)\end{math} replaced by
\begin{equation*}
\hat B(x,t) = \sum_{n \in \mathcal{B}} x^{\ell(n)} e^{f(n)t}.
\end{equation*}
If \begin{math}f\end{math} is not the constant function \begin{math}f \equiv 0\end{math}, then the normalised random variable \begin{math}(L_k - \hat\mu k)/(\hat\sigma \sqrt{k})\end{math} converges weakly to a standard Gaussian distribution.
\end{cor}
\begin{remark}
By means of the Cram\'er-Wold device (and Corollary~\ref{cor:lin_comb}), we also obtain joint normal distribution of tuples of \begin{math}q\end{math}-quasiadditive functions.
\end{remark}
We now revisit the examples discussed in
Section~\ref{sec:exampl-q-quasiadd} and state the corresponding
central limit theorems. Some of them are well known while others are
new. We also provide numerical values for the constants in mean and variance.
\begin{example}[see also \cite{Kirschenhofer:1983:subbl,Drmota:2000}]The number of blocks \begin{math}0101\end{math} occurring in the binary
expansion of \begin{math}n\end{math} is a \begin{math}2\end{math}-quasiadditive function of at most
logarithmic growth. Thus by Corollary~\ref{cor:clt-add}, the
standardised random variable is asymptotically normally distributed, the constants being \begin{math}\hat\mu = \frac1{16}\end{math} and \begin{math}\hat\sigma^2 = \frac{17}{256}\end{math}.
\end{example}
\begin{example}[see also \cite{Thuswaldner:1999,Heuberger-Kropf:2013:analy}]
The Hamming weight of the nonadjacent form is \begin{math}2\end{math}-quasiadditive
with at most logarithmic growth (as the length of the NAF of \begin{math}n\end{math} is logarithmic). Thus by Corollary~\ref{cor:clt-add}, the
standardised random variable is asymptotically normally distributed. The associated constants are \begin{math}\hat\mu = \frac13\end{math} and \begin{math}\hat\sigma^2 = \frac2{27}\end{math}.
\end{example}
\begin{example}[see Section~\ref{sec:exampl-q-quasiadd}]
The number of optimal \begin{math}\{0,1,-1\}\end{math}-representations is
\begin{math}2\end{math}-quasi\-mul\-ti\-plica\-tive. As it is always greater or equal to \begin{math}1\end{math} and
\begin{math}2\end{math}-regular, it has at most polynomial growth. Thus
Theorem~\ref{thm:clt-mult} implies that the standardised logarithm
of this random variable is asymptotically normally distributed with
numerical constants given by \begin{math}\mu\approx 0.060829\end{math}, \begin{math}\sigma^{2}\approx 0.038212\end{math}.
\end{example}
\begin{example}[see Section~\ref{sec:exampl-q-quasiadd}] Suppose that the sequence \begin{math}s_1,s_2,\ldots\end{math} satisfies \begin{math}s_{n}\geq 1\end{math} and \begin{math}s_{n}=O(c^{n})\end{math} for a constant
\begin{math}c\geq 1\end{math}.
The run length transform \begin{math}t(n)\end{math} of \begin{math}s_{n}\end{math}
is \begin{math}2\end{math}-quasimultiplicative. As \begin{math}s_{n}\geq 1\end{math} for all \begin{math}n\end{math}, we have \begin{math}t(n)\geq
1\end{math} for all \begin{math}n\end{math} as well. Furthermore, there exists a constant \begin{math}A\end{math} such that \begin{math}s_n \leq A c^n\end{math} for all \begin{math}n\end{math}, and the sum of all run lengths is bounded by the length of the binary expansion, thus
\begin{equation*}
t(n)=\prod_{i\in\mathcal L(n)}s_{i} \leq \prod_{i \in \mathcal{L}(n)} (A c^i) \leq (Ac)^{1+\log_2 n}.
\end{equation*}
Consequently, \begin{math}t(n)\end{math} is positive and has at most polynomial growth. By
Theorem~\ref{thm:clt-mult}, we obtain an asymptotic normal
distribution for the standardised random variable \begin{math}\log t(N_{k})\end{math}. The constants \begin{math}\mu\end{math} and \begin{math}\sigma^2\end{math} in mean and variance are given by
\begin{equation*}
\mu = \sum_{i \geq 1} (\log s_i) 2^{-i-2}
\end{equation*}
and
\begin{equation*}
\sigma^2 = \sum_{i \geq 1} (\log s_i)^2 \big(2^{-i-2} - (2i-1)2^{-2i-4} \big) - \sum_{j > i \geq 1} (\log s_i)(\log s_j) (i+j-1) 2^{-i-j-3}.
\end{equation*}
These formulas can be derived from those given in Theorem~\ref{thm:clt-mult} by means of the representation~\eqref{eq:q2_r1}, and the terms can also be interpreted easily: write \begin{math}\log t(n) = \sum_{i \geq 1} X_i(n) \log s_i\end{math}, where \begin{math}X_i(n)\end{math} is the number of runs of length \begin{math}i\end{math} in the binary representation of \begin{math}n\end{math}. The coefficients in the two formulas stem from mean, variance and covariances of the \begin{math}X_i(n)\end{math}.
In the special case that
\begin{math}s_{n}\end{math} is the Jacobsthal sequence ($s_n = \frac13(2^{n+2} - (-1)^n)$, see Section~\ref{sec:exampl-q-quasiadd}), we have the
numerical values
\begin{math}\mu \approx 0.429947\end{math}, \begin{math}\sigma^{2} \approx 0.121137\end{math}.
\end{example}
Let us finally show that the central limit theorem holds in a slightly more general version, where we pick an integer uniformly at random from the set $\{0,1,2,\ldots,K-1\}$ ($K$ not necessarily being a power of $q$ any longer). We first state and prove our result for $q$-quasiadditive functions; it automatically transfers to $q$-quasimultiplicative functions by Proposition~\ref{prop:trivial}.
\begin{theorem}
Assume that the \begin{math}q\end{math}-quasiadditive function \begin{math}f\end{math} has at most logarithmic growth, and that $f$ is not the constant function $f \equiv 0$. Let \begin{math}M_K\end{math} be a randomly chosen integer in \begin{math}\{0,1,\ldots,K-1\}\end{math}. The random variable
$$\frac{f(M_K) - \hat\mu \log_q K}{\hat\sigma \sqrt{\log_q K}},$$
where the two constants $\hat\mu$ and $\hat\sigma^2$ are the same as in Corollary~\ref{cor:clt-add}, converges weakly to a standard Gaussian distribution.
\end{theorem}
\begin{proof}
Let $L_1$ and $L_2$ be the largest integers for which we have $q^{L_1}
< K/\log^2 K$ and $q^{L_2} < K/\log K$, respectively. For each
nonnegative integer $m < K$, we consider (if it exists) a representation of the form
\begin{equation}\label{eq:good_rep}
m = q^{k+r} a + b,
\end{equation}
where $b < q^k$ and $L_1 \leq k \leq L_2$. If there are two or more such representations for a specific $m$, we take the one for which $k$ is maximal so as to obtain a unique representation. If $m$ does not have a representation of this form, then it does not have $r$ consecutive zeros in its $q$-ary representation anywhere in the block ranging from the $(L_1+1)$-th to the $(L_2+r)$-th digit, counting from the least significant digit. The proportion of such integers is
$$O \Big( (1-q^{-r})^{(L_2-L_1)/r} \Big) = O \Big( (1-q^{-r})^{(\log\log K)/r} \Big),$$
which becomes negligible as $K \to \infty$.
If however $m$ can be represented in the form~\eqref{eq:good_rep}, then we have
$$f(m) = f(a) + f(b)$$
by quasiadditivity of $f$. Moreover, $a = O(\log^2 K)$ by the
definition of $L_1$, so $f(a) = O(\log \log K)$ since we assumed $f$
to have at most logarithmic growth. For given $a$ and $k$, $b$ can be
any integer in the set $\{0,1,\ldots,q^k-1\}$, unless $a = \lfloor
K/q^{k+r} \rfloor$. In the former case, we can identify $b$ with $N_k$, the random variable defined in Theorem~\ref{thm:clt-mult} and
Corollary~\ref{cor:clt-add}. The latter case, however, is negligible, since it only accounts for a proportion of at most
$$\frac{1}{K}\sum_{k=L_1}^{L_2} q^k = O(q^{L_2}/K) = O(1/\log K)$$
values of $m$. Now we condition on the event that the random integer $M_K$ has a representation of the form~\eqref{eq:good_rep} for certain fixed $k$ and $a \neq \lfloor
K/q^{k+r} \rfloor$. For every real number $x$, we have
\begin{equation}\label{eq:conv-distr-M}
\begin{aligned}
\mathbb{P} \Big( f(M_K) \leq \hat\mu \log_q K + x \hat \sigma \sqrt{\log_q K} \,\Big|\,& q^{k+r} a \leq M_K < q^{k+r} a + q^k \Big) \\
&= \mathbb{P} \Big( f(N_k) \leq \hat \mu \log_q K + x \hat \sigma
\sqrt{\log_q K} - f(a) \Big)\\
&=\mathbb{P}\Big(\frac{f(N_{k})-\hat \mu \log_q K - f(a)}{\hat \sigma
\sqrt{\log_q K}}\leq x\Big).
\end{aligned}
\end{equation}
Note that $k = \log_q K + O(\log \log K)$, so
$$\frac{f(N_{k})-\hat \mu \log_q K - f(a)}{\hat \sigma
\sqrt{\log_q K}} = \frac{f(N_k) - \hat\mu k}{\hat\sigma \sqrt{k}} + O \Big( \frac{\log \log K}{\sqrt{\log K}} \Big).$$
Let $\Phi(x) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^x e^{-t^2/2}\,dt$ denote the distribution function of a standard Gaussian distribution.
By Corollary~\ref{cor:clt-add}, and because $\Phi$ is continuous, we have
$$\mathbb{P}\Big(\frac{f(N_{k})-\hat \mu \log_q K - f(a)}{\hat \sigma \sqrt{\log_q K}}\leq x\Big) = \Phi(x) + o(1),$$
and this holds uniformly in $x$, $a$ and $k$ as $K \to \infty$ (in fact, one can make the speed of convergence explicit by means of the Quasi-power theorem).
Summing~\eqref{eq:conv-distr-M} over all possible values of $a$ and $k$, we obtain
$$\lim_{K \to \infty} \mathbb{P} \Big( f(M_K) \leq \hat\mu \log_q K + x \hat \sigma \sqrt{\log_q K} \,\Big) = \Phi(x)$$
for all real numbers $x$, which is what we wanted to prove.
\end{proof}
\begin{cor}
Assume that the positive, \begin{math}q\end{math}-quasimultiplicative function \begin{math}f\end{math} has at most polynomial growth, and that $f$ is not the constant function $f \equiv 1$. Let \begin{math}M_K\end{math} be a randomly chosen integer in \begin{math}\{0,1,\ldots,K-1\}\end{math}. The random variable
$$\frac{f(M_K) - \mu \log_q K}{\sigma \sqrt{\log_q K}},$$
where the two constants $\mu$ and $\sigma^2$ are the same as in Theorem~\ref{thm:clt-mult}, converges weakly to a standard Gaussian distribution.
\end{cor}
\end{document}
|
\begin{document}
\title[Compressible Navier-Stokes equations with a class of large data]{Global solutions to the isentropic compressible Navier-Stokes equations with a class of large initial data}
\author{Daoyuan Fang, Ting Zhang, Ruizhao Zi*}
\address{Department of Mathematics, Zhejiang University, Hangzhou 310027, China}
\email{[email protected]}
\address{Department of Mathematics, Zhejiang University, Hangzhou 310027, China}
\email{[email protected]}
\address{*Corresponding author. School of Mathematics and Statistics \& Hubei Key Laboratory of Mathematical Sciences, Central China Normal University, Wuhan 430079, China}
\email{[email protected]}
\subjclass[2010]{35Q35, 76N10}
\keywords{Compressible Navier-Stokes equations, global well-posedness, large data}
\begin{abstract}
In this paper, we consider the global well-posedness problem of the isentropic compressible Navier-Stokes equations in the whole space $\mathbb{R}^N$ with $N\ge2$. In order to better reflect the characteristics of the dispersion equation, we make full use of the role of the frequency on the integrability and regularity of the solution, and prove that the isentropic compressible Navier-Stokes equations admit global solutions when the initial data are close to a stable equilibrium in the sense of a suitable hybrid Besov norm. As a consequence, initial velocities with arbitrary $\dot{B}^{\fr{N}{2}-1}_{2,1}$ norm of the potential part $\Pe^\bot u_0$, as well as large highly oscillating initial velocities, are allowed in our results. The proof relies heavily on the dispersive estimates for the system of acoustics, and a careful
study of the nonlinear terms.
\end{abstract}
\maketitle
\section{Introduction}
The isentropic compressible Navier-Stokes equations are governed by conservation of mass and conservation of momentum:
\begin{eqnarray}\label{CNS}
\begin{cases}
\pr_t\rho+\mathrm{div}(\rho u)=0,\ \ \ t>0,\ x\in\mathbb{R}^N,\ N\geq2,\\
\rho(\pr_tu+u\cdot\nb u)+\nb P(\rho)-\mu\Delta u-(\lm+\mu)\nb\mathrm{div} u=0,\\
(\rho,u)\rightarrow (\bar{\rho},0),\ \textrm{ as }|x|\rightarrow\infty,\\
(\rho, u)|_{t=0}=(\rho_0, u_0).
\end{cases}
\end{eqnarray}
where the unknowns $\rho$ and $u$ are the density and velocity of the fluid, respectively. $P=P(\rho)$ is the pressure, which is a smooth function of $\rho$. The viscous coefficients $\mu$ and $\lambda$ are assumed to be constants, satisfying the following physical restrictions:
\begin{equation}\label{vis-coefficients}
\mu>0, \quad 2\mu+N\lambda\ge0,
\end{equation}
with $N\ge2$ the spatial dimension. Clearly, \eqref{vis-coefficients} implies $\nu:=\lm+2\mu>0$, which, together with \eqref{vis-coefficients} ensures the ellipticity for the Lam\'e operator $\mu\Delta+(\lm+\mu)\nb\mathrm{div}$. Moreover, without loss of generality, we assume that $\bar{\rho}=1$ and
\begin{equation}\label{a-P}
P'(1)=1.
\end{equation}
There are huge literatures on the well-posedness results of the compressible Navier-Stokes equations.
To the best of our knowledge, the local existence and uniqueness of classical solutions are first established in \cite{Nash62, Serrin59} with $\rho_0$ bounded away from zero.
For the case that the initial density may vanish in open sets, see \cite{CCK04,SS93}. The global classical solutions were first
obtained by Matsumura and Nishida \cite{MN80} for initial data $(\rho_0, u_0)$ close to an equilibrium $(\bar{\rho}, 0)$ in $H^3\times H^3$, $\bar{\rho}>0$.
Later, by exploiting some smoothing effects of the so-called {\em effective viscous flux} $F := (2\mu +\lambda)\mathrm{div} u - P(\rho) + P(\bar{\rho})$, Hoff \cite{Hoff951, Hoff952} constructed the global weak solutions with discontinuous initial data. For
arbitrary initial data and $\bar{\rho}=0$, the breakthrough was made by Lions \cite{Lions98}, where he proved the global existence of weak solutions
provided the specific heat ratio $\gamma$ is appropriately large, for
example, $\gamma\geq3N/(N+2), N=2, 3$. Later, Feireisl, Novotn\'{y}
and Petzeltov\'{y} \cite{FNP01} improved Lions's results to the
case $\gamma>\frac{N}{2}$. If the initial data possess
some symmetric properties, Jiang and Zhang \cite{JZ01, JZ03}
obtained the global weak solutions for any $\gamma>1$. Even in the two dimensional case, the uniqueness of weak
solutions is still an open problem up to now. For the case of {\em small energy}, Huang, Li and Xin \cite{HLX12} recently established the global existence and uniqueness of classical solutions, which can be regarded as a uniqueness and regularity theory of Lions-Feireisl's weak solutions.
The common point among all these papers above is that they did not use scaling considerations, which can help us to find solution spaces as large as possible. This approach goes back to the pioneering work by Fujita and Kato \cite{FK64} for the classical incompressible Navier-Stokes equations:
\begin{eqnarray}\label{INS}
\begin{cases}
\pr_tv+v\cdot\nb v-\mu\Dl v+\nb\Pi=0, \ \ \ t>0,\ x\in\mathbb{R}^N,\ N\geq2,\\
\mathrm{div} v=0,\\
v|_{t=0}=v_0.
\end{cases}
\end{eqnarray}
The classical incompressible Navier-Stokes equations, the system
\eqref{INS}, possesses a structure of scaling invariance. Indeed, if $v$ is a
solution of \eqref{INS} on a time interval $[0,T]$ with initial
data $v_0$, then the vector field $v_\lambda$ defined by
$$
v_\lambda(t,x)=\lambda v(\lambda^2t,\lambda x)
$$
is also a solution of \eqref{INS} on the time interval
$[0,\lambda^{-2}T]$ with the initial data $\lambda v_0(\lambda
x)$. There are many works considering the global well-posedness for the classical incompressible Navier-Stokes equations
\eqref{INS} in the scaling invariant spaces, like \cite{Cannone93,Ca97,FK64,Kato,Koch01} etc. The importance of these results can be illustrated by the following example \cite{Ca97} in three dimensional case: if $\phi$
is a function in the Schwartz space $\mathcal{S}
(\mathbb{R}^3)$, let us introduce the family of divergence free vector fields
\begin{equation}
\label{1.4-in} {\phi_\varepsilon}:=\varepsilon^{\al-1}\sin\left(\fr{x_3}{\varepsilon}\right)(-\pr_2 {\phi}, \pr_1 {\phi}, 0).
\end{equation}
Then, for small $\varepsilon$, the size of $\|\phi_\varepsilon\|_{BMO^{-1}}$ is $\varepsilon^{\al}$. The result in \cite{Koch01} implies that the classical incompressible Navier-Stokes system \eqref{INS} is globally well-posed with the initial data $v_0=\phi_\varepsilon$ for sufficiently small $\varepsilon$. If Supp$\widehat{\phi}\subset B(0,R)=\{\xi\in \mathbb{R}^3, |\xi|\leq R\}$, then Supp$\widehat{\phi_\varepsilon}\subset B((0,0,\frac{1}{\varepsilon}),R)=\{\xi\in \mathbb{R}^3, |\xi-(0,0,\frac{1}{\varepsilon})|\leq R\}$. Thus, such a class of initial data $v_0=\phi_\varepsilon$ has an interesting property that in the frequency space, it almost concentrates on the high frequency part. We would like to remark that due to the parabolic property of the system \eqref{INS}, the high frequency part of the solution can decay very fast. A natural question which arises is: what will happen when the initial data almost concentrate on the low frequency part?
Inspired by this question, let us come back to the isentropic compressible Navier-Stokes equations \eqref{CNS}. In this case, the first work following the scaling invariant approach was given by Danchin, see \cite{Danchin00}, who proved the global well-posedness of strong solutions to \eqref{CNS} with initial data $(\rho_0, u_0)$ close to a stable equilibrium in
\begin{equation}\label{Ddata}
\left(\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}}_{2,1}\right)\times\dot{B}^{\fr{N}{2}-1}_{2,1}.
\end{equation}
In fact, \eqref{CNS} is not really invariant under the transformation
\begin{gather}\label{scaling}
\begin{cases}
(\rho_0, u_0)\rightarrow (\rho_0(l x), l u_0(l x)),\\
(\rho(t,x), u(t,x))\rightarrow (\rho(l^2t,l x), l u(l^2t,l x)), \quad l>0,
\end{cases}
\end{gather}
unless we neglect the pressure term $P=P(\rho)$. That's why Danchin introduced the hybrid Besov spaces in \cite{Danchin00}. Roughly speaking, by careful analysis of behaviors of the following hyperbolic-parabolic system
\begin{eqnarray}\label{linear2}
\begin{cases}
\pr_tb+\Lm d=f,\\
\pr_t d-\Dl d-\Lm b=g, \quad\mathrm{with}\quad \Lm=\sqrt{-\Dl},
\end{cases}
\end{eqnarray}
both in low frequency and high frequency parts, Danchin obtained the $L^2$-decay in time for $\rho-\bar{\rho}$ in a $L^2$ type Besov space, which is the key point to construct global solutions to \eqref{CNS}. There is an interesting question how to obtain the global well-posedness result with the large initial data in the space (\ref{Ddata}).
Inspired by works about the classical incompressible Navier-Stokes system \cite{Cannone93,Ca97}, with the aid of Green matrix of \eqref{linear2},
Charve and Danchin \cite{CD10}, Chen, Miao and Zhang \cite{CMZ10} obtained the global well-posedness result in the critical $L^p$
framework respectively, i.e, the high frequency part of the initial data are small in the following Besov space,
$$
b_{0H}\in \dot{B}^\frac{N}{p}_{p,1}, \ u_{0H}\in \dot{B}^{\frac{N}{p}-1}_{p,1},\ b_0=\rho_0-1.
$$
In this paper, we denote
\begin{equation*}
f_L:=\sum_{q< 1} f_q,\quad\mathrm{and}\quad f_H:=\sum_{q\ge1} f_q,
\end{equation*}
with $f\in \mathcal{S}'$ and $f_q:=\dot{\Dl}_q f$.
Later, Haspot \cite{Ha11} gave a new proof via the so called {\em effective velocity}.
Similar to the incompressible Navier-Stokes system, the results in \cite{CD10,CMZ10,Ha11} imply that the isentropic compressible Navier-Stokes system \eqref{CNS} is global well-posed with the highly
oscillating initial velocity $u_0=\phi_\varepsilon$ in (\ref{1.4-in}) for $N=3$, small $\varepsilon$ and some $\al$. However, in \cite{CD10,CMZ10,Ha11}, the low frequency part of the initial data are small in the following Besov space,
\begin{equation}\label{L}
b_{0L},u_{0L}\in \dot{B}^{\frac{N}{2}-1}_{2,1}.
\end{equation}
A natural question which arises is: what will happen when the low frequency part of the initial data are large in (\ref{L})?
Recently, for the large volume viscosity $\lambda$, Danchin and Mucha \cite{DM16} established the global solutions to the two dimensional compressible Navier-Stokes equations \eqref{CNS} with large initial velocity and almost constant density.
The aim of this paper is to construct global solutions to the isentropic compressible Navier-Stokes equations \eqref{CNS} when the low frequency part of the initial velocity field is large. For example, if $N=3$,
for any fixed $\phi\in\mathcal{S}$ with $\hat{\phi}$ supported in a compact set, say, $\mathrm{Supp}\,\hat{\phi}\subset B(0,1)$, the initial data can be chosen as
\begin{equation}
(\rho_0, u_0):=(1, l^{-\beta}\nb \phi_l+\tl{\phi_\varepsilon}),
\end{equation}
in our result, where
\begin{equation*}
\phi_l(x):=\phi(lx),
\end{equation*}
and
\begin{equation}
\tl{\phi_\varepsilon}:=\varepsilon^{\fr{3}{p}-1}\sin\left(\fr{x_3}{\varepsilon}\right)(-\pr_2\tl{\phi}, \pr_1\tl{\phi}, 0), \quad\mathrm{for\ \ some}\quad \tl{\phi}\in\mathcal{S},
\end{equation}
with some $0<l\ll1$, $\beta\ge0$, $\varepsilon>0$, and $p>3$. Please refer to Remark
\ref{rem1.2} for more details.
Since \eqref{CNS} is not really invariant under the transformation (\ref{scaling}), one may guess that the Besov space $\dot{B}^{\frac{N}{2}-1}_{2,1}$ is not a good functional space for the low frequency part of the initial data. By virtue of the low frequency embedding
\begin{equation}
\|\phi_L\|_{\dot{B}^{s_1}_{2,1}}\le C\| \phi_L\|_{\dot{B}^{s_2}_{2,1}}, \quad\mathrm{for\ \ all}\quad \phi\in\dot{B}^{s_2}_{2,1}, \textrm{ and } s_1>s_2,
\end{equation}
we should consider a class of the initial data that the low frequency part of the initial data $(b_{0L},\Pe^\bot u_{0L})$ are small in the Besov space $\dot{B}^{\frac{N}{2}-1+\al}_{2,1}$ but large in $\dot{B}^{\frac{N}{2}-1}_{2,1}$.
More precisely, we will prove the global existence and uniqueness of solutions to the isentropic compressible Navier-Stokes system \eqref{CNS} with initial data $(\rho_0, u_0)$ close to a stable equilibrium (1,0), satisfying $(\rho_0-1,u_0)\in\mathcal{E}_0$ defined by
\begin{equation}\label{E0}
\mathcal{E}_0:=\left\{(\phi,\varphi)\in\mathcal{S}'_h\times\mathcal{S}'_h: (\phi_L, \Pe^\bot \varphi_L)\in \dot{B}^{\fr{N}{2}-1+\al}_{2,1}, \ \phi_H\in \dot{B}^{\fr{N}{2}}_{2,1},\ \Pe^\bot \varphi_H\in\dot{B}^{\fr{N}{2}-1}_{2,1},\ \Pe \varphi\in\dot{B}^{\fr{N}{p}-1}_{p,1} \right\},
\end{equation}
with some $\al>0$ and $p>2$.
To simplify the presentation, in the following we denote
\begin{equation}\label{bu1}
\|(b_0, u_0)\|_{\mathcal{E}_0}:=\|(b_{0L}, \Pe^\bot u_{0L})\|_{\dot{B}^{\fr{N}{2}-1+\al}_{2,1}}+\|b_{0H}\|_{\dot{B}^{\fr{N}{2}}_{2,1}}+\|\Pe^\bot u_{0H}\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}+\|\Pe u_0\|_{\dot{B}^{\fr{N}{p}-1}_{p,1}}.
\end{equation}
We shall construct solutions $(\rho,u)$ to system \eqref{CNS} with $(\rho-1, u)$ lying in the spaces below.
\begin{defn}\label{space}
Let $T>0$ and $N\ge2$.
\begin{itemize}
\item For $p>2$, $\al>0$, denote by $\mathcal{E}^{\fr{N}{2},\al}_{p}(T)$ the space of functions $(b, u)$ such that
\begin{eqnarray*}
&&(b_L,\Pe^\bot u_L)\in \widetilde{C}_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})\cap \widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1});\\
&&b_H\in\widetilde{C}_T(\dot{B}^{\fr{N}{2}}_{2,1})\cap L^1_T(\dot{B}^{\fr{N}{2}}_{2,1}),\ \ \Pe^\bot u_H\in\widetilde{C}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1});\\
&&\Pe u\in \widetilde{C}_T(\dot{B}^{\fr{N}{p}-1}_{p,1})\cap L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1}).
\end{eqnarray*}
We shall endow the space with the norm:
\begin{eqnarray*}
\|(b,u)\|_{\mathcal{E}^{\fr{N}{2}, \al}_{p}(T)}&:=&\|(b_L,\Pe^\bot u_L)\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})\cap \widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\\
&&+\|b_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}}_{2,1})}+\|\Pe^\bot u_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}
+\|\Pe u\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{p}-1}_{p,1})\cap{L}^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}.
\end{eqnarray*}
\item For $p=2$, denote by $\mathcal{E}^{\fr{N}{2}}(T)$ the space of functions $(b, u)$ such that
\begin{equation*}
(b_L, u)\in \widetilde{C}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1}),\ \ b_H\in \widetilde{C}_T(\dot{B}^{\fr{N}{2}}_{2,1})\cap L^1_T(\dot{B}^{\fr{N}{2}}_{2,1}),
\end{equation*}
with
\begin{equation*}
\|(b,u)\|_{\mathcal{E}^{\fr{N}{2}}(T)}:=\|(b_L, u)\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}+\|b_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}}_{2,1})}.
\end{equation*}
Indeed, $\mathcal{E}^{\fr{N}{2}}(T)$ is nothing but the space introduced by Danchin in \cite{Danchin00}.
\end{itemize}
We use the notation $\mathcal{E}^{\fr{N}{2},\al}_p$ ($\mathcal{E}^\fr{N}{2}$) if $T=\infty$, changing $[0, T]$ into $[0,\infty)$ in the definition above.
\end{defn}
The main results are stated as follows.
\begin{thm}\label{thm-p>2}
Let
\begin{eqnarray}\label{p1}
\begin{cases}
2< p<4,\quad\quad\mathrm{if}\quad N=2,\\
2< p\le4,\quad\quad\mathrm{if}\quad N=3,\\
2< p\le\fr{2N}{N-2},\quad\mathrm{if}\quad N\ge4,
\end{cases}
\end{eqnarray}
and
\begin{equation}\label{al1}
0<\al\le\fr{N-1}{2}\left(\fr12-\fr{1}{p}\right).
\end{equation}
Assume that $(\rho_0, u_0)$ satisfies $(\rho_0-1, u_0)\in \mathcal{E}_0$. Then there exist two constants $c_0$ and $C_0>0$ depending on $N, \mu$ and $\lambda$, such that if
\begin{equation}\label{initial1}
\|(\rho_0-1, u_0)\|_{\mathcal{E}_0}\le c_0,
\end{equation}
then system \eqref{CNS} admits a global solution $(\rho, u)$ with $(\rho-1, u)\in \mathcal{E}^{\fr{N}{2},\al}_p$, satisfying
\begin{equation}\label{uniform1}
\|(\rho-1,u)\|_{\mathcal{E}^{\frac{N}{2},\al}_p}\le C_0\|(\rho_0-1, u_0)\|_{\mathcal{E}_0}.
\end{equation}
Furthermore, if
\begin{equation*}
N\geq 3,\ \
\textrm{ or }\ \ \Pe u_0\in \dot{B}^0_{2,1} \textrm{ when } N=2,
\end{equation*}
then the solution is unique.
\end{thm}
\begin{rem}\label{rem1.1}
For initial data $(\rho_0, u_0)$ with $(\rho_0-1, u_0)\in \left(\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}}_{2,1}\right)\times\dot{B}^{\fr{N}{2}-1}_{2,1}$ with
$$\|(\rho_0-1, u_0)\|_{\left(\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}}_{2,1}\right)\times\dot{B}^{\fr{N}{2}-1}_{2,1}}:=R,$$
one easily deduces that
\begin{eqnarray}\label{1.37}
\nn\|(b_0, u_0)\|_{\mathcal{E}_0}
&\leq& C 2^{-Q\al}\left( \|P_{<-Q}b_0\|_{\dot{B}^{\frac{N}{2}-1 }_{2,1}}+\|P_{<-Q}\Pe^\bot u_0\|_{\dot{B}^{\frac{N}{2}-1 }_{2,1}}
\right)+C\|P_{\geq-Q}b_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}\cap{\dot{B}^{\frac{N}{2}}_{2,1}}}\\
\nn &&+C\|P_{\geq-Q}\Pe^\bot u_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}+\|\Pe u_0\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}\\
&\leq&C 2^{-Q\al}R+C\|P_{\geq-Q}b_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}\cap{\dot{B}^{\frac{N}{2}}_{2,1}}}
+C\|P_{\geq-Q}\Pe^\bot u_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}+C\|\Pe u_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}},
\end{eqnarray}
where $P_{<-Q}$ and $P_{\geq-Q}$ are defined in (\ref{PQ}).
Therefore, if
\begin{equation}\label{1.38}
C\|P_{\geq-Q}b_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}\cap{\dot{B}^{\frac{N}{2}}_{2,1}}}+C\|P_{\geq-Q}\Pe^\bot u_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}+C\|\Pe u_0\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\leq \frac{c_0}{2},
\end{equation}
with
\begin{equation}\label{1.39}
Q:=\left[\fr{1}{\al}\log_2\fr{2CR}{c_0}\right]+1,
\end{equation}
then the initial data $(\rho_0, u_0)$ satisfy the condition \eqref{initial1}.
\end{rem}
From Theorem \ref{thm-p>2} and Remark \ref{rem1.1}, we easily obtain the following theorem.
\begin{thm}\label{thm-p=2}
Let
\begin{equation}\label{al2}
\begin{cases}
0<\al <\frac{1}{8},\quad\quad\mathrm{if}\quad N=2,\\
0< \al \leq\frac14,\quad\quad\mathrm{if}\quad N=3,\\
0< \al\le\fr{ N-1}{2N},\quad\mathrm{if}\quad N\ge4.
\end{cases}
\end{equation}
There exists a constant $c_1$ depending on $N, \mu$ and $\lambda$, such that for all $(\rho_0, u_0)$ with $(\rho_0-1, u_0)\in\left(\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}}_{2,1}\right)\times\dot{B}^{\fr{N}{2}-1}_{2,1}$, and $Q\in \N$, if
\begin{eqnarray}\label{initial3}
\nn&&2^{-Q\al}\left( \|P_{<-Q}(\rho_0-1)\|_{\dot{B}^{\frac{N}{2}-1 }_{2,1}}
+\|P_{<-Q}\Pe^\bot u_0\|_{\dot{B}^{\frac{N}{2}-1 }_{2,1}}
\right)\\
&&+\|P_{\ge-Q}(\rho_0-1)\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}}_{2,1}}+\|P_{\ge-Q}\Pe^\bot u_0\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}+\|\Pe u_0\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}\le c_1,
\end{eqnarray}
then system \eqref{CNS} admits a unique solution $(\rho, u)$ with $(\rho-1, u)\in \mathcal{E}^{\fr{N}{2}}$.
\end{thm}
\begin{rem}\label{rem1.2}
We give some examples of {\em large} initial data $(\rho_0, u_0)$ with $(\rho_0-1, u_0)$ satisfying \eqref{initial1} and \eqref{initial3}. For the sake of simplicity, we take $\rho_0=1$. In doing so, we just need to focus on the initial velocity $u_0$. More precisely, for any fixed $\phi\in\mathcal{S}$ with $\|\nb\phi\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}=R$ and $\hat{\phi}$ supported in a compact set, say, $\mathrm{Supp}\,\hat{\phi}\subset B(0,1)$, let us denote
\begin{equation*}
\phi_l(x):=\phi(lx).
\end{equation*}
Then
\begin{itemize}
\item $\|\nb \phi_l\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}=\|\nb\phi\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}=R$.
\item $\widehat{\nb \phi_l(\xi)}=l^{1-N}\widehat{\nb\phi}\left(\fr{\xi}{l}\right)$, and $\mathrm{Supp}\, \widehat{\nb \phi_l}\subset B(0,l)$.
\end{itemize}
Consequently, for all $\beta\in[0,\al)$, taking $l>0$ and $Q\in\N$ satisfying
\begin{equation}\label{l}
\begin{cases}
l<\fr{3}{4}2^{-Q},\\[3mm]
2^{-\al Q}l^{-\beta}R\le\fr{c_0}{2},
\end{cases}
\end{equation}
we find that
\begin{equation*}
\dot{\Dl}_q\nb \phi_l=0, \quad q\ge-Q,
\end{equation*}
and
\begin{equation}\label{1.32}
\|l^{-\beta}P_{<-Q}(\nb \phi_l)\|_{\dot{B}^{\fr{N}{2}-1+\al}_{2,1}}\le\fr{c_0}{2}.
\end{equation}
{\em Example 1.} Let
\begin{equation}\label{ex1}
(\rho_0, u_0):=(1, l^{-\beta}\nb \phi_l),
\end{equation}
with $l$ satisfying \eqref{l}. Then from Remark \ref{rem1.1} and \eqref{1.32}, it is not difficult to verify that the data given in \eqref{ex1} apply to Theorems \ref{thm-p>2} and \ref{thm-p=2}. This indicates that our results allow for initial data with large potential part of the initial velocity.
In addition, noticing the smallness restriction in \eqref{1.38}, initial velocity with highly oscillating is also permitted in Theorem \ref{thm-p>2} as a by-product. For instance, if $N=3$, the incompressible part of the initial velocity can be given as in \cite{Ca97},
\begin{equation}\label{iu01}
\tl{\phi_\varepsilon}:=\varepsilon^{\fr{3}{p}-1}\sin\left(\fr{x_3}{\varepsilon}\right)(-\pr_2\tl{\phi}, \pr_1\tl{\phi}, 0), \quad\mathrm{for\ \ some}\quad \tl{\phi}\in\mathcal{S},
\end{equation}
with $\varepsilon>0$, and $p>3$. Combining \eqref{ex1} with \eqref{iu01}, we can give another example. \par
\noindent{\em Example 2.} Let
\begin{equation}\label{ex2}
(\rho_0, u_0):=(1, l^{-\beta}\nb \phi_l+\tl{\phi_\varepsilon}).
\end{equation}
Then for $N=3$, the data in \eqref{ex2} are applicable to Theorem \ref{thm-p>2}.
\end{rem}
\begin{rem}
Our results can be extended to the case with the high frequency part $(b_{0H}, u_{0H})$ of the initial data lying in some $L^p$- type Besov spaces. We omit the details in this paper to avoid tedious computations.
\end{rem}
\begin{rem}
Taking the anisotropy into consideration as in \cite{CZ07} and \cite{Zhang09}, it is possible to relax the smallness restriction on the divergence free part $\Pe u_0$ of the initial velocity $u_0$. Please refer to \cite{CG10, CGP11} for a recent panorama.
\end{rem}
\begin{rem}
Our methods can be used to other related models. Similar results for the incompressible viscoelastic fluids will be given in a forthcoming paper.
\end{rem}
It is worth pointing out that we impose neither any symmetrical structure on the initial data, nor largeness assumptions on the viscosity coefficients $\mu$ or $\lambda$. What's more, our results hold for all dimensional $N\ge2$. Different from \cite{Danchin00}, our proof relies not only on the energy estimates for the hyperbolic-parabolic system \eqref{linear2}, but also on the dispersive properties for the following acoustics system:
\begin{equation}\label{acoustics}
\begin{cases}
\pr_tb+\fr{\Lm d}{\eps}=\mathbf{f},\\
\pr_td-\fr{\Lm b}{\eps}=\mathbf{g}.
\end{cases}
\end{equation}
This method was used before to study the zero Mach number limit problem of the compressible Navier-Stokes equations \cite{DG99, Danchin02, DH14}. It seems that the combination of the energy estimates and Strichartz estimates has never been used to study the global well-posedness problem of the viscous compressible fluids. In this paper, we try to apply this idea to the isentropic compressible Navier-Stokes equations.
Let us now explain how to construct our solution spaces and show the ingredients of the proof. First of all, just as in \cite{Danchin00}, writing $\rho=1+b$, and decomposing $u=\Pe^\bot u+\Pe u$,
where
\begin{equation*}
\Pe^\bot:=-\nb(-\Dl)^{-1}\mathrm{div}, \quad\mathrm{and}\quad \Pe:=\mathbb{I}-\Pe^\bot,
\end{equation*}
we reformulate \eqref{CNS} as follows:
\begin{eqnarray}\label{bcu}
\begin{cases}
\pr_t b+{\mathrm{div} \Pe^\bot u}=-\mathrm{div}(bu),\\
\pr_t \Pe^\bot u-\nu \Dl\Pe^\bot u+{\nb b}=-\Pe^\bot\left(u\cdot\nb u+K( b){\nb b}+I( b)\mathcal{A}u\right),\\
(b,\Pe^\bot u)|_{t=0}=(b_0,\Pe^\bot u_0),
\end{cases}
\end{eqnarray}
and
\begin{eqnarray}\label{biu}
\begin{cases}
\pr_t \Pe u-\mu \Dl\Pe u=-\Pe\left(u\cdot\nb u+I( b)\mathcal{A}u\right),\\
\Pe u|_{t=0}=\Pe u_0,
\end{cases}
\end{eqnarray}
where $b_0:=\rho_0-1$, $I(a):=\fr{a}{1+a}$, and $K(a):=\fr{P'(1+ a)}{1+ a}-1$. For the sake of simplicity, $\nu$ is assumed to be 1 throughout this paper. Moreover,
the condition (\ref{a-P})
ensures that $K(0)=0$. Let us denote $d:=\Lm^{-1}\mathrm{div} u$. From \eqref{bcu}, one easily deduces that $(b,d)$ solves
\begin{eqnarray}\label{bd}
\begin{cases}
\pr_t b+{\Lm d}=-\mathrm{div}(bu),\\
\pr_t d- \Dl d-{\Lm b}=-\Lm^{-1}\mathrm{div}\left(u\cdot\nb u+K( b){\nb b}+I( b)\mathcal{A}u\right),\\
(b,d)|_{t=0}=(b_0,d_0).
\end{cases}
\end{eqnarray}
Since $\mathrm{curl}\Pe^\bot=0$, it is easy to verify that \eqref{bd} is equivalent to \eqref{bcu}. In the following, we shall use \eqref{bd} to replace \eqref{bcu} and do not
make a distinction between $\Pe^\bot u$ and $d$ in the absence of confusion.
Next, in order to show our ideas more clearly, we divide Danchin's arguments in \cite{Danchin00} into the following three parts:
\begin{description}
\item[(i)]\ \ Global estimates for the linearized system of \eqref{bd}.
\item[(ii)]\ Commutator estimates for the convection terms.
\item[(iii)]Product estimates for other nonlinear terms.
\end{description}
Combining {\bf(i)} with {\bf(ii)}, Danchin established the global estimates for the paralinearized system of \eqref{bd} with $(b_0, d_0)\in\left(\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}}_{2,1}\right)\times\dot{B}^{\fr{N}{2}-1}_{2,1}$, see Proposition 10.23 in \cite{Bahouri-Chemin-Danchin11}, for example. Then substituting the results in {\bf(iii)} into the estimates obtained in {\bf(i)} and {\bf(ii)} yields the global estimates of $(b, d)$. Our proof follows this line, but aside from part {\bf(i)}, we develop different approaches to deal with parts {\bf(ii)} and {\bf(iii)}. In particular, the dispersive properties of the system of acoustics \eqref{acoustics} is taken into consideration. Indeed, for $(b_0,d_0)$ satisfying
\begin{equation}\label{extra}
(b_0,d_0)\in\left(\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}+\al}_{2,1}\right)\times\left(\dot{B}^{\fr{N}{2}-1}_{2,1}\cap\dot{B}^{\fr{N}{2}-1+\al}_{2,1}\right),
\end{equation}
it has been shown in \cite{Danchin02} that some Strichartz norms of $(b,d)$ decay algebraically with respect to the Mach number $\eps$. In our case, $\eps=1$, we can not expect any decay with respect to the Mach number. Nevertheless, in the low frequency part, we still gain some decay by means of the low frequency embedding:
\begin{eqnarray}\label{decay}
\nn\|P_{<-Q}(b, d)\|_{\widetilde{L}^{r}_T(\dot{B}^{\fr{N}{p}-1+\al+\fr{1}{r}}_{p,1})}&\lesssim& \left(\|P_{<-Q}(b_0, d_0)\|_{\dot{B}^{\fr{N}{2}-1+\al}_{2,1}}+\|P_{<-Q}({\bf f, g})\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\right)\\
&\lesssim&2^{-\al Q}\left(\|(b_0, d_0)\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}+\|P_{<-Q}({\bf f, g})\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\right),
\end{eqnarray}
with
\begin{equation}\label{pr}
\al>0,\quad p\ge2, \quad \fr{2}{r}\le \min\left\{1, (N-1)\left(\fr12-\fr{1}{p}\right)\right\}, \quad (r, p, N)\neq (2, \infty, 3).
\end{equation}
This is the basic idea underneath our approach, which leads us to believe that it is possible to construct global solutions to \eqref{CNS} with large potential part $\Pe^\bot u_0$ of initial velocity in $\dot{B}^{\fr{N}{2}-1}_{2,1}$.
Motivated by \eqref{decay}, we just impose the extra regularity on the low frequency part of $(b_0, d_0)$. More precisely,
\begin{equation}\label{bd0}
(b_{0L}, d_{0L})\in \dot{B}^{\fr{N}{2}-1+\al}_{2,1}\times\dot{B}^{\fr{N}{2}-1+\al}_{2,1}, \quad (b_{0H}, d_{0H})\in \dot{B}^{\fr{N}{2}}_{2,1}\times\dot{B}^{\fr{N}{2}-1}_{2,1}.
\end{equation}
In order to handle parts {\bf(ii)} and {\bf(iii)} under the condition \eqref{bd0}, we need to compensate the loss of critical norms of $(b, d)$ in the low frequency part. To this end, we set
$$
r=\fr{1}{\al}
$$
in \eqref{decay}. In this way, $p=2$ is not permitted in \eqref{pr} any more, otherwise $\al=0$. This explains the condition \eqref{al1} in Theorem \ref{thm-p>2}.
On the other hand, the divergence free part $\Pe u$ of the velocity $u$ satisfies the parabolic system \eqref{biu}, and hence possesses no dispersive property at all. Accordingly, it seems that it is reasonable to assume
\begin{equation}\label{hiu0}
\Pe u_0\in\dot{B}^{\fr{N}{2}-1}_{2,1}.
\end{equation}
As a result, by the property of heat equation, the space for $\Pe u$ should be
$$
\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1}),
$$
and we have to bound the right hand side of \eqref{biu} in $L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})$. Unfortunately, we do not know how to control $\|\Pe(\dot{T}_{\Pe u}\nb\Pe^\bot u)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}$ since from \eqref{decay} and the property of heat equation, we just have
\begin{equation}\label{s-cu}
\Pe^\bot u\in\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})\cap{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}-1+2\al}_{p,1})}, \quad\mathrm{with}\quad p>2.
\end{equation}
To overcome this problem, owing to the fact that $\Pe\Pe^\bot=0$, we find that
\begin{equation}\label{com-Tiucu}
\Pe(\dot{T}_{\Pe u}\cdot\nb\Pe^\bot u)=[\Pe,\dot{T}_{(\Pe u)^k}]\pr_k\Pe^\bot u.
\end{equation}
Then the commutator estimate (Lemma 2.99 in \cite{Bahouri-Chemin-Danchin11}) enables us to bound
\begin{eqnarray*}
&&\|[\Pe,\dot{T}_{(\Pe u)^k}]\pr_k\Pe^\bot u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
&&\lesssim\|\nb \Pe u\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p^*}-2\al}_{p^*,1})}\|\nb\Pe^\bot u\|_{{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}-2+2\al}_{p,1})}},
\end{eqnarray*}
with $p^*:=\fr{2p}{p-2}$,
provided
\begin{equation}
\fr{N}{p^*}-2\al+1\le1, \quad\mathrm{i. e.}\quad \al\ge\fr{N}{2}\left(\fr12-\fr{1}{p}\right),
\end{equation}
which contradicts to \eqref{pr}. The above analysis has proved a blind alley if the assumption on $\Pe u_0$ is given by \eqref{hiu0}. However, if
\begin{equation}\label{iu0}
\Pe u_0\in\dot{B}^{\fr{N}{p}-1}_{p,1}
\end{equation}
with $p$ the same as in \eqref{decay}, the above method to deal with $\Pe(\dot{T}_{\Pe u}\cdot\nb\Pe^\bot u)$ works since
\begin{equation}\label{key}
\|[\Pe,\dot{T}_{(\Pe u)^k}]\pr_k\Pe^\bot u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\lesssim\|\nb \Pe u\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{-2\al}_{\infty,1})}\|\nb\Pe^\bot u\|_{{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}-2+2\al}_{p,1})}},
\end{equation}
holds for all $\al>0$. Combining \eqref{bd0} with \eqref{iu0}, the condition on $(b_0, u_0)$ becomes
\begin{equation}\label{bu0}
\begin{cases}
(b_{0L}, \Pe^\bot u_{0L})\in\dot{B}^{\fr{N}{2}-1+\al}_{2,1},\\[3mm]
b_{0H}\in\dot{B}^{\fr{N}{2}}_{2,1}, \ \ \Pe^\bot u_{0H}\in\dot{B}^{\fr{N}{2}-1}_{2,1},\\[3mm]
\Pe u_0\in\dot{B}^{\fr{N}{p}-1}_{p,1}, \quad\mathrm{with}\quad p>2.
\end{cases}
\end{equation}
This explains the construction of $\mathcal{E}_0$ in \eqref{E0}.
The rest of this paper is organized as follows. In Section 2, we introduce the tools (the Littlewood-Paley decomposition
and paradifferential calculus) and give some product estimates in Besov spaces. In Section 3, we recall some properties of the system of acoustics, transport and heat equations. Section 4 is devoted to the global a priori estimates of system \eqref{bcu}--\eqref{biu}. The proof of Theorem \ref{thm-p>2} is given in Section 5. In Section 6, we prove Theorem \ref{thm-p=2}. Some nonlinear estimates needed in the proof of Theorems \ref{thm-p>2} and \ref{thm-p=2} are put in the Appendix in Section 7.
\bigbreak\noindent{\bf Notation.}\par
\begin{enumerate}
\item For $f\in \mathcal{S}'$, $Q\in \N$, denote $f_q:=\dot{\Dl}_q f$, and
\begin{equation}\label{PQ}
P_{<-Q}f:=\sum_{q<-Q} f_q,\quad\ P_{\geq-Q}f:=f-P_{<-Q}f=\sum_{q\geq-Q}f_q.
\end{equation}
In particular,
\begin{equation*}
f_L:=\sum_{q< 1} f_q,\quad\mathrm{and}\quad f_H:=\sum_{q\ge1} f_q.
\end{equation*}
\item Denote $p^*:=\fr{2p}{p-2}$, i.e. $\fr{1}{p^*}=\fr{1}{2}-\fr{1}{p}$, for $p\ge2$.
\item Throughout the paper, $C$ denotes various ``harmless'' positive constants, and
we sometimes use the notation $A \lesssim B$ as an equivalent to $A \le CB$. The
notation $A \approx B$ means that $A \lesssim B$ and $B \lesssim A$.
\end{enumerate}
\section{The Functional Tool Box}
\noindent The results of the present paper rely on the use of a
dyadic partition of unity with respect to the Fourier variables, the so-called the
\textit{Littlewood-Paley decomposition}. Let us briefly explain how
it may be built on $\mathbb{R}^N$, and the readers may see more details
in \cite{Bahouri-Chemin-Danchin11,Ch1}. Let $(\chi, \varphi)$ be a couple of $C^\infty$ functions satisfying
$$\hbox{Supp}\,\chi\subset\left\{|\xi|\leq\frac{4}{3}\right\},
\ \ \ \
\hbox{Supp}\,\varphi\subset\left\{\frac{3}{4}\leq|\xi|\leq\frac{8}{3}\right\},
$$
and
$$\chi(\xi)+\sum_{q\geq0}\varphi(2^{-q}\xi)=1,$$
$$\sum_{q\in \mathbb{Z}}\varphi(2^{-q}\xi)=1, \quad \textrm{for} \quad \xi\neq0.$$
Set $\varphi_q(\xi)=\varphi(2^{-q}\xi),$
$h_q=\mathcal{F}^{-1}(\varphi_q),$ and
$\tilde{h}=\mathcal{F}^{-1}(\chi)$. The dyadic blocks and the low-frequency cutoff operators are defined for all $q\in\mathbb{Z}$ by
$$\dot{\Dl}_{q}u=\varphi(2^{-q}\mathrm{D})u=\int_{\mathbb{R}^N}h_q(y)u(x-y)dy,$$
$$\dot{S}_qu=\chi(2^{-q}\mathrm{D})u=\int_{\mathbb{R}^N}\tl{h}_q(y)u(x-y)dy.$$
Then
\begin{equation}\label{e2.1}
u=\sum_{q\in \mathbb{Z}}\dot{\Dl}_q u,
\end{equation}
holds for tempered distributions {\em modulo polynomials}. As working modulo polynomials is not appropriate for nonlinear problems, we
shall restrict our attention to the set $\mathcal {S}'_h$ of tempered distributions $u$ such that
$$
\lim_{q\rightarrow-\infty}\|\dot{S}_qu\|_{L^\infty}=0.
$$
Note that \eqref{e2.1} holds true whenever $u$ is in $\mathcal{S}'_h$ and that one may write
$$
\dot{S}_qu=\sum_{p\leq q-1}\dot{\Dl}_{p}u.
$$
Besides, we would like to mention that the Littlewood-Paley decomposition
has a nice property of quasi-orthogonality:
\begin{equation}\label{e2.2}
\dot{\Dl}_p\dot{\Dl}_qu\equiv 0\ \ \hbox{if}\ \ \ |p-q|\geq 2\ \
\hbox{and}\ \ \dot{\Dl}_p(\dot{S}_{q-1}u\dot{\Dl}_qu)\equiv 0\ \ \hbox{if}\ \ \
|p-q|\geq 5.
\end{equation}
One can now give the definition of
homogeneous Besov spaces.
\begin{defn}\label{D2.1}
For $s\in\mathbb{R}$, $(p,r)\in[1,\infty]^2$, and
$u\in\mathcal{S}'(\mathbb{R}^N),$ we set
$$\|u\|_{\dot{B}_{p,r}^s}=\left\|2^{ sq}\|\dot{\Dl}_qu\|_{L^p} \right\|_{\ell^r}.$$
We then define the spaces
$\dot{B}_{p,r}^s:=\{u\in\mathcal{S}'_h(\mathbb{R}^N),\
\|u\|_{\dot{B}_{p,r}^s}<\infty\}$.
\end{defn}
The following lemma describes the way derivatives act on spectrally localized functions.
\begin{lem}[Bernstein's inequalities]\label{Bernstein}
Let $k\in\N$ and $0<r<R$. There exists a constant $C$ depending on $r, R$ and $d$ such that for all $(a,b)\in[1,\infty]^2$, we have for all $\lm>0$ and multi-index $\al$
\begin{itemize}
\item If $\mathrm{Supp} \hat{f}\subset B(0,\lm R)$, then $\sup_{\al=k}\|\pr^\al f\|_{L^b}\le C^{k+1}\lm^{k+d(\fr1a-\fr1b)}\|f\|_{L^a}$.
\item If $\mathrm{Supp} \hat{f}\subset \mathcal{C}(0,\lm r, \lm R)$, then $C^{-k-1}\lm^k\|f\|_{L^a}\le\sup_{|\al|=k}\|\pr^\al f\|_{L^a}\le C^{k+1}\lm^k\|f\|_{L^a}$.
\end{itemize}
\end{lem}
Let us now state some classical
properties for the Besov spaces.
\begin{prop}\label{prop-classical}
For all $s, s_1, s_2\in\mathbb{R}$, $1\le p, p_1, p_2, r, r_1, r_2\le\infty$, the following properties hold true:
\begin{itemize}
\item If $p_1\leq p_2 $ and $r_1\leq r_2,$ then
$\dot{B}_{p_1,r_1}^{s}\hookrightarrow
\dot{B}_{p_2,r_2}^{s-\frac{N}{p_1}+\frac{N}{p_2}}$.
\item If $s_1\neq s_2$ and $\theta\in(0,1)$,
$\left[\dot{B}_{p,r_1}^{s_1},\dot{B}_{p,r_2}^{s_2}\right]_{(\theta,r)}=\dot{B}_{p,r}^{\theta
s_1+(1-\theta)s_2}$.
\item For any smooth homogeneous of degree $m\in\Z$ function $F$ on $\mathbb{R}^N\backslash\{0\}$, the operator $F(D)$ maps $\dot{B}^{s}_{p,r}$ to $\dot{B}^{s-m}_{p,r}$.
\end{itemize}
\end{prop}
Next we recall a few nonlinear estimates in Besov spaces which may be
obtained by means of paradifferential calculus. Firstly introduced
by J. M. Bony in \cite{Bony81}, the paraproduct between $f$
and $g$ is defined by
$$\dot{T}_fg=\sum_{q\in\mathbb{Z}}\dot{S}_{q-1}f\dot{\Delta}_qg,$$
and the remainder is given by
$$\dot{R}(f,g)=\sum_{q\in\Z}\tilde{\dot{\Delta}}_qf{\dot{\Delta}}_qg$$
with
$$\tilde{\dot{\Delta}}_qf=(\dot{\Delta}_{q-1}+\dot{\Delta}_{q}+\dot{\Delta}_{q+1})f.$$
We have the following so-called Bony's decomposition:
\begin{equation}\label{Bony-decom}
fg=\dot{T}_fg+\dot{T}_gf+\dot{R}(f,g)=\dot{T}_fg+\dot{T}'_gf,
\end{equation}
where $\dot{T}'_gf:=\dot{T}_gf+\dot{R}(f,g)$. The paraproduct $\dot{T}$ and the remainder $\dot{R}$ operators satisfy the following
continuous properties.
\begin{prop}[\cite{Bahouri-Chemin-Danchin11}]\label{p-TR}
For all $s\in\mathbb{R}$, $\sigma\ge0$, and $1\leq p, p_1, p_2\leq\infty,$ the
paraproduct $\dot T$ is a bilinear, continuous operator from $\dot{B}_{p_1,1}^{-\sigma}\times \dot{B}_{p_2,1}^s$ to
$\dot{B}_{p,1}^{s-\sigma}$ with $\frac{1}{p}=\frac{1}{p_1}+\frac{1}{p_2}$. The remainder $\dot R$ is bilinear continuous from
$\dot{B}_{p_1, 1}^{s_1}\times \dot{B}_{p_2,1}^{s_2}$ to $
\dot{B}_{p,1}^{s_1+s_2}$ with
$s_1+s_2>0$, and $\frac{1}{p}=\frac{1}{p_1}+\frac{1}{p_2}$.
\end{prop}
In view of \eqref{Bony-decom}, Proposition \ref{p-TR} and Bernstein's inequalities, one easily deduces the following product estimates, whose proof is given in the Appendix.
\begin{coro}\label{coro-product}
Let $\rho,p_1,p_2,q_1,q_2\in[1,\infty]$, $\frac{1}{\rho}\leq\frac{1}{p_1}+\frac{1}{p_2}$, $\frac{1}{\rho}\leq\frac{1}{q_1}+\frac{1}{q_2}$, $s_1-\frac{N}{p_1}\leq \min\{0,N(\frac{1}{p_2}-\frac{1}{\rho})\}$, $\sigma_1-\frac{N}{q_1}\leq \min\{0,N(\frac{1}{q_2}-\frac{1}{\rho})\}$, $s_1+s_2>N\max \{0,\fr{1}{p_1}+\frac{1}{p_2}-1\}$, $s=s_1+s_2+N(\frac{1}{\rho}-\frac{1}{p_1}-\frac{1}{p_2})=\sigma_1+\sigma_2+N(\frac{1}{\rho}-\frac{1}{q_1}-\frac{1}{q_2})$, then there holds
\begin{equation}\label{product1}
\|uv\|_{\dot{B}^{s}_{\rho,1}}\leq C\|u\|_{\dot{B}^{s_1}_{p_1,1}}\|v\|_{\dot{B}^{s_2}_{p_2,1}}+C\|v\|_{\dot{B}^{\sigma_1}_{q_1,1}}\|u\|_{\dot{B}^{\sigma_2}_{q_2,1}}.
\end{equation}
In particular,
\begin{equation}\label{product1-s}
\|uv\|_{\dot{B}^{r_1+r_2-\frac{N}{p}}_{p,1}}\leq C\|u\|_{\dot{B}^{r_1}_{p,1}}\|v\|_{\dot{B}^{r_2}_{p,1}},
\end{equation}
where $p\in[1,\infty]$, $r_1, r_2\le \fr{N}{p}$ and $r_1+r_2>N\max \{0,\fr{2}{p}-1\}$.
\end{coro}
The following Proposition will be used to prove the uniqueness of solutions obtained in Theorem \ref{thm-p>2} for $N=2$.
\begin{prop}[\cite{Danchin05}]\label{prop-pro}
Let $p\ge2$, $s_1\le\fr{N}{p}, s_2<\fr{N}{p}$, and $s_1+s_2\ge0$, then
\begin{equation*}
\|uv\|_{\dot{B}^{s_1+s_2-\fr{N}{p}}_{p,\infty}}\le C\|u\|_{\dot{B}^{s_1}_{p,1}}\|v\|_{\dot{B}^{s_2}_{p,\infty}}.
\end{equation*}
\end{prop}
The study of non-stationary PDEs requires spaces of the type
$L^\rho_T(X)=L^\rho(0,T;X)$ for appropriate Banach spaces $X$. In
our case, we expect $X$ to be a Besov space, so that it
is natural to localize the equations through Littlewood-Paley
decomposition. We then get estimates for each dyadic block and
perform integration in time. But, in doing so, we obtain the bounds
in spaces which are not of the type $L^\rho(0,T;\dot{B}^s_{p,r})$. That
naturally leads to the following definition introduced by Chemin and Lerner in \cite{CL}.
\begin{defn}\label{defn-chemin-lerne}
For $\rho\in[1,+\infty]$, $s\in\mathbb{R}$, and $T\in(0,+\infty)$, we set
$$\|u\|_{\widetilde{L}^\rho_T(\dot{B}^s_{p,r})}=\left\|2^{qs}
\|\dot{\Delta}_qu(t)\|_{L^\rho_T(L^p)}\right\|_{\ell^r}
$$
and denote by
$\widetilde{L}^\rho_T(\dot{B}^s_{p,r})$ the subset of distributions
$u\in
\mathcal{D}'((0,T), \mathcal{S}'_{h}(\mathbb{R}^{3}))$ with finite
$\|u\|_{\widetilde{L}^\rho_T(\dot{B}^s_{p,r})}$ norm. When $T=+\infty$, the index $T$ is
omitted. We
further denote $\widetilde{C}_T(\dot{B}^s_{p,r})=C([0,T];\dot{B}^s_{p,r})\cap
\widetilde{L}^\infty_{T}(\dot{B}^s_{p,r}) $.
\end{defn}
\begin{rem}\label{rem-CM-holder}
All the properties of continuity for the paraproduct, remainder, and product remain true for the Chemin-Lerner spaces. The exponent $\rho$ just has to behave according to
H\"{o}lder's inequality for the time variable.
\end{rem}
\begin{rem}\label{rem-CM-minkowski}
The spaces $\widetilde{L}^\rho_T(\dot{B}^s_{p,r})$ can be linked with the classical space $L^\rho_T(\dot{B}^s_{p,r})$ via the Minkowski inequality:
\begin{equation*}
\|u\|_{\widetilde{L}^\rho_T(\dot{B}^s_{p,r})}\le\|u\|_{L^\rho_T(\dot{B}^s_{p,r})}\quad \mathrm{if}\quad r\ge\rho,\qquad \|u\|_{\widetilde{L}^\rho_T(\dot{B}^s_{p,r})}\ge\|u\|_{L^\rho_T(\dot{B}^s_{p,r})}\quad \mathrm{if}\quad r\le\rho.
\end{equation*}
\end{rem}
\section{Preliminaries}
In this section, we first recall the estimates for the acoustics system \eqref{acoustics}, which are very useful in the proof of Theorem \ref{thm-p>2}.
\begin{prop}[\cite{Danchin02}]\label{prop-wave}
Let $(b, v)$ be a solution of the following system of acoustics:
\begin{eqnarray}\label{eq_W}
\begin{cases}
\pr_tb+\eps^{-1}\Lm v=\mathbf{f},\\
\pr_tv-\eps^{-1}\Lm b=\mathbf{g},\\
(b,v)|_{t=0}=(b_0, v_0).
\end{cases}
\end{eqnarray}
Then, for any $s\in\mathbb{R}$ and $T\in(0,\infty]$, the following estimate holds:
\begin{equation}
\|(b,v)\|_{\widetilde{L}^r_T(\dot{B}^{s+N(\frac{1}{p}-\frac12)+\frac{1}{r}}_{p,1})}\leq C\eps^{\frac{1}{r}}\|(b_0, v_0)\|_{\dot{B}^s_{2,1}}+C\eps^{1+\frac{1}{r}-\frac{1}{\bar{r}'}}
\|(\mathbf{f}, \mathbf{g})\|_{\widetilde{L}^{\bar{r}'}_T(\dot{B}^{s+N(\frac{1}{\bar{p}'}-\frac12)+\frac{1}{\bar{r}'}-1}_{\bar{p}',1})},
\end{equation}
with
\begin{eqnarray*}
&p\geq2, \frac{2}{r}\leq\min(1,\ga(p)), (r, p, N)\neq(2,\infty, 3),&\\
&\bar{p}\geq2, \frac{2}{\bar{r}}\leq\min(1,\ga(\bar{p})), (\bar{r}, \bar{p}, N)\neq(2,\infty,3),
\end{eqnarray*}
where $\ga(q):=(N-1)(\frac{1}{2}-\frac{1}{q}), \frac{1}{\bar{p}}+\frac{1}{\bar{p}'}=1$, and $\frac{1}{\bar{r}}+\frac{1}{\bar{r}'}=1$.
\end{prop}
Next, we recall the classical estimates in Besov space for the transport and heat equations (Theorem 3.37, \cite{Bahouri-Chemin-Danchin11}).
\begin{prop}\label{prop3.3}
Let $\sigma\in (-N\min\{\frac{1}{p},\frac{1}{p'}\},1+\frac{N}{p})$ and $1\leq p,r\leq +\infty$, or $\sigma=1+\frac{N}{p}$ if $r=1$. Let $v$ be a smooth vector field such that $\nabla v\in L^1_T(\dot{B}^{\frac{N}{p}}_{p,r}\cap L^\infty)$, $f_0\in \dot{B}^{\sigma}_{p,r}$ and $g\in L^1_T(\dot{B}^{\sigma}_{p,r})$. There exists a constant $C$, such that for all solutions $f\in L^\infty([0,T];\dot{B}^{\sigma}_{p,r}) $ of the equation
$$
\partial_t f+v\cdot\nabla f=g,\ f|_{t=0}=f_0,
$$
we have the following a priori estimate
\begin{equation}
\|f\|_{\widetilde{L}^\infty_T(\dot{B}^{\sigma}_{p,r})}\leq e^{CV(T)}\left(
\|f_0\|_{\dot{B}^{\sigma}_{p,r}}+\int^T_0 e^{-CV(t)}\|g(t)\|_{\dot{B}^{\sigma}_{p,r}}dt
\right),
\end{equation}
where $V(t)=\int^t_0\|\nabla v(\tau)\|_{\dot{B}^{\frac{N}{p}}_{p,r}\cap L^\infty}d\tau$.
\end{prop}
\begin{prop}\label{prop3.4}
Let $\sigma\in \mathbb{R}$ and $1\leq \rho,p,r\leq +\infty$. Assume that $f_0\in \dot{B}^{\sigma}_{p,r}$ and $g\in \widetilde{L}^\rho_T(\dot{B}^{\sigma-2+\frac{2}{\rho}}_{p,r})$. There exists a constant $C$, such that for all solutions $f\in L^\infty([0,T];\dot{B}^{\sigma}_{p,r})\cap L^1([0,T];\dot{B}^{\sigma+2}_{p,r}) $ of the equation
$$
\partial_t f-\nu\Delta f=g,\ f|_{t=0}=f_0,
$$
we have the following a priori estimate, for all $\rho\leq\rho_1\leq +\infty$,
\begin{equation}
\nu^{\frac{1}{\rho_1}} \|f\|_{\widetilde{L}^{\rho_1}_T(\dot{B}^{\sigma+\frac{2}{\rho_1}}_{p,r})}\leq C\left(
\|f_0\|_{\dot{B}^{\sigma}_{p,r}}+\nu^{\frac{1}{\rho}-1}\|g \|_{\widetilde{L}^\rho_T(\dot{B}^{\sigma-2+\frac{2}{\rho}}_{p,r})}
\right).
\end{equation}
\end{prop}
\section{A priori estimates}\label{S4}
\noindent Before proceeding any further, let us denote
\begin{gather*}
X_{L}(T):=\|b_L\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})}
+\|\Pe^\bot u_L\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})}, \\
X_{H}(T):=
\|b_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})} +\|\Pe^\bot u_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1}_{2,1})},\\
Y_\al(T):=\|(b_L,\Pe^\bot u_L)\|_{\widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})},\\
W(T):=\|\Pe u\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}+\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})},\\
X(T)=X_{L}(T)+X_{H}(T)+Y_\al(T)+W(T),\\
X_{L}^0:=\| b_{0L}\|_{\dot{B}^{\frac{N}{2}-1+\al}_{2,1}}+\|\Pe^\bot u_{0L}\|_{\dot{B}^{\frac{N}{2}-1+\al}_{2,1}}, \quad X_{H}^0:=\| b_{0H}\|_{\dot{B}^{\frac{N}{2}}_{2,1}}+\| \Pe^\bot u_{0H}\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}},
\end{gather*}
and
\begin{equation*}
W^0:=\|\Pe u_0\|_{\dot{B}^{\fr{N}{p}-1}_{p,1}},\quad X^0:= X^0_L+X^0_H+W^0.
\end{equation*}
\subsection{Nonlinear estimates}
Now we estimate the nonlinear terms one by one as follows.
By virtue of the low frequency embedding
\begin{equation}\label{lf-embeding1}
\|P_{<1}\phi\|_{\dot{B}^{s_1}_{2,1}}\le C\|P_{<1}\phi\|_{\dot{B}^{s_2}_{2,1}}, \quad\mathrm{for\ \ all}\quad \phi\in\dot{B}^{s_2}_{2,1}, \textrm{ and } s_1>s_2,
\end{equation}
the high frequency embedding
\begin{equation}\label{hf-embedding1}
\|P_{\geq1}\phi\|_{\dot{B}^{s_1}_{2,1}}\le C\|P_{\geq 1}\phi\|_{\dot{B}^{s_2}_{2,1}}, \quad\mathrm{for\ \ all}\quad \phi\in\dot{B}^{s_2}_{2,1}, \textrm{ and } s_1<s_2,
\end{equation}
and Corollary \ref{coro-product}, we can obtain the following lemma, whose proof will be given in Appendix.
\begin{lem}\label{lem4.1}
Assume $(b,u)\in \mathcal{E}^{\frac{N}{2},\al}_p(T)$ with $(p,\al)$ satisfying \eqref{p1}-\eqref{al1}, then we have
\begin{equation}\label{5.1}
\|P_{<1} (b \mathrm{div} u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\leq C X^2(T),
\end{equation}
\begin{equation}\label{5.1-0}
\|P_{<1}(\dot{T}'_{\nb b}u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\leq C X^2(T),
\end{equation}
and
\begin{equation}\label{5.1-1}
\|P_{<1}(\dot{T}_{u}\nb b)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\leq C X^2(T).
\end{equation}
\end{lem}
Since $\mathrm{div}(bu)=b\mathrm{div} u+\dot{T}'_{\nb b}u+\dot{T}_{u}\nb b$, from Lemma \ref{lem4.1}, we easily get the following Corollary, which will be used to bound $Y_\al(T)$.
\begin{coro}\label{coro1}
Under the conditions in Lemma \ref{lem4.1}, we have
\begin{equation}\label{5.2}
\|P_{<1} \mathrm{div}(b u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\leq C X^2(T).
\end{equation}
\end{coro}
From (\ref{lf-embeding1})-(\ref{hf-embedding1}), Lemma \ref{Bernstein} and Proposition \ref{p-TR}, we can obtain the following lemma, whose proof will be given in Appendix.
\begin{lem}\label{lem4.2}
Under the assumptions in Lemma \ref{lem4.1}, we have
\begin{equation}\label{5.1'}
\|P_{\ge1} (b \mathrm{div} u)\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\leq C X^2(T),
\end{equation}
\begin{equation}\label{5.1-0'}
\|P_{\ge1}(\dot{T}'_{\nb b}u)\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\leq C X^2(T).
\end{equation}
\end{lem}
From the low frequency embedding \eqref{lf-embeding1}, Lemma \ref{Bernstein}, Proposition \ref{p-TR}, Corollary \ref{coro-product}, Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11}, and the special structure of $ \mathrm{div}(I(b)\mathcal{A}\Pe u)$, we could get the following lemma, whose proof will be given in Appendix.
\begin{lem}\label{lem4.3}
Under the assumptions in Lemma \ref{lem4.1} and
\begin{equation*}
\|b\|_{L^\infty_T(L^\infty)}\le\fr12,
\end{equation*}
we have
\begin{equation}
\|P_{<1}\left(I(b)\mathcal{A}\Pe^\bot u\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(I(b)\mathcal{A}\Pe^\bot u\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX^2(T),
\end{equation}
and
\begin{equation}\label{4.26}
\|P_{<1}\left(\Lm^{-1}\mathrm{div}(I(b)\mathcal{A}\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(I(b)\mathcal{A}\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX^2(T).
\end{equation}
\end{lem}
Similarly, using Lemma \ref{lem-Kb} in the Appendix, we can get the following lemma, whose proof will be given in the Appendix.
\begin{lem}\label{lem4.4}
Under the assumptions in Lemma \ref{lem4.3},
we have
\begin{equation}
\|P_{<1}\left(K(b)\nb b\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(K(b)\nb b\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX^2(T).
\end{equation}
\end{lem}
In the next two lemmas, we shall estimate the convection term $\Lm^{-1}\mathrm{div}(u\cdot\nb u)$.
Here, we distinguish the terms with the potential part $\Pe^\bot u$ from the terms with the divergence free part $\Pe u$.
\begin{lem}\label{lem4.5}
Under the assumptions in Lemma \ref{lem4.1}, we have
\begin{equation}
\|P_{<1} ( (\Pe^\bot u\cdot\nabla)\Pe^\bot u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})} +\|P_{\ge1} ( (\Pe^\bot u\cdot\nabla)\Pe^\bot u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}
\leq C X^2(T),
\end{equation}
and
\begin{equation}\label{4.40}
\|P_{<1} ( \dot{T}_{\Pe^\bot u}\cdot\nabla d)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}+ \|P_{\ge1} ( \dot{T}_{\Pe^\bot u}\cdot\nabla d)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}
\leq C X^2(T).
\end{equation}
\end{lem}
\begin{proof}
From Bony's decomposition, the low frequency embedding \eqref{lf-embeding1} and the high frequency embedding \eqref{hf-embedding1}, Proposition \ref{p-TR}, Corollary \ref{coro-product} and Lemma \ref{Bernstein}, we have
\begin{eqnarray}\label{uu}
&& \|P_{<1} ( (\Pe^\bot u\cdot\nabla)\Pe^\bot u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}+\|P_{\ge1} ( (\Pe^\bot u\cdot\nabla)\Pe^\bot u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}\nonumber\\
&\leq&C \left( \| \dot{T}_{\Pe^\bot u_L}\cdot\nb \Pe^\bot u_L\|_{L^1_T(\dot{B}^{\frac{N}{2} -1+\al}_{2,1})}
+\| \dot{T}'_{\nb\Pe^\bot u_L}\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\frac{N}{2}-1 +\al}_{2,1})}
+\| \dot{T}_{\Pe^\bot u_L}\cdot \nb \Pe^\bot u_H \|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}
\right.\nonumber\\
&&\nn +\| \dot{T}'_{\nb \Pe^\bot u_H} \Pe^\bot u_L \|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}+\| \dot{T}'_{\Pe^\bot u_H}\cdot \nb \Pe^\bot u_L \|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\\
&&\left.+\| \dot{T}_{\nb \Pe^\bot u_L} \Pe^\bot u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}+\| \Pe^\bot u_H\cdot\nb\Pe^\bot u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}-1 }_{2,1})}
\right)\nonumber\\
&\leq&C \left( \|\Pe^\bot u_L\|_{L^{\frac{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}
\|\Pe^\bot u_L\|_{L^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-\al+1}_{2,1})}
+ \|\Pe^\bot u_L\|_{L^{\frac{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}
\|\nb\Pe^\bot u_H\|_{L^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-2\al}_{2,1})}
\right.\nonumber\\
&&\nn + \|\nb\Pe^\bot u_H\|_{L^{1}_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\|\Pe^\bot u_L\|_{L^{\infty}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}+ \|\Pe^\bot u_H\|_{L^{2}_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\|\nb\Pe^\bot u_L\|_{L^{2}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})} \\
&&\nn\left. + \|\nb\Pe^\bot u_L\|_{L^{\frac{1}{\al}}_T(\dot{B}^{2\al-2}_{\infty,1})}
\|\Pe^\bot u_H\|_{L^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}+1-2\al}_{2,1})} + \|\Pe^\bot u_H\|_{L^{2}_T(\dot{B}^{\frac{N}{2}}_{2,1})}^2
\right)\nonumber\\
&\leq&C X^2(T),
\end{eqnarray}
where we have used the facts \eqref{5.3} and
\begin{equation}\label{5.5}
\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1 }_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1 }_{2,1})
\subset \widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-2\al+1}_{2,1}).
\end{equation}
Moreover, $\dot{T}_{\Pe^\bot u}\cdot\nabla d$ can be bounded in a similar way. This completes the proof of Lemma \ref{lem4.5}.
\end{proof}
\begin{lem}\label{lem4.6}
Under the assumptions in Lemma \ref{lem4.1}, we get
\begin{gather}
\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX^2(T),\\
\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\Pe^\bot u\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\Pe^\bot u\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX^2(T),\\
\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe^\bot u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe^\bot u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX^2(T),
\end{gather}
and
\begin{equation}
\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\dot{T}_{\Pe u}\cdot\nb d)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\dot{T}_{\Pe u}\cdot\nb d)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX^2(T).
\end{equation}
\end{lem}
\begin{proof}
From Lemma \ref{Bernstein}, Bony's decomposition, the low frequency embedding \eqref{lf-embeding1}, and Proposition \ref{p-TR}, we have
\begin{eqnarray}\label{PuPu2}
\nn&&\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\dot{T}_{\Pe u}\cdot\nb\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}+C\|\dot{T}_{\nb\Pe u}\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}+C\|\pr_k\dot{R}((\Pe u)^k,\Pe u)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\Pe u\|_{L^\infty_T(\dot{B}^{\fr{N}{p^*}-1}_{p^*,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}+C\|\nb\Pe u\|_{L^2_T(\dot{B}^{\fr{N}{p^*}-1}_{p^*,1})}\|\Pe u\|_{L^2_T(\dot{B}^{\fr{N}{p}}_{p,1})}\\
\nn&\le&C\|\Pe u\|_{L^\infty_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}+C\|\Pe u\|_{L^2_T(\dot{B}^{\fr{N}{p}}_{p,1})}^2\\
&\le&CX^2(T),
\end{eqnarray}
where we have used the fact $p^*\ge p$ in the third inequality of \eqref{PuPu2}. This explains why we need to assume $p\le4$ in \eqref{p1}. Next, using $\mathrm{div} \Pe u=0$ and the fact $u=u_L+u_H$, we can decompose $\Lm^{-1}\mathrm{div}(\Pe^\bot u\cdot\nb\Pe u)$ as follows:
\begin{equation*}
\Lm^{-1}\mathrm{div}(\Pe^\bot u\cdot\nb\Pe u)=\Lm^{-1}\dot{T}_{\pr_i(\Pe^\bot u_L)^k}\pr_k(\Pe u)^i+\Lm^{-1}\mathrm{div}(\dot{T}'_{\nb\Pe u}\Pe^\bot u_L)+\Lm^{-1}\mathrm{div}(\Pe^\bot u_H\cdot\nb\Pe u).
\end{equation*}
Then it is easy to see that
\begin{eqnarray}
\nn&&\|P_{<1}\left(\Lm^{-1}\dot{T}_{\pr_i(\Pe^\bot u_L)^k}\pr_k(\Pe u)^i\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&&+\|P_{\ge1}\left(\Lm^{-1}\dot{T}_{\pr_i(\Pe^\bot u_L)^k}\pr_k(\Pe u)^i\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\dot{T}_{\pr_i(\Pe^\bot u_L)^k}\pr_k(\Pe u)^i\|_{L^1_T(\dot{B}^{\fr{N}{2}-2+\al}_{2,1})}\\
\nn&\le&C\|{\pr_i(\Pe^\bot u_L)^k}\|_{L^\infty_T(\dot{B}^{\fr{N}{p^*}-2+\al}_{p^*,1})}\|\pr_k(\Pe u)^i\|_{L^1_T(\dot{B}^{\fr{N}{p}}_{p,1})}\\
\nn&\le&C\|{\Pe^\bot u_L}\|_{L^\infty_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
&\le&CX^2(T),
\end{eqnarray}
and
\begin{eqnarray}
\nn&&\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\dot{T}'_{\nb\Pe u}\Pe^\bot u_L)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&&+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\dot{T}'_{\nb\Pe u}\Pe^\bot u_L)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\dot{T}'_{\nb\Pe u}\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le&C\|{\nb\Pe u}\|_{L^\infty_T(\dot{B}^{-2}_{\infty,1})}\|\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})}\\
\nn&\le&C\|{\Pe u}\|_{L^\infty_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\|\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray}
Similar to \eqref{PuPu2} and \eqref{4.30}, we have
\begin{eqnarray}
\nn&&\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\Pe^\bot u_H\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\Pe^\bot u_H\cdot\nb\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\Pe^\bot u_H\|_{L^\infty_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}+C\|\Pe u\|_{L^2_T(\dot{B}^{\fr{N}{p}}_{p,1})}\|\Pe^\bot u_H\|_{L^2_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray}
To bound $\Pe u\cdot\nb\Pe^\bot u$, we need to decompose it as follows:
\begin{equation*}
\Pe u\cdot\nb\Pe^\bot u=\Pe u\cdot\nb\Pe^\bot u_L+\Pe u\cdot\nb\Pe^\bot u_H.
\end{equation*}
Then using the high frequency embedding \eqref{hf-embedding1}, one easily deduces that
\begin{eqnarray}
\nn&&\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe^\bot u_L)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe^\bot u_L)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\dot{T}_{\Pe u}\cdot\nb\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+C\|\dot{T}'_{\nb\Pe^\bot u_L}\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le&C\|{\Pe u}\|_{L^\infty_T(\dot{B}^{-1}_{\infty,1})}\|\nb\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{2}+\al}_{2,1})}+C\|{\nb\Pe^\bot u_L}\|_{L^\infty_T(\dot{B}^{\fr{N}{p^*}-2+\al}_{p^*,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
\nn&\le&C\|{\Pe u}\|_{L^\infty_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\|\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})}+C\|{\Pe^\bot u_L}\|_{L^\infty_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
&\le&CX^2(T).
\end{eqnarray}
In the same manner as \eqref{PuPu2}, we are led to
\begin{eqnarray}
\nn&&\|P_{<1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe^\bot u_H)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(\Lm^{-1}\mathrm{div}(\Pe u\cdot\nb\Pe^\bot u_H)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\Pe u\|_{L^\infty_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\|\Pe^\bot u_H\|_{L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}+C\|\Pe u\|_{L^2_T(\dot{B}^{\fr{N}{p}}_{p,1})}\|\Pe^\bot u_H\|_{L^2_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray}
Finally, $\dot{T}_{\Pe u}\cdot\nb d$ can be bounded in the same way as $\Pe u\cdot\nb\Pe^\bot u$. The proof of Lemma \ref{lem4.6} is completed.
\end{proof}
The next two lemmas will be used to bound the nonlinear terms in the equation of the divergence free part $\Pe u$ of the velocity (\ref{biu}).
\begin{lem}\label{lem4.7}
Under the assumptions in Lemma \ref{lem4.1}, we obtain
\begin{equation}
\|\Pe(u\cdot \nb u)\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\le CX^2(T).
\end{equation}
\end{lem}
\begin{proof}
It is not difficult to verify that
\begin{equation}
\Pe(u\cdot \nb u)=\Pe(\Pe u\cdot \nb\Pe u)+\Pe(\Pe u\cdot \nb\Pe^\bot u)+\Pe(\Pe^\bot u\cdot \nb\Pe u).\label{4.63}
\end{equation}
Then using Lemma \ref{Bernstein} and (\ref{product1-s}) yields
\begin{eqnarray}\label{PuPup}
\nn&&\|\Pe u\cdot\nb\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}=\|\mathrm{div}(\Pe u\otimes\Pe u)\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|\Pe u\otimes\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}}_{p,1})}
\le C\|\Pe u\|_{\widetilde{L}^2_T(\dot{B}^{\fr{N}{p}}_{p,1})}^2\le CX^2(T).
\end{eqnarray}
Next, in view of \eqref{com-Tiucu}, using Proposition \ref{p-TR}, Lemma 2.99 in \cite{Bahouri-Chemin-Danchin11} and $\mathrm{div} \Pe u=0$, we find that
\begin{eqnarray}\label{PuPbotu}
\nn&&\|\Pe(\Pe u\cdot\nb\Pe^\bot u)\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|[\Pe,\dot{T}_{(\Pe u)^k}]\pr_k\Pe^\bot u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}+C\|\dot{T}_{\nb\Pe^\bot u}\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}+C\|\dot{R}({\Pe u},\nb\Pe^\bot u)\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|\nb\Pe u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{-2\al}_{\infty,1})}\|\nb\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}-2+2\al}_{p,1})}+C\|\nb\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{2\al-2}_{\infty,1})}\|\Pe u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p}+1-2\al}_{p,1})}\\
\nn&&+C\|\pr_k\dot{R}((\Pe u)^k,\Pe^\bot u)\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{{p},1})}\\
\nn&\le&C\|\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|\Pe u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p}+1-2\al}_{p,1})}\\
&\le&CX^2(T).
\end{eqnarray}
Finally, the condition \eqref{p1} on $p$ ensures that $p<2N$, then it is easy to see that
\begin{eqnarray}\label{PbotuPu}
\nn&&\|\Pe^\bot u\cdot\nb\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|\dot{T}_{\Pe^\bot u}\nb\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}+C\|\dot{T}_{\nb\Pe u}\Pe^\bot u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}+C\|\dot{R}({\Pe^\bot u},\nb\Pe u)\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}\|\nb\Pe u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p}-2\al}_{p,1})}+C\|\nb\Pe u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{-2\al}_{\infty,1})}\|\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\\
\nn&&+C\|\dot{R}(\Pe^\bot u,\nb\Pe u)\|_{L^1_T(\dot{B}^{\fr{2N}{p}-1}_{\fr{p}{2},1})}\\
\nn&\le&C\|\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|\Pe u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p}+1-2\al}_{p,1})}\\
&\le&CX^2(T).
\end{eqnarray}
This explains why we need to assume $p<4$ if $N=2$. We complete the proof of Lemma \ref{lem4.7}.
\end{proof}
\begin{lem}\label{lem4.8}
Under the assumptions in Lemma \ref{lem4.4},
we have
\begin{equation}
\|I(b)\mathcal{A}u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\le CX^2(T).
\end{equation}
\end{lem}
\begin{proof}
Let us first decompose $u$ as
\begin{equation*}
u=\Pe^\bot u_L+(\Pe^\bot u_H+\Pe u).
\end{equation*}
Then using Corollary \ref{coro-product} with $u=I(b), v=\mathcal{A}\Pe^\bot u_L, p_1=q_2=2, \rho=p_2=q_1=p, s_1=\sigma_2=\fr{N}{2}, s_2=\sigma_1=\fr{N}{p}-1$, and Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11}, we obtain
\begin{eqnarray}
\nn&&\|I(b)\mathcal{A}\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le& C\|I(b)\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\mathcal{A}\Pe^\bot u_L\|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le& C\|b\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\Pe^\bot u_L\|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}+2\al}_{2,1})}\\
&\le&CX^2(T),
\end{eqnarray}
where we have used \eqref{b1}, \eqref{5.6-1} and \eqref{lfe1}. Similarly, using \eqref{b2}, we arrive at
\begin{eqnarray}
\nn&&\|I(b)\mathcal{A}(\Pe^\bot u_H+\Pe u)\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|I(b)\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\mathcal{A}(\Pe^\bot u_H+\Pe u)\|_{L^{1}_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le& C\|b\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\left(\|\Pe^\bot u_H\|_{L^{1}_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}+\|\Pe u\|_{L^{1}_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\right)\\
&\le&CX^2(T).
\end{eqnarray}
The proof of Lemma \ref{lem4.8} is completed.
\end{proof}
\subsection{Estimates of $X(T)$}
Using the above lemmas, we can obtain the dispersive estimates and energy estimates as follows.
\noindent{\bf Step (I): Dispersive estimates.}
\begin{lem}\label{lem4.9}
Let $p$ and $\al$ satisfy \eqref{p1} and \eqref{al1}, respectively. Assume that $(b,u)$ is a solution to system \eqref{bcu}--\eqref{biu} in $\mathcal{E}^{\frac{N}{2},\al}_p(T)$ with
\begin{equation*}
\|b\|_{L^\infty_T(L^\infty)}\le\fr12.
\end{equation*}
Then we have
\begin{equation}\label{dis-uE}
Y_\al(T)\leq CX_L^0+CX_L(T)+CX^2(T).
\end{equation}
\end{lem}
\begin{proof}
First of all, let us cut off the system \eqref{bd} by using the operator $P_{<1}$. Then applying Proposition \ref{prop-wave} to the resulting system with $\eps=1$, $s=\frac{N}{2}-1+\al, \bar{p}=2, \bar{r}=\infty$, and $r=\fr{1}{\al}$, we arrive at
\begin{eqnarray*}
\nn Y_\al(T)
\nn &\le& C\left(\|(b_{0L}, d_{0L})\|_{\dot{B}^{\frac{N}{2}-1+\al}_{2,1}}+\|P_{<1}\mathrm{div}(bu)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}+\|P_{<1}\Delta d\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\right.\\
\nn&&\left.+\|P_{<1}\Lm^{-1}\mathrm{div}(u\cdot\nb u+K(b)\nb b+I(b)\mathcal{A}u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\right)\\
&\leq&C \left(X^0_L+X_L(T)
+\|P_{<1}\mathrm{div}(bu)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\right.\\
\nn&&\left.+\|P_{<1}\Lm^{-1}\mathrm{div}(u\cdot\nb u+K(b)\nb b+I(b)\mathcal{A}u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\right).
\end{eqnarray*}
Combining Corollary \ref{coro1} with Lemmas \ref{lem4.3}--\ref{lem4.6}, we find that the estimate
\eqref{dis-uE} holds. This completes the proof of Lemma \ref{lem4.9}.
\end{proof}
\noindent{\bf Step (II): Energy estimates.}\par
To begin with, let us localize the system \eqref{bd} as follows:
\begin{eqnarray}\label{viscoelastic-local}
\begin{cases}
\pr_tb_q+\dot{S}_{q-1}u\cdot\nb b_q+\Lm d_q=\tl{f}_q,\\
\pr_td_q+\dot{S}_{q-1}u \cdot \nb d_q-\Delta d_q-\Lm b_q=\tl{g}_q,
\end{cases}
\end{eqnarray}
with
\begin{eqnarray*}
\tl{f}_q&:=&f_q+\left(\dot{S}_{q-1}u\cdot\nb b_q-\dot{\Delta}_q\dot{T}_u\cdot\nb b\right),\\
\tl{g}_q&:=&g_q+\left(\dot{S}_{q-1}u \cdot \nb d_q-\dot{\Delta}_q\dot{T}_u\cdot\nb d\right),
\end{eqnarray*}
and
\begin{eqnarray*}
{f}&:=&-b\mathrm{div} u-\dot{T}'_{\pr_kb}u^k,\\
{g}&:=&\dot{T}_u\cdot\nb d-\Lm^{-1}\mathrm{div}\left(u\cdot\nb u+K(b)\nb b+I(b)\mathcal{A}u\right).
\end{eqnarray*}
Now, we estimate the low frequency part $X_L(T)$ and high frequency part $X_H(T)$ separately.
\noindent (i) \underline{ Estimates of $X_{L}(T)$}.\par
\begin{lem}\label{lem4.10}
Under the conditions in Lemma \ref{lem4.9}, we have
\begin{equation}\label{XQL}
X_{L}(T)\leq CX_{L}^0+CX^2(T).
\end{equation}
\end{lem}
\begin{proof}
Similar to the energy estimates for the isentropic Navier-Stokes equations obtained by Danchin \cite{Danchin00}, we easily get the following three equalities
\begin{equation}\label{e2}
\fr12\fr{d}{dt}\|b_q\|^2_{L^2}+(\Lm d_q|b_q)=\fr12\int \mathrm{div}\dot{S}_{q-1}u |b_q|^2+(\tl{f}_q|b_q),
\end{equation}
\begin{equation}\label{e1}
\fr12\fr{d}{dt}\|d_q\|^2_{L^2}+\|\nb d_q\|_{L^2}^2-(\Lm b_q|d_q)=\fr12\int \mathrm{div}\dot{S}_{q-1}u |d_q|^2+(\tl{g}_q|d_q),
\end{equation}
and
\begin{eqnarray}\label{e3}
\nn&&\fr{d}{dt}(d_q|\Lm b_q)-\|\Lm b_q\|_{L^2}^2+\|\Lm d_q\|_{L^2}^2-(\Delta d_q|\Lm b_q)\\
&=&\int\mathrm{div}\dot{S}_{q-1}u\Lm b_qd_q-([\Lm, \dot{S}_{q-1}u\cdot\nb]b_q|d_q)+(\tl{g}_q|\Lm b_q)+(\Lm \tl{f}_q|d_q).
\end{eqnarray}
A linear combination of \eqref{e2}--\eqref{e3} yields
\begin{eqnarray}\label{eL}
\nn&&\fr12\fr{d}{dt}\left(\| b_q\|^2_{L^2}+\|d_q\|^2_{L^2}-\fr14(d_q|\Lm b_q)\right)+\fr78\|\Lm d_q\|^2_{L^2}+\fr18\|\Lm b_q\|^2_{L^2}+\fr18(\Delta d_q|\Lm b_q)\\
\nn&=&\fr12\int \mathrm{div}\dot{S}_{q-1}u \left(|b_q|^2+|d_q|^2\right)-\fr18\int\mathrm{div} \dot{S}_{q-1}u\Lm b_qd_q\\
&&+(\tl{g}_q|d_q-\fr18\Lm b_q)+( \tl{f}_q| b_q-\fr18\Lm d_q)+\fr18([\Lm, \dot{S}_{q-1}u\cdot \nb]b_q|d_q).
\end{eqnarray}
Noting that $u=u_L+ u_H$, it is easy to see that
\begin{eqnarray}\label{Su}
\nn\|\nb \dot{S}_{q-1}u\|_{L^\infty}&\le&C\left(2^{q(2-2\al)}\left(2^{q(2\al-2)}\|\nb \dot{S}_{q-1}u_L\|_{L^\infty}\right)+\|\nb \dot{S}_{q-1} u_H\|_{L^\infty}\right)\\
&\le&C\left(m(u)+\|\nb u_H\|_{L^\infty}\right),
\end{eqnarray}
where
\begin{equation}\label{m1}
m(u):=\min\left\{2^{q(2-2\al)}\|\nb u_L\|_{\dot{B}^{2\al-2}_{\infty,\infty}}, \|\nb u_L\|_{L^\infty}\right\}.
\end{equation}
Then following the proof of Lemma 2.99 in \cite{Bahouri-Chemin-Danchin11}, we have
\begin{eqnarray}\label{com1}
\nn&&\|[\Lm, \dot{S}_{q-1}u\cdot\nb]b_q\|_{L^2}\\
\nn&\le& C\|\nb \dot{S}_{q-1}u\|_{L^\infty}\|\Lm b_q\|_{L^2}\\
&\le&C\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\left(2^q\| b_q\|_{L^2}\right).
\end{eqnarray}
According to Lemma 7.5 in \cite{Danchin02}, we arrive at
\begin{eqnarray}\label{com2}
\nn&&\|\dot{S}_{q-1}u\cdot\nb d_q-\dot{\Delta}_q\dot{T}_u\cdot\nb d\|_{L^2}\\
\nn&\le&\|\dot{S}_{q-1}u_L\cdot\nb d_q-\dot{\Delta}_q\dot{T}_{u_L}\cdot\nb d\|_{L^2}+\|\dot{S}_{q-1} u_H\cdot\nb d_q-\dot{\Delta}_q\dot{T}_{ u_H}\cdot\nb d\|_{L^2}\\
&\le&C\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\sum_{|q'-q|\le4}\|d_{q'}\|_{L^2},
\end{eqnarray}
and
\begin{equation}\label{com3}
\|\dot{S}_{q-1}u\cdot\nb b_q-\dot{\Delta}_q\dot{T}_u\cdot\nb b\|_{L^2}\\
\le C\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\sum_{|q'-q|\le4}\|b_{q'}\|_{L^2}.
\end{equation}
Then thanks to Bernstein's inequality, we infer from \eqref{eL}--\eqref{com3} that, for $q\le0$, there holds
\begin{eqnarray}\label{qle0}
\nn&&\| b_q(t)\|_{L^2}+\|d_q(t)\|_{L^2}+2^{2q}\| b_q\|_{L^1_t(L^2)}+2^{2q}\| d_q\|_{L^1_t(L^2)}\\
\nn&\le&C\left(\| b_q(0)\|_{L^2}+\|d_q(0)\|_{L^2}+\| f_q\|_{L^1_t(L^2)}+\| g_q\|_{L^1_t(L^2)}\right)\\
&&+C\int_0^t\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\sum_{|q'-q|\le4}(\| b_{q'}\|_{L^2}+\|d_{q'}\|_{L^2})dt'.
\end{eqnarray}
Recalling that
$$
X_{L}(T)=\|(b_L, \Pe^\bot u_L)\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})},
$$
multiplying \eqref{qle0} by $2^{q(\frac{N}{2}-1+\al)}$, and taking sum with respect to $q$ over $\{\cdots, -2, -1, 0 \}$, we obtain
\begin{eqnarray}\label{e1-d4L}
X_{L}(T)&\le& C\left(X^0_{L}+\|P_{<1}f\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
+\|P_{<1}g\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\right)\nn\\
&&+C\int^T_0\sum_{q\leq0}2^{q(\frac{N}{2}-1+\al)} ( m(u) +\|\nb u_H\|_{L^\infty})\sum_{|q'-q|\leq4} \|(\dot{\Delta}_{q'} b, \dot{\Delta}_{q'} d)\|_{L^2}ds.
\end{eqnarray}
Now we go to bound the right hand side of \eqref{e1-d4L}. First of all, from Lemmas \ref{lem4.1}, \ref{lem4.3}--\ref{lem4.6}, we have
\begin{equation}
\|P_{<1}f\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
+\|P_{<1}g\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\leq CX^2(T).
\end{equation}
The remaining terms of the right hand side of \eqref{e1-d4L} can be bounded as follows. In fact, by virtue of Young's inequality, H\"{o}lder's inequality and the high frequency embedding \eqref{hf-embedding1}, we are led to
\begin{eqnarray*}
&&\int^T_0\sum_{q\leq 0}2^{q(\frac{N}{2}-1+\al)}\|\nb u_H\|_{L^\infty}\sum_{|q'-q|\leq4} \|(\dot{\Delta}_{q'} b,\dot{\Delta}_{q'} d)\|_{L^2}ds\\
&\leq&\int^T_0\sum_{q\leq 0}2^{q(\frac{N}{2}-1+\al)}\|\nb u_H\|_{L^\infty}\sum_{|q'-q|\leq4} \|(\dot{\Delta}_{q'} b_L,\dot{\Delta}_{q'} d_L)\|_{L^2}ds\\
&&+\int^T_0\sum_{q\leq 0}2^{q(\frac{N}{2}-1)}\|\nb u_H\|_{L^\infty}\sum_{|q'-q|\leq4} \|(\dot{\Delta}_{q'} b_H,\dot{\Delta}_{q'} d_H)\|_{L^2}ds\\
&\leq& C\|\nb u_H\|_{L^1_T(L^\infty)}
\left(\|(b_L,d_L)\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
+\|b_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}
+\|d_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}
\right)\\
&\leq&C\left(\|\nb \Pe^\bot u_H\|_{L^1_T(L^\infty)}+\|\nb \Pe u_H\|_{L^1_T(L^\infty)}\right)X(T)\\
&\leq&CX^2(T).
\end{eqnarray*}
Similarly, using \eqref{5.3}, and the interpolation
\begin{equation}\label{5.4'}
\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{p}-1 }_{p,1})\cap L^1_T(\dot{B}^{\frac{N}{p}+1 }_{p,1})
\subset \widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\frac{N}{p}+2\al-1}_{p,1}) \subset \widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1}),
\end{equation}
we have
\begin{eqnarray*}
&&\int^T_0\sum_{q\leq0}2^{q(\frac{N}{2}+1-\al)} \|\nabla u_L\|_{\dot{B}^{2\al-2}_{\infty,\infty}}
\sum_{|q'-q|\leq4} \|(\dot{\Delta}_{q'} b_L,\dot{\Delta}_{q'} d_L)\|_{L^2}ds\\
&\leq& C\|u_L\|_{\widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}\|(b_L,d_L)\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(
\dot{B}^{\frac{N}{2}+1-\al}_{2,1})}\\
&\leq& C\left(\|\Pe^\bot u_L\|_{\widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}+\|\Pe u_L\|_{\widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\right)\|(b_L,d_L)\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(
\dot{B}^{\frac{N}{2}+1-\al}_{2,1})}\\
&\leq&CX^2(T).
\end{eqnarray*}
Finally, according to the following interpolations
\begin{gather}
\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1}_{2,1})
\subset \widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1}),\\
\label{4.87} \widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})
\subset \widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\frac{N}{2}+1}_{2,1}),\\
\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{p}-1}_{p,1})\cap L^1_T(\dot{B}^{\frac{N}{p}+1}_{p,1})
\subset \widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\frac{N}{p}+1-\al}_{p,1}),
\end{gather}
the low frequency embedding
\begin{equation*}
\|\Pe u_L\|_{\widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\le C\|\Pe u_L\|_{\widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\fr{N}{p}+1-\al}_{p,1})},
\end{equation*}
and the high frequency embedding
\begin{equation*}
\|b_H\|_{\widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\le C\|b_H\|_{\widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})},
\end{equation*}
we find that
\begin{eqnarray}\label{4.78}
\nn&&\int^T_0\sum_{q\leq0} \|\nabla u_L\|_{L^\infty}2^{q(\frac{N}{2}-1+\al)} \sum_{|q'-q|\leq4} \|(\dot{\Delta}_{q'} b_H,\dot{\Delta}_{q'} d_H)\|_{L^2}ds\\
\nn&\leq& C\|\nb u_L\|_{{L}^{\frac{2}{2-\al}}_T(L^\infty)}\left(
\|b_H\|_{\widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
+
\|d_H\|_{\widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\right)\\
\nn&\leq& C\|\nb u_L\|_{{L}^{\frac{2}{2-\al}}_T(L^\infty)}\left(
\|b_H\|_{\widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}
+
\|d_H\|_{\widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\right)\\
\nn&\leq&C\left(\|\Pe u_L\|_{\widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\fr{N}{p}+1-\al}_{p,1})}+\| \Pe^\bot u_L\|_{\widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\frac{N}{2}+1}_{2,1})}\right)X(T)\\
&\le&CX^2(T).
\end{eqnarray}
Combining these estimates with (\ref{e1-d4L}), we obtain \eqref{XQL}. The proof of Lemma \ref{lem4.10} is completed.
\end{proof}
\noindent (ii) \underline{ Estimates of $X_{H}(T)$}.\par
\begin{lem}\label{lem4.11}
Under the conditions in Lemma \ref{lem4.9}, we have
\begin{equation}\label{XQH}
X_{H}(T)\leq CX_{H}^0+CX^2(T).
\end{equation}
\end{lem}
\begin{proof}
To begin with, let us give the $L^2$ energy estimate for $\Lm b_q$,
\begin{equation}\label{e4}
\fr12\fr{d}{dt}\|\Lm b_q\|^2_{L^2}+(\Lm^2 d_q|\Lm b_q)=\fr12\int\mathrm{div}\dot{S}_{q-1}u|\Lm b_q|^2-([\Lm,\dot{S}_{q-1}u\cdot\nb]b_q|\Lm b_q)+(\Lm \tl{f}_q|\Lm b_q).
\end{equation}
It follows from \eqref{e1}, \eqref{e3} and \eqref{e4} that
\begin{eqnarray}\label{Hq}
\nn&&\fr12\fr{d}{dt}\left(\|\Lm b_q\|^2_{L^2}+2\|d_q\|^2_{L^2}-2(d_q|\Lm b_q)\right)+\|\Lm d_q\|^2_{L^2}+\|\Lm b_q\|^2_{L^2}-2(d_q|\Lm b_q)\\
\nn&=&\int\mathrm{div}\dot{S}_{q-1}u\left(\fr12|\Lm b_q|^2+|d_q|^2\right)-\int\mathrm{div}\dot{S}_{q-1}u\Lm b_qd_q\\
&&+(\tl{g}_q|2d_q-\Lm b_q)+(\Lm \tl{f}_q|\Lm b_q-d_q)+([\Lm, \dot{S}_{q-1}u\cdot \nb]b_q|d_q-\Lm b_q).
\end{eqnarray}
To exhibit the smoothing effect of $u$ in high frequency case, we need the
following $L^2$ energy estimate for $d_q$,
\begin{equation}\label{Huq}
\fr12\fr{d}{dt}\|d_q\|^2_{L^2}+\|\Lm d_q\|_{L^2}^2=\fr12\int\mathrm{div}\dot{S}_{q-1}u|d_q|^2+(\Lm b_q|d_q)+(\tl{g}_q|d_q).
\end{equation}
Using \eqref{com1} and Lemma 7.5 in \cite{Danchin02} again yields
\begin{eqnarray}\label{com4}
\nn&&\|\Lm\left(\dot{S}_{q-1}u\cdot\nb b_q-\dot{\Delta}_q\dot{T}_u\cdot\nb b\right)\|_{L^2}\\
\nn&\le& C\|[\Lm,\dot{S}_{q-1}u\cdot\nb] b_q\|_{L^2}+C\|\dot{S}_{q-1}u\cdot\nb\Lm b_q-\Lm\dot{\Delta}_q\dot{T}_u\cdot\nb b\|_{L^2}\\
&\le& C\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\sum_{|q'-q|\le4}2^{q'}\|b_{q'}\|_{L^2}.
\end{eqnarray}
Taking the advantage of Bernstein's inequality, we infer from \eqref{Su}--\eqref{com2}, \eqref{Hq} and \eqref{com4} that, for $q\ge1$, there holds
\begin{eqnarray*}
&&2^q\| b_q(t)\|_{L^2}+\|d_q(t)\|_{L^2}+2^q\| b_q\|_{L^1_t(L^2)}+\| d_q\|_{L^1_t(L^2)}\\
&\le&C\left(2^q\| b_q(0)\|_{L^2}+\|d_q(0)\|_{L^2}+2^q\| f_q\|_{L^1_t(L^2)}+\| g_q\|_{L^1_t(L^2)}\right)\\
&&+C\int_0^t\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\sum_{|q'-q|\le4}(2^{q'}\| b_{q'}\|_{L^2}+\|d_{q'}\|_{L^2})dt'.
\end{eqnarray*}
Similarly, for $q\geq1$, \eqref{Su}, \eqref{com2} and \eqref{Huq} imply that
\begin{eqnarray*}
\|d_q(t)\|_{L^2}+2^{2q}\| d_q\|_{L^1_t(L^2)}&\le& C\left(\|d_q(0)\|_{L^2}+2^q\| b_q\|_{L^1_t(L^2)}+\|g_q\|_{L^1_t(L^2)}\right)\\
&&+C\int_0^t\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\sum_{|q'-q|\le4}(2^{q'}\| b_{q'}\|_{L^2}+\|d_{q'}\|_{L^2})dt'.
\end{eqnarray*}
Combining these two inequalities, we find that, if $q\ge1$, there holds
\begin{eqnarray}\label{qge1}
\nn&&2^q\| b_q(t)\|_{L^2}+\|d_q(t)\|_{L^2}+2^q\| b_q\|_{L^1_t(L^2)}+2^{2q}\| d_q\|_{L^1_t(L^2)}\\
\nn&\le&C\left(2^q\| b_q(0)\|_{L^2}+\|d_q(0)\|_{L^2}+2^q\| f_q\|_{L^1_t(L^2)}+\| g_q\|_{L^1_t(L^2)}\right)\\
&&+C\int_0^t\left(m(u)+\|\nb u_H\|_{L^\infty}\right)\sum_{|q'-q|\le4}(2^{q'}\| b_{q'}\|_{L^2}+\|d_{q'}\|_{L^2})dt'.
\end{eqnarray}
Multiplying \eqref{qge1} by $2^{q(\frac{N}{2}-1)}$, and taking sum with respect to $q$ over $\{1, 2, 3, \cdots\}$, we arrive at
\begin{eqnarray}\label{e1-d4H}
X_{H}(T)
\nn&\le& C\left(X^0_{H}+\|P_{\geq1}f\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}
+\|P_{\geq1}g\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}\right)\\
&&+C\int^T_0\sum_{q\geq1}2^{q(\frac{N}{2}-1)} (m(u)+ \|\nb u_H\|_{L^\infty})\sum_{|q'-q|\leq4}( 2^{q'} \|\dot{\Delta}_{q'} b\|_{L^2}+\|\dot{\Delta}_{q'} d\|_{L^2})ds.
\end{eqnarray}
Now let us bound the right hand side of \eqref{e1-d4H}. In fact, we infer from Lemmas \ref{lem4.2}, and \ref{lem4.3}--\ref{lem4.6} that
\begin{equation}
\|P_{\geq1}f\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}
+\|P_{\geq1}g\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}\leq C X^2(T).
\end{equation}
The estimates of the last term in (\ref{e1-d4H}) are a little bit trickier. First of all, using
Young's inequality, H\"{o}lder's inequality, and \eqref{b2} yields
\begin{eqnarray*}
&&\int^T_0\sum_{q\geq1}2^{q(\frac{N}{2}-1)} \|\nb u_H\|_{L^\infty}
\sum_{|q'-q|\leq4}(2^{q'} \|\dot{\Delta}_{q'} b\|_{L^2} + \|\dot{\Delta}_{q'} d\|_{L^2})ds\nn\\
&\le&\int^T_0\sum_{q\geq1}2^{q(\frac{N}{2}-1)} \|\nb u_H\|_{L^\infty}
\sum_{|q'-q|\leq4}(2^{q'} \|\dot{\Delta}_{q'} b\|_{L^2} + \|\dot{\Delta}_{q'} d_H\|_{L^2})ds\nn\\
&&+\int^T_0\sum_{q\geq1}2^{q(\frac{N}{2}-1+\al)} \|\nb u_H\|_{L^\infty}
\sum_{|q'-q|\leq4} \|\dot{\Delta}_{q'} d_L\|_{L^2}ds\nn\\
&\leq&C\| \nb u_H\|_{{L}^1_T(L^\infty)}
\left( \| b\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}+\| d_L\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}+\| d_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}\right)\\
&\le&CX^2(T).
\end{eqnarray*}
Moreover, using \eqref{5.3}, the following low frequency embedding
\begin{equation*}
\| b_L\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}+2-2\al}_{2,1})}\le C\| b_L\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}+1-\al}_{2,1})},
\end{equation*}
and the interpolation \eqref{5.4'}, we find that
\begin{eqnarray*}
&&\int^T_0\sum_{q\geq1}2^{q(\frac{N}{2}+1-2\al)} \|\nb u_L\|_{\dot{B}^{2\al-2}_{\infty,\infty}} \sum_{|q'-q|\leq4}\left(2^{q'}\|\dot{\Delta}_{q'} b_L\|_{L^2}+\|\dot{\Delta}_{q'} d_L\|_{L^2}\right)ds\\
&\le&C\|u_L\|_{\widetilde{L}^\frac{1}{\al}_T(\dot{B}^{2\al-1}_{\infty,1})}\| b_L\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}+2-2\al}_{2,1})}+C\int^T_0\|\nb u_L\|_{\dot{B}^{2\al-2}_{\infty,\infty}}\sum_{q\geq1}2^{q(\frac{N}{2}+1-\al)} \sum_{|q'-q|\leq4}\|\dot{\Delta}_{q'} d_L\|_{L^2}ds\\
&\le&C \|u_L\|_{\widetilde{L}^\frac{1}{\al}_T(\dot{B}^{2\al-1}_{\infty,1})}\left(\| b_L\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}+1-\al}_{2,1})}+\| d_L\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}+1-\al}_{2,1})}\right)\\
&\le&CX^2(T).
\end{eqnarray*}
Similar to \eqref{4.78}, we have
\begin{eqnarray*}
&&\int^T_0\sum_{q\geq1}2^{q(\frac{N}{2}-1)} \|\nb u_L\|_{L^\infty}
\sum_{|q'-q|\leq4}2^{q'}\|\dot{\Delta}_{q'} b_H\|_{L^2}ds\nn\\
&\leq&C\|\nb u_L\|_{\widetilde{L}^\frac{2}{2-\al}_T(L^\infty)}
\|b_H\|_{\widetilde{L}^{\frac{2}{\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
&\leq&C\left(\|\Pe u_L\|_{\widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\fr{N}{p}+1-\al}_{p,1})}+\| \Pe^\bot u_L\|_{\widetilde{L}^{\frac{2}{2-\al}}_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}\right)X(T)\\
&\le&CX^2(T).
\end{eqnarray*}
Finally, using \eqref{5.5} and \eqref{5.4'} again, we arrive at
\begin{eqnarray*}
&&\int^T_0\sum_{q\geq1}2^{q(\frac{N}{2}+1-2\al)} \|\nb u_L\|_{\dot{B}^{2\al-2}_{\infty,\infty}} \sum_{|q'-q|\leq4}\|\dot{\Delta}_{q'} d_H\|_{L^2}ds\\
&\le&C\|u_L\|_{\widetilde{L}^\frac{1}{\al}_T(\dot{B}^{2\al-1}_{\infty,1})}\| d_H\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}+1-2\al}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray*}
Combining these estimates with (\ref{e1-d4H}), we obtain
\eqref{XQH}. This completes the proof of Lemma \ref{lem4.11}.
\end{proof}
\noindent (iii) \underline{ Estimates of $W(T)$}.\par
In fact, applying Proposition \ref{prop3.4} to \eqref{biu}, and using Lemmas \ref{lem4.7} and \ref{lem4.8}, we easily get the following estimate for $W(T)$.
\begin{lem}\label{lem4.12}
Under the conditions in Lemma \ref{lem4.9}, we have
\begin{equation}\label{W}
W(T)\leq CW^0+CX^2(T).
\end{equation}
\end{lem}
Collecting Lemmas \ref{lem4.9}--\ref{lem4.12}, we conclude that
\begin{prop}\label{prop-global1}
Let $p$ and $\al$ satisfy \eqref{p1} and \eqref{al1}, respectively. Assume that $(b,u)$ is a solution to system \eqref{bcu}--\eqref{biu} in $\mathcal{E}^{\frac{N}{2},\al}_p(T)$ with
\begin{equation*}
\|b\|_{L^\infty_T(L^\infty)}\le\fr12.
\end{equation*}
Then we have
\begin{equation}\label{e-global}
X(T)\le CX^0+CX^2(T).
\end{equation}
\end{prop}
\section{Proof of Theorem \ref{thm-p>2}}
\noindent The aim of this Section is to give the proof of Theorem \ref{thm-p>2}.
\subsection{The global existence.}
First of all, we construct the approximate solutions to the system \eqref{bcu}--\eqref{biu} with smooth initial data. For the sake of
simplicity, we just outline it here (for the details, see
e.g. \cite{Bahouri-Chemin-Danchin11} and \cite{Danchin07}). To begin with, let us recall the following local existence theorem.
\begin{thm}[\cite{Danchin07}]\label{local}
Let $N\ge2$. Assume that $\rho_0-1\in\dot{B}^{\fr{N}{2}}_{2,1}$ and $u_0\in\dot{B}^{\fr{N}{2}-1}_{2,1}$ with $\rho_0$ bounded away from 0. There exists a positive time $T$
such that system \eqref{CNS} has a unique solution $(\rho, u)$ with $\rho$ bounded away from 0,
\begin{equation*}
\rho-1\in\widetilde{C}_T(\dot{B}^{\fr{N}{2}}_{2,1}), \ \ {and} \ \ u\in\widetilde{C}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1}).
\end{equation*}
Moreover, the solution $(\rho, u)$ can be continued beyond $T$ if the following three conditions hold:\\
{\em (i)} The function $\rho-1$ belongs to $L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})$,\\
{\em (ii)} the function $\rho$ is bounded away from 0,\\
{\em (iii)} $\int_0^T\|\nb u(\tau)\|_{L^\infty}d\tau<\infty$.
\end{thm}
\begin{rem}\label{rem5.1}
In addition, we claim that if $\rho_0-1\in\dot{B}^{\fr{N}{2}-1+\al}_{2,1}$, then $\rho-1\in\widetilde{C}_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})$. In fact, using Proposition \ref{prop3.3}, and Corollary \ref{coro-product}, we have
\begin{eqnarray*}
\|b\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}&\le& \exp\left\{C\|\nb u\|_{L^1_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right\}\left(\|b_0\|_{\dot{B}^{\fr{N}{2}-1+\al}_{2,1}}+\int_0^T\|(1+b)\mathrm{div} u\|_{\dot{B}^{\fr{N}{2}-1+\al}_{2,1}}\right)\\
&\le&C\left(\|b_0\|_{\dot{B}^{\fr{N}{2}-1+\al}_{2,1}}+\left(1+\|b\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\|\mathrm{div} u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\right)\\
&\le&C\left(\|b_0\|_{\dot{B}^{\fr{N}{2}-1+\al}_{2,1}}+\|u\|^{\fr{1-\al}{2}}_{L^\infty_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\|u\|^{\fr{1+\al}{2}}_{L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}T^{\fr{1-\al}{2}}\right)\\
&\le&C.
\end{eqnarray*}
\end{rem}
For initial data $(\rho_0, u_0)$ with $(\rho_0-1, u_0)=:(b_0, u_0)\in\mathcal{E}_0$, by embedding, it is easy to see that
\begin{equation}\label{eta1}
\|b_0\|_{L^\infty}\le C\|b_0\|_{\dot{B}^{\fr{N}{2}}_{2,1}}\le C\|(b_0, u_0)\|_{\mathcal{E}_0}.
\end{equation}
Before proceeding any further, let us denote by $\tl{C}$ the maximum of constants 1 and $C$ appearing in Proposition \ref{prop-global1} and \eqref{eta1}, and choose $(\rho_0, u_0)$ with $(b_0, u_0)$ so small that
\begin{equation}\label{small1}
\|(b_0, u_0)\|_{\mathcal{E}_0}\le\fr{1}{8\tl{C}^2}.
\end{equation}
It follows from \eqref{eta1} and \eqref{small1} that
\begin{equation}
\|b_0\|_{L^\infty}\le \fr18.
\end{equation}
Thanks to Proposition 2.27 in \cite{Bahouri-Chemin-Danchin11}, we can find a sequence of functions $\{(b_{0}^n, u_{0}^n)\}\subset\mathcal{S}\times\mathcal{S}$ satisfying
\begin{equation}\label{appro1}
\|(b^n_0-b_0, u^n_0-u_0)\|_{\mathcal{E}_0}\rightarrow 0, \quad\mathrm{as}\quad n\rightarrow\infty,
\end{equation}
and
\begin{equation}\label{bn}
\|b_0^n\|_{L^\infty}\le\fr14, \quad\mathrm{for\ \ all}\quad n\in\N.
\end{equation}
Then using Theorem \ref{local} and Remark \ref{rem5.1} above, one could obtain a unique local solution $(b^n,u^n)$ to the system \eqref{bcu}--\eqref{biu} with smooth initial data $(b_0^n,u_0^n)$ on the maximal lifespan $[0,T^n_*)$, satisfying
$$
(b^n,u^n)\in\left( \widetilde{C}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1}\cap\dot{B}^{\frac{N}{2}}_{2,1})\times \left( \widetilde{C}_T(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap
L^1_T(\dot{B}^{\frac{N}{2}+1}_{2,1})\right)\right)\subset\mathcal{E}^{\fr{N}{2},\al}_p(T),\ \forall\ T\in(0,T^n_*).
$$
Now define $T_1^n$ to be the supremum of all time $T'\in[0,T^n_*)$ such that
\begin{equation}\label{III1}
X^n(T')\le{4\tl{C}}\|(b_0, u_0)\|_{\mathcal{E}_0},
\end{equation}
where
\begin{eqnarray*}
X^n(T)&=&\|(b^n_L,\Pe^\bot u^n_L)\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})}+\|(b^n_L,\Pe^\bot u^n_L)\|_{\widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})} \\
&&+\|b^n_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}
+\|\Pe^\bot u^n_H\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1}_{2,1})}
+\|\Pe u^n\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{p}-1}_{p,1})\cap L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}.
\end{eqnarray*}
Combining \eqref{III1} with \eqref{eta1}--\eqref{small1}, one easily deduces that
\begin{equation}\label{bninfty}
\|b^n\|_{L^\infty_{T_1^n}(L^\infty)}\le\fr12.
\end{equation}
Then from Proposition \ref{prop-global1} and \eqref{appro1}, we find that
\begin{eqnarray}
\nn X^n(T^n_1)&\leq& \tl{C}\|(b_0^n, u_0^n)\|_{\mathcal{E}_0}+16\tl{C}^3\|(b_0, u_0)\|_{\mathcal{E}_0}^2\\
\nn&\le&2\tl{C}\|(b_0, u_0)\|_{\mathcal{E}_0}\left(1+8\tl{C}^2\|(b_0, u_0)\|_{\mathcal{E}_0}\right)\\
&\le&3\tl{C}\|(b_0, u_0)\|_{\mathcal{E}_0},
\end{eqnarray}
provided the initial data $(\rho_0, u_0)$ satisfy
\begin{equation}\label{III2}
\|(b_0, u_0)\|_{\mathcal{E}_0} \le\fr{1}{16\tl{C}^2}.
\end{equation}
Thus $T_1^n=T^n_*$, and \eqref{III1} holds true on the interval $[0,T^n_*)$ provided $\|(b_0, u_0)\|_{\mathcal{E}_0} \le c_0$ with $c_0:=\fr{1}{16\tl{C}^2}$.
Consequently, \eqref{bninfty} holds with $T_1^n$ replaced by $T^n_*$, and
$$
\|b^n\|_{\widetilde{L}^\infty_{T^n_*}(\dot{B}^{\frac{N}{2}}_{2,1})}+\int^{T_*^n}_0\|\nabla \Pe u^n\|_{L^\infty}+\|\nabla \Pe^\bot u^n_H\|_{L^\infty}dt
+\left(\int^{T_*^n}_0 \|\nabla \Pe^\bot u^n_L\|_{L^\infty}^{\frac{2}{2-\alpha}}dt\right)^{\frac{2-\alpha}{2}}\leq C.
$$
Therefore, using Theorem \ref{local} again, we conclude that $T^n_*=+\infty$ for all $n\in\N$. Moreover, for all $n\in\mathbb{N}$, there holds
$$
X^n(T)\le {C}_0\|(b_0, u_0)\|_{\mathcal{E}_0}, \ \forall\ T>0,
$$
with $C_0:=4\tl{C}$. Then, using the compactness arguments similar as that in Chapter 10 of \cite{Bahouri-Chemin-Danchin11}, we obtain that $\{(b^n,u^n)\}_{n\in\mathbb{N}}$
weakly converges (up to a subsequence) to some global solution $(b,u)$ to the system \eqref{bcu}--\eqref{biu} with the initial data $(b_0,u_0)$ satisfying \eqref{initial1}. Thus, we prove the global existence part of Theorem \ref{thm-p>2}.
\subsection{The uniqueness when $N\geq3$.}
Next, we will prove the uniqueness part of Theorem \ref{thm-p>2} when $N\geq3$.
Assume there exist two solutions $(b^1,u^1)$ and $(b^2,u^2)$ for the system \eqref{bcu}--\eqref{biu} with the same initial data $(b_0,u_0)$, satisfying the regularity conditions in Theorem \ref{thm-p>2}. In order to show that these two solutions coincide, we shall give some estimates for $(\delta b,\delta u)=(b^2-b^1,u^2-u^1)$. It is easy to verify that $(\delta b,\delta u)$ satisfies the following system
\begin{eqnarray}\label{bcu-uniqu}
\begin{cases}
\pr_t \delta b+u^2\cdot\nabla \delta b=-{\mathrm{div} \Pe^\bot \delta u} -\delta u\cdot\nabla b^1-\delta b\mathrm{div}u^2-b^1\mathrm{div}\delta u,\\
\pr_t \Pe^\bot \delta u- \Delta\Pe^\bot \delta u+{\nb \delta b}\\
\ \ \ \ \ \ \ \ =-\Pe^\bot\left(u^2\cdot\nb u^2+K( b^2){\nb b^2}+I( b^2)\mathcal{A}u^2
-u^1\cdot\nb u^1-K( b^1){\nb b^1}-I( b^1)\mathcal{A}u^1\right),\\
\pr_t \Pe \delta u-\mu \Delta\Pe \delta u=-\Pe\left(u^2\cdot\nb u^2+I( b^2)\mathcal{A}u^2-u^1\cdot\nb u^1-I( b^1)\mathcal{A}u^1\right),\\
(\delta b, \delta u )|_{t=0}=(0, 0).
\end{cases}
\end{eqnarray}
Following the proof of Proposition \ref{prop3.3}, using Corollary \ref{coro-product} and Lemma 2.100 in \cite{Bahouri-Chemin-Danchin11}, we have
\begin{eqnarray}
&&\|\delta b(t)\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\nn\\
&\leq& \int^t_0\left(
\|-{\mathrm{div} \Pe^\bot \delta u}\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}+\|\delta u\cdot\nabla b^1\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}
+\|\delta b\mathrm{div}u^2\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}+\|b^1\mathrm{div}\Pe^\bot\delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\right.\nonumber\\
&&\left.+\fr12\|\mathrm{div} u^2\|_{L^\infty}\| \delta b\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}+\sum_{q\in \mathbb{Z}}2^{q(\frac{N}{2}-1)}\|[\dot{\Delta}_q,u^2]\cdot\nabla\delta b\|_{L^2}
\right)ds\nonumber\\
&\leq &C \int^t_0
\left(\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}}_{2,1}}+\|\Pe\delta u\|_{\dot{B}^\frac{N}{p}_{p,1}}\| b^1\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|\Pe^\bot\delta u\|_{\dot{B}^\frac{N}{2}_{2,1}}\| b^1\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\| \delta b\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\|u^2\|_{\dot{B}^{\frac{N}{p}+1}_{p,1}}
\right)
ds. \label{5.15-Ng3}
\end{eqnarray}
Applying Proposition \ref{prop3.4} to $\eqref{bcu-uniqu}_2$, we find that
\begin{eqnarray}
&& \|\Pe^\bot\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap \widetilde{L}^1_t(\dot{B}^{\frac{N}{2}}_{2,1})}\nonumber\\
&\leq& C\int^t_0\left(\|{\nb \delta b}\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}
+\|\Pe^\bot(u^2\cdot \nabla \delta u)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\|\Pe^\bot(\delta u\cdot \nabla u^1)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\|K( b^2){\nb b^2}-K( b^1){\nb b^1}\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}
\right.\nonumber\\
&&\left.+\|\Pe^\bot[I( b^2)\mathcal{A}\delta u]\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}
+\|\Pe^\bot[(I( b^2)-I(b^1))\mathcal{A} u^1]\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}
\right)ds.\label{5.3-Ng3}
\end{eqnarray}
Since
\begin{eqnarray*}
&&\|\Lambda^{-1}\mathrm{div}( u^2\cdot \nabla \Pe \delta u)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}\\
&\leq&\|\dot{T}_{ \nabla u^2}\nabla\Pe \delta u\|_{\dot{B}^{\frac{N}{2}-3}_{2,1}}+\| \dot{T}_{ \nabla\Pe \delta u} \nabla u^2\|_{\dot{B}^{\frac{N}{2}-3}_{2,1}}+\|\dot{R}(\Pe^\bot u^2,\nabla\Pe \delta u)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+
\|\mathrm{div}\dot{R}(\Pe u^2,\Pe \delta u)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}\\
&\leq&C\|\nabla u^2\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}\|\nabla\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-2}_{p,1}}
+C\|\Pe^\bot u^2\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\|\nabla\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-2}_{p,1}}
+C\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\\
&\leq&C
\left( \|\Pe^\bot u^2\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right)\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}
+ C\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}},
\end{eqnarray*}
and
\begin{eqnarray*}
\| u^2\cdot \nabla \Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}&\leq&\|\dot{T}_{ u^2}\cdot\nabla\Pe^\bot \delta u)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\| \dot{T}_{ \nabla\Pe^\bot \delta u} u^2\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\|\dot{R}(u^2,\nabla\Pe^\bot \delta u)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}\\
&\leq&C\| u^2\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\|\nabla\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}
+C\| u^2\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\\
&\leq&C\left( \|\Pe^\bot u^2\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right)\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}},
\end{eqnarray*}
we get
\begin{eqnarray}
&&\| \Pe^\bot(u^2\cdot \nabla \delta u)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}} \nonumber\\
&\leq&C\left( \|\Pe^\bot u^2\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right)
\left(\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}+\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\right)
+ C\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}}.
\end{eqnarray}
Similarly, we have
\begin{eqnarray*}
\| \Pe\delta u\cdot \nabla u^1\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}&\leq&\|\dot{T}_{ \Pe \delta u }\cdot\nabla u^1\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\| \dot{T}_{ \nabla u^1} \Pe \delta u\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\|\mathrm{div}\dot{R}(\Pe \delta u , u^1)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}} \\
&\leq&C\| u^1\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}},
\end{eqnarray*}
\begin{eqnarray*}
\| \Pe^\bot\delta u\cdot \nabla u^1\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}&\leq&\|\dot{T}_{ \Pe^\bot \delta u }\cdot\nabla u^1\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\| \dot{T}_{ \nabla u^1} \Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}+\| \dot{R}(\Pe^\bot \delta u , \nabla u^1)\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}} \\
&\leq&C\| \nabla u^1\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}.
\end{eqnarray*}
Thus,
\begin{eqnarray}
\|\delta u\cdot \nabla u^1\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}
\leq C\| u^1\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\left(\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}+
\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\right).
\end{eqnarray}
Using Corollary \ref{coro-product} and Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11}, we obtain
\begin{eqnarray}
&& \|K( b^2){\nb b^2}-K( b^1){\nb b^1}\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}=\|\nabla[\widetilde{K}(b^2)-\widetilde{K}(b^1)]\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}\nonumber\\
&\leq &C\| \widetilde{K}(b^2)-\widetilde{K}(b^1) \|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}=C\left\|\int^1_0 K (b^1+\tau(b^2-b^1))d\tau \delta b\right\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}
\nonumber\\
&\leq&C\left\|\int^1_0 K (b^1+\tau(b^2-b^1))d\tau\right\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\|\delta b \|_{\dot{B}^{\frac{N}{2}-1}_{2,1}} \nonumber\\
&\leq& C \| (b^1,b^2 ) \|_{\dot{B}^{\frac{N}{2}}_{2,1}}\|\delta b \|_{\dot{B}^{\frac{N}{2}-1}_{2,1}},
\end{eqnarray}
where $\widetilde{K}(b)=\int^b_0 K(s)ds$. Noting that $\Pe^\bot\Pe=0$, in view of Theorem 2.99 in \cite{Bahouri-Chemin-Danchin11} and Corollary \ref{coro-product}, we are led to
\begin{eqnarray}
&&\|\Pe^\bot[I( b^2)\mathcal{A}\delta u]\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}\nonumber\\
&\leq&\|[\Pe^\bot, I( b^2)]\mathcal{A}\Pe\delta u\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}
+\|I( b^2)\mathcal{A}\Pe^\bot\delta u\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}\nonumber\\
&\leq&C\|\nabla I(b^2)\|_{\dot{B}^{\frac{N}{p*}-1}_{p*,1}}\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}}+C\| I(b^2)\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}}
+C\| I(b^2)\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\|\Pe^\bot\delta u\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\nonumber\\
&\leq& C\| b^2\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\left(\|\Pe^\bot\delta u\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right).
\end{eqnarray}
Moreover, using Corollary \ref{coro-product} with $u= I(b^2)-I(b^1), v=\mathcal{A} u^1, \rho=p_1=q_2=2, p_2=q_1=p$, and $s_1=\sigma_2=\fr{N}{2}-1, s_2=\sigma_1=\fr{N}{p}-1$,
we find that
\begin{eqnarray}
&&\|(I( b^2)-I(b^1))\mathcal{A} u^1\|_{\dot{B}^{\frac{N}{2}-2}_{2,1}}\nonumber\\
&\leq&C\| I(b^2)-I(b^1)\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}
\| \mathcal{A} u^1\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}\nonumber\\
&\leq& C\left\|\int^1_0 I' (b^1+\tau(b^2-b^1))d\tau \delta b\right\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}
\| u^1\|_{\dot{B}^{\frac{N}{p}+1}_{p,1}}
\nonumber\\
&\leq&C\left(1+\left\|\int^1_0 I' (b^1+\tau(b^2-b^1))d\tau-1\right\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\right)\|\delta b \|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}
\| u^1\|_{\dot{B}^{\frac{N}{p}+1}_{p,1}} \nonumber\\
&\leq& C \left(1+\| (b^1,b^2 ) \|_{\dot{B}^{\frac{N}{2}}_{2,1}}\right)\|\delta b \|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}
\| u^1\|_{\dot{B}^{\frac{N}{p}+1}_{p,1}}.\label{5.8-Ng3}
\end{eqnarray}
The estimates (\ref{5.3-Ng3})--(\ref{5.8-Ng3}) imply that
\begin{eqnarray}
&& \|\Pe^\bot\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap {L}^1_t(\dot{B}^{\frac{N}{2}}_{2,1})}\nonumber\\
&\leq& C\int^t_0\left\{\|{ \delta b}\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\left(1+\| (b^1,b^2 ) \|_{\dot{B}^{\frac{N}{2}}_{2,1}}\right)\left(1+
\| u^1\|_{\dot{B}^{\frac{N}{p}+1}_{p,1}}\right)\right.\nonumber\\
&&\left.
+\left( \|(\Pe^\bot u^1,\Pe^\bot u^2)\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|(\Pe u^1,\Pe u^2)\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right)
\left(\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}+\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\right)
\right.\nonumber\\
&&\left.+ \left(\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}+\| b^2\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\right)\left(\|\Pe^\bot\delta u\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right)
\right\}ds.\label{5.9-Ng3}
\end{eqnarray}
Next, using similar arguments as in the proof of \eqref{5.9-Ng3}, Corollary \ref{coro-product} and the embedding $\dot{B}^{\fr{N}{2}-2}_{2,1}\hookrightarrow\dot{B}^{\fr{N}{p}-2}_{p,1}$ for $p>2$, we get
\begin{eqnarray}
&& \|\Pe\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{p}-1}_{p,1})\cap {L}^1_t(\dot{B}^\frac{N}{p}_{p,1})}\nonumber\\
&\leq&C \int^t_0\left(\|{\nb \delta b}\|_{\dot{B}^{\frac{N}{p}-2}_{p,1}}
+\|u^2\cdot \nabla \delta u\|_{\dot{B}^{\frac{N}{p}-2}_{p,1}}+\|\delta u\cdot \nabla u^1\|_{\dot{B}^{\frac{N}{p}-2}_{p,1}}
\right.\nonumber\\
&&\left.+\|I( b^2)\mathcal{A}\delta u\|_{\dot{B}^{\frac{N}{p}-2}_{p,1}}
+\|[I( b^2)-I(b^1)]\mathcal{A} u^1\|_{\dot{B}^{\frac{N}{p}-2}_{p,1}}
\right)ds\nonumber\\
&\leq& C\int^t_0\left\{\|{ \delta b}\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\left(1+\| (b^1,b^2 ) \|_{\dot{B}^{\frac{N}{2}}_{2,1}}\right)\left(1+
\| u^1\|_{\dot{B}^{\frac{N}{p}+1}_{p,1}}\right)\right.\nonumber\\
&&\left.
+\left( \|(\Pe^\bot u^1,\Pe^\bot u^2)\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|(\Pe u^1,\Pe u^2)\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right)
\left(\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}+\|\Pe^\bot \delta u\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}\right)
\right.\nonumber\\
&&\left.+ \left(\|\Pe u^2\|_{\dot{B}^{\frac{N}{p}-1}_{p,1}}+\| b^2\|_{\dot{B}^{\frac{N}{2}}_{2,1}}\right)\left(\|\Pe^\bot\delta u\|_{\dot{B}^{\frac{N}{2}}_{2,1}}
+\|\Pe \delta u\|_{\dot{B}^{\frac{N}{p}}_{p,1}}\right)
\right\}ds.\label{5.10-Ng3}
\end{eqnarray}
By virtue of the interpolation inequality and H\"{o}lder's inequality, we obtain
\begin{equation}\label{5.20}
\|\Pe^\bot u^i_L\|_{\widetilde{L}^2_T(\dot{B}^{\frac{N}{2}}_{2,1})}\leq C \|\Pe^\bot u^i_L\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}^{\frac{1+\al}{2}}
\|\Pe^\bot u^i_L\|_{{L}^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})}^{\frac{1-\al}{2}}T^{\frac{\al}{2}}\leq CX(T)T^{\frac{\al}{2}}\leq CX^0T^{\frac{\al}{2}},
\end{equation}
and
\begin{equation}\label{5.21}
\|\Pe^\bot u^i_L\|_{{L}^1_T(\dot{B}^{\frac{N}{2}+1}_{2,1})}\leq C \|\Pe^\bot u^i_L\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}^{\frac{\al}{2}}
\|\Pe^\bot u^i_L\|_{{L}^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})}^{1-\frac{\al}{2}}T^{\frac{\al}{2}}\leq CX(T)T^{\frac{\al}{2}}\leq CX^0T^{\frac{\al}{2}},
\end{equation}
for $i=1, 2$. Combining the above estimates, (\ref{uniform1}), (\ref{5.15-Ng3}) and (\ref{5.9-Ng3})--(\ref{5.10-Ng3}), choosing $T=1$, we have for all $t\in[0,1]$
$$
\|\delta b\|_{L^\infty_t(\dot{B}^{\frac{N}{2}-1}_{2,1})}\leq C(1+X^0)\left(\|\Pe^\bot \delta u\|_{L^1_t(\dot{B}^\frac{N}{2}_{2,1})}+\|\Pe \delta u\|_{L^1_t(\dot{B}^\frac{N}{p}_{p,1})}\right)
+CX^0\|\delta b\|_{L^\infty_t(\dot{B}^{\frac{N}{2}-1}_{2,1})},
$$
and
\begin{eqnarray*}
&& \|\Pe^\bot\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap {L}^1_t(\dot{B}^{\frac{N}{2}}_{2,1})}+ \|\Pe\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{p}-1}_{p,1})\cap {L}^1_t(\dot{B}^\frac{N}{p}_{p,1})}\nonumber\\
&\leq& C(1+X^0)\int^t_0\|{ \delta b}\|_{\dot{B}^{\frac{N}{2}-1}_{2,1}}ds+C(1+X^0)X_0\|{ \delta b}\|_{L^\infty_t(\dot{B}^{\frac{N}{2}-1}_{2,1})}\nonumber\\
&&+CX^0\left(\|\Pe^\bot\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap {L}^1_t(\dot{B}^{\frac{N}{2}}_{2,1})}+ \|\Pe\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{p}-1}_{p,1})\cap {L}^1_t(\dot{B}^\frac{N}{p}_{p,1})}\right).
\end{eqnarray*}
When $CX^0\leq\frac{1}{2}$, the above inequalities reduce to
\begin{eqnarray}\label{dlb}
\|\delta b\|_{L^\infty_t(\dot{B}^{\frac{N}{2}-1}_{2,1})}\leq 2C(1+X^0)\left(\|\Pe^\bot \delta u\|_{L^1_t(\dot{B}^\frac{N}{2}_{2,1})}+\|\Pe \delta u\|_{L^1_t(\dot{B}^\frac{N}{p}_{p,1})}\right),
\end{eqnarray}
and
\begin{eqnarray}\label{dlu}
&& \|\Pe^\bot\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap {L}^1_t(\dot{B}^{\frac{N}{2}}_{2,1})}+ \|\Pe\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{p}-1}_{p,1})\cap {L}^1_t(\dot{B}^\frac{N}{p}_{p,1})}\nonumber\\
&\leq& 2C(1+X^0)(t+X_0)\|{ \delta b}\|_{L^\infty_t(\dot{B}^{\frac{N}{2}-1}_{2,1})}.
\end{eqnarray}
Substituting \eqref{dlb} into \eqref{dlu}, we are led to
\begin{eqnarray}\label{dlu1}
&& \|\Pe^\bot\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{2}-1}_{2,1})\cap {L}^1_t(\dot{B}^{\frac{N}{2}}_{2,1})}+ \|\Pe\delta u\|_{\widetilde{L}^2_t(\dot{B}^{\frac{N}{p}-1}_{p,1})\cap {L}^1_t(\dot{B}^\frac{N}{p}_{p,1})}\nonumber\\
\nn&\leq& 4C^2(1+X^0)^2(t+X_0)\left(\|\Pe^\bot \delta u\|_{L^1_t(\dot{B}^\frac{N}{2}_{2,1})}+\|\Pe \delta u\|_{L^1_t(\dot{B}^\frac{N}{p}_{p,1})}\right)\\
&\leq& (8C^2+2)(t+X_0)\left(\|\Pe^\bot \delta u\|_{L^1_t(\dot{B}^\frac{N}{2}_{2,1})}+\|\Pe \delta u\|_{L^1_t(\dot{B}^\frac{N}{p}_{p,1})}\right).
\end{eqnarray}
Accordingly, we conclude that if $X^0\le\fr{1}{4(8C^2+2)}$, then $\delta b=\delta u=0$ for all $t\in[0,T_0]$ with $T_0:=\fr{1}{4(8C^2+2)}$. Repeating this argument on $[T_0, 2T_0], [2T_0, 3T_0], \cdots$, we can easily prove that $(b^1,u^1)=(b^2,u^2)$ for all $t\geq0$.
The proof of Theorem \ref{thm-p>2} when $N\geq3$ is completed.
\subsection{The uniqueness when $N=2$.}
Finally, we prove the uniqueness part of Theorem \ref{thm-p>2} when $N=2$.
To this end, we give the following lemma with additional assumption on the initial data.
\begin{lem}\label{lem-add}
Under the assumptions in Theorem \ref{thm-p>2} and $N=2$, in addition, if $\Pe u_0\in \dot{B}^{0}_{2,1}$, then for all $T>0$, we have
\begin{equation}
\Pe u\in \widetilde{L}^\infty_T(\dot{B}^0_{2,1})\cap L^1_T(\dot{B}^2_{2,1}),\label{5.1-p}
\end{equation}
with
\begin{equation}\label{add}
\| \Pe u\|_{\widetilde{L}^\infty_T(\dot{B}^0_{2,1})\cap L^1_T(\dot{B}^2_{2,1})}\leq C \| \Pe u_0\|_{ \dot{B}^0_{2,1} }+
C\left(X^0\right)^2(1+T^\frac{\al}{2}).
\end{equation}
\end{lem}
\begin{proof}
According to Proposition \ref{prop3.4}, we get
\begin{equation}\label{5.3-p}
\| \Pe u\|_{\widetilde{L}^\infty_T(\dot{B}^0_{2,1})\cap L^1_T(\dot{B}^2_{2,1})}\leq C \| \Pe u_0\|_{ \dot{B}^0_{2,1} }+
C \|\Pe\left(u\cdot\nb u+I( b)\mathcal{A}u\right)\|_{L^1_T(\dot{B}^0_{2,1})}.
\end{equation}
From (\ref{4.63}), we need to bound $\|\Pe\left(u\cdot\nb u\right)\|_{L^1_T(\dot{B}^0_{2,1})}$ by the following three terms,
\begin{equation}\label{5.4-p}
\|\Pe\left(u\cdot\nb u\right)\|_{L^1_T(\dot{B}^0_{2,1})}
\leq \|\Pe\left(\Pe u\cdot\nb \Pe u\right)\|_{L^1_T(\dot{B}^0_{2,1})}
+\|\Pe\left(\Pe u\cdot\nb \Pe^\bot u\right)\|_{L^1_T(\dot{B}^0_{2,1})}
+ \|\Pe\left(\Pe^\bot u\cdot\nb\Pe u\right)\|_{L^1_T(\dot{B}^0_{2,1})}.
\end{equation}
Using the estimate (\ref{uniform1}) and Propositions \ref{prop-classical}--\ref{p-TR}, the above three terms can be estimated as follows,
\begin{eqnarray}
&&\|\Pe\left(\Pe u\cdot\nb \Pe u\right)\|_{L^1_T(\dot{B}^0_{2,1})}=\|\Pe\mathrm{div}\left(\Pe u\otimes\Pe u\right)\|_{L^1_T(\dot{B}^0_{2,1})}\nonumber\\
&\leq&C\| \Pe u\otimes\Pe u \|_{L^1_T(\dot{B}^1_{2,1})} \leq C\| \Pe u\otimes\Pe u \|_{L^1_T(\dot{B}^\frac{4}{p}_{\frac{p}{2},1})}\nonumber\\
&\leq& C\|\Pe u\|_{\widetilde{L}^\infty_T(\dot{B}^{\frac{2}{p}-1}_{p,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\frac{2}{p}+1}_{p,1})}
\leq CX^2(T)\leq C\left(X^0\right)^2,
\end{eqnarray}
\begin{eqnarray}
&&\|\Pe\left(\Pe u\cdot\nb \Pe^\bot u\right)\|_{L^1_T(\dot{B}^0_{2,1})} \nonumber\\
&\leq& C\| \dot{T}_{\Pe u}\nb\Pe^\bot u \|_{L^1_T(\dot{B}^0_{2,1})}+C\|\dot{T}_{\nb\Pe^\bot u}\Pe u \|_{L^1_T(\dot{B}^0_{2,1})}
+C\|\dot{R}({\Pe u},\nb\Pe^\bot u) \|_{L^1_T(\dot{B}^{\frac{2}{p}}_{\frac{2p}{p+2},1})}\nonumber\\
\nn &\leq& C\|\Pe u\|_{\widetilde{L}^2_T(\dot{B}^{0}_{\infty,1})}\|\nb\Pe^\bot u\|_{\widetilde{L}^2_T(\dot{B}^{0}_{2,1})}
+ C\|\nb\Pe^\bot u\|_{\widetilde{L}^2_T(\dot{B}^{-\frac{2}{p}}_{\frac{2p}{p-2},1})}\|\Pe u\|_{\widetilde{L}^2_T(\dot{B}^{\frac{2}{p}}_{p,1})}\\
&&+C\|\nb\Pe^\bot u\|_{\widetilde{L}^2_T(\dot{B}^{ 0}_{2,1})}\|\Pe u\|_{\widetilde{L}^2_T(\dot{B}^{\frac{2}{p}}_{p,1})}\nonumber\\
&\leq& CX (T)\left(\| \Pe^\bot u_L\|_{\widetilde{L}^2_T(\dot{B}^{1 }_{2,1})}+\| \Pe^\bot u_H\|_{\widetilde{L}^2_T(\dot{B}^{1 }_{2,1})}\right)\nn\\
&\leq& CX^0\left(X^0+\| \Pe^\bot u_L\|_{\widetilde{L}^2_T(\dot{B}^{ 1 }_{2,1})}\right),
\end{eqnarray}
and
\begin{eqnarray}
&&\|\Pe\left(\Pe^\bot u\cdot\nb \Pe u\right)\|_{L^1_T(\dot{B}^0_{2,1})} \nonumber\\
\nn&\leq& C\| \dot{T}_{\Pe^\bot u}\nb\Pe u \|_{L^1_T(\dot{B}^0_{2,1})}
+C\|\dot{T}_{\nb\Pe u}\Pe^\bot u \|_{L^1_T(\dot{B}^0_{2,1})}
+C\|\dot{R}({\Pe^\bot u},\nb\Pe u) \|_{L^1_T(\dot{B}^{\frac{2}{p}}_{\frac{2p}{p+2},1})}\nonumber\\
&\leq& C\|\Pe^\bot u\|_{\widetilde{L}^{p}_T(\dot{B}^{0}_{\fr{2p}{p-2},1})}\|\nb\Pe u\|_{\widetilde{L}^\fr{p}{p-1}_T(\dot{B}^{0}_{p,1})}
+ C\|\nb\Pe u\|_{\widetilde{L}^2_T(\dot{B}^{-1}_{\infty,1})}\|\Pe^\bot u\|_{\widetilde{L}^2_T(\dot{B}^{1}_{2,1})}\nn\\
&&+C\|\Pe^\bot u\|_{\widetilde{L}^2_T(\dot{B}^{ 1}_{2,1})}\|\nb\Pe u\|_{\widetilde{L}^2_T(\dot{B}^{\frac{2}{p}-1}_{p,1})}\nonumber\\
\nn&\leq& CX (T)\left(\| \Pe^\bot u\|_{\widetilde{L}^2_T(\dot{B}^{ 1 }_{2,1})}+\|\Pe^\bot u\|_{\widetilde{L}^{p}_T(\dot{B}^{\frac{2}{p}}_{2,1})}
\right)\\
&\leq& CX^0\left(X^0+\| \Pe^\bot u_L\|_{\widetilde{L}^2_T(\dot{B}^{ 1 }_{2,1})}+\|\Pe^\bot u_L\|_{\widetilde{L}^{p}_T(\dot{B}^{\frac{2}{p}}_{2,1})}
\right).
\end{eqnarray}
Using the interpolation inequality, we obtain
\begin{equation}
\|\Pe^\bot u_L\|_{\widetilde{L}^2_T(\dot{B}^{1}_{2,1})}\leq C \|\Pe^\bot u_L\|_{\widetilde{L}^\infty_T(\dot{B}^{\al}_{2,1})}^{\frac{1+\al}{2}}
\|\Pe^\bot u_L\|_{{L}^1_T(\dot{B}^{2+\al}_{2,1})}^{\frac{1-\al}{2}}T^{\frac{\al}{2}}\leq CX(T)T^{\frac{\al}{2}}\leq CX^0T^{\frac{\al}{2}},
\end{equation}
and
\begin{equation}
\|\Pe^\bot u_L\|_{\widetilde{L}^{p}_T(\dot{B}^{\frac{2}{p}}_{2,1})}\leq C \|\Pe^\bot u_L\|_{\widetilde{L}^\infty_T(\dot{B}^{\al}_{2,1})}^{\frac{2p+\al p-2}{2p}}
\|\Pe^\bot u_L\|_{{L}^1_T(\dot{B}^{2+\al}_{2,1})}^{\frac{2-\al p}{2p}}T^{\frac{\al}{2}}\leq CX(T)T^{\frac{\al}{2}}\leq CX^0T^{\frac{\al}{2}}.\label{5.9}
\end{equation}
The above estimates (\ref{5.4-p})--(\ref{5.9}) imply that
\begin{equation}\label{5.10}
\|\Pe\left(u\cdot\nb u\right)\|_{L^1_T(\dot{B}^0_{2,1})}
\leq C\left(X^0\right)^2 (1+T^\frac{\al}{2}).
\end{equation}
Next, using \eqref{4.27'} and Corollary \ref{coro-product}, one easily deduces that
\begin{eqnarray}
\nn && \|\Pe\left( I( b)\mathcal{A}u\right)\|_{L^1_T(\dot{B}^0_{2,1})}\\
\nn&\le&C\| I( b)\mathcal{A}\Pe^\bot u\|_{L^1_T(\dot{B}^0_{2,1})}+C\| I( b)\mathcal{A}\Pe u\|_{L^1_T(\dot{B}^0_{2,1})}\\
&\leq& CX^2(T)+C\| I( b)\|_{\widetilde{L}^\infty_T(\dot{B}^1_{2,1})}
\|\mathcal{A}\Pe u \|_{L^1_T(\dot{B}^0_{2,1})}\nonumber\\
&\leq&C\left(X^0\right)^2+C\|b\|_{\widetilde{L}^\infty_T(\dot{B}^1_{2,1})}
\|\Pe u \|_{L^1_T(\dot{B}^2_{2,1})}\nn\\
&\leq& CX^0\left(X^0+\| \Pe u \|_{L^1_T(\dot{B}^2_{2,1})}\right). \label{5.11}
\end{eqnarray}
From (\ref{5.3-p}), (\ref{5.10}) and (\ref{5.11}), we have
\begin{equation}
\| \Pe u\|_{\widetilde{L}^\infty_T(\dot{B}^0_{2,1})\cap L^1_T(\dot{B}^2_{2,1})}\leq C \| \Pe u_0\|_{ \dot{B}^0_{2,1} }+
C\left(X^0\right)^2 (1+T^\frac{\al}{2})+CX^0\|\Pe u \|_{L^1_T(\dot{B}^2_{2,1})}.
\end{equation}
Consequently, \eqref{add} holds if $CX^0\leq \frac{1}{2}$. This completes the proof of Lemma \ref{lem-add}.
\end{proof}
Now we turn to prove the uniqueness of solutions for $N=2$ with the additional assumption that $\Pe u_0\in \dot{B}^0_{2,1}$. In fact, for any fixed $T>0$, from \eqref{5.20}--\eqref{5.21}, Lemma \ref{lem-add} and \eqref{uniform1}, we infer that
\begin{eqnarray}\label{u12}
\|u^i\|_{L^1_T(\dot{B}^{2}_{2,1})\cap \widetilde{L}^2_T(\dot{B}^{1}_{2,1})}\nn&\le&\|\Pe^\bot u^i_L\|_{L^1_T(\dot{B}^{2}_{2,1})\cap \widetilde{L}^2_T(\dot{B}^{1}_{2,1})}+\|\Pe^\bot u^i_H\|_{L^1_T(\dot{B}^{2}_{2,1})\cap \widetilde{L}^2_T(\dot{B}^{1}_{2,1})}+\|\Pe u^i\|_{L^1_T(\dot{B}^{2}_{2,1})\cap \widetilde{L}^2_T(\dot{B}^{1}_{2,1})}\\
&\le&\nn C \left(X(T)T^\fr{\al}{2}+X(T)+\| \Pe u_0\|_{ \dot{B}^0_{2,1} }+
\left(X^0\right)^2(1+T^\frac{\al}{2})\right)\\
&\le&C\left(X^0+\| \Pe u_0\|_{ \dot{B}^0_{2,1} }+
X^0\left(1+X^0\right)(1+T^\frac{\al}{2})\right),
\end{eqnarray}
for $i=1, 2$. On this basis, using Propositions \ref{prop3.3}--\ref{prop3.4}, and \ref{prop-pro}, the estimate \eqref{uniform1}, we are led to
\begin{eqnarray}
&& \|\delta b(t)\|_{\dot{B}^0_{2,\infty}}
\leq e^{CV_2(t)}\int^t_0
\|-{\mathrm{div} \delta u} -\delta u\cdot\nabla b^1-\delta b\mathrm{div}u^2-b^1\mathrm{div}\delta u\|_{\dot{B}^0_{2,\infty}}
ds\nonumber\\
&\leq &Ce^{CV_2(t)}\int^t_0
\left(\| \delta u\|_{\dot{B}^1_{2,\infty}}+\|\delta u\|_{\dot{B}^1_{2,1}}\| b^1\|_{\dot{B}^{1}_{2,1}}
+\| \delta b\|_{\dot{B}^{0}_{2,\infty}}\| \mathrm{div}u^2\|_{\dot{B}^1_{2,1}}
\right)
ds\nn\\
&\leq &Ce^{CV_2(t)}\int^t_0
\left(\left(1+X^0\right)\| \delta u\|_{\dot{B}^1_{2,1}} +\| \delta b\|_{\dot{B}^{0}_{2,\infty}}\| u^2\|_{\dot{B}^2_{2,1}}
\right)
ds \label{5.15}
\end{eqnarray}
where $V_2(t)=\int^t_0\|\nabla u^2(\tau)\|_{\dot{B}^{1}_{2,1}}d\tau$,
and
\begin{eqnarray}
\nn&&\|\delta u\|_{\widetilde{L}^2_t(\dot{B}^0_{2,\infty})\cap \widetilde{L}^1_t(\dot{B}^1_{2,\infty})}\\
&\leq& \int^t_0\left(\|{\nb \delta b}\|_{\dot{B}^{-1}_{2,\infty}}
+\|u^2\cdot \nabla \delta u\|_{\dot{B}^{-1}_{2,\infty}}+\|\delta u\cdot \nabla u^1\|_{\dot{B}^{-1}_{2,\infty}}+\|K( b^2){\nb b^2}-K( b^1){\nb b^1}\|_{\dot{B}^{-1}_{2,\infty}}
\right.\nonumber\\
&&\left.+\|I( b^2)\mathcal{A}\delta u\|_{\dot{B}^{-1}_{2,\infty}}
+\|[I( b^2)-I(b^1)]\mathcal{A} u^1\|_{\dot{B}^{-1}_{2,\infty}}
\right)ds\nonumber\\
\nn&\leq&C\|(u^1,u^2)\|_{\widetilde{L}^2_t(\dot{B}^1_{2,1})}\|\delta u\|_{\widetilde{L}^2(\dot{B}^0_{2,\infty})}+C\|b^2\|_{\widetilde{L}^\infty_t(\dot{B}^1_{2,1})} \|\delta u\|_{\widetilde{L}^1(\dot{B}^1_{2,\infty})}\\
\nn&&+C\left(1+\| (b^1,b^2 ) \|_{\widetilde{L}^\infty_t(\dot{B}^{1}_{2,1})}\right)\int^t_0
\left(1+\|u^1\|_{\dot{B}^2_{2,1}}\right)\|\delta b \|_{\dot{B}^0_{2,\infty}}ds\\
\nn&\leq&C\|(u^1,u^2)\|_{\widetilde{L}^2_t(\dot{B}^1_{2,1})}\|\delta u\|_{\widetilde{L}^2(\dot{B}^0_{2,\infty})}+CX^0 \|\delta u\|_{\widetilde{L}^1(\dot{B}^1_{2,\infty})}\\
&&+C\left(1+X^0\right)\int^t_0
\left(1+\|u^1\|_{\dot{B}^2_{2,1}}\right)\|\delta b \|_{\dot{B}^0_{2,\infty}}ds,
\label{5.16}
\end{eqnarray}
where we have used the estimates
\begin{eqnarray}
&& \|K( b^2){\nb b^2}-K( b^1){\nb b^1}\|_{\dot{B}^{-1}_{2,\infty}}\nonumber\\
&\leq &C\left\|\int^1_0 K (b^1+\tau(b^2-b^1))d\tau \delta b\right\|_{\dot{B}^{0}_{2,\infty}}
\nonumber\\
&\leq&C\left\|\int^1_0 K (b^1+\tau(b^2-b^1))d\tau\right\|_{\dot{B}^{1}_{2,1}}\|\delta b \|_{\dot{B}^0_{2,\infty}} \nonumber\\
&\leq&C \| (b^1,b^2 ) \|_{\dot{B}^{1}_{2,1}}\|\delta b \|_{\dot{B}^0_{2,\infty}},
\end{eqnarray}
and
\begin{eqnarray}
&&\| I(b^2)-I(b^1) \|_{\dot{B}^{0}_{2,\infty}}=C\left\|\int^1_0 I' (b^1+\tau(b^2-b^1))d\tau \delta b\right\|_{\dot{B}^{0}_{2,\infty}}
\nonumber\\
&\leq&C\left(1+\left\|\int^1_0 I' (b^1+\tau(b^2-b^1))d\tau-1\right\|_{\dot{B}^{1}_{2,1}}\right)\|\delta b \|_{\dot{B}^0_{2,\infty}} \nonumber\\
&\leq&C\left(1+ \| (b^1,b^2 ) \|_{\dot{B}^{1}_{2,1}}\right)\|\delta b \|_{\dot{B}^0_{2,\infty}}.
\end{eqnarray}
Choosing $X^0$ and $t\le\bar{T}$ so small that the first two terms on the right hand side of \eqref{5.16} can be absorbed by the left hand side, then \eqref{5.16} reduces to
\begin{equation}\label{5.16'}
\|\delta u\|_{\widetilde{L}^2_t(\dot{B}^0_{2,\infty})\cap \widetilde{L}^1_t(\dot{B}^1_{2,\infty})}
\leq C(X^0, \bar{T})\int^t_0
\left(1+\|u^1\|_{\dot{B}^2_{2,1}}\right)\|\delta b \|_{\dot{B}^0_{2,\infty}}ds,
\end{equation}
where $C(X^0,\bar{T})$ denotes the various constants depending on $X^0$ and $\bar{T}$. Thanks to \eqref{u12}, applying Gronwall's lemma to \eqref{5.15}, we find that for all $t\in[0,\bar{T}]$,
\begin{equation}\label{dlb1}
\|\delta b\|_{L^\infty_t(\dot{B}^0_{2,\infty})}
\leq C(X^0,\bar{T})
\| \delta u\|_{L^1_t(\dot{B}^1_{2,1})}.
\end{equation}
From Proposition 2.8 in \cite{Danchin2003}, we have
\begin{equation}\label{log-inter}
\|\delta u\|_{\widetilde{L}^1_s(\dot{B}^1_{2,1})}\leq C\|\delta u\|_{\widetilde{L}^1_s(\dot{B}^1_{2,\infty})}
\ln\left(e+\frac{\|\delta u\|_{\widetilde{L}^1_s(\dot{B}^\al_{2,\infty})}+\|\delta u\|_{\widetilde{L}^1_s(\dot{B}^{2-\al}_{2,\infty})}}{\|\delta u\|_{\widetilde{L}^1_s(\dot{B}^1_{2,\infty})}}
\right).
\end{equation}
Substituting \eqref{dlb1}--\eqref{log-inter} into \eqref{5.16'}, we obtain
$$
\|\delta u\|_{ \widetilde{L}^1_t(\dot{B}^1_{N,\infty})}
\leq
C \int^t_0 \left(1+\|u^1\|_{\dot{B}^2_{2,1}}\right)\|\delta u\|_{ \widetilde{L}^1_s(\dot{B}^1_{N,\infty})}\ln\left(e+V_3(s){\|\delta u\|^{-1}_{\widetilde{L}^1_s(\dot{B}^1_{N,\infty})}}
\right)
ds,
$$
where
$$
V_3(t):=\|\delta u\|_{\widetilde{L}^1_t(\dot{B}^\al_{2,\infty})}+\|\delta u\|_{\widetilde{L}^1_t(\dot{B}^{2-\al}_{2,\infty})}.
$$
For all $t\in[0,\bar{T}]$, by H\"{o}lder's inequality and interpolations, there hold
\begin{eqnarray*}
\|u^i\|_{L^1_t(\dot{B}^\al_{2,1})}&\le&\|\Pe^\bot u^i_L\|_{L^1_t(\dot{B}^\al_{2,1})}+\|\Pe^\bot u^i_H+\Pe u^i\|_{L^1_t(\dot{B}^\al_{2,1})}\\
&\le&C\bar{T}\|\Pe^\bot u^i_L\|_{L^\infty_t(\dot{B}^\al_{2,1})}+C\bar{T}^{1-\fr{\al}{2}}\|\Pe^\bot u^i_H+\Pe u^i\|^{{1-\fr{\al}{2}}}_{L^\infty_t(\dot{B}^0_{2,1})}\|\Pe^\bot u^i_H+\Pe u^i\|^\fr{\al}{2}_{L^1_t(\dot{B}^2_{2,1})}\\
&\le&C(X^0, \bar{T}),
\end{eqnarray*}
and
\begin{eqnarray*}
\|u^i\|_{L^1_t(\dot{B}^{2-\al}_{2,1})}&\le&\|\Pe^\bot u^i_L\|_{L^1_t(\dot{B}^{2-\al}_{2,1})}+\|\Pe^\bot u^i_H+\Pe u^i\|_{L^1_t(\dot{B}^{2-\al}_{2,1})}\\
&\le&C\|\Pe^\bot u^i_L\|_{L^\infty_t(\dot{B}^\al_{2,1})}^\al\|\Pe^\bot u^i_L\|_{L^1_t(\dot{B}^\al_{2,1})}^{1-\al}+C\bar{T}^{\fr{\al}{2}}\|\Pe^\bot u^i_H+\Pe u^i\|^{{\fr{\al}{2}}}_{L^\infty_t(\dot{B}^0_{2,1})}\|\Pe^\bot u^i_H+\Pe u^i\|^\fr{1-\al}{2}_{L^1_t(\dot{B}^2_{2,1})}\\
&\le&C(X^0, \bar{T}).
\end{eqnarray*}
These two inequalities imply that
\begin{equation*}
V_3(t)\le C(X^0, \bar{T}), \quad \mathrm{for\ \ all} \quad t\in[0,\bar{T}].
\end{equation*}
Since
$$
\int^1_0\frac{ds}{s\ln(e+V_3(\bar{T})s^{-1})}=+\infty,
$$
Osgood's lemma implies that
$\delta b=\delta u=0$ on $[0,\bar{T}]$. Standard arguments then yield that $(b^1,u^1)=(b^2,u^2)$ for all $t\geq0$. The proof of Theorem \ref{thm-p>2} is completed.
{
$\square$
}
\section{Proof of Theorem \ref{thm-p=2}}
\noindent To simplify the presentation, for $T>0$, let us denote
\begin{gather*}
Z_L(T):=\|(b_L, \Pe^\bot u_L)\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})},\quad Z_L^0:=\|(b_{0L}, \Pe^\bot u_{0L})\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}},\\
H(T):=\|\Pe u\|_{\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}, \quad H^0:=\|\Pe u\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}},\\
Z(T):=\|(b, u)\|_{\mathcal{E}^{\fr{N}{2}}(T)}=Z_L(T)+X_H(T)+H(T), \quad Z^0:=Z_L^0+X_H^0+H^0.
\end{gather*}
Now we are in a position to prove Theorem \ref{thm-p=2}. On the one hand, from \eqref{1.37} and the embedding
\begin{equation}\label{emb}
\|\Pe u_0\|_{\dot{B}^{\fr{N}{p}-1}_{p,1}}\le C\|\Pe u_0\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}},
\end{equation}
taking $c_1$ be any constant not larger than $\fr{c_0}{C}$, we infer that \eqref{initial3} implies \eqref{initial1}. Consequently, in view of Theorem \ref{thm-p>2}, there is a solution $(\rho, u)$ to the Navier-Stokes equations \eqref{CNS}. Moreover, using \eqref{1.37} and \eqref{emb} again, \eqref{uniform1} reduces to
\begin{equation}\label{X1}
X(T)\le CC_0c_1, \quad \mathrm{for\ \ all}\quad T>0.
\end{equation}
On the other hand, for the same initial data $(\rho_0, u_0)$, owing to Theorem \ref{local}, there exists a unique local solution $(\rho^*, u^*)$ in $\mathcal{E}^{\fr{N}{2}}(T^\ast)$, where $T^\ast$ is the maximal existence time of $(\rho^*, u^*)$. By using the uniqueness of the solution, we conclude that
\begin{equation*}
(\rho, u)\equiv(\rho^*, u^*), \quad \mathrm{for\ \ all}\quad t\in[0,T^*).
\end{equation*}
Next, we go to bound $Z(T)$ for $T<T^*$. Since $X_{H}(T)$ has been estimated in Lemma \ref{lem4.11}, it suffices to dominate $H(T)$ and $Z_L(T)$. To this end, using Proposition \ref{prop3.4}, \eqref{a5.2} and \eqref{a5.6} in the Appendix, we easily have
\begin{equation}\label{H1}
H(T)\le H^0+CX(T)Z(T), \quad \mathrm{for\ \ all}\quad T<T^*.
\end{equation}
To bound $Z_L(T)$, we follow the proof of Lemma \ref{lem4.10} line by line. Indeed, replacing $\fr{N}{2}-1+\al$ by $\fr{N}{2}-1$ in \eqref{e1-d4L}, and using Lemmas \ref{lem-a4.1}--\ref{lem-a4.4} in the Appendix, it is not difficult to verify that
\begin{equation}\label{ZL1}
Z_L(T)\le Z_L^0+CX(T)Z(T), \quad \mathrm{for\ \ all}\quad T<T^*.
\end{equation}
Now from \eqref{XQH}, \eqref{H1}, \eqref{ZL1} and the fact that $X(T)\le CZ(T)$, we are led to
\begin{equation}\label{Z1}
Z(T)\le Z^0+CX(T)Z(T), \quad \mathrm{for\ \ all}\quad T<T^*.
\end{equation}
Combining \eqref{X1} with \eqref{Z1}, and choosing $c_1$ so small that
\begin{equation}
C^2C_0c_1\le\fr12,
\end{equation}
we conclude that
\begin{equation}
Z(T)\le 2Z^0, \quad \mathrm{for\ \ all}\quad T<T^*.
\end{equation}
This implies that the local solution $(\rho, u)$ can be extended to a global one. The proof of Theorem \ref{thm-p=2} is completed. {
$\square$
}
\section{Appendix}
\subsection{Proof of Corollary \ref{coro-product}.}
$ $
From Propositions \ref{prop-classical}--\ref{p-TR}, using the conditions $s_1-\frac{N}{p_1}\leq \min\{0,N(\frac{1}{p_2}-\frac{1}{\rho})\}$, $s=s_1+s_2+N(\frac{1}{\rho}-\frac{1}{p_1}-\frac{1}{p_2})$ and $\frac{1}{\rho}\leq\frac{1}{p_1}+\frac{1}{p_2}$, we have
\begin{eqnarray}
\|\dot{T}_uv\|_{\dot{B}^s_{\rho,1}}&\leq& C\|u\|_{\dot{B}^{s-s_2}_{p_3,1}}\|v\|_{\dot{B}^{s_2}_{p_2,1}}\nonumber\\
&\leq& C\|u\|_{\dot{B}^{s_1}_{p_1,1}}\|v\|_{\dot{B}^{s_2}_{p_2,1}},\ \textrm{ (when }\ p_2\geq \rho),\label{7.1}
\end{eqnarray}
where $\frac{1}{\rho}=\frac{1}{p_2}+\frac{1}{p_3}$, and
\begin{eqnarray}
\|\dot{T}_uv\|_{\dot{B}^s_{\rho,1}}&\leq& C\|u\|_{\dot{B}^{s-s_2+N(\frac{1}{p_2}-\frac{1}{\rho})}_{\infty,1}}\|v\|_{\dot{B}^{s_2-N(\frac{1}{p_2}-\frac{1}{\rho})}_{\rho,1}}\nonumber\\
&\leq& C\|u\|_{\dot{B}^{s_1}_{p_1,1}}\|v\|_{\dot{B}^{s_2}_{p_2,1}},\ \textrm{ (when }\ p_2< \rho).
\end{eqnarray}
Similarly, noting that $\sigma_1-\frac{N}{q_1}\leq \min\{0,N(\frac{1}{q_2}-\frac{1}{\rho})\}$, $s=\sigma_1+\sigma_2+N(\frac{1}{\rho}-\frac{1}{q_1}-\frac{1}{q_2})$ and $\frac{1}{\rho}\leq\frac{1}{q_1}+\frac{1}{q_2}$, we have
\begin{eqnarray}
\|\dot{T}_vu\|_{\dot{B}^s_{\rho,1}}&\leq& C\|v\|_{\dot{B}^{s-\sigma_2}_{q_3,1}}\|u\|_{\dot{B}^{\sigma_2}_{q_2,1}}\nonumber\\
&\leq& C\|v\|_{\dot{B}^{\sigma_1}_{q_1,1}}\|u\|_{\dot{B}^{\sigma_2}_{q_2,1}},\ \textrm{ (when }\ q_2\geq \rho),
\end{eqnarray}
where $\frac{1}{\rho}=\frac{1}{q_2}+\frac{1}{q_3}$, and
\begin{eqnarray}
\|\dot{T}_vu\|_{\dot{B}^s_{\rho,1}}&\leq& C\|v\|_{\dot{B}^{s-\sigma_2+N(\frac{1}{q_2}-\frac{1}{\rho})}_{\infty,1}}\|u\|_{\dot{B}^{\sigma_2-N(\frac{1}{q_2}-\frac{1}{\rho})}_{\rho,1}}\nonumber\\
&\leq& C\|v\|_{\dot{B}^{\sigma_1}_{q_1,1}}\|u\|_{\dot{B}^{\sigma_2}_{q_2,1}},\ \textrm{ (when }\ q_2< \rho).
\end{eqnarray}
Next, from Propositions \ref{prop-classical}--\ref{p-TR}, using the conditions $s=s_1+s_2+N(\frac{1}{\rho}-\frac{1}{p_1}-\frac{1}{p_2})$, $s_1+s_2>N\max \{0,\fr{1}{p_1}+\frac{1}{p_2}-1\}$, and $\frac{1}{\rho}\leq\frac{1}{p_1}+\frac{1}{p_2}$, we have
\begin{eqnarray}
\|\dot{R}(u,v)\|_{\dot{B}^s_{\rho,1}}&\leq& C \|\dot{R}(u,v)\|_{\dot{B}^{s+N(\frac{1}{p_1}+\frac{1}{p_2}-\frac{1}{\rho})}_{\frac{p_1p_2}{p_1+p_2},1}}\nonumber\\
&\leq& C\|u\|_{\dot{B}^{s_1}_{p_1,1}}\|v\|_{\dot{B}^{s_2}_{p_2,1}},\ \textrm{ (when }\ \frac{1}{p_1}+\frac{1}{p_2}\leq1),
\end{eqnarray}
and
\begin{eqnarray}
\|\dot{R}(u,v)\|_{\dot{B}^s_{\rho,1}}&\leq& C \|\dot{R}(u,v)\|_{\dot{B}^{s+N(1-\frac{1}{\rho})}_{1,1}}\nonumber\\
&\leq& C\|u\|_{\dot{B}^{s-s_2+N(1-\frac{1}{\rho})}_{p_4,1}}\|v\|_{\dot{B}^{s_2}_{p_2,1}}\nonumber\\
&\leq& C\|u\|_{\dot{B}^{s_1}_{p_1,1}}\|v\|_{\dot{B}^{s_2}_{p_2,1}},\ \textrm{ (when }\ \frac{1}{p_1}+\frac{1}{p_2}>1),\label{7.6}
\end{eqnarray}
where $1=\frac{1}{p_2}+\frac{1}{p_4}$. Combining \eqref{Bony-decom} with (\ref{7.1})--(\ref{7.6}), we have (\ref{product1}). Then, we can easily obtain (\ref{product1-s}) and finish the proof of Corollary \ref{coro-product}. {
$\square$
}
\subsection{Action of smooth functions}
Here we give a variant of Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11}, which will be used to deal with the nonlinear term stemming from the pressure $P=P(\rho)$.
\begin{lem}\label{lem-Kb}
Let $r\in[1,\infty], s\ge0$ and $T>0$. Assume that $u\in L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})$ with $u_L\in\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}+s}_{2,1})$, $u_H\in\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}}_{2,1})$, and $K$ is a smooth function on $\mathbb{R}$ which vanishes at 0. Then there hold
\begin{equation}\label{App-1}
\|K(u)_L\|_{\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}+s}_{2,1})}\\
\le C(K', \|u\|_{L^\infty_T(L^\infty)})\left(\|u_L\|_{\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}+s}_{2,1})}+\|u_H\|_{\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right),
\end{equation}
and
\begin{equation}\label{App-2}
\|K(u)_H\|_{\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
\le C(K', \|u\|_{L^\infty_T(L^\infty)})\left(\|u_L\|_{\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}+s}_{2,1})}+\|u_H\|_{\widetilde{L}^r_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right).
\end{equation}
\end{lem}
\begin{proof}
In order to obtain \eqref{App-1}, we just need to modify the proof of Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11}. For the convenience of readers, we give some details here. First of all, using Meyer's first linearization method, we rewrite $K(u)$
as
\begin{equation}\label{Kb}
K(u)=\sum_{q'\in\Z}m_{q'}\dot{\Delta}_{q'}u,
\end{equation}
where
\begin{equation*}
m_{q'}=\int_0^1K'(\dot{S}_{q'}u+\tau\dot{\Delta}_{q'}u)d\tau.
\end{equation*}
The series in \eqref{Kb} converges to $K(u)$ in $L^\infty+L^2$, and $K(u)\in\dot{S}'_h$. In view of \eqref{Kb}, we have
\begin{eqnarray}
\nn\|K(u)_L\|_{\widetilde{L}^{r}_T(\dot{B}^{\fr{N}{2}+s}_{2,1})}&\le& C\sum_{q<1}2^{q(\fr{N}{2}+s)}\|\dot{\Delta}_qK(u)\|_{L^{r}_T(L^2)}\\
\nn&\le& C\sum_{q<1}\sum_{q'>q}2^{q(\fr{N}{2}+s)}\|\dot{\Delta}_q(m_{q'}\dot{\Delta}_{q'}u)\|_{L^{r}_T(L^2)}\\
\nn&&+C\sum_{q<1}\sum_{q'\le q}2^{q(\fr{N}{2}+s)}\|\dot{\Delta}_q(m_{q'}\dot{\Delta}_{q'}u)\|_{L^{r}_T(L^2)}\\
&=:&I_1+I_2.
\end{eqnarray}
Using the H\"{o}lder's inequality and the convolution inequality, we have
\begin{eqnarray}
I_1\nn&\le&C\sum_{q<1}\sum_{q'>q}2^{q(\fr{N}{2}+s)}\|m_{q'}\|_{L^\infty_T(L^\infty)}\|\dot{\Delta}_{q'}u\|_{L^{r}_T(L^2)}\\
\nn&\le&C(K',\|u\|_{L^\infty_T(L^\infty)})\sum_{q<1}\sum_{q'>q}2^{q(\fr{N}{2}+s)}\|\dot{\Delta}_{q'}u\|_{L^{r}_T(L^2)}\\
\nn&\le&C(K',\|u\|_{L^\infty_T(L^\infty)})\left(\sum_{q<1}\sum_{q'>q}2^{q(\fr{N}{2}+s)}\|\dot{\Delta}_{q'}u_L\|_{L^{r}_T(L^2)}+\sum_{q<1}\sum_{q'>q}2^{q\fr{N}{2}}\|\dot{\Delta}_{q'}u_H\|_{L^{r}_T(L^2)}\right)\\
&\le&C(K',\|u\|_{L^\infty_T(L^\infty)})\left(\|u_L\|_{\widetilde{L}^{r}_T(\dot{B}^{\fr{N}{2}+s}_{2,1})}+\|u_H\|_{\widetilde{L}^{r}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right),
\end{eqnarray}
and
\begin{eqnarray}
I_2\nn&\le&C\sum_{q<1}\sum_{q'\le q}\left(2^{q(\fr{N}{2}+s)}\|\dot{\Delta}_q(m_{q'}\dot{\Delta}_{q'}u_L)\|_{L^{r}_T(L^2)}+2^{q\fr{N}{2}}\|\dot{\Delta}_q(m_{q'}\dot{\Delta}_{q'}u_H)\|_{L^{r}_T(L^2)}\right)\\
\nn&\le&C\sum_{q<1}\sum_{q'\le q}\left(2^{q(\fr{N}{2}+s-[\fr{N}{2}+s]-1)}\sum_{|\beta|=[\fr{N}{2}+s]+1}\|\pr^\beta\dot{\Delta}_q(m_{q'}\dot{\Delta}_{q'}u_L)\|_{L^{r}_T(L^2)}\right.\\
\nn&&\left.+2^{q(\fr{N}{2}-[\fr{N}{2}]-1)}\sum_{|\beta|=[\fr{N}{2}]+1}\|\pr^\beta\dot{\Delta}_q(m_{q'}\dot{\Delta}_{q'}u_H)\|_{L^{r}_T(L^2)}\right)\\
\nn&\le&C(K',\|u\|_{L^\infty_T(L^\infty)})\sum_{q<1}\sum_{q'\le q}\left(2^{(q-q')(\fr{N}{2}+s-[\fr{N}{2}+s]-1)}\left(2^{q'(\fr{N}{2}+s)}\|\dot{\Delta}_{q'}u_L\|_{L^{r}_T(L^2)}\right)\right.\\
\nn&&\left.+2^{(q-q')(\fr{N}{2}-[\fr{N}{2}]-1)}\left(2^{q'\fr{N}{2}}\|\dot{\Delta}_{q'}u_H\|_{L^{r}_T(L^2)}\right)\right)\\
&\le&C(K',\|u\|_{L^\infty_T(L^\infty)})\left(\|u_L\|_{\widetilde{L}^{r}_T(\dot{B}^{\fr{N}{2}+s}_{2,1})}+\|u_H\|_{\widetilde{L}^{r}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right).
\end{eqnarray}
The proof of \eqref{App-2} can be given in a similar way. This completes the proof of Lemma \ref{lem-Kb}.
\end{proof}
\subsection{Nonlinear estimates}
Here, we give the detailed proof of Lemma \ref{lem4.1} in Section \ref{S4}.
\textbf{Proof of Lemma \ref{lem4.1}.}
Clearly,
\begin{equation*}
b\mathrm{div} u=b\mathrm{div} \mathbb{P}^\bot u=b_L\mathrm{div}\mathbb{P}^\bot u_L+b_L\mathrm{div}\mathbb{P}^\bot u_H+b_H\mathrm{div}\mathbb{P}^\bot u_L+b_H\mathrm{div}\mathbb{P}^\bot u_H.
\end{equation*}
From Corollary \ref{coro-product} with $u=b_L$, $v=\mathrm{div}\mathbb{P}^\bot u_L$, $\rho=p_2=q_2=2$, $p_1=q_1=p$, $s_1=\frac{N}{p}+2\al-1$, $s_2=\frac{N}{2}-\al$, $\sigma_1=\frac{N}{p}+2\al-2$, $\sigma_2=\frac{N}{2}-\al+1$, one deduces that
\begin{eqnarray}\label{e4.3}
&& \|b_L\mathrm{div}\mathbb{P}^\bot u_L\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}\nonumber\\
&\leq& C\| b_L\|_{\widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\frac{N}{p}+2\al-1}_{p,1})}
\|\mathrm{div} \Pe^\bot u_L\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-\al}_{2,1})}
+\|\mathrm{div} \Pe^\bot u_L\|_{\widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\frac{N}{p}+2\al-2}_{p,1})} \|b_L\|_{\widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-\al+1}_{2,1})}.
\end{eqnarray}
Using Corollary \ref{coro-product} again with $u=b_L$, $v=\mathrm{div}\mathbb{P}^\bot u_H$, $\rho=p_2=q_2=p_1=q_1=2$, $s_1=\sigma_2=\frac{N}{2}+\al-1$, $s_2=\sigma_1=\frac{N}{2}$, we have
\begin{equation}
\|b_L\mathrm{div}\mathbb{P}^\bot u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\leq C\| b_L\|_{\widetilde{L}^{\infty}_T(\dot{B}^{\frac{N}{2}+ \al-1}_{2,1})}
\|\mathrm{div} \Pe^\bot u_H\|_{L^1_T(\dot{B}^{\frac{N}{2} }_{2,1})}.
\end{equation}
Similarly, taking $u=b_H$, $v=\mathrm{div}\mathbb{P}^\bot u_L$, $\rho=p_2=q_2=p_1=q_1=2$, $s_1=\sigma_2=\frac{N}{2}$, $s_2=\sigma_1=\frac{N}{2}+ \al-1$ in Corollary \ref{coro-product} yields
\begin{equation}
\|b_H\mathrm{div}\mathbb{P}^\bot u_L\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}
\leq C\| b_H\|_{\widetilde{L}^{2}_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\|\mathrm{div} \Pe^\bot u_L\|_{\widetilde{L}^{2}_T(\dot{B}^{\frac{N}{2}-1+\al }_{2,1})}.
\end{equation}
By virtue of the low frequency embedding (\ref{lf-embeding1})
and Corollary \ref{coro-product} with $u=b_H$, $v=\mathrm{div}\mathbb{P}^\bot u_H$, $\rho=p_2=q_2=p_1=q_1=2$, $s_1=\sigma_2=\frac{N}{2} $, $s_2=\sigma_1=\frac{N}{2}-1$, we obtain
\begin{equation}
\|P_{<1}(b_H\mathrm{div}\mathbb{P}^\bot u_H)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})} \leq C\|b_H\mathrm{div}\mathbb{P}^\bot u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}-1 }_{2,1})}
\leq C\| b_H\|_{\widetilde{L}^{2}_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\|\mathrm{div} \Pe^\bot u_H\|_{\widetilde{L}^{2}_T(\dot{B}^{\frac{N}{2}-1 }_{2,1})}.
\end{equation}
Then, it follows from the above estimates, and the fact
\begin{equation}\label{5.3}
\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})
\subset \widetilde{L}^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-\al+1}_{2,1}),
\end{equation}
that the estimate (\ref{5.1}) holds.
Next, $\|P_{<1}(\dot{T}'_{\nb b}u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}$ and $\|P_{<1}(\dot{T}_{u}\nb b)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}$ will be bounded as follows. On the one hand, using Proposition \ref{p-TR}, we are led to
\begin{eqnarray}\label{4.10}
\nn&&\|P_{<1}(\dot{T}'_{\nb b}\Pe^\bot u)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le&\|P_{<1}(\dot{T}'_{\nb b_L}\Pe^\bot u_L)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{<1}(\dot{T}'_{\nb b_L}\Pe^\bot u_H)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&&+\|P_{<1}(\dot{T}'_{\nb b_H}\Pe^\bot u_L)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{<1}(\dot{T}'_{\nb b_H}\Pe^\bot u_H)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\nb b_L\|_{L^{\fr{1}{\al}}_T(\dot{B}^{2\al-2}_{\infty,1})}\|\Pe^\bot u_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}-\al+1}_{2,1})}+C\|\nb b_L\|_{L^{2}_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\|\Pe^\bot u_H\|_{L^{2}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
\nn&&+C\|\nb b_H\|_{L^{2}_T(\dot{B}^{-1}_{\infty,1})}\|\Pe^\bot u_L\|_{L^{2}_T(\dot{B}^{\fr{N}{2}+\al}_{2,1})}+C\|\nb b_H\|_{L^{2}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\|\Pe^\bot u_H\|_{L^{2}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray}
On the other hand, since \eqref{p1} ensures that $\fr{N}{p^*}-1\le0$, i.e. $p\le\fr{2N}{N-2}$, thanks to $\mathrm{div} \Pe u=0$, we obtain
\begin{eqnarray}\label{4.11}
\nn&&\|P_{<1}(\dot{T}'_{\nb b}\Pe u)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le&\|P_{<1}(\dot{T}'_{\nb b_L}\Pe u)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+C\|P_{<1}(\pr_k\dot{T}'_{ b_H}(\Pe u)^k)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\nb b_L\|_{L^{\infty}_T(\dot{B}^{\fr{N}{p^*}-2+\al}_{p^*,1})}\|\Pe u\|_{L^{1}_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}+C\| b_H\|_{L^{\infty}_T(\dot{B}^{\fr{N}{p^*}-1}_{p^*,1})}\|\Pe u\|_{L^{1}_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
\nn&\le&C\|b_L\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\|\Pe u\|_{L^{1}_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}+C\| b_H\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\|\Pe u\|_{L^{1}_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
\nn&\le&C\left(\|b_L\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\| b_H\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\|\Pe u\|_{L^{1}_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
&\le&CX^2(T),
\end{eqnarray}
where we have used (\ref{lf-embeding1})-(\ref{hf-embedding1}).
Similarly, owing to \eqref{5.3} and the following interpolation,
\begin{equation}\label{5.4}
\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1 }_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1 }_{2,1})
\subset \widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\frac{N}{2}+2\al-1}_{2,1}) \subset \widetilde{L}^{\frac{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1}),
\end{equation}
we have
\begin{eqnarray}\label{4.13}
\nn&&\|P_{<1}(\dot{T}_{\Pe^\bot u}\cdot\nb b)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le&\|P_{<1}(\dot{T}_{\Pe^\bot u}\cdot\nb b_L)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+C\|P_{<1}(\dot{T}_{\Pe^\bot u}\cdot\nb b_H)\|_{L^1_T(\dot{B}^{\fr{N}{2}-2+2\al}_{2,1})}\\
\nn&\le&C\|\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}\|\nb b_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}-\al}_{2,1})}+C\|\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}\|\nb b_H\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\Pe^\bot u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\left(\| b_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-\al}_{2,1})}+\| b_H\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\\
&\le&CX^2(T),
\end{eqnarray}
and
\begin{eqnarray}\label{4.14}
\nn&&\|P_{<1}(\dot{T}_{\Pe u}\cdot\nb b)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le&\|P_{<1}(\dot{T}_{\Pe u}\cdot\nb b_L)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+C\|P_{<1}(\dot{T}_{\Pe u}\cdot\nb b_H)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\Pe u\|_{L^{\infty}_T(\dot{B}^{-1}_{\infty,1})}\|\nb b_L\|_{L^{1}_T(\dot{B}^{\fr{N}{2}+\al}_{2,1})}+C\|\Pe u\|_{L^{2}_T(L^\infty)}\|\nb b_H\|_{L^{2}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\Pe u\|_{L^{\infty}_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\| b_L\|_{L^{1}_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})}+C\|\Pe u\|_{L^{2}_T(\dot{B}^{\fr{N}{p}}_{p,1})}\| b_H\|_{L^{2}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray}
Combining the above estimates, we complete the proof of Lemma \ref{lem4.1}.
{
$\square$
}
\textbf{Proof of Lemma \ref{lem4.2}.}
First of all, using the fact
\begin{equation}\label{5.6}
\widetilde{L}^\infty_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})\cap L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})
\subset \widetilde{L}^{\frac{2}{1-\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})\subset L^{\frac{2}{1-\al}}_T(L^\infty),
\end{equation}
and the decomposition $b=b_L+b_H$, we have
\begin{equation}\label{b1}
\| b \|_{\widetilde{L}^{\fr{2}{1-\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\le \| b_L \|_{\widetilde{L}^{\fr{2}{1-\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}+\| b_H \|_{\widetilde{L}^{\fr{2}{1-\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\le CX(T).
\end{equation}
Moreover, with the aid of the following low frequency embedding
\begin{equation*}
\| b_L \|_{\widetilde{L}^{\infty}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\le C \| b_L \|_{\widetilde{L}^{\infty}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})},
\end{equation*}
we find that
\begin{equation}\label{b2}
\| b \|_{\widetilde{L}^{\infty}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\le C\left( \| b_L \|_{\widetilde{L}^{\infty}_T(\dot{B}^{\frac{N}{2}-1+\al}_{2,1})}+ \| b_H \|_{\widetilde{L}^{\infty}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\right)\le CX(T).
\end{equation}
Now using Bony's decomposition, the high frequency embedding (\ref{hf-embedding1}),
Lemma \ref{Bernstein} and Proposition \ref{p-TR}, we are led to
\begin{eqnarray}\label{4.19}
\nn&&\|P_{\ge1} (b \mathrm{div} u)\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
\nn&\le&\|P_{\ge1} (b \mathrm{div} u_L)\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}+\|P_{\ge1} (b \mathrm{div} u_H)\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
\nn&\le&C\|P_{\ge1} (\dot{T}_b \mathrm{div} u_L)\|_{L^1_T(\dot{B}^{\frac{N}{2}+\al}_{2,1})}+C\|\dot{T}'_{\mathrm{div} u_L} b \|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}+C\|b \mathrm{div} u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
\nn&\le&C\|b \|_{L^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}\| \mathrm{div} u_L\|_{L^1_T(\dot{B}^{\frac{N}{2}+\al}_{2,1})}+C\| b \|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\|{\mathrm{div} u_L} \|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
\nn&&+C\|b \|_{L^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}\| \mathrm{div} u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
\nn&\le&C\|b \|_{L^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}\| \Pe^\bot u_L\|_{L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})}+C\| b \|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\|{\Pe^\bot u_L} \|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\frac{N}{2}+2\al}_{2,1})}\\
\nn&&+C\|b \|_{L^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}\| \Pe^\bot u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}+1}_{2,1})}\\
&\le&CX^2(T),
\end{eqnarray}
where we have used \eqref{b1}--\eqref{b2}, the interpolation
\begin{equation}\label{5.6-1}
\widetilde{L}^\infty_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})\cap{L}^1_T(\dot{B}^{\fr{N}{2}+1+\al}_{2,1})\subset \widetilde{L}^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}+2\al}_{2,1}),
\end{equation}
and the following low frequency embedding
\begin{equation}\label{lfe1}
\|\Pe^\bot u_L\|_{\widetilde{L}^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}\le C\|\Pe^\bot u_L\|_{\widetilde{L}^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}+2\al}_{2,1})}.
\end{equation}
Next, noting that $\fr{N}{p^*}\le1$, using Proposition \ref{p-TR}, we find that
\begin{eqnarray}\label{4.22}
\nn&&\|P_{\ge1}(\dot{T}'_{\nb b}u)\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
\nn&\le&\|P_{\ge1}(\dot{T}'_{\nb b}\Pe^\bot u_L)\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}+\|P_{\ge1}(\dot{T}'_{\nb b}(\Pe^\bot u_H+\Pe u))\|_{L^1_T(\dot{B}^{\frac{N}{2}}_{2,1})}\\
\nn&\le&C\|P_{\ge1}(\dot{T}'_{\nb b}\Pe^\bot u_L)\|_{L^1_T(\dot{B}^{\frac{N}{2}+\al}_{2,1})}+C\|\nb b\|_{L^\infty_T(\dot{B}^{\frac{N}{p^*}-1}_{p^*,1})}\|\Pe^\bot u_H+\Pe u\|_{L^1_T(\dot{B}^{\frac{N}{p}+1}_{p,1})}\\
\nn&\le&C\|b\|_{L^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}\|\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\frac{N}{2}+1+\al}_{2,1})}+C\| b\|_{L^\infty_T(\dot{B}^{\frac{N}{2}}_{2,1})}\left(\|\Pe^\bot u_H\|_{L^1_T(\dot{B}^{\frac{N}{2}+1}_{2,1})}+\|\Pe u\|_{L^1_T(\dot{B}^{\frac{N}{p}+1}_{p,1})}\right)\\
&\le&CX^2(T).
\end{eqnarray}
This completes the proof of Lemma \ref{lem4.2}.
{
$\square$
}
\textbf{Proof of Lemma \ref{lem4.3}.}
From the low frequency embedding \eqref{lf-embeding1}, Corollary \ref{coro-product} and Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11}, we infer that
\begin{eqnarray}\label{4.27'}
&&\nn\|P_{<1}\left(I(b)\mathcal{A}\Pe^\bot u\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(I(b)\mathcal{A}\Pe^\bot u\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|I(b)\mathcal{A}\Pe^\bot u_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}+C\|I(b)\mathcal{A}\Pe^\bot u_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|I(b)\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\mathcal{A}\Pe^\bot u_L\|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}+C\|I(b)\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\mathcal{A}\Pe^\bot u_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|b\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\Pe^\bot u_L\|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}+2\al}_{2,1})}+C\|b\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\Pe^\bot u_H\|_{L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}\\
&\le&CX^2(T),
\end{eqnarray}
where we have used \eqref{b1}--\eqref{b2}, and \eqref{5.6-1}--\eqref{lfe1}. Next, using $\mathrm{div}\Pe u=0$, we decompose $\Lm^{-1}\mathrm{div}(I(b)\mathcal{A}\Pe u)$ as follows:
\begin{equation*}
\Lm^{-1}\mathrm{div}(I(b)\mathcal{A}\Pe u)=\Lm^{-1}\left(\dot{T}_{\nb I(b)}\mathcal{A}\Pe u\right)+\Lm^{-1}\mathrm{div}\left(\dot{T}_{\mathcal{A}\Pe u} I(b)\right)+\Lm^{-1}\mathrm{div}\left(\dot{R}({ I(b)},\mathcal{A}\Pe u)\right).
\end{equation*}
Then according to Lemma \ref{Bernstein}, Proposition \ref{p-TR}, \eqref{b2}, and Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11} again, we have
\begin{eqnarray}\label{TbPu1}
\nn&&\|\Lm^{-1}\left(\dot{T}_{\nb I(b)}\mathcal{A}\Pe u\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\dot{T}_{\nb I(b)}\mathcal{A}\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{2}-2}_{2,1})}\\
\nn&\le&C\|{\nb I(b)}\|_{L^\infty_T(\dot{B}^{\fr{N}{p^*}-1}_{p^*,1})}\|\mathcal{A}\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|b\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
&\le&CX^2(T),
\end{eqnarray}
and
\begin{eqnarray}\label{4.27}
\nn&&\|\Lm^{-1}\mathrm{div}\left(\dot{T}_{\mathcal{A}\Pe u} I(b)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\mathcal{A}\Pe u\|_{L^1_T(\dot{B}^{-1}_{\infty,1})}\|{ I(b)}\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
\nn&\le&C\|b\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
&\le&CX^2(T).
\end{eqnarray}
Finally, we go to bound the remainder term $\Lm^{-1}\mathrm{div}\left(\dot{R}({ I(b)},\mathcal{A}\Pe u)\right)$. Noting that $\fr{N}{2}-1=0$ for $N=2$, we can not use Proposition \ref{p-TR} directly if $N=2$. Fortunately, Proposition \ref{prop-classical} enables us to bound $\|\Lm^{-1}\mathrm{div}\left(\dot{R}({ I(b)},\mathcal{A}\Pe u)\right)\|_{\dot{B}^{\fr{N}{2}-1}_{2,1}}$ by
$$\|\Lm^{-1}\mathrm{div}\left(\dot{R}({ I(b)},\mathcal{A}\Pe u)\right)\|_{\dot{B}^{\fr{N}{2}+\fr{N}{p}-1}_{\fr{2p}{p+2},1}}$$
first. Then Proposition \ref{p-TR} is applicable, and we have
\begin{eqnarray}\label{4.30}
\nn&&\|\Lm^{-1}\mathrm{div}\left(\dot{R}({ I(b)},\mathcal{A}\Pe u)\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\dot{R}({ I(b)},\mathcal{A}\Pe u)\|_{L^1_T(\dot{B}^{\fr{N}{2}+\fr{N}{p}-1}_{\fr{2p}{p+2},1})}\\
\nn&\le&C\|{ I(b)}\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\mathcal{A}\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}-1}_{p,1})}\\
\nn&\le&C\|b\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\Pe u\|_{L^1_T(\dot{B}^{\fr{N}{p}+1}_{p,1})}\\
&\le&CX^2(T).
\end{eqnarray}
It follows from \eqref{TbPu1}--\eqref{4.30} and the low frequency embedding \eqref{lf-embeding1} that \eqref{4.26} holds. This completes the proof of Lemma \ref{lem4.3}.
{
$\square$
}
\textbf{Proof of Lemma \ref{lem4.4}.}
Using the low frequency embedding \eqref{lf-embeding1}, high frequency embedding \eqref{hf-embedding1}, and the decomposition $b=b_L+b_H$, we have
\begin{eqnarray}
\nn&&\|P_{<1}\left(K(b)\nb b\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+\|P_{\ge1}\left(K(b)\nb b\right)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|K(b)\nb b_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+C\|K(b)\nb b_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}.
\end{eqnarray}
By virtue of Corollary \ref{coro-product}, \eqref{b2}, and Theorem 2.16 in \cite{Bahouri-Chemin-Danchin11}, we obtain
\begin{eqnarray}\label{e4.32}
\|K(b)\nb b_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\nn&\le& C\|K(b)\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\nb b_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|b\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\| b_H\|_{L^1_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray}
The remaining term will be divided into three parts.
\begin{eqnarray}\label{e4.33}
\nn&&\|K(b)\nb b_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le& C\|\dot{T}_{K(b)}\nb b_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+C\|\dot{T}'_{\nb b_L}K(b)_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}+C\|\dot{T}'_{\nb b_L}K(b)_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
&=:&J_1+J_2+J_3.
\end{eqnarray}
To bound $J_1$,
using the interpolation inequality,
we infer that
\begin{equation}\label{bL}
\|b_L\|_{\widetilde{L}^{r_1}_T(\dot{B}^{\fr{N}{2}+\al}_{p_1,1})}\le C\|b_L\|^\theta_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|b_L\|^{1-\theta}_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-\al}_{2,1})}\le C X(T),
\end{equation}
where
$$
\theta=\fr{(2-4\al)p}{(4+N-6\al)p-2N},\ \fr{1}{r_1}=\theta\al+(1-\theta)(1-\al), \quad \fr{1}{p_1}=\fr{\theta}{p}+\fr{1-\theta}{2},
$$
and
\begin{eqnarray}\label{b4}
\|b\|_{\widetilde{L}^{r_2}_T(\dot{B}^{\fr{N}{p}}_{p_2,1})}\nn&\le& \|b_L\|_{\widetilde{L}^{r_2}_T(\dot{B}^{\fr{N}{p}}_{p_2,1})}+\|b_H\|_{\widetilde{L}^{r_2}_T(\dot{B}^{\fr{N}{p}}_{p_2,1})}\\
\nn&\le& C\left(\|b_L\|_{\widetilde{L}^{r_2}_T(\dot{B}^{\fr{N}{p}}_{p_2,1})}+\|b_H\|_{\widetilde{L}^{r_2}_T(\dot{B}^{\frac{N}{p}+\frac{N}{2}-\frac{N}{p_2}}_{2,1})}\right)\\
\nn&\le& C\left(\|b_L\|^{1-\theta}_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|b_L\|^{\theta}_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-\al}_{2,1})}+\|b_H\|_{\widetilde{L}^{r_2}_T(\dot{B}^{\fr{N}{2}}_{2,1})} \right)\\
&\le&CX(T),
\end{eqnarray}
where
\begin{equation*}
\fr{1}{r_2}=(1-\theta)\al+\theta(1-\al), \quad \fr{1}{p_2}=\fr{1-\theta}{p}+\fr{\theta}{2}.
\end{equation*}
Here, $r_1$, $r_2$, $p_1$ and $p_2$ satisfy
\begin{equation}\label{p2}
\fr{1}{r_1}+\fr{1}{r_2}=1, \quad \fr{1}{p_2}=\fr{1}{p^*_1}+\fr{1}{p},\ 2\leq p_1,p_2\leq p.
\end{equation}
Thus, using Proposition \ref{p-TR}, Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11} and \eqref{bL}--\eqref{p2}, we arrive at
\begin{eqnarray}\label{e4.35}
J_1\nn&\le&C\|K(b)\|_{L^{r_2}_T(\dot{B}^{0}_{p_1^*,1})}\|\nb b_L\|_{L^{r_1}_T(\dot{B}^{\fr{N}{2}-1+\al}_{p_1,1})}\\
\nn&\le&C\|K(b)\|_{L^{r_2}_T(\dot{B}^{\fr{N}{p}}_{p_2,1})}\| b_L\|_{L^{r_1}_T(\dot{B}^{\fr{N}{2}+\al}_{p_1,1})}\\
\nn&\le&C\|b\|_{L^{r_2}_T(\dot{B}^{\fr{N}{p}}_{p_2,1})}\| b_L\|_{L^{r_1}_T(\dot{B}^{\fr{N}{2}+\al}_{p_1,1})}\\
&\le&CX^2(T).
\end{eqnarray}
As for $J_2$, according to \eqref{b1}, \eqref{5.6-1}, the following high frequency embedding
\begin{equation*}
\|K(b)_H\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}-\al}_{2,1})}\le C\|K(b)_H\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})},
\end{equation*}
and Theorem 2.61 in \cite{Bahouri-Chemin-Danchin11} again, we obtain
\begin{eqnarray}
J_2\nn&\le&C\|\dot{T}'_{\nb b_L}K(b)_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1+\al}_{2,1})}\\
\nn&\le&C\|{\nb b_L}\|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}\|K(b)_H\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}-\al}_{2,1})}\\
\nn&\le&C\|{ b_L}\|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}+2\al}_{2,1})}\|K(b)_H\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
\nn&\le&C\|{ b_L}\|_{L^{\fr{2}{1+\al}}_T(\dot{B}^{\fr{N}{2}+2\al}_{2,1})}\|b\|_{L^{\fr{2}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
&\le&CX^2(T).
\end{eqnarray}
Finally, in view of Lemma \ref{lem-Kb} in the Appendix, we have
\begin{equation}\label{KbL}
\|K(b)_L\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-\al}_{2,1})}\\
\le C\left(\|b_L\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-\al}_{2,1})}+\|b_H\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right).
\end{equation}
Therefore, using Proposition \ref{p-TR} again, one deduces that
\begin{eqnarray}\label{e4.38}
J_3\nn&\le&C\|\nb b_L\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{2\al-2}_{\infty,1})}\|K(b)_L\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-\al}_{2,1})}\\
\nn&\le&C\| b_L\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\left(\|b_L\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-\al}_{2,1})}+\|b_H\|_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\\
&\le&CX^2(T).
\end{eqnarray}
The proof of Lemma \ref{lem4.4} is completed.
{
$\square$
}
\subsection{Some nonlinear estimates in $L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})$}
In the next three lemmas, we shall bound $\|(f,g)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}$ in terms of $X(T)Z(T)$, where $(f,g)$ are the nonlinear terms on the right hand side of \eqref{viscoelastic-local}.
\begin{lem}\label{lem-a4.1}
Let $p$ and $\al$ satisfy \eqref{p1} and \eqref{al1}, respectively. Assume $(b,u)\in \mathcal{E}^{\frac{N}{2}}(T)$, then we have
\begin{equation}\label{a5.1}
\|b \mathrm{div} u\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}+\|\dot{T}'_{\nb b}u\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}
\leq C X(T)Z(T),
\end{equation}
and
\begin{equation}\label{a5.2}
\| ( u\cdot\nabla) u\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}+\| \dot{T}_{ u}\cdot\nabla d\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}
\leq C X(T)Z(T).
\end{equation}
\end{lem}
\begin{proof}
To begin with, we give the estimate of $\|b\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}$. In fact, using the high frequency embedding
\begin{equation*}
\|b_H\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\le C\|b_H\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}}_{p,1})}\le C\|b_H\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})},
\end{equation*}
and the decomposition $b=b_L+b_H$, one deduces that
\begin{equation}\label{b3}
\|b\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\le C\left(\|b_L\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}+\|b_H\|_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\le CX(T).
\end{equation}
Now if $N\ge3$, using Proposition \ref{p-TR}, \eqref{5.4}, \eqref{5.5}, and \eqref{b3}
we have
\begin{eqnarray}\label{a5.3}
&&\|b\mathrm{div} u\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}\nonumber\\
&\leq&C \left( \|\dot{T}_{b}\mathrm{div} u\|_{L^1_T(\dot{B}^{\frac{N}{2}-1 }_{2,1})}
+\|\dot{T}'_{\mathrm{div} u}b_L\|_{L^1_T(\dot{B}^{\frac{N}{2} -1}_{2,1})}
+\|\dot{T}'_{\mathrm{div} u}b_H\|_{L^1_T(\dot{B}^{\frac{N}{2}-1 }_{2,1})}
\right)\nonumber\\
\nn&\leq&C \left( \| b\|_{L^{\frac{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}
\|\mathrm{div} u\|_{L^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-2\al}_{2,1})}
+\|\mathrm{div} u\|_{L^{\frac{1}{\al}}_T(\dot{B}^{2\al-2}_{\infty,1})} \|b_L\|_{L^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-2\al+1}_{2,1})}\right.\\
&&\nn\left.+\|\mathrm{div} u\|_{L^{2}_T(\dot{B}^{\frac{N}{2}-1}_{2,1})} \|b_H\|_{L^{2}_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\right)\nonumber\\
\nn&\leq&C \left( \| b\|_{L^{\frac{1}{\al}}_T(\dot{B}^{2\al-1}_{\infty,1})}
\| u\|_{L^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-2\al+1}_{2,1})}
+\| u\|_{L^{\frac{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})} \|b_L\|_{L^{\frac{1}{1-\al}}_T(\dot{B}^{\frac{N}{2}-2\al+1}_{2,1})}\right.\\
&&\nn\left.+\| u\|_{L^{2}_T(\dot{B}^{\frac{N}{2}}_{2,1})} \|b_H\|_{L^{2}_T(\dot{B}^{\frac{N}{2}}_{2,1})}
\right)\nonumber\\
&\leq&C X(T)Z(T),
\end{eqnarray}
and
\begin{eqnarray}\label{a5.4}
\|\dot{T}'_{\nb b} u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}
\nn&\le&C\|\nb b\|_{L^{\fr{1}{\al}}_T(\dot{B}^{2\al-2}_{\infty,1})}\|u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}-2\al+1}_{2,1})}\\
&\le&CX(T)Z(T).
\end{eqnarray}
If $N=2$, we just need to reestimate $\|\dot{R}(\mathrm{div} u, b_L)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}$ and $\|\dot{R}(\nb b, u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}$. Indeed, they can be treated in the same way as follows:
\begin{eqnarray}\label{a5.5}
\nn&&\|\dot{R}(\mathrm{div} u, b_L)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}+\|\dot{R}(\nb b, u)\|_{L^1_T(\dot{B}^{\frac{N}{2}-1}_{2,1})}\\
\nn&\le&C\|\dot{R}(\mathrm{div} u, b_L)\|_{L^1_T(\dot{B}^{\frac{2N}{p}-1}_{\fr{p}{2},1})}+\|\dot{R}(\nb b, u)\|_{L^1_T(\dot{B}^{\frac{2N}{p}-1}_{\fr{p}{2},1})}\\
\nn&\le&C\|\mathrm{div} u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-2}_{p,1})}\|b_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p}-2\al+1}_{p,1})}+C\|\nb b\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-2}_{p,1})}\|u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p}-2\al+1}_{p,1})}\\
\nn&\le&C\| u\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|b_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}-2\al+1}_{2,1})}+C\|b\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|u\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}-2\al+1}_{2,1})}\\
&\le&CX(T)Z(T).
\end{eqnarray}
It follows from \eqref{a5.3}--\eqref{a5.5} that \eqref{a5.1} holds. \eqref{a5.2} can be obtained similarly since $b$ and $u$ lie in the same space $\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})$. The proof of Lemma \ref{lem-a4.1} is completed.
\end{proof}
\begin{lem}\label{lem-a4.3}
Under the conditions of Lemma \ref{lem-a4.1}, we have
\begin{equation}\label{a5.6}
\|I(b)\mathcal{A} u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX(T)Z(T).
\end{equation}
\end{lem}
\begin{proof}
Using Corollary \ref{coro-product}, Lemma \ref{Bernstein}, and \eqref{b2}, we are led to
\begin{eqnarray*}
\|{ I(b)}\mathcal{A} u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}
\nn&\le&C\|{ I(b)}\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\|\mathcal{A} u\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\\
\nn&\le&C\|b\|_{L^\infty_T(\dot{B}^{\fr{N}{2}}_{2,1})}\| u\|_{L^1_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}\\
&\le&CX(T)Z(T).
\end{eqnarray*}
This completes the proof of Lemma \ref{lem-a4.3}.
\end{proof}
Finally, we estimate $\|K(b)\nb b\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}$ in a similar manner as Lemma \ref{lem4.4}.
\begin{lem}\label{lem-a4.4}
Under the conditions of Lemma \ref{lem-a4.1}, we have
\begin{equation}\label{a5.7}
\|K(b)\nb b\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\le CX(T)Z(T).
\end{equation}
\end{lem}
\begin{proof}
First of all, we can use \eqref{e4.32} to bound $\|K(b)\nb b_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}$. In order to bound $\|\dot{T}_{K(b)}\nb b_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}$, using the interpolation inequality,
we get
\begin{equation*}
\|b_L\|_{\widetilde{L}^{\bar{r}_1}_T(\dot{B}^{\fr{N}{2}}_{\bar{p}_1,1})}\le C\|b_L\|^{\bar{\theta}}_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|b_L\|^{1-\bar{\theta}}_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-2\al}_{2,1})}\le C X^{\bar{\theta}}(T)Z^{1-\bar{\theta}}(T),
\end{equation*}
where
\begin{equation*}
\bar{\theta}=\fr{(2-4\al)p}{(4+N-8\al)p-2N},
\ \fr{1}{\bar{r}_1}=\bar{\theta}\al+(1-\bar{\theta})(1-\al), \quad \fr{1}{\bar{p}_1}=\fr{\bar{\theta}}{p}+\fr{1-\bar{\theta}}{2},
\end{equation*}
and
\begin{eqnarray*}
\|b\|_{\widetilde{L}^{\bar{r}_2}_T(\dot{B}^{\fr{N}{p}}_{\bar{p}_2,1})}\nn&\le& \|b_L\|_{\widetilde{L}^{\bar{r}_2}_T(\dot{B}^{\fr{N}{p}}_{\bar{p}_2,1})}+\|b_H\|_{\widetilde{L}^{\bar{r}_2}_T(\dot{B}^{\fr{N}{p}}_{\bar{p}_2,1})}\\
\nn&\le& C\left(\|b_L\|_{\widetilde{L}^{\bar{r}_2}_T(\dot{B}^{\fr{N}{p}}_{\bar{p}_2,1})}+\|b_H\|_{\widetilde{L}^{\bar{r}_2}_T(\dot{B}^{\frac{N}{p}-\frac{N}{\bar{p}_2}+\frac{N}{2}}_{2,1})}\right)\\
\nn&\le& C\left(\|b_L\|^{1-\bar{\theta}}_{\widetilde{L}^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\|b_L\|^{\bar{\theta}}_{\widetilde{L}^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-2\al}_{2,1})}
+\|b_H\|_{\widetilde{L}^{\bar{r}_2}_T(\dot{B}^{\frac{N}{2}}_{2,1})}\right)\\
&\le&CX^{1-\bar{\theta}}(T)Z^{\bar{\theta}}(T),
\end{eqnarray*}
where
\begin{equation*}
\fr{1}{\bar{r}_2}=(1-\bar{\theta})\al+\bar{\theta}(1-\al), \quad \fr{1}{\bar{p}_2}=\fr{1-\bar{\theta}}{p}+\fr{\bar{\theta}}{2}.
\end{equation*}
Here
\begin{equation*}
\fr{1}{\bar{r}_1}+\fr{1}{\bar{r}_2}=1, \quad \fr{1}{\bar{p}_2}=\fr{1}{\bar{p}^*_1}+\fr{1}{p},\quad 2\leq \bar{p}_1,\bar{p}_2\leq p.
\end{equation*}
Then similar to \eqref{e4.35}, we find that
\begin{eqnarray}
\|\dot{T}_{K(b)}\nb b_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\nn&\le&C\|K(b)\|_{L^{\bar{r}_2}_T(\dot{B}^{0}_{\bar{p}_1^*,1})}\|\nb b_L\|_{L^{\bar{r}_1}_T(\dot{B}^{\fr{N}{2}-1}_{\bar{p}_1,1})}\\
\nn&\le&C\|K(b)\|_{L^{\bar{r}_2}_T(\dot{B}^{\fr{N}{p}}_{\bar{p}_2,1})}\| b_L\|_{L^{\bar{r}_1}_T(\dot{B}^{\fr{N}{2}}_{\bar{p}_1,1})}\\
\nn&\le&C\|b\|_{L^{\bar{r}_2}_T(\dot{B}^{\fr{N}{p}}_{\bar{p}_2,1})}\| b_L\|_{L^{\bar{r}_1}_T(\dot{B}^{\fr{N}{2}}_{\bar{p}_1,1})}\\
&\le&CX(T)Z(T).
\end{eqnarray}
Next, similar to \eqref{e4.38}, if $N\ge3$, we have
\begin{eqnarray}
\|\dot{T}'_{\nb b_L}K(b)_L\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\nn&\le&C\|\nb b_L\|_{L^{\fr{1}{\al}}_T(\dot{B}^{2\al-2}_{\infty,1})}\|K(b)_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-2\al}_{2,1})}\\
\nn&\le&C\| b_L\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\left(\|b_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-2\al}_{2,1})}+\|b_H\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\\
&\le&CX(T)Z(T).
\end{eqnarray}
If $N=2$, the remainder $\dot{R}({\nb b_L}, K(b)_L)$ should be estimated as follows:
\begin{eqnarray}
\|\dot{R}({\nb b_L}, K(b)_L)\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\nn&\le&C\|\dot{R}({\nb b_L}, K(b)_L)\|_{L^1_T(\dot{B}^{\fr{2N}{p}-1}_{\fr{p}{2},1})}\\
\nn&\le&C\|\nb b_L\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-2}_{p,1})}\|K(b)_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{p}+1-2\al}_{p,1})}\\
\nn&\le&C\| b_L\|_{L^{\fr{1}{\al}}_T(\dot{B}^{\fr{N}{p}+2\al-1}_{p,1})}\left(\|b_L\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}+1-2\al}_{2,1})}+\|b_H\|_{L^{\fr{1}{1-\al}}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\\
&\le&CX(T)Z(T).
\end{eqnarray}
Finally, thanks to Lemma \ref{lem-Kb} in the Appendix, we have
\begin{equation}\label{KbH}
\|K(b)_H\|_{L^{1}_T(\dot{B}^{\fr{N}{2}}_{2,1})}
\le C\left(\|b_L\|_{L^{1}_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}+\|b_H\|_{L^{1}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right).
\end{equation}
Then using Proposition \ref{p-TR} and \eqref{b2}, we arrive at
\begin{eqnarray}
\|\dot{T}'_{\nb b_L}K(b)_H\|_{L^1_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}
\nn&\le&C\|{\nb b_L}\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}-1}_{2,1})}\|K(b)_H\|_{L^{1}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\\
\nn&\le&C\|{ b_L}\|_{L^{\infty}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\left(\|b_L\|_{L^{1}_T(\dot{B}^{\fr{N}{2}+1}_{2,1})}+\|b_H\|_{L^{1}_T(\dot{B}^{\fr{N}{2}}_{2,1})}\right)\\
&\le&CX(T)Z(T).
\end{eqnarray}
Combining \eqref{e4.32} with the above estimates yields \eqref{a5.7}. We complete the proof of Lemma \ref{lem-a4.4}.
\end{proof}
\bigbreak
\noindent{\bf Acknowledgments}
\bigbreak
Research supported by China Postdoctoral Science Foundation funded project 2014M552065, and National Natural Science Foundation of China 11401237, 11671353, 11271017 and 11331005, and the Fundamental Research Funds for the Central Universities CCNU15A05041, CCNU16A02011. Part of this work was carried out when the third author was visiting the Department of Mathematics of Zhejiang University.
\begin{thebibliography}{aaa}
\bibitem{Bahouri-Chemin-Danchin11}
H. Bahouri, J. Y. Chemin and R. Danchin, ``Fourier analysis and
nonlinear partial differential equations''. Grundlehren der
Mathematischen Wissenschaften [Fundamental Principles of
Mathematical Sciences], 343. Springer, Heidelberg, 2011.
\bibitem{Bony81}
J. M. Bony, {Calcul symbolique et propagation des
singularit\'{e}s pour les \'{e}quations aux
d\'{e}riv\'{e}es partielles nonlin\'{e}aires}.
\textit{Annales Scientifiques de l'\'{E}cole Normale
Sup\'{e}rieure} \textbf{14}(1981), 209--246.
\bibitem{Cannone93}
M. Cannone, Y. Meyer, and F. Planchon,
Solutions autosimilaires des \'{e}quations de Navier-Stokes.
``S\'{e}minaire sur les \'{E}quations aux D\'{e}riv\'{e}es Partielles" de l'\'{E}cole
Polytechnique, Expos\'{e} VIII, 1993--1994.
\bibitem{Ca97}
M. Cannone, A generalization of a theorem by Kato on Navier-Stokes equations. {\em Rev. Mat. Iberoamericana}, {\bf13}(1997), 515--541.
\bibitem{CD10}
F. Charve, R. Danchin, A global existence result for the compressible
Navier-Stokes equations in the critical $L^p$ framework. {\em Arch. Rational
Mech. Anal.}, {\bf198}(2010), 233--271.
\bibitem{Ch1}
J. Y. Chemin, ``Localization in Fourier space and Navier-Stokes
system, Phase Space Analysis of Partial Differential Equations''.
{Proceedings 2004, CRM series, Pisa} 2004, 53--136.
\bibitem{CL}
J. Y. Chemin, N. Lerner
{Flot de champs de vecteurs non Lipschitziens et \'{e}quations
de Navier-Stokes}. {\em J. Differential Equations}, \textbf{121}(1995),
314--328.
\bibitem{CG10}
J.-Y. Chemin, I. Gallagher, Large, global solutions to the Navier-Stokes equations slowly varying in one direction, {\em Trans. Amer. Math. Soc.}, {\bf362}(2010),
2859--2873.
\bibitem{CGP11}
J.-Y. Chemin, I. Gallagher, M. Paicu, Global regularity for some classes of large solutions to the Navier-Stokes equations, {\em Ann. of Math.}, {\bf173} (2011),
983--1012.
\bibitem{CZ07}
J.-Y. Chemin, P. Zhang, On the global wellposedness of the 3-D incompressible anisotropic Navier-Stokes equations. {\em Commun. Math. Phys.}, \textbf{272}(2007), 529--566.
\bibitem{CMZ10}
Q. L. Chen, C. X. Miao, Z. F. Zhang, Global well-posedness for compressible
Navier-Stokes equations with highly oscillating initial velocity. {\em Comm. Pure
Appl. Math.}, {\bf 63}(2010), 1173--1224.
\bibitem{CCK04}
Y.Cho, H. J. Choe, and H. Kim, Unique solvability of the initial boundary value
problems for compressible viscous fluid. {\em J. Math. Pures Appl.}, {\bf83}(2004), 243--275.
\bibitem{Danchin00}
R. Danchin, Global existence in critical spaces for compressible Navier-Stokes equations.
{\em Invent. Math.}, {\bf141}(2000), 579--614.
\bibitem{Danchin02}
R. Danchin, Zero Mach number limit in critical spaces for compressible Navier-Stokes equations. {\em Ann. Scient. \'{E}c. Norm. Sup.}, {\bf35}(2002), 27--75.
\bibitem{Danchin2003}
R. Danchin, Density-dependent incompressible viscous fluids in critical spaces. {\em Proc. Roy. Soc. Edinburgh}, {\bf133}(2003), 1311--1334.
\bibitem{Danchin05}
R. Danchin, On the uniqueness in critical spaces for compressible Navier-Stokes
equations, {\em NoDEA Nonlinear Differential Equations Appl.}. {\bf12}(2005), 111--128.
\bibitem{Danchin07}
R. Danchin, Well-posedness in critical spaces for barotropic viscous fluids with truly not constant density. {\em Comm. Partial Differential Equations}, {\bf32}(2007), 1373--1397.
\bibitem{DH14}
R. Danchin, L. B. He, The incompressible limit in $L^p$ type critical spaces. arXiv:1409.7323.
\bibitem{DM16}
R. Danchin, P. B. Mucha, Compressible Navier-Stokes system: large solutions and incompressible limit. arXiv:1603.07213.
\bibitem{DG99}
B. Desjardins and E. Grenier, Low Mach number limit of compressible flows in the whole space. {\em Proc. R. Soc. Lond.}, {\bf455}(1999), 2271--2279.
\bibitem{FNP01}
E. Feireisl, A. Novotn\'{y}, H. Petzeltov\'{a}, On the existence of globally defined weak
solutions to the Navier-Stokes equations. {\em J. Math. Fluid Mech.}, {\bf3}(2001),358--392.
\bibitem{FK64}
H. Fujita and T. Kato, On the Navier-Stokes initial value problem I. {\em Arch. Ration. Mech.
Anal.}, {\bf16}(1964), 269--315.
\bibitem{Ha11}
B. Haspot, Existence of global strong solutions in critical spaces for barotropic viscous fluids. {\em Arch. Rational Mech. Anal.}, {\bf202}(2011), 427--460.
\bibitem{Hoff951}
D. Hoff, Global solutions of the Navier-Stokes equations for multidimensional compressible
flow with discontinuous initial data. {\em J. Differential Equations}, {\bf120}(1995), 215--254.
\bibitem{Hoff952}
D. Hoff, Strong convergence to global solutions for multidimensional flows of compressible,
viscous fluids with polytropic equations of state and discontinuous initial
data. {\em Arch. Rational Mech. Anal.}, {\bf132}(1995), 1--14.
\bibitem{HLX12}
X. D. Huang, J. Li, and Z. P. Xin, Global well-posedness of classical solutions with large oscillations and vacuum to the three-dimensional isentropic compressible Navier-Stokes equations. {\em Comm. Pure Appl. Math.}, {\bf65}(2012), 549--585.
\bibitem{JZ01}
S. Jiang and P. Zhang,
On spherically symmetric solutions of the compressible
isentropic Navier-Stokes equations.
{\em Commun. Math. Phys.}, {\bf215}(2001), 559--581.
\bibitem{JZ03}
S. Jiang and P. Zhang,
Axisymmetric solutions of the 3-D Navier-Stokes equations for compressible isentropic flows.
{\em J. Math. Pure Appl.}, {\bf82}(2003), 949--973.
\bibitem{Kato}
T. Kato, Strong $L^p$ solutions of the Navier-Stokes equations in $\mathbb{R}^m$ with applications to weak solutions. {\em Math. Z.}, {\bf187}(1984),
471--480.
\bibitem{Koch01}
H. Koch, D. Tataru,
Well-posedness for the Navier-Stokes equations.
{\em Adv. Math.}, \textbf{157}(2001), 22--35.
\bibitem{Lions98}
P. L. Lions, ``Mathematical topics in fluid mechanics''. Vol. 2. Compressible models.
Oxford University Press, New York, 1998.
\bibitem{MN80}
A. Matsumura, T. Nishida, The initial value problem for the equations of motion
of viscous and heat-conductive gases. {\em J. Math. Kyoto Univ.}, {\bf20}(1980), 67--104.
\bibitem{Nash62}
J. Nash, Le probl\`{e}me de Cauchy pour les \'{e}quations diff\'{e}rentielles d'un fluide
g\'{e}n\'{e}ral. {\em Bull. Soc. Math. France.}, {\bf90} (1962), 487--497.
\bibitem{SS93}
R. Salvi, I. Straskraba, Global existence for viscous compressible fluids and their
behavior as $t\rightarrow\infty$. {\em J. Fac. Sci. Univ. Tokyo Sect. IA. Math.}, {\bf40}(1993), 17--51.
\bibitem{Serrin59}
J. Serrin, On the uniqueness of compressible fluid motion. {\em Arch. Rational. Mech.
Anal.}, {\bf3}(1959), 271--288.
\bibitem{Zhang09}
T. Zhang, Global well-posedness problem for the 3-D incompressible anisotropic Navier-Stokes equations in an anisotropic space. {\em Comm. Math. Phys.}, {\bf287}(2009), 211--224.
\end{thebibliography}
\end{document}
|
\begin{document}
\title{A note on exponential-M\"{o}bius sums}
\begin{abstract}
In 1991, Baker and Harman proved, under the assumption of the generalized Riemann hypothesis, that $\max_{ \theta \in [0,1) }\left|\sum_{ n \leqslant x } \mu(n) e(n \theta) \right| \ll_\epsilon x^{3/4 + \epsilon}$. The purpose of this note is to deduce an analogous bound in the context of polynomials over a finite field using Weil's Riemann Hypothesis for curves over a finite field. Our approach is based on the work of Hayes who studied exponential sums over irreducible polynomials.
\end{abstract}
\section{Introduction}
Let $\mu$ be the M\"{o}bius function and write $e(\theta) = e^{2\pi i \theta}$. Baker and Harman~\cite{BaHar} proved under the assumption of the generalized Riemann hypothesis that for all $\epsilon > 0$,
\begin{equation}
\max_{\theta \in [0,1)} \left| \sum_{n \leqslant x} \mu(n) e(n\theta) \right| \ll_\epsilon x^{\frac{3}{4}+\epsilon}.
\end{equation}
It is conjectured that (1) holds for all $\epsilon>0$ with $\frac{3}{4}$ replaced by $\frac{1}{2}$. The best unconditional result is due to Davenport~\cite{Dav} who showed that for all $A > 0$
$$
\max_{\theta \in [0,1)} \left| \sum_{n \leqslant x} \mu(n) e(n\theta) \right| \ll_A \frac{x}{(\log x)^A}.
$$
The purpose of this note is to deduce an analogue of (1) for the polynomial ring $\mathbb{F}_q[t]$. First, let us go through some definitions required to state the result. The function field analogue of the real numbers is the completion of the field of fractions of $\mathbb{F}_q[t]$ with respect to the norm defined by
$$|f/g| = \begin{cases} q^{\deg f - \deg g} &\text{ if } f\neq 0 \\0 & \text{ otherwise}. \end{cases}$$
This completion is naturally identified with the ring of formal Laurent series $\mathbb{F}_q((1/t))=\{ \sum_{i\leqslant j} x_i t^i \: : \: x_i \in \mathbb{F}_q, \: j \in \mathbb{Z} \}.$ The norm defined above is extended to $x = \sum_{ i \leqslant j } x_i t^i \in \mathbb{F}_q((1/t))$ by setting $|x| = q^j$ where $j$ is the largest index with $x_j \neq 0$. The analogue of the unit interval is $\mathbb{T}:=\{ \sum_{i < 0} x_i t^i \: : \: x_i \in \mathbb{F}_q \},$ and is a subring of $\mathbb{F}_q((1/t))$.
Define the additive character $\psi : \mathbb{F}_q \rightarrow \mathbb{C}^\times$ by $\psi(x) = e(\text{tr}(x)/p)$, where $\text{tr} : \mathbb{F}_q \rightarrow \mathbb{F}_p$ is the usual trace map and $p$ is the characteristic of $\mathbb{F}_q$. Define also the exponential map $\textbf{e}_q : \mathbb{F}_q((1/t))\rightarrow \mathbb{C}^\times$ by $\textbf{e}_q (x) = \psi(x_{-1}).$
Now let $\mu$ denote the M\"{o}bius function on the ring $\mathbb{F}_q[t]$. All sums over polynomials are sums over monic polynomials.
\begin{theorem}\label{thm}
Suppose $ n \geqslant 3$. Then
$$\max_{\theta \in \mathbb{T}} \left| \sum_{ \deg f = n }\mu(f) \textbf{e}_q(f \theta) \right| \leqslant 4 q^{\frac{3n+1}{4}}\left(\tfrac{3\sqrt{3}}{2}\right)^n.$$
\end{theorem}
\begin{rmk}
It follows that for all $ \epsilon > 0$ and $q$ large enough with respect to $\epsilon$ we have
$$\max_{\theta \in \mathbb{T}} \left| \sum_{ \deg f = n }\mu(f) \textbf{e}_q(f \theta) \right| \leqslant 4 q^{(\frac{3}{4} + \epsilon )n}.$$
\end{rmk}
Our proof of Theorem~\ref{thm} will follow the strategy of Hayes employed in his study of the exponential sum
$$\sum_{ \substack{ \deg \omega = n \\ \omega \text{ irreducible } }} \textbf{e}_q(\omega \theta).$$
Recently, Bienvenu and L\^e have independently derived a similar result to Theorem~\ref{thm} in~\cite{PiHo}. Their Theorem 9 corresponds to our Lemma~\ref{mobchar} and their Theorem~11 closely resembles our Theorem~\ref{thm}.
\emph{Acknowledgements}. We are very grateful to Pierre Bienvenu for pointing out a mistake in an earlier version of our proof of Theorem~\ref{thm}. We are also grateful to Andrew Granville for pointing out how to strengthen an earlier version of Theorem~\ref{thm}. Previously, we required both $n$ and $q$ to be large with respect to $\epsilon$ for the remark that follows Theorem~\ref{thm} to hold. This work was supported by the Engineering and Physical Sciences Research Council EP/L015234/1 via the
EPSRC Centre for Doctoral Training in Geometry and Number Theory (The London School of Geometry and
Number Theory), University College London.
\section{Lemmas}
Let $\mathbb{F}_q[t]^\times$ be the multiplicative monoid of monic polynomials in $\mathbb{F}_q[t]$. Whilst investigating the distribution of irreducible polynomials over $\mathbb{F}_q$, Hayes~\cite{Hay1} introduced certain congruence classes on $\mathbb{F}_q[t]^\times$ defined as follows. Let $s \geqslant 0$ be an integer and $g \in \mathbb{F}_q[t]$. We define an equivalence relation $\mathcal{R}_{s,g}$ on $\mathbb{F}_q[t]^\times$ by
$$ a \equiv b \text{ mod } \mathcal{R}_{s,g} \Leftrightarrow g \text{ divides } a-b \text{ and } \left|\frac{a}{t^{\deg a}} - \frac{b}{t^{\deg b}}\right|<\frac{1}{q^{s}}$$
It is easy to check that this is indeed an equivalence relation and that for all $c \in \mathbb{F}_q[t]^\times$,
$$a \equiv b \text{ mod } \mathcal{R}_{s,g} \Rightarrow ac \equiv bc \text{ mod } \mathcal{R}_{s,g}$$
so we can define the quotient monoid $ \mathbb{F}_q[t]^\times \slash \mathcal{R}_{s,g}$. Hayes showed that an element of $\mathbb{F}_q[t]$ is invertible modulo $\mathcal{R}_{s,g}$ if and only if it is coprime to $g$ and that the units of this quotient monoid form an abelian group of order $q^s \phi(g)$ which we denote $\mathcal{R}_{s,g}^* = \left(\mathbb{F}_q[t]^\times \slash \mathcal{R}_{s,g} \right)^\times.$ Given a character (group homomorphism) $\chi : \mathcal{R}_{s,g}^* \rightarrow \mathbb{C}$ we can lift this to a character of $\mathbb{F}_q[t]^\times$ by setting $\chi(f)=0$ if $f$ is not invertible modulo $\mathcal{R}_{s,g}$. Associated to each such character is the $L$-function $L(u, \chi)$ defined for $u \in \mathbb{C}$ with $|u|<1/q$ by
$$L(u, \chi) = \sum_{ f \in \mathbb{F}_q[t]^\times } \chi(f) u^{\deg f } = \prod_{ \omega }(1-\chi(\omega)u^{\deg \omega})^{-1}$$
where the product is over all monic irreducibles. When $\chi$ is a non-trivial character it can be shown that $L(u, \chi)$ is a polynomial which factorises as
$$L(u,\chi) = \prod_{i = 1}^{d(\chi)} (1-\alpha_i(\chi)u)$$ for some $d(\chi) \leqslant s + \deg g -1 $ and each $\alpha_i(\chi)$ satisfies $|\alpha_i(\chi)| = 1$ or $\sqrt{q}$. This follows from Weil's Riemann Hypothesis and appears to have been first proved by Rhin in~\cite{Rhin}.
When $\chi = \chi_0$ is the trivial character we have
$$L(u , \chi_0) = \sum_{\substack{ f \in \mathbb{F}_q[t]^\times \\(f,g)=1 }}u^{\deg f} =\sum_{ f \in \mathbb{F}_q[t]^\times }u^{\deg f}\prod_{\omega | g }(1-u^{\deg \omega}) = \frac{1}{1-qu}\prod_{\omega | g }(1-u^{\deg \omega}).$$
\begin{lemma}\label{mobchar}
Let $\chi$ be a character modulo $\mathcal{R}^*_{s,g}$ and $\deg g \leqslant n/2$. Then
$$\left|\sum_{ \deg f = n } \mu(f)\chi(f) \right | \leqslant
\begin{cases}
{{n+s+\deg g -2}\choose{s+ \deg g -2}} q^{n/2} \:\:\: &\text{ if } \chi \neq \chi_0 \\
\binom{n+r-1}{r-1}(q+1) &\text{ if } \chi = \chi_0
\end{cases}
$$
where $ r $ is the number of distinct irreducible divisors of $g$.
\end{lemma}
\begin{rmk}
The bound for $\chi_0$ is smaller than the one for $\chi \neq \chi_0$ when $n \geqslant 3$ because $\deg g$ is an upper bound for $r$ and for $n \geqslant 3$
$$(q+1)\binom{n+ \deg g - 1}{n} \leqslant \binom{n+ \deg g - 2}{n} q^{n/2}.$$
\end{rmk}
\begin{proof}
Suppose first that $\chi \neq \chi_0$. Then
\begin{align*}
\sum_{ f } \chi(f) \mu(f) u^{\deg f} = L(u,\chi)^{-1} = \prod_{i = 1}^{d(\chi)}(1- \alpha_i(\chi)u)^{-1}
= \sum_{n \geqslant 0} \left( \sum_{ \substack{r_1 + \cdots r_{d(\chi)} = n \\ 0 \leqslant r_i \leqslant n }} \prod_{i = 1}^{d(\chi)} \alpha_i(\chi)^{r_i} \right)u^n.
\end{align*}
Comparing coefficients and using the triangle inequality we get
\begin{align*}
\left| \sum_{ \deg f = n } \chi(f) \mu(f) \right| = \left| \sum_{ \substack{ r_1 + \cdots + r_{d(\chi)} = n \\ 0 \leqslant r_i \leqslant n } } \prod_{i = 1}^{d(\chi)} \alpha_i(\chi)^{r_i} \right| &\leqslant {{n + d(\chi) - 1}\choose{d(\chi) -1 }} q^{n/2} \\
&\leqslant {{n+s+\deg g -2}\choose{s+ \deg g -2}} q^{n/2}.
\end{align*}
When $\chi= \chi_0$ is the principal character
$$L(u,\chi_0)^{-1} = (1-qu)\prod_{\omega|g}(1+u^{\deg \omega} + u^{2 \deg \omega} + \cdots ).$$
If we write $\omega_1, \omega_2, \ldots, \omega_r$ for the distinct irreducible divisors of $g$ then we get, by equating coefficients again,
\begin{align*}
\left|\sum_{\deg f = n}\chi_0(f)\mu(f) \right| &\leqslant \sum_{ \substack{ a_i \in \mathbb{Z}_{\geqslant 0}\\ \sum_{1\leqslant i \leqslant r}a_i \deg \omega_i = n }}1 + q\sum_{ \substack{ a_i \in \mathbb{Z}_{\geqslant 0}\\ \sum_{1\leqslant i \leqslant r}a_i \deg \omega_i = n-1 }}1 \\
&\leqslant (q+1)\sum_{ \substack{ b_i \in \mathbb{Z}_{\geqslant 0}\\ \sum_{1\leqslant i \leqslant r}b_i = n }}1 \\
&= (q+1)\binom{n+r-1}{r-1}.
\end{align*}
\end{proof}
\begin{lemma}\label{decomp}
For each $\theta \in \mathbb{T}$ there exist unique coprime polynomials $a, g \in \mathbb{F}_q[t]$ with $g$ monic and $\deg a < \deg g \leqslant n/2$ such that
$$ \left| \theta - \frac{a}{g} \right| < \frac{1}{q^{\frac{n}{2} + \deg g}}.$$
\end{lemma}
\begin{proof}
See Lemma 3 from~\cite{Pol}.
\end{proof}
\begin{lemma}\label{lem}
Let $\theta \in \mathbb{T}$ and let $a, g$ be the unique polynomials defined as in Lemma 2 with respect to $\theta$ and $n$. Set $ s = n - [\frac{n}{2}] - \deg g$. For any $f_1,f_2 \in \mathbb{F}_q[t]^\times$ of degree $n$ such that $f_1 \equiv f_2 \text{ mod } \mathcal{R}_{s,g}$ we have
$$\textbf{e}_q(f_1 \theta) = \textbf{e}_q(f_2 \theta) .$$
\end{lemma}
\begin{proof}
See Lemma 5.2 from~\cite{Hay2}.
\end{proof}
\begin{comment}
The next lemma is a slight refinement on Lemma 8 from~\cite{TH}.
\begin{lemma}\label{divbound}
Let $\tau(f)$ be the number of divisors of a monic polynomial $f$. If $ n = \deg f$ then
$$\tau(f) \leqslant q^{4 n / \log n}.$$
\end{lemma}
\begin{proof}
Write $f = \prod_{1\leqslant i \leqslant r}\omega_i^{\alpha_i}$ where $\omega_i$ are the distinct irreducibles dividing $f$. Then, for $\lambda > 0$ to be chosen later, we have $\tau(f) = q^{\lambda n} \prod_{1\leqslant i \leqslant r}\frac{1+\alpha_i}{q^{\lambda \alpha_i \deg \omega_i}}.$ The factors in this product with $\deg \omega_i \geqslant \frac{1}{\lambda\log q}$ are bounded by 1 since $1+\alpha_i \leqslant e^{\alpha_i}.$ The other factors, with $\deg \omega_i < \frac{1}{\lambda \log q}$, are bounded by $\frac{1+\alpha_i}{e^{\alpha_i\lambda \log q}} \leqslant e^{2 \sqrt{\alpha_i}-\alpha_i \lambda \log q} \leqslant e ^{\frac{1}{\lambda\log q}}.$ Therefore, since there are at most $q^{\frac{1}{\lambda \log q}} = e^{\frac{1}{\lambda}}$ irreducibles of degree at most $\frac{1}{\lambda \log q}$, we have
$$\tau(f) \leqslant q^{\lambda n} e^{e^{\frac{1}{\lambda}}/(\lambda \log q)} = q^{\lambda n +e^{\frac{1}{\lambda}}/( \lambda (\log q)^2)}.$$ Setting $\lambda = \frac{3}{\log n}$ this becomes
$$\tau(f) \leqslant q^{\frac{3n}{\log n} + \frac{ n^{1/3} \log n}{3 (\log q)^2}} \leqslant q^{\frac{4n}{\log n}}.$$
\end{proof}
\end{comment}
\begin{lemma}\label{better}
Suppose $g \in \mathbb{F}_q[t]$ is square-free. Then
$$\sum_{d | g} \frac{1}{q^{\deg d}} \leqslant (1+\frac{\log (\deg g)}{\log q})e.$$
\end{lemma}
\begin{proof}
Order the monic irreducibles $\omega_1, \omega_2, \ldots, \omega_r$ dividing $g$ and the monic irreducibles $P_1, \ldots$ in $\mathbb{F}_q[t]$ in order of degree (and those of the same degree arbitrarily). Let $\pi(k)$ be the number of monic irreducibles of degree $k$ and define $N$ by $\sum_{\deg P \leqslant N-1}\deg P < \deg g \leqslant \sum_{\deg P \leqslant N}\deg P.$ Then $g$ has at most $\sum_{1 \leqslant k \leqslant N}\pi(k)$ irreducible factors. Therefore, since $\deg P_i \leqslant \deg \omega_i$, we have
$$\sum_{d|g}\frac{1}{q^{ \deg d }} \leqslant \prod_{\omega | g}\left(1 + \frac{1}{q^{\deg \omega}} \right) \leqslant \prod_{\deg P \leqslant N }\left(1+\frac{1}{q^{\deg P}}\right)=\prod_{1\leqslant k \leqslant N}\left(1+\frac{1}{q^k}\right)^{\pi(k)}.$$
Using $\pi(k) \leqslant \frac{q^k}{k}$ this is bounded by
$$\prod_{1\leqslant k \leqslant N}\left(1+\frac{1}{q^k}\right)^{\frac{q^k}{k}} \leqslant \prod_{1\leqslant k \leqslant N}e^{\frac{1}{k}} \leqslant e^{1+\log N} = Ne.$$
Now we bound $N$ in terms of $\deg g$ as follows
$$\deg g > \sum_{\deg P \leqslant N-1}\deg P = \sum_{1 \leqslant k \leqslant N-1}\pi(k)k \geqslant \sum_{k | N -1} \pi(k) k = q^{N-1}$$
by the prime number theorem in $\mathbb{F}_q[t].$ This gives $N \leqslant 1+\frac{\log (\deg g)}{\log q}$ which completes the proof of the Lemma.
\end{proof}
\section{Proof of Theorem~\ref{thm}}
Let $ \theta \in \mathbb{T}$ and choose $g$ and $s$ as in Lemma~\ref{lem}. We start by giving an explicit description of a set of representatives for the equivalence relation $\mathcal{R}_{s,g}$. It is not hard to show that
$$\mathcal{S}_{s,g} = \{t^{[\frac{n}{2}]} g b_1 + b_2 \: | \: \deg b_1 = s, b_1 \text{ monic}, \deg b_2 < \deg g \}$$
is such a set. Furthermore, $$\mathcal{S}^*_{s,g} = \{t^{[\frac{n}{2}]} g b_1 + b_2 \: | \: \deg b_1 = s, b_1 \text{ monic}, \deg b_2 < \deg g, (b_2,g)=1 \}$$
defines a set of reduced representatives modulo $\mathcal{R}_{s,g}$. See~\cite{Hay2} Lemma 7.1 for details.
Then by Lemma~\ref{lem} and the orthogonality of characters modulo $\mathcal{R}_{s,g}^*$ we can write
\begin{align*}
\sum_{ \deg f = n} \mu(f) \textbf{e}_q(f \theta) &= \sum_{ b \in \mathcal{S}_{s,g} } \sum_{ \substack{ \deg f = n \\ f \equiv b \text{ mod } \mathcal{R}_{s,g} } } \mu(f) \textbf{e}_q(f \theta) \\
&= \sum_{d | g}\sum_{ \substack{ b \in \mathcal{S}_{s,g} \\ (g,b)=d} } \textbf{e}_q(b \theta) \sum_{ \substack{ \deg f = n \\ f \equiv b \text{ mod } \mathcal{R}_{s,g} } } \mu(f) \\
&= \sum_{d | g}\sum_{ \substack{ b \in \mathcal{S}_{s,g/d} \\ (g/d,b)=1} } \textbf{e}_q(bd \theta) \sum_{ \substack{ \deg f = n - \deg d \\ f \equiv b \text{ mod } \mathcal{R}_{s,g/d} } } \mu(fd) \\
&= \sum_{d | g}\sum_{ b \in \mathcal{S}^*_{s,g/d} } \textbf{e}_q(b d \theta)\sum_{ \deg f = n -\deg d} \frac{1}{q^s \phi(g/d)} \sum_{ \chi \text{ mod } \mathcal{R}^*_{s,g/d}} \overline{\chi}(b) \chi(f) \mu(fd).
\end{align*}
Notice that $\mu(fd) = \mu(f)\mu(d) \chi_d(f)$ where $\chi_d(f)$ is the trivial character modulo $\mathcal{R}_{s,d}^*$. We can therefore rewrite the above as
$$
=\sum_{d | g}\frac{\mu(d)}{q^s \phi(g/d)}\sum_{ \chi \text{ mod } \mathcal{R}^*_{s,g/d}}\left( \sum_{ b \in \mathcal{S}^*_{s,g/d} } \textbf{e}_q(b d \theta)\overline{\chi}(b)\right ) \left( \sum_{ \deg f = n -\deg d} \mu(f) \chi \chi_d(f) \right).
$$
Now $\chi$ is a character modulo $\mathcal{R}^*_{s,g/d}$ and $\chi_d$ is a character modulo $\mathcal{R}^*_{s,d}$. Therefore, $\chi\chi_d$ is a character modulo $\mathcal{R}^*_{s,g}$, and so using the triangle inequality and Lemma~\ref{mobchar} we can bound this in absolute value by
\begin{align*}
q^{n/2} \sum_{ \substack{ d | g \\ g \text{ square-free } }}\frac{1}{q^{s+\deg d/2} \phi(g/d)} {{n-\deg d+s+\deg g -2}\choose{s+ \deg g -2}}\sum_{ \chi \text{ mod } \mathcal{R}^*_{s,g/d}} \left| \sum_{ b \in \mathcal{S}_{s,g/d} } \textbf{e}_q(bd \theta) \overline{\chi}(b) \right|
\end{align*}
We bound the Gauss sum over $\chi$ mod $\mathcal{R}^*_{s,g/d}$ in the standard way using the Cauchy--Schwarz inequality and Parseval's identity as follows
\begin{align*}
\sum_{ \chi \text{ mod } \mathcal{R}^*_{s,g/d}} \left| \sum_{ b \in \mathcal{S}_{s,g/d} } \textbf{e}_q(bd \theta) \overline{\chi}(b) \right| & \leqslant \left(\sum_{\chi \text{ mod } \mathcal{R}^*_{s,g/d}}1 \sum_{ \chi \text{ mod } \mathcal{R}^*_{s,g/d} } \left|\sum_{b \in \mathcal{S}_{s,g/d}} \textbf{e}_q(bd \theta) \overline{\chi}(b) \right|^2 \right)^{1/2} \\
&= \left( q^s \phi(g/d) \sum_{b_1, b_2 \in \mathcal{S}_{s,g/d} } \textbf{e}_q(d(b_1-b_2)\theta) \sum_{\chi \text { mod } \mathcal{R}^*_{s,g/d} } \overline{\chi}(b_1) \chi(b_2) \right)^{1/2} \\
&= \left( (q^s \phi(g/d))^2 \sum_{b_1 = b_2 \in \mathcal{S}^*_{s,g/d} } \textbf{e}_q((b_1-b_2)\theta) \right)^{1/2} \\
&=(q^s \phi(g/d))^{3/2}.
\end{align*}
Recall that $s+ \deg g = n-[\frac{n}{2}] \geqslant n/2$ so that
$${{n - \deg d+s+\deg g -2}\choose{s+ \deg g -2}} \leqslant {{2n - [\frac{n}{2}] -2}\choose{n - [\frac{n}{2}] -2}}.$$
We can bound this binomial coefficient using the fact that for all positive integers $k$,
$$\sqrt{2\pi} k^{k+\frac{1}{2}}e^{-k + \frac{1}{12k+1}}<k!<\sqrt{2\pi} k^{k+\frac{1}{2}}e^{-k + \frac{1}{12k}}.$$
This precise form of Stirling's formula is due to Robbins~\cite{Rob}. It follows that if $k=[\frac{n}{2}]$ then
$${{2n - [\frac{n}{2}] -2}\choose{n - [\frac{n}{2}] -2}}<{{3k}\choose{k}}<\frac{1}{\sqrt{2\pi}}e^{\frac{1}{36k}-\frac{1}{12k+1}-\frac{1}{24k+1}} \frac{(3k)^{3k+\frac{1}{2}}}{k^{k+\frac{1}{2}}(2k)^{2k+\frac{1}{2}}}<\frac{1}{\sqrt{4\pi k/3}}\left(\tfrac{3\sqrt{3}}{2}\right)^{2k}.$$
Putting it all together with $\phi(g/d) \leqslant q^{\deg g - \deg d}$ and Lemma~\ref{better} we get
\begin{align*}
\left| \sum_{ \deg f = n} \mu(f) \textbf{e}_q(f \theta) \right| &\leqslant q^{n/2}\frac{1}{\sqrt{2\pi (n-1)/3}}\left(\tfrac{3\sqrt{3}}{2}\right)^{n}\sum_{d | g} \frac{(q^s \phi(g/d))^{1/2}}{q^{\deg d /2}} \\
&\leqslant q^{n-\frac{1}{2}[\frac{n}{2}]} \frac{(1+\tfrac{\log n}{ \log q })e}{\sqrt{2\pi (n-1)/3}}\left(\tfrac{3\sqrt{3}}{2}\right)^{n}
\end{align*}
and Theorem~\ref{thm} easily follows after a short numerical calculation.
\end{document}
|
\begin{document}
\lstset{language=Python}
\title{On the equality of domination number and $ 2 $-domination number}
\author{G\"{u}lnaz Boruzanl{\i} Ekinci\footnote{Department of Mathematics, Faculty of Science, Ege University, Izmir, Turkey\\E-mail address: [email protected]} \hspace{0.2cm}and\hspace{0.2cm}Csilla Bujt\'as\footnote{Faculty of Information Technology, University of Pannonia, Veszpr\'em, Hungary; and Faculty of Mathematics and Physics, University of Ljubljana, Slovenia\\E-mail address: [email protected]}}
\maketitle
\begin{abstract}
The $2$-domination number $\gamma_2(G)$ of a graph $G$ is the minimum cardinality of a set $ D \subseteq V(G) $ for which
every vertex outside $ D $ is adjacent to at least two vertices in $ D $. Clearly, $ \gamma_2(G) $ cannot be smaller than the
domination number $ \gamma(G) $. We consider a large class of graphs and characterize those members which satisfy
$\gamma_2=\gamma$. For the general case, we prove that it is NP-hard to decide whether $\gamma_2=\gamma$ holds. We also
give a necessary and sufficient condition for a graph to satisfy the equality hereditarily.
\end{abstract}
\noindent{\bf Keywords}: Domination number, $ 2 $-domination number, Hereditary property, Computational complexity.
\noindent{\bf MSC:} 05C69, 05C75, 68Q25.
\section{Introduction}
In this paper, we continue to expand on the study of graphs that satisfy the equality $\gamma(G) = \gamma_2(G)$, where $\gamma(G)$ and $\gamma_2(G)$ stand for the domination number and the $ 2 $-domination number of a graph $ G $, respectively. If $\gamma(G) = \gamma_2(G)$ holds for a graph $G$, then we call it $ (\gamma,\gamma_2) $\textit{-graph}. We prove that the corresponding recognition problem is NP-hard and there is no forbidden subgraph characterization for $ (\gamma,\gamma_2) $-graphs in general. On the other hand, in one of our main results, we consider a large graph class ${\cal H}$ and give a special type of forbidden subgraph characterization for $ (\gamma,\gamma_2) $-graphs over ${\cal H}$. Although the number of these forbidden subgraphs is infinite, we prove that the recognition problem is solvable in polynomial time on ${\cal H}$.
Putting the question into another setting, we give a complete characterization for $(\gamma, \gamma_2)$-perfect graphs, that is, we characterize the graphs for which all induced subgraphs with minimum degree at least two satisfy the equality of domination number and $ 2 $-domination number.
\subsection{Terminology and Notation}
\indent Let $ G $ be a simple undirected graph, where $ V(G) $ and $ E(G) $ denote the set of vertices and the set of edges of $ G $, respectively. The \textit{(open) neighborhood} of a vertex $ v $ is the set $ N_G(v) = \{u \in V(G): uv \in E(G)\} $ and its \textit{closed neighborhood} is $ N_G[v] = N_G(v) \cup\{v\}$. The \textit{degree} of $ v $ is given by the cardinality of $ N_G(v) $, that is, $ \deg_G(v) =|N_G(v)| $. We will write $N(v)$, $N[v]$ and $\deg(v)$ instead of $N_G(v)$, $N_G[v]$ and $\deg_G(v)$, if $G$ is clear from the context. An edge $ uv $ is a \textit{pendant edge} if $ \deg(u)=1 $ or $ \deg(v)=1 $, otherwise the edge is \textit{non-pendant}. The minimum and maximum vertex degrees of $ G $ are denoted by $ \delta(G) $ and $ \Delta(G) $, respectively. For a subset $ S\subseteq V(G) $, let $ G[S] $ denote the subgraph induced by $S$. We say that $S$ is \textit{independent} if $G[S]$ does not contain any edges. For disjoint subsets $ U, W \subseteq V(G)$, we let $ E[U,W] $ denote the set of edges between $ U $ and $ W $.
For a positive integer $ k $, the \textit{$ k^{th} $ power} of a graph $ G $, denoted by $ G^k $, is the graph on the same vertex set as $ G $ such that $ uv $ is an edge if and only if the distance between $ u $ and $ v $ is at most $ k $ in $ G $. An edge $ uv \in E(G) $ is \textit{subdivided} by deleting the edge $ uv $, then adding a new vertex $ x $ and two new edges $ ux $ and $ xv $. Let $ K_n $, $ C_n $ and $P_n$ denote the complete graph, the cycle and the path, all of order $ n $, respectively; and let $ S_n $ denote the star of order $ n+1 $. For any positive integer $ n $, let $ [n] $ be the set of positive integers not exceeding $ n $. For notation and terminology not defined here, we refer the reader to \cite{West2001}.
For a positive integer $ k $, a subset $ D \subseteq V(G) $ is a \textit{$ k $-dominating set} of the graph $ G $ if $ |N_G(v) \cap D|\geq k $ for every $ v \in V(G)\setminus D $. The \textit{$ k $-domination number} of $ G $, denoted by $ \gamma_k(G) $, is the minimum cardinality among the $ k $-dominating sets of $ G $. Note that the $ 1 $-domination number, $ \gamma_1(G) $, is the classical domination number $ \gamma(G) $.
A graph $G$ is called \textit{$F$-free} if it does not contain any induced subgraph isomorphic to $F$. More generally, let ${\cal F}$ be a (finite or infinite) class of graphs, then $G$ is ${\cal F}$-free if it is $F$-free for all $F\in {\cal F}$. On the other hand, let $G^D$ denote a graph $G$ with a specified subset $D \subseteq V(G)$. Then, $F^{D'}$ is a (induced) subgraph of $G^D$ if $F$ is a (induced) subgraph of $G$ and $D'=V(F)\cap D$. We say that $F_1^{D_1}$ is isomorphic to $F_2^{D_2}$ if there is an edge-preserving bijection between $V(F_1)$ and $V(F_2)$ which maps $D_1$ onto $D_2$. Analogously, we may define the $F^{D'}$-freeness of $G^D$ and forbidden (induced) subgraph characterization with a specified vertex subset $D$.
\subsection{Preliminary results}
The concept of $ k $-domination in graphs was introduced by Fink and Jacobson \cite{Fink85, Fink85-2} and it has been studied extensively by many researchers (see for example \cite{bonomo2018, Bujtas2017, Caro1990-2, Caro1990, desormeaux2014, Favaron1988, Favaron2008, Hansberg2015, Hansberg2013, krzywkowski2017, Shaheen2009, yue2020}). For more details, we refer the reader to the books on domination by Haynes, Hedetniemi and Slater \cite{ DominationBook2, DominationBook1} and to the survey on $ k $-domination and $ k $-independence by Chellali \textit{et al.}\ \cite{ChellaliSurvey}.
Fink and Jacobson \cite{Fink85} established the following basic theorem.
\begin{thm}\label{thm:FJ} \cite{Fink85}
For any graph $ G $ with $ \Delta(G)\geq k\geq 2 $, $ \gamma_k(G) \geq \gamma(G)+k-2$.
\end{thm}
Although it is proved that the above inequality is sharp for every $k\ge 2$, the characterization of graphs attaining the equality is still open, even for the case when $ k = 2$. The corresponding characterization problem was studied in~\cite{Hansberg2015, Hansberg2016, Hansberg2008},
while similar problems involving different domination-type graph and hypergraph invariants were considered for example in~\cite{Arumugam2013, Blidia2006, hartnell1995, krzywkowski2017, Randerath1998}.
In this paper, we study $ (\gamma,\gamma_2) $-graphs, that is, graphs for which Theorem~\ref{thm:FJ} holds with equality if $k=2$. Note that $ G $ is a $ (\gamma,\gamma_2) $-graph, that is $\gamma_2(G)=\gamma(G)$, if and only if every component of $ G $ is a $ (\gamma,\gamma_2) $-graph. Thus, we only deal with connected graphs in the rest of the paper.
Hansberg and Volkmann \cite{Hansberg2008} characterized the cactus graphs (i.e., graphs in which no two cycles share an edge) which are $ (\gamma,\gamma_2) $-graphs and they also gave some general properties of the graphs attaining the equality. In 2016, the claw-free (i.e., $S_3$-free) $ (\gamma,\gamma_2) $-graphs and the line graphs which are $ (\gamma,\gamma_2) $-graphs were characterized by Hansberg \textit{et al.} \cite{Hansberg2016}. We will refer to the following basic lemmas proved in these papers.
\begin{lemma} \cite{Hansberg2008} \label{lem:0}
If $ G $ is a connected nontrivial graph with $ \gamma_2(G)=\gamma(G) $, then $ \delta(G)\geq 2 $.
\end{lemma}
\begin{lemma}\cite{Hansberg2016} \label{lem:1}
Let $D$ be a minimum $ 2 $-dominating set of a graph $G$. If $ \gamma_2(G)=\gamma(G) $, then $ D $ is independent.
\end{lemma}
\begin{lemma}\cite{Hansberg2016} \label{lem:2}
Let $ G $ be a connected nontrivial graph with $ \gamma_2(G)=\gamma(G) $ and let $ D $ be a minimum $ 2 $-dominating set of $ G $. Then, for each vertex $ u' \in V\setminus D $ and $ u,v \in D \cap N(u') $, there is a vertex $ v' \in V \setminus D $ such that $ u,u',v $ and $ v' $ induce a $ C_4 $.
\end{lemma}
We strengthen Lemma \ref{lem:2} by proving the following statement.
\begin{lemma}
\label{lem:2nghbrs}
Let $ G $ be a connected nontrivial graph with $ \gamma_2(G)=\gamma(G) $ and let $ D $ be a minimum $ 2 $-dominating set of $ G $. For every pair $ u,v \in D$, if $ N_G(u) \cap N_G(v) \neq \emptyset $, then there exists a nonadjacent pair $ u',v' \in V\setminus D $ such that $ N_G(u') \cap D = N_G(v')\cap D = \{u,v\}$.
\end{lemma}
\begin{proof}
For every vertex $ x \in N_G(u)\cap N_G(v) $, there is a vertex $ y $ different from $x$ such that $N_G(y)\cap D = \{u,v\}$ and $xy \notin E(G)$, since otherwise $ (D \setminus \{u,v\})\cup\{x \} $ would be a dominating set of $ G $, a contradiction. This proves that we have at least two non-adjacent vertices $u'$ and $v'$ with the property $ N_G(u') \cap D = N_G(v')\cap D = \{u,v\}$.
\end{proof}
The following simple proposition demonstrates that $(\gamma,\gamma_2)$-graphs form a rich class and it indicates the possible difficulties in a general characterization.
\begin{prop}
\label{prop:0}
There is no forbidden (induced) subgraph for the graphs satisfying the equality of domination number and $ 2 $-domination number.
\end{prop}
\begin{proof} Consider an arbitrary graph $F$ and a four-cycle $ C_4 $, which is vertex-disjoint to $F$. Let $ u $ and $ v $ be two non-adjacent vertices of $ C_4$. Construct the graph $ G_F $ by joining each vertex of $ F $ to both $ u $ and $ v $. Since, for any $F$, the graph $ G_F$ contains $ F $ as an induced subgraph and it satisfies the equality $ \gamma_2(G_F) = \gamma(G_F) =2$, there is no forbidden induced subgraph for $(\gamma,\gamma_2)$-graphs.
\end{proof}
As a consequence of the Lemmas \ref{lem:0}-\ref{lem:2nghbrs}, we will prove that all $(\gamma,\gamma_2)$-graphs belong to the following graph class ${\cal G}$ that we define together with its subclasses ${\cal G}_1$ and ${\cal G}_2$.
\begin{definition} Given an arbitrary simple graph $F$ with vertex set $V(F)=D=\{v_1,\dots v_d\}$, a graph $G$ belongs to the class ${\cal G}(F)$ if $G$ can be obtained from $F$ by the following rules.
\begin{itemize}
\item[$(i)$] Define a pair of vertices $X_{i,j}=\{x_{i,j}^1, x_{i,j}^2\}$ for every edge $v_iv_j$ of $F$, and further, let $Y$ be an arbitrary (possibly empty) set of vertices, such that $D$, $Y$ and all the pairs $X_{i,j}$ are mutually disjoint sets of vertices. Define $V(G)=D \cup X \cup Y$, where $X=\bigcup_{v_iv_j\in E(F)}X_{i,j}$.
\item[$(ii)$] The edges between $D$ and $X\cup Y$ are defined such that $N_G(x_{i,j}^s)\cap D=\{v_i,v_j\}$ for every vertex $x_{i,j}^s\in X$, and the set $N_G(u)\cap D$ contains at least two vertices and induces a complete subgraph in $F$ for any $u\in Y$. The induced subgraph $G[D]$ cannot contain edges.
\item[$(iii)$] The edges inside $X\cup Y$ can be chosen arbitrarily, but each $X_{i,j}$ must remain independent.
\end{itemize}
Moreover, $G$ belongs to ${\cal G}_1(F)$ if $ |N_G(y)\cap D| = 2 $ for each $ y \in Y $; and $G$ belongs to ${\cal G}_2(F)$ if $Y=\emptyset$. The graph classes ${\cal G}$, ${\cal G}_1$, ${\cal G}_2$ contain those graphs $G$ for which there exists a graph $F$ such that $G$ belongs to ${\cal G}(F)$, ${\cal G}_1(F)$, ${\cal G}_2(F)$, respectively.
\end{definition}
For $G\in {\cal G}(F)$ with the fixed partition $V(G)=D\cup X\cup Y$ as per above definition, a vertex $v$ is a \textit{$D$-vertex} (or original vertex) if $v\in D$; $v$ is a \textit{subdivision vertex} if $v\in X$; and $v$ is a \textit{supplementary vertex} if $v\in Y$. The edges inside $G[X\cup Y]$ are called \textit{supplementary edges}, and $F$ is said to be the \textit{underlying graph} of $G$. In Section 5, we will show that the underlying graph is not necessarily unique by presenting a $(\gamma, \gamma_2)$-graph having two non-isomorphic underlying graphs. Note that the construction in the proof of Proposition \ref{prop:0} always belongs to the class $ {\cal G}_1 $. Hence, Proposition \ref{prop:0} remains true under the condition $ G \in {\cal G}_1 $. This motivates us to focus on the smaller class ${\cal G}_2$.
Alternatively, we may define the graph class ${\cal G}_2(F)$ in the following constructive way. Let $ F $ be a simple graph with vertex set $ V(F) $ and edge set $ E(F) $. Consider the \textit{double subdivision graph} $ F^* $ obtained by substituting each edge $ v_iv_j $ by two parallel edges and subdividing each edge once by adding the vertices $ x_{i,j}^1 $ and $ x_{i,j}^2 $. Let $ X_{i,j} = \{x_{i,j}^1, x_{i,j}^2\} $ and define the set of subdivision vertices $X = \bigcup_{v_iv_j \in E(F)}^{} X_{i,j}$. The graph class ${\cal G}_2(F)$ consists of the graphs obtained by adding some (maybe zero) supplementary edges between subdivision vertices of $ F^* $ such that each $ X_{i,j}$ remains independent.
\begin{prop}
\label{prop:1}
If $G$ is a graph with $ \gamma_2(G) = \gamma(G) $, then $G \in \mathcal{G} $.
\end{prop}
\begin{proof} Assuming $ \gamma_2(G) = \gamma(G) $, choose a minimum $ 2 $-dominating set $D$ of $G$ and define the graph $ F = G^2[D] $. We first note that, by Lemma~\ref{lem:1}, $D$ is independent in $G$. Since $D$ is a $ 2 $-dominating set, every $u\in V(G)\setminus D$ has at least two neighbors in $D$ and, by the definition of $F$, the set $N_G(u)\cap D$ induces a complete subgraph in $F$. By Lemma~\ref{lem:2nghbrs}, for every edge $v_iv_j$ of $F$, there exist at least two different and non-adjacent vertices $u$, $u' \in V(G) \setminus D$ such that $ N_G(u) \cap D = N_G(u') \cap D = \{v_i,v_j\}$. If we select such a pair and define $X_{i,j}=\{u, u'\}$ for every $v_iv_j \in E(F)$, and let $Y=V(G)\setminus (D\cup X)$, then $G$ can be obtained from the underlying graph $F$ with the vertex partition $V(G)=D\cup X\cup Y$, proving that $G\in {\cal G}(F)$.
\end{proof}
In a follow-up paper of the present work \cite{ekinci2020}, we studied the analogous problem for each $k \ge 3$. There we gave a characterization for connected bipartite graphs satisfying $\gamma_k(G)=\gamma(G)+k-2$ and $\Delta(G) \ge k$. This result is based on the notion of the $k$-uniform ``underlying hypergraph'' that corresponds to the underlying graph, as defined here, if $k=2$.
\subsection{Structure of the paper}
In Section 2, we define the class ${\cal H}$ of those graphs which are contained in ${\cal G}_2$ with an underlying graph of girth at least 5 and we give a characterization for $(\gamma, \gamma_2)$-graphs over ${\cal H}$. Then, in Section 3, we discuss algorithmic complexity questions. First, we prove that the recognition problem of $(\gamma, \gamma_2)$-graphs is NP-hard on ${\cal G}_1$ (even if a minimum $ 2 $-dominating set is given together with the problem instance). Then, on the positive side, we show that there is a polynomial-time algorithm which recognizes $(\gamma, \gamma_2)$-graphs over the class ${\cal H}$ if the instance is given together with the minimum $ 2 $-dominating set $D=V(F)$. The algorithm is based on our characterization theorem and Edmonds' Blossom Algorithm. In Section 4, we consider the hereditary version of the property and characterize $(\gamma, \gamma_2)$-perfect graphs. As a direct consequence, we get that $(\gamma, \gamma_2)$-perfect graphs are easy to recognize. In the concluding section, we put remarks on the underlying graphs and discuss some open problems.
\section{Characterization of $(\gamma, \gamma_2)$-graphs over ${\cal H}$} \label{sec:2}
To formulate the main result of this section, we will refer to the following definitions.
\begin{definition} Let ${\cal H}$ be the union of those graph classes ${\cal G}_2(F)$ where the underlying graph $F$ is $(C_3,C_4)$-free.
\end{definition}
When we consider a graph $G\in {\cal H}$, we will always assume that a fixed $(C_3,C_4)$-free underlying graph $F$ and a corresponding partition $V(G)=D\cup X$ are given. In order to indicate this structure, we will use the notation $ G^D $.
\begin{figure}
\caption{The graph $ A_4 $}
\label{fig:A4B4}
\end{figure}
\begin{figure}
\caption{The graph $ B$ }
\label{fig:C1C2}
\end{figure}
\begin{definition}
For a positive integer $k\geq 2$, let $A_k^W$ be the graph on the vertex set
$$V(A_k)=\{v, w_1,\dots ,w_k, x_1^1, \dots, x_k^1, x_1^2, \dots ,x_k^2\}$$
and with the edge set
$$E(A_k)=\{vx_i^1, vx_i^2, w_ix_i^1, w_ix_i^2: 1\le i \le k\}\cup \{x_i^1x_{i+1}^2: 1\le i \le k-1\}\cup \{x_k^1x_1^2\}.$$
The specified vertex set is $W_k = W =\{v\} \cup\{w_i: 1\le i \le k\}$ (for illustration see Fig.~\ref{fig:A4B4}).
\end{definition}
\begin{definition}
Let $B^W$ be the graph of order 8 with
$$V(B)=\{v_1, u_1, v_2, u_2, x_1^1,x_1^2,x_2^1,x_2^2\},$$
$$E(B) = \{v_ix_i^1, v_ix_i^2,u_ix_i^1,u_ix_i^2: 1\le i \le 2\}\cup \{x_1^1x_2^1\}$$
The specified vertex set is $W=\{v_1, u_1, v_2, u_2\}$ (for illustration see Fig.~\ref{fig:C1C2}).
\end{definition}
Note that $ A_k \in {\cal G}_2(S_k)$ and $ B \in {\cal G}_2(2K_2) $.
We first prove a lemma which will be referred to in the proof of our main theorem and also in later sections.
\begin{lemma} \label{lem:d2dom}
If $ G^D \in \mathcal{G}_1(F) $, then $D$ is a minimum $ 2 $-dominating set of $G$.
\end{lemma}
\begin{proof}
By definition, every vertex from $X$ has two neighbors in $ D $. Thus, $D$ is a $ 2 $-dominating set in $G$.
Suppose, to the contrary, that $ D' $ is a $ 2 $-dominating set of $ G $ such that $ |D'|<|D| $. Let $ D_1 = D \cap D'$ and $ D_2 = D \setminus D' $. Since $ D $ is independent in $ G $, the vertices in $ D_2 $ have to be $ 2 $-dominated by the vertices of $ D' \setminus D $, that is, every vertex in $ D_2 $ has at least two neighbors in $ D' $. Then we have \[|E[D',D_2]|\geq 2|D_2|.\]
Moreover, by the definition of $\mathcal{G}_1(F) $, every vertex in $ D'\setminus D $ has exactly two neighbors in $ D $, so we have \[2|D'\setminus D|\geq |E[D',D_2]|.\]
Thus, $ |D'\setminus D| \geq |D_2| $.
Since $ D'=(D' \setminus D) \cup D_1 $, we conclude $ |D'| \geq |D_2|+|D_1|=|D| $, a contradiction.
\end{proof}
\begin{thm} \label{thm:2}
Let $G^D$ be a graph from ${\cal H}$. Then $\gamma(G)=\gamma_2(G)$ holds if and only if $G^D$ contains no subgraph isomorphic to $B^W$ and no subgraph isomorphic to $A_k^{W}$ for any $k\ge 2$.
\end{thm}
\begin{proof}
Throughout the proof, we assume that $G\in {\cal H}$ and hence there exists a $(C_3,C_4)$-free underlying graph $F$ such that $G \in {\cal G}_2(F)$. By Lemma~\ref{lem:d2dom}, $D=V(F)$ is a minimum $ 2 $-dominating set of $G$.
First assume that $G^D$ contains a (not necessarily induced) subgraph which is isomorphic to $B^W$. We may assume, without loss of generality, that this subgraph contains the vertices $ S = \{v_1, u_1, v_2, u_2, x_1^1,x_1^2,x_2^1,x_2^2\} $, the edges correspond to those in Fig.~\ref{fig:C1C2}, and $ S \cap D =\{v_1, u_1, v_2, u_2\} $. Since $F$ is $(C_3,C_4)$-free, the induced subgraph $F[S \cap D]$ is $(C_3,C_4)$-free as well. Therefore, as $|S\cap D|=4$, $F[S \cap D]$ is a forest. It contains at least two edges, namely $v_1u_1$ and $v_2u_2$. Hence, $F[S \cap D]$ contains a leaf, say $ v_1 $. Consider the set $ D' = (D \setminus S) \cup \{u_1, x_1^1, x_2^2\} $. Observe that $ D' $ dominates all the vertices in $D$; the vertex $x_1^1 \in D'$ dominates $ x_2^1 $; the vertex $ u_1 $ dominates $ x_1^2 $. By the choice of $ v_1 $ and $u_1$, $ F[\{v_1,v_2,u_2\}] $ contains only the edge $ v_2u_2 $. Hence, all the subdivision vertices different from $ \{x_1^1,x_1^2,x_2^1,x_2^2\} $ are dominated either by $ D \setminus S $ or $ u_1 $. Hence, $D'$ is a dominating set in $G$ and $|D'|<|D|$. These imply $\gamma(G)<\gamma_2(G)$.
Next assume that $G^D$ contains a subgraph which is isomorphic to $A_k^W$. We may assume, without loss of generality, that the vertices of this subgraph are named as given in the definition of $ A_k^W $. Consider the set $D'=(D\setminus W) \cup \{x_1^1, \dots, x_k^1\}$. Observe that $D'$ dominates all the vertices in $D$; the set $\{x_1^1, \dots, x_k^1\} \subseteq D'$ dominates all the vertices of the form $x_i^s$ ($i \in [k]$, $s\in [2]$). Since $F$ is assumed to be $C_3$-free, for any further subdivision vertex $x_{i,j}^s$ of $G$, at least one of its neighbors which is a $D$-vertex, namely at least one of $v_i$ and $v_j$, is not included in $W$. Thus, $x_{i,j}^s$ is dominated by a vertex in $D\setminus W$. We may conclude that $D'$ is a dominating set in $G$. Since $|W|=k+1$, we have $|D'|<|D|$ from which $\gamma(G)<\gamma_2(G)$ follows. This finishes the proof of one direction of our theorem.
For the converse, we assume that $G$ contains no subgraph isomorphic to $B^W$ and no subgraph isomorphic to $A_k^{W}$ for any $k\ge 2$, and then prove that $\gamma(G)=\gamma_2(G)$. In particular, having no subgraph isomorphic to $B^W$ means that every supplementary edge is inside a neighborhood of a $D$-vertex and, therefore, $N[x_{i,j}^s]\subseteq N[v_i]\cup N[v_j]$ holds for each subdivision vertex $x_{i,j}^s$. Now, suppose for a contradiction that $\gamma(G)<\gamma_2(G)$. Let $D'$ be a minimum dominating set of $G$ such that $|D'\cap D|$ is maximum under this condition. It is clear that $|D'|=\gamma(G)<\gamma_2(G)=|D|$.
We first prove that no pair $x_{i,j}^1$, $x_{i,j}^2$ are contained together in $D'$. Suppose, to the contrary, that $\{x_{i,j}^1, x_{i,j}^2\}\subseteq D'$. Then, since $N[x_{i,j}^1]\cup N[x_{i,j}^2]\subseteq N[v_i]\cup N[v_j]$,
the set $D''= (D'\setminus \{x_{i,j}^1, x_{i,j}^2\}) \cup \{v_i,v_j\}$ would be a dominating set of $ G $. This contradicts either the minimality of $ |D'| $ or the maximality of $|D'\cap D|$.
If we have some edges $v_iv_j \in E(F)$ such that $|X_{i,j}\cap D'|=0$, then we delete all these $X_{i,j}$ pairs from $G$, delete all the associated edges from $F$ and obtain $G'$ and $F'$. Note that, by definition, $G'\in {\cal G}_2(F')$ and $F'$ is still $(C_3,C_4)$-free. As $D'$ contains exactly one vertex from each remaining pair $X_{i,j}$, we infer that $|E(F')| \le |D'|$. By Lemma~\ref{lem:d2dom}, $\gamma_2(G')$ remains $|D|$ (we did not delete the possibly arising isolated vertices). We deleted only subdivision vertices not contained in $D\cup D'$ and $D'$ contains exactly one vertex from each pair $X_{i,j}$ corresponding to an edge $v_iv_j \in E(F')$. Therefore,
\begin{equation} \label{eq:1}
|E(F')| \le |D' \cap V(G')|< |D\cap V(G')|
\end{equation}
holds and $D' \cap V(G')$ is a dominating set in $G'$. By Lemma~\ref{lem:d2dom}, $D\cap V(G')$ remains a minimum $2$-dominating set in $G'$.
$G'$ might contain several components. By the inequality (\ref{eq:1}), there is a component, say $G''$, such that $|D'\cap V(G'')| <|D \cap V(G'')|=\gamma_2(G'')$. It is clear that $G''$ is not an isolated vertex.
Recall that $N_G[x_{i,j}^s] \subseteq N_G[v_i] \cup N_G[v_j]$ holds for each subdivision vertex $x_{i,j}^s$ in $G$ and hence, by construction, the analogous statement remains true in $G''$. Thus, the connectivity of the underlying graph $F''$ of $G''$ follows from the connectivity of $G''$. It also holds that $V(F'')= D \cap V(G'')$. Moreover, as $D' \cap V(G'')$ intersects each pair $X_{i,j} $ from $G''$, we have $|E(F'')| \le |D' \cap V(G'')|$. We may conclude
\begin{equation} \label{eq:2}
|E(F'')| \le |D' \cap V(G'')| < |V(F'') |.
\end{equation}
The underlying graph $F''$ is therefore a tree and
\begin{equation} \label{eq:3}
|E(F'')|= |D' \cap V(G'')| = |V(F'') |-1
\end{equation}
holds. By the first equality in (\ref{eq:3}), $D' \cap D \cap V(G'')=\emptyset$. Note that $F''$ is not necessarily an induced subgraph of $F$ but, as $F$ is $C_3$-free, all the star-subgraphs of $F''$ are induced stars in $F$.
Consider a non-pendant edge $ v_iv_j $ in $ F'' $ (if one exists). We know that $ D'\cap V(G'') $ is a dominating set in $G''$ and it contains exactly one vertex from $ X_{ij}$. Renaming the vertices if necessary, we may suppose $ x_{i,j}^1 \in D' $. Then the vertex $ x_{i,j}^2 $ must be dominated by a vertex from $ D' $, which is a neighbor of either $ v_i $ or $ v_j $. Without loss of generality, assume that $ x_{i,j}^2 $ is dominated by a neighbor of $ v_i $. Let $ S = V(G'')\setminus (N_{G''}(v_j) \setminus X_{ij}) $ and consider the induced subgraph $ G''[S] $. Let $ H $ be the component of the resulting graph that contains both $ v_i $ and $ v_j $.
Recall that $D' \cap V(G'')$ dominates all vertices in $G''$. By construction, $N_{G''}[v_p] \subseteq V(H)$ is true for every vertex $v_p\neq v_j$ from $ D' \cap V(H)$ and
$$N_{G''}[x_{p,q}^s] \subseteq N_{G''}[v_p] \cup N_{G''}[v_q] \subseteq V(H)$$
holds for every $x_{p,q}^s \in X \cap V(H)$ if $p \neq j \neq q$. The set $D' \cap V(H)$ therefore dominates all vertices from $V(H) \setminus N_H[v_j]$. As $N_H[v_j]=\{v_j, x_{i,j}^1, x_{i,j}^2 \}$, it can be readily seen that $D'\cap V(H)$ is a dominating set in $H$.
We repeat this procedure of deleting non-pendant edges in the underlying graph sequentially. At the end we obtain a graph $ H_r $ with an underlying graph $ F_r $ such that $F_r$ is isomorphic to a star graph $ K_{1,m} $. Then the set $ D_r = V(H_r) \cap D' $ is a dominating set of $ H_r $ and it contains exactly one vertex from each pair $ X_{i,j} $ of subdivision vertices.
We will construct a directed graph $ R $ as follows. We create a vertex $ x_{i,j} $ corresponding to each pair $ X_{i,j} \subset V(H_r) $ of subdivision vertices. Then, we add a directed edge from $ x_{i,j} $ to $ x_{k,\ell} $ in $R $, if the vertex in $ X_{i,j}\setminus D_r $ is dominated by the vertex in $ X_{k,\ell}\cap D_r $. As $ D_r $ has exactly one vertex from each pair $ X_{i,j} $, the outdegree of each vertex $ x_{i,j} \in V(R) $ is at least one. Thus, there is a directed cycle of some order $ t \ge 2 $, which corresponds to a subgraph isomorphic to $ A_t^W $ in $ H_r^{D \cap V(H_r)} \subseteq G^D $. This contradicts our assumption and finishes the proof of the theorem.
\end{proof}
\section{Algorithmic complexity} \label{sec:3}
Since there are infinitely many forbidden subgraphs, Theorem \ref{thm:2} does not give directly a polynomial time recognition algorithm for $(\gamma,\gamma_2)$-graphs on ${\cal H}$. However, based on this characterization, we can design a polynomial time algorithm to check whether $ \gamma(G) = \gamma_2(G) $ holds for a general instance $ G^D \in \mathcal{H} $.
\begin{thm}
\label{thm:complexity}
Let $ G^D \in {\cal H}$ be given. It can be decided in polynomial time whether the graph $ G^D $ satisfies the equality $ \gamma(G) = \gamma_2(G) $.
\end{thm}
\begin{proof}
By Theorem \ref{thm:2}, $\gamma(G)=\gamma_2(G)$ holds if and only if $G^D$ contains no subgraph isomorphic to $B^W$ and no subgraph isomorphic to $A_k^{W}$ for any $k\ge 2$.
\noindent \hrulefill\\
\noindent \textbf{Algorithm}
\noindent \hrulefill\\
\indent \textit{Input:} A graph $ G^D \in {\cal H}$ \\
\indent \textit{Output:} If $ \gamma(G) = \gamma_2(G) $, then true; else false. \\
\indent \hspace{1cm} for each supplementary edge $ uv $ in $ G $ \\
\indent \hspace{2cm}if $D \cap (N_G(u) \cap N_G(v)) = \emptyset $, then return false\\
\indent \hspace{1cm} for each vertex $ x $ in $ D $ \\
\indent \hspace{2cm} $ X \leftarrow N_G(x)$ and $ G'\leftarrow G[X] $\\
\indent \hspace{2cm} $ k= (\deg_{G}x) /2$ \\
\indent \hspace{2cm} for $ i \leftarrow 1 $ to $ k $ do \\
\indent \hspace{3cm} $ E \leftarrow E(G') $\\
\indent \hspace{3cm} for $ j \leftarrow 1 $ to $ k $ do \\
\indent \hspace{4cm} if $ j\ne i $, then $ E \leftarrow E \cup \{x_j^1 x_j^2\}$ \\
\indent \hspace{3cm} $ \mu $ $ \leftarrow $ the size of a maximum matching in $ E $\\
\indent \hspace{3cm} if $ \mu = k $, then return false\\
\indent \hspace{2cm}end-for\\
\indent \hspace{1cm} end-for\\
\indent \hspace{1cm} return true\\
\indent end.
\noindent \hrulefill
The algorithm above first determines whether $B^W \subseteq G^D $. If it holds, then the algorithm halts. It can be readily checked that this part of the algorithm requires polynomial time.
\noindent Then, in the next steps of the algorithm, the existence of subgraphs isomorphic to $A_\ell^{W}$ is tested. In order to find such a subgraph (if it exists), the algorithm searches for an appropriate matching in $G[N_G(v_i)]$ for every vertex $ v_i$ from $ D $. Since a subgraph $A_\ell^W$ does not necessarily contain all the neighbors of $ v_i $, it is not enough to check the existence of a perfect matching in $ G[N_G(v_i)] $. Instead, we define the edge set $E_i= \{x_{i,j}^1x_{i,j}^2: v_j \in N_F(v_i)\}$. Let $ G_i^* $ be the graph $ G[N_G(v_i)] $ extended by the edges from $ E_i $. Clearly, $G_i^* $ contains a perfect matching which is $ E_i $. On the other hand, $ G_i^* $ contains a perfect matching different from $ E_i $ if and only if $ G[N_G(v_i)] $ has a subgraph isomorphic to $ A_\ell^W $. Hence, the algorithm checks all possible $G_i^*-e$ graphs, where $e\in E_i$, and if any of them has a perfect matching, then there exists a subgraph isomorphic to $ A_\ell^W $.
In order to find a maximum matching in $G_i^*-e$, we can use Edmonds' Blossom Algorithm \cite{Edmonds1965}, which was improved by Micali and Vazirani in \cite{Micali1980} to run in time $ O(\sqrt{n}m) $ for any graph of order $n $ and size $ m $. The procedure will be repeated $ (\deg_G(x) /2) = \deg_F(x)$ times for every vertex $ x\in D $, that is, $ \sum_{v\in V(F)}\deg(v) = 2|E(F)| $, in total. Thus, the second part of the algorithm requires polynomial time. This finishes the proof.
\end{proof}
\noindent We now show that the same problem is NP-hard even on the graph class $ \mathcal{G}^D_1 $.
\begin{thm} \label{thm:4}
Consider every graph $ G \in \mathcal{G}_1 $ together with a specified set $ D $ such that $ G^2[D] \cong F $ and $ G \in \mathcal{G}_1(F) $. Then, it is NP-complete to decide whether the inequality $ \gamma(G) < \gamma_2(G)$ holds for a general instance $ G \in \mathcal{G}_1 $.
\end{thm}
\begin{proof}
By Lemma \ref{lem:d2dom}, we have $ \gamma_2(G) = |D| $ and it can be checked in polynomial time whether a given set $ D' $ with $ |D'| < |D| $ is a dominating set of $ G $. Thus, the decision problem belongs to NP.
In order to prove the NP-hardness, we present a polynomial-time reduction from the well-known $ 3 $-SAT problem, which is proved to be NP-complete \cite{Garey1979}.
Let $ X = \{x_1, x_2, \dots, x_k\}$ be a set of Boolean variables. A truth assignment for $ X $ is a function $ \varphi:X \rightarrow \{t,f\} $. If $ \varphi(x_i)=t $ holds, then the variable $ x_i $ is called $ true $; else if $ \varphi(x_i)=f$ holds, then $ x_i $ is called $ false $. If $ x_i $ is a variable in $ X $, then $ x_i $ and $ \neg{x_i} $ are literals over $ X $. The literal $ x_i $ is true under $ \varphi $ if and only if the variable $ x_i $ is true under $ \varphi $; the literal $ \neg x_i $ is true if and only if the variable $ x_i $ is false. A clause over $ X $ is a set of three literals over $ X $; it represents the disjunction of those literals and is satisfied by a truth assignment if and only if at least one of its members is true under that assignment. A collection $ \mathcal{C} $ of clauses over $ X $ is \textit{satisfiable} if and only if there exists some truth assignment for $ X $ that satisfies all the clauses in $ \mathcal{C} $. Such a truth assignment is called a \textit{satisfying truth assignment} for $ \mathcal{C} $. The $ 3 $-SAT problem is specified as follows.
\noindent \textbf{3-SATISFIABILITY (3-SAT) PROBLEM}
\noindent\textbf{\textit{Instance:}} A collection $ \mathcal{C} = \{C_1,C_2, \dots ,C_\ell\} $ of clauses over a finite set $ X $ of variables such that $ |C_j| = 3 $ for $1 \le j \le \ell $.
\noindent\textbf{\textit{Question:}} Is there a truth assignment for $ X $ that satisfies all the clauses in $ \mathcal{C} $?
Let $ \mathcal{C} $ be a $ 3 $-SAT instance with clauses $ C_1,C_2,\dots, C_\ell $ over the Boolean variables $ X=\{x_1,x_2, \dots, x_k\} $. We may assume that for every three variables $ x_{i_1}, x_{i_2}, x_{i_3} $ there exists a clause $ C_j $, where $ j\in [\ell] $, such that $ C_j $ does not contain any of the variables $ x_{i_1}, x_{i_2}, x_{i_3} $ (neither in positive form, nor in negative form). Otherwise, the problem could be reduced to at most eight (separated) $ 2$-SAT problems, which are solvable in polynomial time.
We now construct a graph $ G \in \mathcal{G}_1(F) $, where $ F \cong S_{k+1} $, such that the given instance $ \mathcal{C} $ of $ 3 $-SAT problem is satisfiable if and only if $ \gamma(G) < \gamma_2(G)$.
The construction is as follows.
For every variable $ x_i $, we create three vertices $ \{x_i^t, x_i^f, v_i\} $ and then we add the edges $ x_i^tv_i $ and $ x_i^fv_i $. For every clause $ C_j \in \mathcal{C}$, we create a vertex $ c_j $, and if $ x_i $ is a literal in $ C_j $, then $ x_i^tc_j \in E(G) $; if $ \neg x_i $ is a literal in $ C_j $, then $ x_i^fc_j \in E(G) $. Moreover, we add a vertex $ c^* $ and the edges $ c^*x_i^t $ and $ c^*x_i^f $ for every $ i\in [k] $. We also add a vertex $ v_{k+1} $ and the edge set $ \{c_iv_{k+1}:1\le i\le\ell\} \cup \{c^*v_{k+1}\}$. Finally, we add a new vertex $ v_0 $, which is adjacent to every vertex in $ V(G)\setminus \{v_1,v_2,\dots,v_{k+1}\} $ (for an illustration of the construction see Fig.~\ref{fig:construction}). The order of $ G $ is obviously $ 3k+\ell +3 $ and this construction can be done in polynomial time. Note that $ G \in \mathcal{G}_1(F) $, where $ F $ is a star with center $ v_0 $ and leaves $ v_1,\dots, v_{k+1} $. Thus, we have $ \gamma_2(G)=k+2 $, by Lemma \ref{lem:d2dom}.
\begin{figure}
\caption{\protect An illustration of the construction for $ 3 $-SAT reduction: The clauses $ C_1 $ and $ C_\ell $ corresponding to the vertices $ c_1 $ and $ c_\ell $, resp., are $ C_1=(x_1 \vee \neg x_3 \vee \neg x_k) $ and $ C_\ell=(x_1 \vee \neg x_2 \vee x_k) $.}
\label{fig:construction}
\end{figure}
We now prove that $ \mathcal{C} $ is satisfiable if and only if $ \gamma(G) < \gamma_2(G) $. First, consider a truth assignment $ \varphi : x_i \rightarrow \{t,f\} $ which satisfies $ \mathcal{C} $. Let $D_1=\bigcup_{i\in [k] }\{x_i^t :\varphi(x_i)=t\} $ and let $D_2 = \bigcup_{i\in [k] }\{x_i^f : \varphi(x_i)=f\}$. Consider the set $ D' = D_1 \cup D_2 \cup \{c^*\} $. It can be readily checked that $ D' $ is a dominating set of cardinality $ k+1 $. Hence, $ \gamma(G)< \gamma_2(G)$ follows.
Conversely, assume that $ \gamma(G) < \gamma_2(G) $ and consider a minimum dominating set $ D' $ of cardinality at most $ k+1 $. In order to dominate $ v_i $, the set $ D' $ contains at least one vertex from the set $ \{x_i^t, x_i^f, v_i\} $, for each $ i\in [k] $. Similarly, to dominate $ v_{k+1} $, the set $ D' $ contains at least one vertex from the set $ \{c_1,c_2,\dots,c_\ell,c^*,v_{k+1}\}$. Since $ |D'|\le k+1 $, we have $|D' \cap \{x_i^t, x_i^f, v_i\}| = 1 $ for every $ i\in [k] $. Moreover, $ |D'\cap \{c_1,c_2,\dots,c_\ell,c^*,v_{k+1}\}| = 1$ and $ v_0 \notin D' $.
Suppose that $ v_{k+1} \in D'$. In order to dominate the vertices $ x_i^t $ and $ x_i^f $, the set $ D' $ contains the vertex $ v_i $ for all $ i \in [k] $. Hence, $ N_G(v_0) \cap D' = \emptyset $. From the discussion above, we know that $ v_0 \notin D' $. Thus, $ v_0 $ is not dominated by a vertex from $ D' $, a contradiction.
Suppose that $ c_j \in D' $ for some $ j \in [\ell] $. Let $ C_j $ be the corresponding clause containing the variables $x_{i_1},x_{i_2}, x_{i_3} $. Consider any variable $ x_s \in X \setminus \{x_{i_1},x_{i_2}, x_{i_3}\} $. Since $|D' \cap \{x_i^t, x_i^f, v_i\}| = 1 $ for each $ i\in [k] $, $ D' $ contains $ v_s $ in order to dominate both of the vertices $ x_s^t $ and $ x_s^f $. By our assumption, there exists a clause $C_q $ that does not contain any of the variables $x_{i_1},x_{i_2}, x_{i_3} $, neither in positive nor in negative form. Thus, $ c_q $ is not dominated by a vertex from $ D' $, a contradiction.
Since $ |D'\cap \{c_1,c_2,\dots,c_\ell,c^*\}| = 1$, the only remaining case is $ c^*\in D' $. Under this assumption, every vertex $ c_i $ must be dominated by the vertices corresponding to the literals in $ C_i$. Thus, the truth assignment
\[ \varphi(x_i) = \begin{cases}
t, & \text{if }x_i^t \in D' \\
f, & \text{if }x_i^f \in D' \text { or if }v_i \in D' \\
\end{cases}
\]
satisfies $ \mathcal{C} $. This finishes the proof.
\end{proof}
Theorem~\ref{thm:4} implies that it is coNP-complete to decide whether the equality $ \gamma(G) = \gamma_2(G)$ holds for a general instance $ G $ from $ \mathcal{G}_1$. On the other hand, we cannot prove that the problem belongs to NP. Instead, we will consider the complexity class $\Theta^p_2$, which consists of those problems solvable by a polynomial-time deterministic algorithm using NP-oracle asked for only $O(\log n)$ times. (For a detailed introduction, please, see \cite{Marx2006}.)
\begin{prop}
The complexity of deciding whether $\gamma(G)=\gamma_2(G)$ holds for a general instance $G$ is in the class $\Theta^p_2$.
\end{prop}
\begin{proof}
Using binary search, the parameters $\gamma(G)$ and $\gamma_2(G)$ can be determined by asking the NP-oracle $O(\log n)$ times whether the inequalities $\gamma(G) \le k$ and $\gamma_2(G) \le k$ hold. Thus, the decision problem belongs to $\Theta^p_2$.
\end{proof}
Note that in \cite{Arumugam2013}, a similar statement was proved for the problem of deciding whether the transversal number $\tau({\cal H})$ equals the domination number $\gamma({\cal H})$ for a general instance hypergraph ${\cal H}$.
\section{Characterization of $(\gamma, \gamma_2)$-perfect graphs} \label{sec:4}
Recently, Alvarado, Dantas, Rautenbach \cite{Alvarado2015-2, Alvarado2015} and Henning, J\"ager, Rautenbach \cite{Henning2018} studied graphs for which the equality between two fixed domination-type invariants hereditarily holds. The analogous problem for transversal and domination numbers of graphs and hypergraphs was considered in \cite{Arumugam2013}.
In this section, we characterize $(\gamma, \gamma_2)$-perfect graphs, that is, we characterize the graphs for which the equality between the domination and the $ 2 $-domination numbers hereditarily holds. By Lemma \ref{lem:0}, $ \delta(G) \geq 2$ is a necessary condition for $ \gamma(G)=\gamma_2(G) $. Hence, we define $(\gamma, \gamma_2)$-perfect graphs as follows.
\begin{definition}
Let $ G $ be a graph with $ \delta(G) \geq 2 $. Then $ G $ is a $(\gamma, \gamma_2)$-perfect graph if the equality $ \gamma(H)=\gamma_2(H) $ holds for every induced subgraph $ H $ of minimum degree at least two.
\end{definition}
Note that a disconnected graph $ G $ is $(\gamma, \gamma_2)$-perfect if and only if all of its components are $(\gamma, \gamma_2)$-perfect.
In order to formulate the results of this section we will define the following class.
\begin{definition}
Let $ S_{k} $ be the star with center vertex $ v $ and end vertices $ \{v_1,v_2,\dots,\allowbreak v_k\} $ such that $ k\geq 1 $. Denote the edge $ vv_j\in E(S_{k})$ by $ e_j $ for $ j\in [k] $. Let $ S(i_1,i_2,\dots,i_k) $ be the graph obtained by substituting each edge $ e_j $ of $ S_{k} $ by $ i_j $ parallel edges $ e_j^1,e_j^2,\dots e_j^{i_j} $, where $ i_j \geq 2 $, and then subdividing each edge $ e_j^r $ by adding the vertex $ x_j^r $ for all $ r \in [i_j] $ and all $ j\in [k] $. A graph $ G $ belongs to the class $ {\cal S} $ if it is isomorphic to $ S(i_1,i_2,\dots,i_k) $ for some $ k\geq 1 $, where $ i_j\geq 2 $ for all $ j\in [k] $.
\end{definition}
We clearly have $ {\cal S} \subseteq {\cal G}_1$, since any $S(i_1,i_2,\dots,i_k) \in {\cal G}_1(F) $, where $ F \cong S_{k} $. On the other hand, if $G' \in {\cal G}(S_k)$, the underlying graph does not contain a clique of order larger than two and consequently, $|N(y)\cap D|=2$ for every supplementary vertex $y$. This implies that $G'\in {\cal G}_1(S_k)$. By the definitions above, we have the following equivalence.
\begin{prop}
\label{prop:perfect}
For any graph, $G\in {\cal S}$ holds if and only if $G\in {\cal G}_1(S_k)$ $ ( $or, equivalently, $G\in {\cal G}(S_k))$ for a non-trivial star $S_k$ and $G$ does not contain a supplementary edge.
\end{prop}
The main result of this section is a characterization theorem for $(\gamma,\gamma_2)$-perfect graphs.
\begin{thm}
\label{thm:perfect}
$ G $ is a connected $ (\gamma,\gamma_2)$-perfect graph if and only if $ G \in {\cal S}. $
\end{thm}
\begin{proof}
We first prove that if $ G \in {\cal S} $, then it is a $ (\gamma,\gamma_2) $-perfect graph.
By Proposition \ref{prop:perfect}, we know that $ G \in {\cal G}_1(F) $, where $ F \cong S_{k} $ for $ k\geq 1 $. Then, by Lemma \ref{lem:d2dom}, $ \gamma_2(G) = |V(F)|= k+1$. Since a minimal $ 2 $-dominating set is a dominating set, we have the inequality $ \gamma(G) \leq k+1 $. In order to prove that $\gamma(G)=\gamma_2(G)$, it is enough to show that $ \gamma(G) > k $. Suppose, to the contrary, that $ D' $ is a minimum dominating set of $ G $ such that $ |D'|\leq k $.
Consider the vertices of $ G $ corresponding to the end vertices of the star $ S_{k} $. Let $ \{v_1,v_2,\dots, v_k\} = V(F)\setminus \{v\} \subseteq V(G)$, where $ v $ is the center of $ F\cong S_{k} $. Since $D'$ is a dominating set, $ |N_G[v_j]\cap D'| \geq 1 $ for each $ j \in [k] $. Note that the closed neighborhoods of any two vertices from the set $ \{v_1,v_2,\dots,v_k\} $ are disjoint. Since $ |D'|\leq k $ by our assumption, we have $v\notin D'$ and $ |N_G[v_j] \cap D'|=1 $, for every $ j\in[k] $. Moreover, as the center $v$ must also be dominated, there exists some $ j \in [k] $ and $ r\in [i_j] $ such that $ x_j^r \in D' $. Then, $ v_j \notin D' $ and the vertices in $ (X_j \cup Y_j)\setminus \{x_j^r\} $ are not dominated by $ D' $, which is a contradiction. Consequently, $ k $ vertices are not enough to dominate all the vertices of $ G $, that is, $ \gamma(G) \geq k+1 $. It follows that $ \gamma(G) = \gamma_2(G) $ for any $ G \in {\cal S} $.
Next, suppose that $ H $ is an induced subgraph of $ G $ with minimum degree at least two. If $H$ does not contain any subdivision vertices, we have $ \delta(H)=0 $, a contradiction. Thus, $ H $ contains a subdivision vertex. Let $ x_p^q \in V(H)$ for some $ p\in [k] $ and $ q\in [i_p] $. Since $ \deg_G(x_p^q)=2 $, both of the neighbors of $ x_p^q $ must be in $ V(H) $, i.e., $ N_G(x_p^q) = \{v,v_p\}\subseteq V(H) $. Since $ \delta(H) \geq 2 $ by the assumption, using an argument similar to the above, we have $ \deg_H(v_p)\geq 2 $. Thus, $ |((X_p \cup Y_p)\setminus \{x_p^q\})\cap V(H)| \geq 1 $. Consequently, $ H \in {\cal S} $ and, as it was proved above, $ \gamma(H) = \gamma_2(H) $ holds for every induced subgraph of $ G $ with minimum degree at least two.
To prove the converse, assume that $ G $ is a connected $ (\gamma, \gamma_2) $-perfect graph. Note that $ \gamma(C_n) = \ceil{\frac{n}{3}} \ $ and $ \gamma_2(C_n) = \ceil{\frac{n}{2}}$, where $ n\ge 3 $. Thus, the $(\gamma,\gamma_2)$-perfect graph $ G $ does not contain an induced cycle $ C_n $, where $ n =3 $ or $ n\geq 5 $.
\begin{figure}
\caption{The graphs $ H_1 $, $ H_2 $ and $ H_3 $}
\label{fig:induced3Graphs}
\end{figure}
Now, suppose that $ G $ has a non-induced subgraph isomorphic to $ C_r $, for some $ r\geq 5 $. Since all of its induced cycles are $ 4 $-cycles, $ G $ contains at least one of the three graphs $ H_1, H_2 $ and $ H_3 $, shown in Figure \ref{fig:induced3Graphs}, as an induced subgraph. Observe that $ \gamma(H_i) < \gamma_2(H_i) $ for all $ i \in \{1,2,3\} $. This contradicts our assumption that $ G $ is a $ (\gamma, \gamma_2)$-perfect graph. Thus, $ G $ does not contain a cycle $ C_r $, where $ r\neq 4 $.
Since $ G $ is $ (\gamma, \gamma_2)$-perfect by the assumption, then the equality $ \gamma(G)=\gamma_2(G) $ holds. By Proposition \ref{prop:1}, we know that $ G\in {\cal G} $. Thus, if $ D $ is a minimum $ 2 $-dominating set of $ G $, then $ D $ is independent and $ F=G^2[D] $ is the underlying graph of $ G $.
First, note that $ F $ does not contain a cycle $ C_r $ for $ r\geq 3 $. Otherwise, $ G $ would contain a subgraph isomorphic to $ C_{2r} $, which is a contradiction. Thus, $ F $ is a forest and $G\in {\cal G}_1(F)$. Then suppose that $ F $ is not connected. Since $ G $ is connected, there is a supplementary edge $ e = uv $, where $ u $ and $ v $ are two subdivision vertices of $ G $ such that $N(u)\cap V(F)$ and $N(v)\cap V(F)$ are in different components of $ F $. By the definition of the graph class $ {\cal G}_1$, there are two vertices $ u' $ and $ v' $ such that $N_G(u) \cap V(F) = N_G(u') \cap V(F) $ and $N_G(v) \cap V(F) = N_G(v') \cap V(F) $. Let $ \{x_1,x_2\} = N_G(u) \cap V(F) $ and $ \{x_3,x_4\} = N_G(v) \cap V(F) $, where the sets $ \{x_1,x_2\} $ and $ \{x_3,x_4\} $ are contained by different components of $ F $. Consider the set $ A = \{x_1,x_2,x_3,x_4,u,v,u',v'\} $ and the induced subgraph $ G[A] $. It is easy to check that $\delta(G[A]) \ge 2$, $\gamma(G[A])\le 3$ and $\gamma_2(G[A])=4 $, which is a contradiction. Thus, $ F $ is a tree.
Suppose that $ G $ has a supplementary edge $ e=uv \in E(G) $, where $ u,v \in V(G)\setminus V(F) $. Let $ N_G(u) \cap V(F) = \{x_1,x_2\} $ and $ N_G(v) \cap V(F) = \{x_3,x_4\} $. Note that $|\{ x_1,x_2\} \cap \{x_3,x_4\}|\leq 1 $, otherwise $ G $ would contain a subgraph isomorphic to $ C_3 $. By Lemma~\ref{lem:2nghbrs}, there exist two further vertices $u'$ and $v'$ satisfying $ N_G(u') \cap V(F) = \{x_1,x_2\} $ and $ N_G(v') \cap V(F) = \{x_3,x_4\} $. If $|\{ x_1,x_2\} \cap \{x_3,x_4\}|= 1 $, then without loss of generality, assume that $ x_2 = x_3 $. Then, there is a subgraph of $ G $ isomorphic to $ C_3 $ induced by the vertices $ u$, $v$ and $x_2 $, a contradiction. If $\{ x_1,x_2\} \cap \{x_3,x_4\} = \emptyset$, then let $ S= \{x_1,x_2,x_3,x_4,u,v,u',v'\}$. A similar argument applied to the subgraph of $ G $ induced by the vertex set $ S $ yields the inequality $ \gamma(G[S])\le 3 < \gamma_2(G[S])=4$. Thus, $ G $ does not have any supplementary edges.
\begin{figure}
\caption{The graph $ H_4 $}
\label{fig:H4}
\end{figure}
Suppose that $ F $ contains a subgraph isomorphic to $ P_4 $. Since $ G $ does not have a supplementary edge, it contains an induced subgraph isomorphic to $ H_4 $ given in Figure \ref{fig:H4}. Note that $ \delta(H_4) \geq 2 $ and $ 3=\gamma(H_4) < \gamma_2(H_4) = 4 $, which contradicts the assumption that $ G $ is $ (\gamma, \gamma_2)$-perfect. Thus, $ F $ is a star, $G\in {\cal G}_1(F)$, and $G$ does not contain supplementary edges. This finishes the proof by Proposition~\ref{prop:perfect}.
\end{proof}
The graph obtained from an edge by attaching two pendant edges to both of its ends will be called $ T_6 $ (for illustration see Fig.~\ref{fig:T}).
\begin{figure}
\caption{The graph $ T_6 $}
\label{fig:T}
\end{figure}
\begin{prop}
\label{prop:S}
$ G \in {\cal S} $ if and only if $ G $ is a connected graph with $ \delta(G)\geq 2 $ and it contains no subgraph isomorphic to any of $T_6,P_8$, or $C_k $ where $ k\neq 4 $.
\end{prop}
\begin{proof}
If $ G \in {\cal S} $, then it is easy to see that $ G $ is a connected graph with $ \delta(G)\geq 2 $ and it does not contain a subgraph isomorphic to $T_6,P_8$, or $C_k $ where $ k\neq 4 $.
Now, assume that $ G $ is a connected graph of minimum degree at least two which does not contain a subgraph isomorphic to $T_6,P_8$, or $C_k $ where $ k\neq 4 $. Note that $G$ is bipartite. We further have $\min\{\deg_G(u),\deg_G(v)\}=2$ for each edge $uv \in E(G)$, since $\delta(G) \ge 2$ and $G$ does not contain a subgraph isomorphic to $T_6$ or $C_3$.
First, suppose that $ G $ contains an edge $ e=uv \in E(G) $, which is a bridge. Then $ G-e $ has two components, say $ G_1 $ and $ G_2$. Since $ \delta(G)\geq 2 $, both $G_1$ and $G_2$ are non-trivial graphs and may contain at most one vertex, namely either $u$ or $v$, which is of degree 1. Thus, both of the components contain a cycle. These cycles must be vertex-disjoint $ 4 $-cycles with a path between them. Hence, $ G $ contains a subgraph isomorphic to $ P_8 $ and this contradicts our assumption.
Since $ G $ does not contain a bridge, every edge of $ G $ lies on a $ 4 $-cycle. If all the vertices of $ G $ have degree two, then $ G $ is isomorphic to $ C_4 $ and $ G \in {\cal S} $. If $ G $ is not isomorphic to $ C_4 $, then every $ 4 $-cycle contains a vertex of degree at least three. For a vertex $ v $ of degree two, we define the function $ f(v) $ to denote the vertex opposite to $ v $ in a $4 $-cycle. Let $ A = \{v \in V(G): \deg(v) \geq 3$ or $\deg(f(v))\geq 3\} $.
Consider two vertices $ u,v \in A $. If $ uv \in E(G) $, then $ uv $ belongs to a $ 4 $-cycle, say $ uvv'u' $. At least one of $ u $ and $ v $ is of degree two, without loss of generality, say $ \deg(u)=2 $. Thus, $ u $ belongs only to this $ 4 $-cycle. Since $ f(u) = v' $, by the definition of $ A $, $ \deg(v')\geq 3 $. If $ \deg(v)\geq 3 $, then $ vv' \in E(G) $, we have a contradiction. If $ \deg(v)= 2 $, then $ v\in A $ and $ v $ belongs only to the $ 4 $-cycle $ uvv'u' $. Thus, $ f(v)=u' $, $ \deg(u')\geq 3 $ and $ u'v' \in E(G) $, which is a contradiction. Hence, $ A $ is independent.
Consider two vertices $ u,v \in V(G) \setminus A $. If $ uv \in E(G) $, then at least one of $ f(u) $ or $ f(v) $ is of degree at least three. Then, by the definition of the function $ f $, we have $ u \in A $ or $ v \in A $, which is a contradiction. Hence, $ V(G) \setminus A $ is independent.
Consequently, $ (A, V(G)\setminus A) $ is a bipartition of $ V(G) $. Note that every $ 4 $-cycle has exactly two vertices in $ A $. Hence, $G^A \in {\cal G}_1(F)$ where $F\cong G^2[A]$, and there are no supplementary edges. Since $G$ does not have a subgraph isomorphic to $C_n$ for $n\ge 6$, the underlying graph is a tree. If $ F $ contains a subgraph isomorphic to $ P_4 $, then $ G $ contains a subgraph isomorphic to $ P_8 $, which is a contradiction. Thus, $ F $ is a star, and Proposition~\ref{prop:perfect} implies that $G\in {\cal S}$.
\end{proof}
Thus, Proposition~\ref{prop:S} allows us to state Theorem~\ref{thm:perfect} in a different form as follows.
\begin{thm}
Let $ G $ be a connected graph with $ \delta(G)\geq 2 $. Then $ G $ is a $ (\gamma,\gamma_2 )$-perfect graph if and only if $ G $ contains no subgraph isomorphic to any of $T_6,P_8$, or $C_k $ where $ k\neq 4 $.
\end{thm}
Note that for any $G\in {\cal S}$, the center of the underlying star can be chosen as a vertex $v$ of degree $\Delta(G)$ and then, the subdivision vertices are exactly those contained in $N_G(v)$. Therefore, the characterization given in Theorem~\ref{thm:perfect} directly yields a polynomial-time algorithm which recognizes $(\gamma, \gamma_2)$-perfect graphs.
\section{Concluding remarks and open problems} \label{sec:5}
In Section 1, we defined the graph class ${\cal G}$ which contains all $(\gamma, \gamma_2)$-graphs. Then, in Section 2, we gave a characterization for $(\gamma, \gamma_2)$-graphs over a specified subclass ${\cal H}$ of ${\cal G}$. In the definition of ${\cal H}$ and in the proof of the main theorem, we referred to the properties of the
\begin{figure}
\caption{$ G^* $ is a graph with $ \gamma (G^*)= \gamma_2 (G^*) = 6$, which has two non-isomorphic underlying graphs and $ G^* \in {\cal H} $}
\label{fig:nonIso}
\end{figure}
underlying graph. We noted there that the underlying graph is not always unique when a graph $G$ from ${\cal G}$ is given. In Figure \ref{fig:nonIso}, we show a $ (\gamma,\gamma_2) $-graph having two non-isomorphic underlying graphs. Analogously, one can construct infinitely many graphs with the same property.
In the definition of the class ${\cal H}$, we forbid $ 3 $-cycles and $ 4 $-cycles in the underlying graph. The characterization given in Theorem~\ref{thm:2} does not hold if $ 3 $-cycles are not forbidden in the underlying graph. This is shown by the graph $A_4^* \in {\cal G}_2(F)$ (see Figure~\ref{fig:A4_star}), where the underlying graph $F$ is a star supplemented by an edge. One can readily check that even if $A_4^*$ contains an induced $A_4^W$ subgraph, it remains a $(\gamma, \gamma_2)$-graph as $\gamma(A_4^*)=\gamma_2(A_4^*)=5$. Similarly, it is possible to construct graphs whose underlying graphs are $C_3$-free but not $C_4$-free such that the statement of Theorem~\ref{thm:2} does not remain valid for them. Therefore, the following problems are still open.
\begin{figure}
\caption{The graph $ A_4^* $ }
\label{fig:A4_star}
\end{figure}
\begin{prob}
Characterize $(\gamma, \gamma_2)$-graphs over the following graph classes:
\begin{enumerate}
\item Over the subclass of ${\cal G}_2$ where the underlying graph does not contain any $C_4$ subgraphs;
\item Over the subclass of ${\cal G}_2$ where the underlying graph is $C_3$-free;
\item Over ${\cal G}_2$.
\end{enumerate}
\end{prob}
\noindent \textbf{Acknowledgment.} Research of Csilla Bujt\'as was partially supported by the Slovenian Research Agency under the project N1-0108.
\end{document}
\end{document}
|
\begin{document}
\title{Extending Landau's Theorem on Dirichlet Series with Non-Negative Coefficients}
\begin{abstract}
A classical theorem of Landau states that, if an ordinary Dirichlet series has non-negative coefficients, then it has a singularity on the real line at its abscissa of absolute convergence. In this article, we relax the condition on the coefficients while still arriving at the same conclusion. Specifically, we write $a_n$ as $|a_n| e^{i \theta _n}$ and we consider the sequences $\{ \; |a_n| \; \}$ and $\{ \; \cos{ \theta _n} \; \}$. Let $M \in \mathbb{N}$ be given. The condition on $\{ \; |a_n| \; \}$ is that, dividing the sequence sequentially into vectors of length $M$, each vector lies in a certain convex cone $B \subset [0,\infty)^M$. The condition on $\{ \; \cos{ \theta _n} \; \}$ is (roughly) that, again dividing the sequence sequentially into vectors of length $M$, each vector lies in the negative of the polar cone of $B$. We attempt to quantify the additional freedom allowed in choosing the $ \theta _n$, compared to Landau's theorem. We also obtain sharpness results.
\end{abstract}
\section{Introduction}
A (ordinary) Dirichlet series is a function of the following form, with $a_n \in \mathbb{C}$:
\begin{equation}\label{DirSer}
f(s) = \sum_{n=1}^{\infty} a_n n^{-s} \qquad s \in \mathbb{C}
\end{equation}
For $s = \sigma+it \in \mathbb{C}$, we denote the real part of $s$ by $\Re{s}$. The standard region on which a Dirichlet series might be expected to converge is a right half plane, we denote these by
$$
\Omega_{\sigma} = \{ s \in \mathbb{C} : \Re{s} > \sigma \}
$$
and its closure will be written $\overline{\Omega}_{\sigma}$. Unlike a power series, a Dirichlet series can converge in an open region without converging absolutely anywhere in that region, for example. A Dirichlet series has several different ``regions of convergence'' $\Omega_{\sigma}$, with several different abscissae $\sigma$ accordingly. The abscissae most often considered are:
\begin{align}
\sigma_a &= \inf \{ \sigma : \sum a_n n^{-s} \text{ converges absolutely for } s \in \Omega_\sigma \} \nonumber \\
\sigma_u &= \inf \{ \sigma : \sum a_n n^{-s} \text{ converges uniformly on } \Omega_{\sigma} \} \nonumber \\
\sigma_b &= \inf \{ \sigma : \sum a_n n^{-s} \text{ converges to a bounded function on } \Omega_\sigma \} \nonumber \\
\sigma_c &= \inf \{ \sigma : \sum a_n n^{-s} \text{ converges for all } s \in \Omega_\sigma \} \nonumber
\end{align}
From the definitions, it is evident that $\sigma_c \le \sigma_b \le \sigma_u \le \sigma_a$. It is also a basic result that the function $f$ defined by (\ref{DirSer}) is holomorphic on the open region $\Omega_{\sigma_c}$. Further relations among these abscissae, the coefficients $\{a_n\}$, and the function $f$ are of considerable interest. Some of the standard results are the following:
\begin{itemize}
\item{ $\sigma_a - \sigma_c \le 1$ (a basic result), and this is sharp (e.g., the alternating zeta function $\sum (-1)^{n+1} n^{-s}$)}
\item{ $\sigma_u = \sigma_b$ (\cite{Bohr_1913_1}), henceforth we will denote this abscissa by $\sigma_b$ }
\item{ $\sigma_a - \sigma_b \le 1/2$ (\cite{Bohr_1913_2}), and this is sharp (\cite{Hille_Bohnenblust}) }
\end{itemize}
For other standard results in analytic number theory and Dirichlet series, we refer the interested reader to \cite{apostol}.
There has been recent interest in applying tools from modern analysis to Dirichlet series (see the survey of Hedenmalm \cite{hedenmalm_survey}). A short list (non-exhaustive in both topics and articles within those topics) includes the interpolation problem within Hilbert spaces of Dirichlet series (\cite{olsen_seip_interp}), the multiplier algebras of Hilbert spaces of Dirichlet series (\cite{hls}, \cite{mccarthy_03}), Carleson-type theorems for Dirichlet series (\cite{hedenmalm_saksman}, \cite{bayart_konyagin_queffelec_04}), and composition operators on spaces of Dirichlet series (\cite{bayart_et_al_2008}).
We mention the above results for contrast, because our result will be ``classic'' in both statement and proof, and we will investigate Dirichlet series which (among other things) satisfy
\begin{equation}\label{E:sigma_c_sigma_a}
\sigma_a = \sigma_c
\end{equation}
Specifically, we are interested in extending the following theorem of Landau (we will find it convenient to translate and assume $\sigma_a = 0$ for all functions we consider):
\begin{theorem}[E. Landau \cite{Landau_Handbuch}]\label{thm_Landau}
Suppose that $f(s)= \sum a_n n^{-s}$ has abscissa of absolute convergence equal to $0$. If $a_n \in \mathbb{R}, a_n \ge 0$ for all $n$ then $f$ does not extend holomorphically to a neighborhood of $s = 0$.
\end{theorem}
Logically, the property that must account for the situation $\sigma_c < \sigma_a$ is cancellation among the coefficients $\{a_n\}$. Therefore, once we strictly limit cancellation among the $\{a_n\}$, (\ref{E:sigma_c_sigma_a}) should follow. A straightforward way to do this is to require $a_n \ge 0$, and the above theorem confirms this (note that the absence of a holomorphic extension about $s=0$ is stronger than (\ref{E:sigma_c_sigma_a}) ).
It is a natural question to ask whether we could impose less strict conditions on the $\{a_n\}$ and still arrive at the same conclusion. One would expect that our freedom in choosing the coefficients $\{a_n\}$ will be substantially limited, but can these limitations be quantified in some sense? Our purpose in this article is to explore these questions.
We wish to mention that there are many interesting conclusions which follow from the assumption ``$a_n \ge 0$,'' the above theorem being but one. We recall a few of them here. Define $A_N = \sum_{n=1}^N a_n$.
Suppose $a_n \ge 0$, $\sum a_n = \infty$, and $\sum a_n n^{-s}$ converges for $s \in \Omega_0$ (together, these imply $\sigma_a = 0$). For arbitrary $t_n \in \mathbb{C}$, consider $\sum a_n t_n n^{-s}$. To conclude that $\sum a_n t_n n^{-s}$ converges in $\Omega_0$, by a basic result applicable to any Dirichlet series it suffices to assume that $\sum a_n t_n$ converges. In \cite{Borwein_87}, this is improved in this specific situation; he proves it suffices to show that
$$
(1/A_N) \sum_{n=1}^N a_n t_n
$$
converges as $N \rightarrow \infty$.
Suppose $a_n \ge 0$ and $f(s) = \sum a_n n^{-s}$ has $\sigma_a=0$. This implies $f$ is log-convex on $(0,\infty)$ (this is due to the log-convexity of each term $n^{-s}$, see the discussion in \cite{Cerone_Dragomir_09} ). It is also proved in \cite{Kolyada_Leindler_98} that $\| f \|_{L^p(0,\infty)}$ can be estimated above and below by a weighted $l^p$ norm of (modified) dyadic blocks of the $\{a_n\}$, and that $\|f\|_{BMO(0,\infty)}$ can be estimated above and below by another ``dyadic block''-type quantity involving the $\{a_n\}$.
We will obtain an extension of the theorem of Landau, it is an interesting question whether there is perhaps a common thread among more than one of the results mentioned above that would extend the requirement ``$a_n \ge 0$.''
Let us write $a_n = |a_n| e^{i \theta _n}$. In section \ref{S:examine_proof} we examine the proof of Landau's theorem, and one notes that the proof can be extended in a straightforward way to obtain
\begin{theorem}[Landau's Theorem, First Extension]\label{thm_landau_firstextension}
Suppose that $f(s)= \sum a_n n^{-s}$ has abscissa of absolute convergence equal to $0$. If there exists $\gamma > 0 $ such that $\cos( \theta _n) \ge \gamma$ for all $n$ then $f$ does not extend holomorphically to a neighborhood of $s =0$.
\end{theorem}
We will develop conditions on the $\{a_n\}$ which are expressed as certain restrictions on the sequence $|a_n|$, and related restrictions on the sequence $\{\cos( \theta _n)\}$. We will see that as the restrictions on $|a_n|$ are relaxed, the restrictions on $\cos( \theta _n)$ become more strict. The above theorem falls on one end of this spectrum, with no requirements on $|a_n|$ and strict requirements on $\cos( \theta _n)$.
For $\rho \in (0,\infty)$, let us define $B^{\rho} \subset [0,\infty)^M$ by
\begin{equation}\label{Brho}
B^{\rho} = B^{\rho,M} = \left\{ \beta = (\beta_1 , \ldots, \beta_M) \in [0,\infty)^M \; : \beta_1 \le \rho \beta_2 \le \rho^2 \beta_3 \le \cdots \le \rho^{M-1} \beta_M \right\}
\end{equation}
Note that ``$B^0 = \{ (0,0, \ldots , 0, \mathbb{R}_{\ge0}) \}$'', and ``$B^{\infty} = [0,\infty)^M$''; as $\rho$ proceeds from $0 \rightarrow \infty$, $B^{\rho}$ grows to fill $[0,\infty)^M$.
The standard inner product in Euclidean space will be denoted $x \cdot y$. We denote the polar cone of a convex cone $C \subset \mathbb{R}^M$ by
$$
C^{\sharp} = \{ x \in \mathbb{R}^M : x \cdot c \le 0 \;\; \forall c \in C\}
$$
We obtain the following result:
\begin{theorem}\label{T}
Suppose that $f(s)= \sum a_n n^{-s}$ has abscissa of absolute convergence equal to $0$. Write $a_n = |a_n| e^{i \theta _n}$, and fix $M \in \mathbb{N}$. Suppose that there exists $ \rho > 0$ and $\gamma >0$ such that, for all $l = 0,1, \ldots$, we have
\begin{align}
( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) &\in B^{\rho} \label{E:eta_in_Brho} \\
( \cos( \theta _{Ml+1}), \cos( \theta _{Ml+2}), \ldots, \cos( \theta _{Ml+M}) \; ) &\in -\left( B^{\rho} \right)^{\sharp} + \gamma (1,1, \ldots , 1) \label{E:psi_in_Brhosharp}
\end{align}
Then $f$ does not have a holomorphic extension to a neighborhood of $s=0$.
\end{theorem}
Some comments on this result. First, note that condition (\ref{E:eta_in_Brho}) is not a ``global'' growth or decay condition; with $M=2$ it is satisfied by $\rho, 1, \rho , 1 , \ldots$. Second, noting that
$$
C_1 \subset C_2 \implies C_2^{\sharp} \subset C_1^{\sharp}
$$
we see that if $\rho$ increases then $B^{\rho}$ becomes larger and therefore $\left( B^{\rho} \right)^{\sharp} $ becomes smaller. In this sense, (\ref{E:eta_in_Brho}) and (\ref{E:psi_in_Brhosharp}) are ``dual'' to one another; the amount of restriction on $|a_n|$ is inversely proportional to the restriction on $\cos( \theta _n)$.
In theorem \ref{thm_landau_firstextension}, we saw that with no restrictions on the $|a_n|$ we are free to choose $ \theta _n$ with $\cos( \theta _n) \in [\gamma,1]$; taking a group of $M$ terms, we are free to choose
$$
( \cos( \theta _{Ml+1}), \cos( \theta _{Ml+2}), \ldots, \cos( \theta _{Ml+M}) \; ) \in [\gamma,1]^M
$$
i.e.\ the $M$-dimensional volume of the set of admissible values of cosines is less than or equal to $1^M=1$.
In theorem \ref{T}, we have placed restrictions on the sequence $|a_n|$. Therefore, theorem \ref{T} is only interesting if we can considerably increase the freedom in choosing $ \theta _n$, beyond the amount in theorem \ref{thm_landau_firstextension}. The following volume estimates demonstrate that this is the case. We need to consider $\rho \ge 1$ and $\rho <1$ separately, because different constraints will bind in the formation of the set $-\left( B^{\rho} \right)^{\sharp} \cap [-1,1]^M$.
\begin{proposition}\label{P:volume_estimate}
\begin{align}
Vol^{\mathbb{R}^M} \left( -\left( B^{\rho} \right)^{\sharp} \cap [-1,1]^M \right) &\ge \left[ 1 + \left( \frac{1 - 1/M}{4 \rho + 1/M} \right) \; \right]^M \qquad &\text{for } \rho \ge 1 \label{E:volume_estimate_ge1} \\
Vol^{\mathbb{R}^M} \left( -\left( B^{\rho} \right)^{\sharp} \cap [-1,1]^M \right) &\ge 2^{M-1} \left[ 1 - \rho / 2 (1-\rho) \; \right] \qquad &\text{for } \rho < 1 \label{E:volume_estimate_less1}
\end{align}
\end{proposition}
Note that (\ref{E:volume_estimate_less1}) is only useful if $\rho < 2/3$; it may seem that values of $\rho$ near $1$ have been missed by this proposition. However, for $\rho < 1$ the set $(B^{\rho})^{\sharp}$, and therefore the volume of the set above, is larger than in the case $\rho = 1$, and so for all $\rho < 1$ we have
$$
Vol^{\mathbb{R}^M} \left( -\left( B^{\rho} \right)^{\sharp} \cap [-1,1]^M \right) \ge \left[ 1 + \left( \frac{1 - 1/M}{4 + 1/M} \right) \; \right]^M
$$
This suffices, because the only points we want to make are the following:
\begin{itemize}
\item{For any $M \ge 2$ and any $\rho \in (0,\infty)$, the amount of ``freedom'' in choosing $ \theta _n$ is strictly greater than that afforded in theorem \ref{thm_landau_firstextension}. Indeed, although there is \emph{not} some $a >0$ such that we can freely choose each $\cos( \theta _n)$ in the interval $(-a,1]$, the amount of freedom we are afforded is equivalent to this.}
\item{As $\rho \rightarrow 0$, the amount of ``freedom'' we are afforded approaches $2^{M-1}$. This is an ``amount'' of freedom equivalent to the following (although the following is \emph{not} the choice we actually have): Choose a single $\cos( \theta _{Ml+j}) \in (0,1]$ and then $ \theta _{Ml+j'}$ can be arbitrary for $j' \ne j, \; j' \in \{1, \ldots, M\}$.}
\end{itemize}
We also obtain the following sharpness result:
\begin{proposition}\label{P:sharpness}
(I) ($\gamma>0$ is sharp): For any $M$ and any $\rho \in (0,\infty)$, there exists $\{a_n\}$ such that
\begin{itemize}
\item{$\sum a_n n^{-s}$ has $\sigma_a=0$}
\item{ $( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) \in B^{\rho} $ }
\item{ $( \cos( \theta _{Ml+1}), \cos( \theta _{Ml+2}), \ldots, \cos( \theta _{Ml+M}) \; ) \in -\left( B^{\rho} \right)^{\sharp}$ [This is (\ref{E:psi_in_Brhosharp}) with $\gamma=0$]}
\item{ $\sum a_n n^{-s}$ has a holomorphic extension past $s=0$}
\end{itemize}
(II) ($B^{\rho}, (B^{\rho})^{\sharp}$ is sharp): For any $M$ and any $0 < \rho' < \rho$ there exists $\{a_n\}$ and $\gamma >0$ such that
\begin{itemize}
\item{$\sum a_n n^{-s}$ has $\sigma_a=0$}
\item{ $( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) \in B^{\rho} $ }
\item{ $( \cos( \theta _{Ml+1}), \cos( \theta _{Ml+2}), \ldots, \cos( \theta _{Ml+M}) \; ) \in -\left( B^{\rho'} \right)^{\sharp} + \gamma (1,1, \ldots , 1)$ }
\item{ $\sum a_n n^{-s}$ has a holomorphic extension past $s=0$}
\end{itemize}
\end{proposition}
In section \ref{S:orig_proof}, we review the proof of Landau's theorem. In section \ref{S:examine_proof}, we examine the proof and broaden the hypotheses. In section \ref{S:extend_thm}, we obtain conditions on $\{a_n\}$ which imply that these broadened hypotheses are satisfied and thus prove theorem \ref{T}. In section \ref{S:volume}, we prove the volume estimates in proposition \ref{P:volume_estimate}, and in section \ref{S:Sharpness} we prove the sharpness result in proposition \ref{P:sharpness}.
\section{Proof of Landau's Theorem}\label{S:orig_proof}
Our result will build on a standard proof of Landau's theorem, so we begin by reviewing this proof.
\begin{proof}[Proof of Theorem \ref{thm_Landau}]
We begin by supposing \emph{only} that $f(s) = \sum a_n n^{-s}$ has abscissa of absolute convergence equal to $0$. The condition $a_n \ge 0$ is not yet assumed; when it is used, we will indicate this explicitly.
For contradiction, we assume that $f$ does extend holomorphically to a neighborhood of $0$; suppose that $f$ is holomorphic on $\mathbb{D}(0, 2 \epsilon )$, $ \epsilon >0$. We have
\begin{align}
f(s) &= \sum_{n =1}^{\infty} a_n n^{- \epsilon } n^{-(s- \epsilon )} \nonumber \\
&= \sum_{n =1}^{\infty} a_n n^{- \epsilon } \exp( - (s - \epsilon ) \log n) \nonumber \\
&= \sum_{n =1}^{\infty} a_n n^{- \epsilon } \left\{ \sum_{k = 0}^{\infty} \frac{ (-1)^k (\log n)^k (s - \epsilon )^k}{k!} \right\} \nonumber \\
&= \sum_{n =1}^{\infty} \left\{ \sum_{k = 0}^{\infty} a_n n^{- \epsilon } \frac{ (-1)^k (\log n)^k (s - \epsilon )^k}{k!} \right\} \nonumber
\end{align}
This double series converges absolutely for $|s - \epsilon | < \epsilon $, since the sum of the absolute values can be re-arranged to equal
$$
\sum_{n =1}^{\infty} |a_n | n^{-( \epsilon - |s - \epsilon |)}
$$
which is finite by assumption. Therefore, we re-arrange the double series to obtain
$$
f(s) = \sum_{k = 0}^{\infty} \left\{ \frac{(-1)^k}{k!} \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right\} (s - \epsilon )^k
$$
We see that this is the power series for $f$ about the point $s= \epsilon $. We have only asserted the convergence of this power series for $|s- \epsilon |< \epsilon $. However, by the assumption that $f$ is holomorphic on $\mathbb{D}(0,2 \epsilon )$, it must be the case that this power series in fact converges absolutely for $|s- \epsilon |<2 \epsilon $ (since $\mathbb{D}( \epsilon ,2 \epsilon ) \subset \big( \; \mathbb{D}(0,2 \epsilon ) \cup \Omega_0 \; \big) $, where $\Omega_0$ is the right half plane on which $f$ converges absolutely). Therefore, we have finiteness of the expression
\begin{equation}\label{E:power_series_finite}
\sum_{k = 0}^{\infty} \left| \frac{(-1)^k}{k!} \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right| |s - \epsilon |^k
\end{equation}
for $|s- \epsilon | < 2 \epsilon $.
We could complete the proof \emph{if} we could obtain finiteness of the expression
\begin{equation}\label{E:double_series_finite}
\sum_{k = 0}^{\infty} \sum_{n =1}^{\infty} |a_n| n^{- \epsilon } \frac{ (\log n)^k |s - \epsilon |^k}{k!}
\end{equation}
for $|s- \epsilon | < 2 \epsilon $. This is because, if (\ref{E:double_series_finite}) were finite, then we could re-arrange (\ref{E:double_series_finite}) to obtain
$$
\sum_{n =1}^{\infty} |a_n | n^{-( \epsilon - |s - \epsilon |)} < \infty
$$
for $|s- \epsilon | < 2 \epsilon $. This would mean that $\sum a_n n^{-s}$ converges absolutely at $s = - \epsilon /2$ (for example), a contradiction.
It is here that we use the assumption $a_n \ge 0$. With this requirement on the $a_n$, we note that
$$
\sum_{k = 0}^{\infty} \left| \frac{(-1)^k}{k!} \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right| |s - \epsilon |^k \; = \; \sum_{k = 0}^{\infty} \sum_{n =1}^{\infty} |a_n| n^{- \epsilon } \frac{ (\log n)^k |s - \epsilon |^k}{k!}
$$
Therefore, we obtain finiteness of (\ref{E:double_series_finite}) and the proof is complete.
\end{proof}
\section{Examining The Proof}\label{S:examine_proof}
Examining this proof, we see that if we only assume:
\begin{itemize}
\item{ $f$ has abscissa of absolute convergence equal to $0$ }
\item{ $f$ extends holomorphically to $\mathbb{D}(0,2 \epsilon )$ }
\end{itemize}
then (\ref{E:power_series_finite}) is finite for all $s \in \mathbb{D}( \epsilon , 2 \epsilon )$. We will re-write (\ref{E:power_series_finite}) as
\begin{equation}\label{E:power_series_finite2}
\sum_{k = 0}^{\infty} \frac{1}{k!} \left| \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right| \; |s - \epsilon |^k
\end{equation}
We obtain a contradiction if we can show that (\ref{E:double_series_finite}) is finite for some $s, \; |s- \epsilon | > \epsilon $. We will re-write (\ref{E:double_series_finite}) as
\begin{equation}\label{E:double_series_finite2}
\sum_{k = 0}^{\infty} \frac{1}{k!} \left[ \sum_{n =1}^{\infty} |a_n| n^{- \epsilon } (\log n)^k \; \right] |s - \epsilon |^k \; .
\end{equation}
We can prove that $f$ fails to have a holomorphic extension about $s=0$ if, for all sufficiently small $ \epsilon $, the finiteness of (\ref{E:power_series_finite2}) for all $s \in \mathbb{D}( \epsilon ,2 \epsilon )$ implies the finiteness of (\ref{E:double_series_finite2}) for some $s, |s- \epsilon | > \epsilon $. In other words, we can prove the theorem if the implication (\ref{E:general_suff_condition}) below is true for all sufficiently small $ \epsilon $:
\begin{align}
&\sum_{k = 0}^{\infty} \frac{1}{k!} \left| \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right| \; |s - \epsilon |^k < \infty \;\; \text{for all } s \in \mathbb{D}( \epsilon , 2 \epsilon ) \nonumber \\
& \;\;\;\; \implies \; \sum_{k = 0}^{\infty} \frac{1}{k!} \left[ \sum_{n =1}^{\infty} |a_n| n^{- \epsilon } (\log n)^k \; \right] |s - \epsilon |^k < \infty \;\; \text{for some } s, \; |s- \epsilon | > \epsilon \label{E:general_suff_condition}
\end{align}
We will investigate a very specific way in which (\ref{E:general_suff_condition}) will be true for all sufficiently small $ \epsilon $. Specifically, we seek conditions on the $\{a_n\}$ which imply that the ``key'' set of inequalities
\begin{equation}\label{keyineq}
\sum_{n =1}^{\infty} |a_n| n^{- \epsilon } (\log n)^k \le C_{ \epsilon } \left| \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right| \;\; \forall k \ge 0, \;\;\; C_{ \epsilon } \text{ independent of } k \; .
\end{equation}
holds for all sufficiently small $ \epsilon $. In principle, one could obtain ``(\ref{E:general_suff_condition}) for all sufficiently small $ \epsilon $'' in other ways, but we will focus on obtaining ``(\ref{keyineq}) for all sufficiently small $ \epsilon $''.
To summarize, we have
\begin{theorem}[Landau's Theorem, Re-formulated]\label{thm_keyineq}
Suppose that $f(s)= \sum a_n n^{-s}$ has abscissa of absolute convergence equal to $0$. If
$$
\sum_{n =1}^{\infty} |a_n| n^{- \epsilon } (\log n)^k \le C_{ \epsilon } \left| \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right| \;\; \forall k \ge 0, \;\;\; C_{ \epsilon } \text{ independent of } k
$$
holds for all sufficiently small $ \epsilon $, then $f$ does not have a holomorphic extension to a neighborhood of $0$.
\end{theorem}
\section{Extending Landau's Theorem: Conditions on Groups of Terms}\label{S:extend_thm}
In order for (\ref{keyineq}) to hold, it is evident that the arguments of the $a_n$ must be ``aligned'' to some degree. Our main tool for detecting this alignment will be to examine the real part of $\sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k$. This will detect alignment that is ``oriented towards the positive real axis'' (by rotation, this is equivalent to alignment that is ``oriented'' in any given direction in the same manner). One clear extension of Landau's theorem is obtained in this way.
\begin{proof}[Proof of Theorem \ref{thm_landau_firstextension}]
Observe that
\begin{align}
\left| \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right| &\ge \Re \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \nonumber \\
&= \sum_{n =1}^{\infty} (\Re \; a_n ) n^{- \epsilon } (\log n)^k \nonumber
\end{align}
We write $a_n = |a_n| e^{i \theta _n}$. If we had some $\gamma >0$ such that $\cos( \theta _n) \ge \gamma \; \forall n$, then we would have $ \Re \; a_n = |a_n| \cos( \theta _n) \ge \gamma |a_n|$ and therefore
$$
\sum_{n =1}^{\infty} (\Re \; a_n ) n^{- \epsilon } (\log n)^k \ge \gamma \sum_{n =1}^{\infty} |a_n| n^{- \epsilon } (\log n)^k
$$
or
$$
\sum_{n =1}^{\infty} |a_n| n^{- \epsilon } (\log n)^k \le (1/\gamma) \left| \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k \right|
$$
We see that $1/\gamma$ is independent of $k$, and therefore we apply Theorem \ref{thm_keyineq} and the proof is complete.
\end{proof}
To obtain Theorem \ref{T}, we employ this method, but apply it to groups of terms instead of single terms. Fix $M \ge 2$ and write
$$
\sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k = \sum_{l=0}^{\infty} \sum_{j=1}^M a_{Ml+j} (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k
$$
which yields
\begin{align}
&\Re{ \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k } \nonumber \\
& = \sum_{l=0}^{\infty} \sum_{j=1}^M \Re{ a_{Ml+j} } (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k \nonumber \\
& = \sum_{l=0}^{\infty} \sum_{j=1}^M |a_{Ml+j}| \cos( \theta _{Ml+j}) (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k \nonumber
\end{align}
We develop a condition on the group of coefficients $a_{Ml+1} , \ldots , a_{Ml+M} $ that will imply the existence of some $c_{ \epsilon } > 0$ (independent of $k,l$, and in fact it will be independent of $ \epsilon $) such that
\begin{align}
&\sum_{j=1}^M |a_{Ml+j}| \cos( \theta _{Ml+j}) (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k \nonumber \\
& \;\; \ge c_{ \epsilon } \Big( \; \sum_{j=1}^M |a_{Ml+j}| (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k \; \Big) \label{E:M_term_condition}
\end{align}
for all sufficiently small $\epsilon > 0$. Once (\ref{E:M_term_condition}) holds with $c_{ \epsilon }$ independent of $k,l$, for all sufficiently small $ \epsilon $, we have
$$
\Re{ \sum_{n =1}^{\infty} a_n n^{- \epsilon } (\log n)^k } \ge c_{ \epsilon } \sum_{n =1}^{\infty} |a_n| n^{- \epsilon } (\log n)^k
$$
and the proof of Theorem \ref{T} is complete.
We begin with the RHS of (\ref{E:M_term_condition}). By Taylor expansion, we write
$$
(Ml+j)^{- \epsilon } = (Ml)^{- \epsilon } + A \; , \qquad |A| \le \epsilon (Ml)^{- \epsilon } l^{-1}
$$
and therefore we have
\begin{align}
&\sum_{j=1}^M |a_{Ml+j}| (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k \nonumber \\
& \;\;\;\;\; \le (Ml)^{- \epsilon } \big( 1 + \epsilon l^{-1} \big) \sum_{j=1}^M |a_{Ml+j}| (\log (Ml+j) \; )^k \label{E:RHS_with_epsilon_1st_step}
\end{align}
\emph{Suppose} that the following inequality held for $\gamma$ independent of $l,k$:
\begin{equation}\label{E:ineq_no_epsilons}
\sum_{j=1}^M |a_{Ml+j}| (\log (Ml+j) \; )^k \le \gamma^{-1} \sum_{j=1}^M |a_{Ml+j}| \cos( \theta _{Ml+j}) (\log (Ml+j) \; )^k
\end{equation}
Applying the Taylor expansion to the LHS in (\ref{E:M_term_condition}) (estimating $\cos( \theta ) \le 1$), we define
$$
\tilde{A} = \epsilon (Ml)^{- \epsilon } l^{-1} \sum_{j=1}^M |a_{Ml+j}| (\log (Ml+j) \; )^k
$$
and we have
\begin{align}
&\sum_{j=1}^M |a_{Ml+j}| \cos( \theta _{Ml+j}) (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k \nonumber \\
& \;\; \ge (Ml)^{- \epsilon } \sum_{j=1}^M |a_{Ml+j}| \cos( \theta _{Ml+j}) (\log (Ml+j) \; )^k - \tilde{A} \nonumber \\
& \;\; \ge (Ml)^{- \epsilon } \gamma \sum_{j=1}^M |a_{Ml+j}| (\log (Ml+j) \; )^k - \tilde{A} \qquad \text{ [by (\ref{E:ineq_no_epsilons}) ]} \nonumber \\
& \;\; = (Ml)^{- \epsilon } \left[ \gamma - \epsilon l^{-1} \right] \sum_{j=1}^M |a_{Ml+j}| (\log (Ml+j) \; )^k \nonumber \\
& \;\; \ge \left[ \gamma - \epsilon l^{-1} \right] \big( 1 + \epsilon l^{-1} \big)^{-1} \sum_{j=1}^M |a_{Ml+j}| (Ml+j)^{- \epsilon } (\log (Ml+j) \; )^k \qquad \text{ [by (\ref{E:RHS_with_epsilon_1st_step}) ]} \nonumber
\end{align}
With $ \epsilon <1$ we have $\left[ \gamma - \epsilon l^{-1} \right] \big( 1 + \epsilon l^{-1} \big)^{-1} \ge \left[ \gamma - l^{-1} \right] \big( 1 + l^{-1} \big)^{-1}$. We may assume that $a_n=0$ for all small $n$ (since $\sum_{n=1}^{\infty} a_n n^{-s}$ has a holomorphic extension iff $\sum_{n=N}^{\infty} a_n n^{-s}$ does), and therefore we may assume that we are concerned only with large $l$. For $l$ large (depending only on $\gamma$) we have $\left[ \gamma - l^{-1} \right] \big( 1 + l^{-1} \big)^{-1} \ge \gamma/2$, and therefore (\ref{E:M_term_condition}) holds (with $c_{ \epsilon } = \gamma/2$) for all sufficiently small $ \epsilon $, independent of $k,l$ and we are finished.
Therefore, to prove the theorem, it suffices to show that (\ref{E:ineq_no_epsilons}) holds for some $\gamma>0$, independent of $k,l$. We focus now on (\ref{E:ineq_no_epsilons}).
Let
\begin{align}
\beta_j = \beta_j^{(k,l)} &= |a_{Ml+j}| (\log (Ml+j) \; )^k \label{E:beta_j}
\end{align}
and
$$
\beta = \beta^{(k,l)} = ( \beta_1^{(k,l)} , \ldots , \beta_M^{(k,l)} )
$$
We abbreviate
\begin{align*}
\psi = \psi^{(l)} &= ( \cos( \theta _{Ml+1}), \cos( \theta _{Ml+2}), \ldots, \cos( \theta _{Ml+M}) \; ) \\
\tilde{\psi} &= \psi - \gamma ( 1,1, \ldots , 1)
\end{align*}
We re-write (\ref{E:ineq_no_epsilons}) as $\beta \cdot \psi \ge \gamma ( \beta \cdot ( 1,1, \ldots , 1) \; )$ or
\begin{equation}\label{E:psi_beta_ge_0}
\beta^{(k,l)} \cdot \tilde{\psi}^{(l)} \ge 0 \qquad \forall k,l
\end{equation}
Our strategy is as follows: Develop a condition on the $|a_n|$ which implies that $\beta^{(k,l)}$ lies in a particular subset $B$ of $[0,\infty)^M$ for all $k,l$ (i.e. $B$ does not depend on $k,l$). Then, the condition on the $ \theta _n$ is simply $\tilde{\psi}^{(l)} \in -B^{\sharp}$ and (\ref{E:psi_beta_ge_0}) is satisfied.
We have
$$
\frac{\beta_j^{(k,l)} }{ \beta_{j+1}^{(k,l)} }= \frac{|a_{Ml+j}|}{|a_{Ml+(j+1)}|} \left( \frac{\log(Ml+j)}{\log(Ml+(j+1) )} \right)^k
$$
Suppose we assume that
$$
\frac{|a_{Ml+j}|}{|a_{Ml+(j+1)}|} \le \rho \qquad \forall l , \; \forall j = 1, \ldots, M-1, \; \text{for some } \rho \in (0,\infty)
$$
Recalling definition (\ref{Brho}), this can be written $( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) \in B^{\rho}$. This implies $\beta_j / \beta_{j+1} \le \rho$ for all $k,l$, or
$$
\beta^{(k,l)} \in B^{\rho} \qquad \forall k,l
$$
The set $B^{\rho}$ meets the requirement of being a proper subset of $[0,\infty)^M$ not depending on $k,l$, therefore this is the condition we seek. We can now prove Theorem \ref{T}. Suppose $( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) \in B^{\rho}$; this means $\beta^{(k,l)} \in B^{\rho}$. By definition, the set of $\tilde{\psi}$ which satisfy $\tilde{\psi} \cdot \beta \ge 0$ for all $\beta \in B^{\rho}$ equals $-(B^{\rho})^{\sharp}$. For $\tilde{\psi}^{(l)} \in -(B^{\rho})^{\sharp}$, we therefore have $\tilde{\psi}^{(l)} \cdot \beta^{(k,l)} \ge 0 ,\; \forall k,l$. In other words, (\ref{E:psi_beta_ge_0}) holds, thus (\ref{E:ineq_no_epsilons}) holds, and the proof of Theorem \ref{T} is complete.
\section{Volume Calculation}\label{S:volume}
As we mentioned, Theorem \ref{T} is only interesting if the restrictions on $ \theta _n$ are broad enough to be a measurable improvement over the requirement $\cos \theta _n \ge \gamma$. We require $\tilde{\psi} \in -(B^{\rho})^{\sharp}$, i.e.
$$
( \cos( \theta _{Ml+1}) , \ldots , \cos( \theta _{Ml+M}) \; ) \in -(B^{\rho})^{\sharp} + \gamma \begin{pmatrix} 1 \\ \vdots \\ 1 \end{pmatrix}
$$
We want to answer the question
$$
\text{``How much freedom do we have in choosing }\cos( \theta _{Ml+1}) , \ldots , \cos( \theta _{Ml+M}) \text{ ?''}
$$
One way to answer this is to measure the volume
$$
Vol^{\mathbb{R}^M} \left[ \; \left( -(B^{\rho})^{\sharp} + \gamma \begin{pmatrix} 1 \\ \vdots \\ 1 \end{pmatrix} \right) \cap [-1,1]^M \right]
$$
Since this is continuous in $\gamma$, we will estimate
\begin{equation}\label{E:volume}
Vol^{\mathbb{R}^M} \left[ -(B^{\rho})^{\sharp} \cap [-1,1]^M \right]
\end{equation}
First, we obtain a more direct description of $ (B^{\rho})^{\sharp} $, by writing $B^{\rho}$ as the convex cone generated by a finite point set.
\begin{proposition}
Let $x^{(r)} \in \mathbb{R}^M , r = 1, \ldots , M$ be defined by
$$
x^{(r)} = ( 0 , \ldots , 0 , \rho^{-r} , \rho^{-(r+1)} , \ldots , \rho^{-M})
$$
Then $B^{\rho}$ equals the positive linear span of the $ \{ x^{(r)} \} $.
\end{proposition}
\begin{corollary}
\begin{equation}\label{E:B_rho_sharp_equations}
(B^{\rho})^{\sharp} = \left\{ y = ( y_1 , \ldots , y_M) : \sum_{j=r}^M \rho^{-j} y_j \le 0 \;\;\; \forall r=1, \ldots, M \right\}
\end{equation}
\end{corollary}
\begin{proof}[Proof of Proposition]
That $x^{(r)} \in B^{\rho}$ for each $r$ is clear. If $\beta \in B^{\rho}$ then
$$
\beta = \rho^1 \beta_1 x^{(1)} + \rho^2 (\beta_2 - \rho^{-1} \beta_1)x^{(2)} + \cdots + \rho^M (\beta_M - \rho^{-1} \beta_{M-1} ) x^{(M)}
$$
Each coefficient is nonnegative, so we have written $\beta$ as a positive linear combination of the $x^{(r)}$.
\end{proof}
Now, we wish to estimate the expression in (\ref{E:volume}). The cases $\rho \ge 1, \rho < 1$ are treated separately (since different constraints will bind to form the set $-(B^{\rho})^{\sharp} \cap [-1,1]^M$ in these two cases).
\subsection*{The case $\rho \ge 1$}
The idea is to exhibit a certain disjoint union of rectangles contained in $-(B^{\rho})^{\sharp} \cap [-1,1]^M$, using the description of $-(B^{\rho})^{\sharp}$ given in (\ref{E:B_rho_sharp_equations}). This is obtained by bisecting a subinterval of $[-1,1]$ in each coordinate (so we will have $2^M$ rectangles), but the location where the $j$th coordinate is bisected in a particular rectangle will depend on the ``location'' of that rectangle in the coordinates $j' > j$. The natural order in which to consider the indices will be ``$ M, M-1, \ldots$,'' as we shall see. An example will clarify this; consider $M=2$. We have the set
$$
-(B^{\rho})^{\sharp} = \left\{ (y_1, y_2 ) : \; \rho^{-2} y_2 \ge 0 \; , \;\; \rho^{-1} y_1 + \rho^{-2} y_2 \ge 0 \; \right\}
$$
(In the following discussion we use $2^{-1}$ to denote $1/2$; our aim is to minimize the number of parentheses and improve readability. We apologize for any confusion.)
We divide the $y_2$ coordinate into the ranges $(0,2^{-1}) \; , \; (2^{-1},1)$. If $y_2 \in (0,2^{-1})$, the ``worst case'' estimate for the range of values of $y_1$ is the trivial one, $y_1 \ge 0$, so we divide the range for $y_1$ into $(0,2^{-1}) \; , \; (2^{-1},1)$ as well. If $y_2 \in (2^{-1},1)$, we can estimate the range of values of $y_1$ to always contain the interval
$$
(- 2^{-1} \rho^{-1} , 1)
$$
and we evenly divide this interval into two pieces:
\begin{align*}
& \Big( -2^{-1} \rho^{-1} \;\; , \; -2^{-1} \rho^{-1} + 2^{-1} ( 1 + 2^{-1} \rho^{-1} \; ) \; \Big) \\
& \Big( -2^{-1} \rho^{-1} + 2^{-1} ( 1 + 2^{-1} \rho^{-1} \; ) \;\; , \; -2^{-1} \rho^{-1} + 2 \; 2^{-1} ( 1 + 2^{-1} \rho^{-1} \; ) \; \Big)
\end{align*}
To summarize, we obtain
\begin{align*}
-(B^{\rho})^{\sharp} \cap [-1,1]^2 & \supset \\
&(0,2^{-1}) \times (0,2^{-1}) \\
\cup \; &(2^{-1},1) \times (0,2^{-1}) \\
\cup \; &\Big( -2^{-1} \rho^{-1} \;\; , \; -2^{-1} \rho^{-1} + 2^{-1} ( 1 + 2^{-1} \rho^{-1} \; ) \; \Big) \times (2^{-1},1) \\
\cup \; &\Big( -2^{-1} \rho^{-1} + 2^{-1} ( 1 + 2^{-1} \rho^{-1} \; ) \;\; , \; -2^{-1} \rho^{-1} + 2 \; 2^{-1} ( 1 + 2^{-1} \rho^{-1} \; ) \; \Big) \times (2^{-1},1)
\end{align*}
(a disjoint union of four rectangles). Using the set-addition notation
$$
\Bigg( a + (b+c) , a + 2(b+c) \Bigg) = a + (b+c) \Bigg( 1, 2 \Bigg)
$$
(with large delimiters to distinguish the actual interval from parentheses), this can be written
$$
-(B^{\rho})^{\sharp} \cap [-1,1]^2 \supset \bigcup_{j_1, j_2 = 0}^1 \; - 2^{-1} \rho^{-1} j_2 + 2^{-1} ( 1 + 2^{-1} \rho^{-1} j_2 ) \Bigg( j_1, j_1+1 \Bigg) \times 2^{-1} \Bigg( j_2 , j_2+1 \Bigg)
$$
The expressions above will soon become cumbersome, so we define the function $P$, for $x_1, \ldots, x_n \in \mathbb{R}$, by
$$
P[ x_1 , \ldots , x_n ] = x_1 ( 1+ x_2 ( 1+ x_3 ( \cdots ( 1 + x_{n-1} ( 1 + x_n ) ) \cdots ) ) )
$$
(Use of square brackets in the definition of $P$ is again for readability). We will use the convention that, if $x_1, \ldots , x_n$ is an ``empty list,'' then $P[ x_1 , \ldots , x_n ] =0$. In addition, we write the set $ (a_1,b_1 ) \times (a_2,b_2) \times \cdots \times (a_n, b_n)$ as
$$
\{ y : y_i \in (a_i, b_i) , \; i = 1, \ldots , n \}
$$
At last, we can write the following for the case $M=2$:
\begin{align*}
&-(B^{\rho})^{\sharp} \cap [-1,1]^2 \supset \\
&\bigcup_{j_1, j_2 = 0}^1 \left\{ y : \; y_k \in \;\;\;\; -P\big[ 2^{-1} \rho^{-1} j_{k+1} \big] + P\big[ 2^{-1}, 2^{-1} \rho^{-1} j_{k+1} \big] \Bigg( j_k, j_k+1 \Bigg) \;\; , \;\; k = 1, 2 \right\}
\end{align*}
Note that, if $k=2$ then $k+1 = 3$ and ``$2^{-1} \rho^{-1} j_3$'' is an empty list (there is no $j_3$), so
$$
P\big[ 2^{-1} \rho^{-1} j_{k+1} \big] = 0 \; , \qquad P\big[ 2^{-1}, 2^{-1} \rho^{-1} j_{k+1} \big] = P\big[ 2^{-1} \big] = 2^{-1}
$$
Applying this idea in dimension $M$, we obtain the following.
\begin{lemma}\label{P:volume_lemma}
For $B^{\rho} = B^{\rho,M}$ ($\rho \ge 1$), we have,
\begin{align*}
&-(B^{\rho})^{\sharp} \cap [-1,1]^M \supset \\
&\bigcup_{j_1, ... , j_M = 0}^1 \Bigg\{ y : \; y_k \in \;\;\;\; - P\big[ 2^{-1} \rho^{-1} j_{k+1}, 2^{-1} \rho^{-1} j_{k+2}, \ldots , 2^{-1} \rho^{-1} j_M \big] \\
& \; + P\big[ 2^{-1}, 2^{-1} \rho^{-1} j_{k+1} , 2^{-1} \rho^{-1} j_{k+2}, \ldots , 2^{-1} \rho^{-1} j_M \big] \Bigg( j_k, j_k+1 \Bigg) \;\;\; , \;\; k = 1, \ldots, M \Bigg\}
\end{align*}
This is a disjoint union.
\end{lemma}
\begin{proof}
We wish to consider just one rectangle from the RHS, so fix $j_1 , \ldots , j_M$. By picking the left endpoint from $(j_k , j_k+1)$, and noting that
$$
a P\big[x_1, \ldots, x_n\big] = P\big[ a x_1, \ldots , x_n\big]
$$
we have
\begin{equation}\label{E:psi_k}
y_k \ge -P\big[ 2^{-1} \rho^{-1} j_{k+1}, \ldots , 2^{-1} \rho^{-1} j_M \big] + P\big[ 2^{-1} j_k, 2^{-1} \rho^{-1} j_{k+1} , \ldots , 2^{-1} \rho^{-1} j_M \big]
\end{equation}
Plugging this estimate into the sum $\sum_{j=r}^M \rho^{-j} y_j$, the result telescopes to give just the positive term for $j=r$ and the negative term for $j=M$ (and this $j=M$ term is itself an ``empty list''):
\begin{align*}
\sum_{j=r}^M \rho^{-j} y_j \ge & \rho^{-r} P\big[ 2^{-1} j_r, 2^{-1} \rho^{-1} j_{r+1} , \ldots , 2^{-1} \rho^{-1} j_M \big] - \rho^{-M} P\big[ 2^{-1} \rho^{-1} j_{M+1}, \ldots , 2^{-1} \rho^{-1} j_M \big] \; \\
= & \rho^{-r} P\big[ 2^{-1} j_r, 2^{-1} \rho^{-1} j_{r+1} , \ldots , 2^{-1} \rho^{-1} j_M \big]
\end{align*}
We see that this is positive, since each expression $2^{-1} \rho^{-1} j_n$ is positive, and therefore by (\ref{E:B_rho_sharp_equations}) the rectangle is contained in $-(B^{\rho})^{\sharp} $.
Next, we prove containment in $[-1,1]^M$. Note that, because $\rho \ge 1$, we have $0 \le 2^{-1} \rho^{-1} j_n \le 2^{-1}$, and because $P$ is monotone increasing in each coordinate (as long as all coordinates are positive), we have
$$
P\big[ 2^{-1} \rho^{-1} j_{k+1}, \ldots , 2^{-1} \rho^{-1} j_M \big] \le P\big[ 2^{-1}, 2^{-1} , \ldots , 2^{-1} \big] \le 1
$$
and therefore, by (\ref{E:psi_k}), we have $y_k \ge -1$.
Picking the right endpoint from the interval $(j_k, j_k + 1)$ in lemma \ref{P:volume_lemma}, we have
\begin{align*}
y_k &\le - P\big[ 2^{-1} \rho^{-1} j_{k+1} , \ldots \big] + (j_k+1) P\big[ 2^{-1}, 2^{-1} \rho^{-1} j_{k+1} , \ldots \big] \\
&= - P\big[ 2^{-1} \rho^{-1} j_{k+1} , \ldots \big] + (j_k+1) 2^{-1} \Big( 1+ P\big[ 2^{-1} \rho^{-1} j_{k+1} , \ldots \big] \Big) \\
&\le - P\big[ 2^{-1} \rho^{-1} j_{k+1} , \ldots \big] + \Big( 1+ P\big[ 2^{-1} \rho^{-1} j_{k+1} , \ldots \big] \Big) \\
&= 1
\end{align*}
Lastly, we show the union is disjoint. Let $(j_1, \ldots , j_M) \ne (j_1', \ldots , j_M')$ and denote the respective rectangles by $R, R'$. Let $K$ be the largest value $i$ between $1$ and $M$ such that $j_i \ne j_i'$. WLOG, suppose $j_K =0, j_K' = 1$ (and we have $j_i = j_i'$ for $ i>K$). Consider the $K$th coordinate. We see that, by definition of the rectangles in lemma \ref{P:volume_lemma}, the above information on $j_i$ implies that, $\forall y \in R, \forall y' \in R'$ we have
$$
y_K < y_K'
$$
and this proves disjointness.
\end{proof}
Having lemma \ref{P:volume_lemma}, we can prove the estimate (\ref{E:volume_estimate_ge1}) from proposition \ref{P:volume_estimate}.
\begin{proof}[Proof of (\ref{E:volume_estimate_ge1})]
To obtain the estimate in (\ref{E:volume_estimate_ge1}), it remains to sum the volume of the rectangles from lemma \ref{P:volume_lemma}. Let $R$ be the rectangle corresponding to $(j_1, \ldots , j_M)$, and let $V$ be its volume. We have
$$
V = \prod_{k=1}^M P\big[ 2^{-1}, 2^{-1} \rho^{-1} j_{k+1} , 2^{-1} \rho^{-1} j_{k+2}, \ldots , 2^{-1} \rho^{-1} j_M \big]
$$
and we have
$$
P\big[ 2^{-1}, 2^{-1} \rho^{-1} j_{k+1} , 2^{-1} \rho^{-1} j_{k+2}, \ldots , 2^{-1} \rho^{-1} j_M \big] \ge 2^{-1} ( 1 + 2^{-1} \rho^{-1} j_{k+1} )
$$
Noting the value of this expression when $k=M$ (namely $2^{-1}$), $V$ is greater than or equal to $2^{-M} \prod_{k=1}^{M-1} ( 1 + 2^{-1} \rho^{-1} j_{k+1} )$. By binomial expansion, this is
$$
2^{-M} \sum_{ \epsilon = ( \epsilon _1 , \ldots , \epsilon _{M-1})} (2 \rho)^{- \sum \epsilon _i } \; j_1^{ \epsilon _1} \cdots j_{M-1}^{ \epsilon _{M-1}}
$$
Summing this over the index set, the total volume of all the rectangles is greater than or equal to
\begin{align*}
& \sum_{j_1 , \ldots , j_M = 0}^1 \;\; 2^{-M} \sum_{ \epsilon = ( \epsilon _1 , \ldots , \epsilon _{M-1})} (2 \rho)^{- \sum \epsilon _i } \; j_1^{ \epsilon _1} \cdots j_{M-1}^{ \epsilon _{M-1}} \\
= \; &2^{-M} \sum_{ \epsilon = ( \epsilon _1 , \ldots , \epsilon _{M-1})} (2 \rho)^{- \sum \epsilon _i } \sum_{j_1 , \ldots , j_M = 0}^1 \;\; \; j_1^{ \epsilon _1} \cdots j_{M-1}^{ \epsilon _{M-1}} \\
\end{align*}
We have
\begin{align*}
\sum_{j_1 , ... , j_M = 0}^1 \;\; \; j_1^{ \epsilon _1} \cdots j_{M-1}^{ \epsilon _{M-1}} &= \left( \sum_{j_1=0}^1 j_1^{ \epsilon _1} \right) \left( \sum_{j_2=0}^1 j_2^{ \epsilon _2} \right) \cdots \left( \sum_{j_{M-1}=0}^1 j_{M-1}^{ \epsilon _{M-1}} \right) \left( \sum_{j_M=0}^1 1 \right) \\
&= 2^{1- \epsilon _1} 2^{1- \epsilon _2} \cdots 2^{1- \epsilon _{M-1}} 2 \\
&= 2^M 2^{- \sum \epsilon _i } \\
\end{align*}
Thus, summing over $(j_1, \ldots , j_M)$, the total volume $\mathrm{Vol}$ of the rectangles satisfies
\begin{align*}
\mathrm{Vol} &\ge 2^{-M} \sum_{ \epsilon = ( \epsilon _1 , \ldots , \epsilon _{M-1})} (2 \rho)^{- \sum \epsilon _i } \;\; 2^M \;\; 2^{- \sum \epsilon _i } \\
&= \sum_{ \epsilon = ( \epsilon _1 , \ldots , \epsilon _{M-1})} (4 \rho)^{- \sum \epsilon _i } \\
&= (1 + (4 \rho)^{-1} \; )^{M-1}
\end{align*}
It is an elementary exercise to show that
$$
\epsilon \le \frac{1 - 1/M}{4 \rho + 1/M} \; \implies \; (1 + (4 \rho)^{-1} \; )^{M-1} \ge (1+ \epsilon )^M
$$
and therefore
$$
\mathrm{Vol} \ge \left( 1 + \frac{1 - 1/M}{4 \rho + 1/M} \right)^M
$$
which completes the proof of (\ref{E:volume_estimate_ge1}).
\end{proof}
\subsection*{The case $\rho < 1$}
This proof will be somewhat simpler, we look at the set
$$
[-1,1]^{M-1} \times [0,1]
$$
and prove that $-(B^{\rho})^{\sharp}$ has large intersection with this set. This is done by taking the first $M-1$ coordinates and splitting each into the cases $[-1,0]$ and $[0,1]$, giving a division of $[-1,1]^{M-1}$ into $2^{M-1}$ cubes of dimension $M-1$ (all side lengths being $1$). Then, on each cube we find the range of values for the $M$th coordinate which will remain within $-(B^{\rho})^{\sharp}$.
\begin{lemma}
For $\rho < 1$,
\begin{align}
&-(B^{\rho})^{\sharp} \cap [-1,1]^M \supset \nonumber \\
& \;\;\;\;\;\;\; \bigcup_{j_1, ..., j_{M-1} = 0}^1 \left\{ y : y_{k} \in (-j_k, -j_k+1) \; \text{for } k<M, \;\; y_M \in \left(\sum_{k=1}^{M-1} j_k \rho^{M-k}, 1 \right) \; \right\} \nonumber
\end{align}
\end{lemma}
We allow for the possibility that the interval $\left(\sum_{k=1}^{M-1} j_k \rho^{M-k}, 1 \right)$ (and thus the corresponding rectangle) is empty.
\begin{proof}[Proof of Lemma]
Suppose $y$ is a member of one of the rectangles on the RHS. (We index by $d$ below to avoid the notation $j_j$.) Using (\ref{E:B_rho_sharp_equations}), we calculate: for $r \in \{1, \ldots, M\}$, we have
\begin{align*}
\sum_{d=r}^M \rho^{-d} y_d &= \sum_{d=r}^{M-1} \rho^{-d} y_d + \rho^{-M} y_M \\
&\ge \sum_{d=r}^{M-1} \rho^{-d} (-j_d) + \rho^{-M} \sum_{d=1}^{M-1} j_d \rho^{M-d} \\
&\ge \sum_{d=1}^{M-1} \rho^{-d} (-j_d) + \sum_{d=1}^{M-1} j_d \rho^{-d} \\
&=0
\end{align*}
\end{proof}
Here, disjointness of these rectangles and containment in $[-1,1]^M$ are clear, so the volume of $-(B^{\rho})^{\sharp} \cap [-1,1]^M$ can be bounded below. The volume of the rectangle corresponding to $(j_1, \ldots, j_{M-1})$ is greater than or equal to
$$
1 - \sum_{k=1}^{M-1} j_k \rho^{M-k}
$$
(note that this remains true when $\sum_{k=1}^{M-1} j_k \rho^{M-k} > 1$, in which case the rectangle is empty). So, the volume of all the rectangles together is greater than or equal to
\begin{align*}
\sum_{j_1, ... , j_{M-1} = 0}^1 \left( 1 - \sum_{k=1}^{M-1} j_k \rho^{M-k} \right) &= 2^{M-1} - \sum_{k=1}^{M-1} \rho^{M-k} \sum_{j_1, ... , j_{M-1} = 0}^1 j_k \\
&= 2^{M-1} - \sum_{k=1}^{M-1} \rho^{M-k} ( 2^{M-2} ) \\
&= 2^{M-1} - 2^{M-2} \rho \left( \frac{1-\rho^{M-1}}{1-\rho} \right) \\
&= 2^{M-1} \left[ 1 - 2^{-1} \rho \left( \frac{1-\rho^{M-1}}{1-\rho} \right) \; \right] \\
&\ge 2^{M-1} \left[ 1 - 2^{-1} \rho / (1-\rho) \; \right]
\end{align*}
and (\ref{E:volume_estimate_less1}) is proved.
\section{Sharpness}\label{S:Sharpness}
We prove proposition \ref{P:sharpness}, by constructing counterexamples. Let $\rho \in (0,\infty)$ and $M \ge 2$ be fixed. All the counterexamples will be of the following form:
\begin{align}
|a_{Ml+j}| &= l^{-1} \rho^{-j} \nonumber \\
\cos( \theta _{Ml+j}) = \cos_j &= \lambda \delta_j + \gamma \nonumber \\
\sin( \theta _{Ml+j}) &= (-1)^l \sqrt{ 1 - \cos^2( \theta _{Ml+j}) } \label{E:counterexamples}
\end{align}
where $\lambda > 0, \; \delta_j \in [-1,1]$, and $\gamma$ are yet to be determined (subject to the requirement $\lambda \delta_j + \gamma \in [-1,1]$). We see that our construction already has the following properties:
\begin{itemize}
\item{$\sum a_n n^{-s}$ has $\sigma_a=0$ [This is due to the factor $l^{-1}$]}
\item{ $( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) \in B^{\rho}$ }
\end{itemize}
We now develop a sufficient condition on $\lambda, \delta_j, \gamma$ under which the sequence of partial sums $\sum_{n=1}^N a_n n^{ \epsilon }$ is a Cauchy sequence for some $ \epsilon >0$, this proves that $\sum a_n n^{-s}$ has a holomorphic extension past $s=0$.
Consider
$$
\sum_{n=N}^J a_n n^{ \epsilon }
$$
Let $N = M l_0 + j_0, \; J = M l_1 + j_1$ for $l_0, l_1 \ge 0, j_0, j_1 \in \{1, \ldots , M\}$. This gives
\begin{align}
\sum_{n=N}^J a_n n^{ \epsilon } &= \sum_{j=j_0}^M a_{M l_0 +j} (M l_0 +j)^{ \epsilon } + \sum_{j=1}^{j_1} a_{M l_1 +j} (M l_1 +j)^{ \epsilon } + \sum_{l=l_0+1}^{l_1-1} \sum_{j=1}^M a_{M l +j} (M l +j)^{ \epsilon } \nonumber \\
&= (I) + (II) + (III) \label{E:split_sum_anepsilon}
\end{align}
We see that $(I)$ and $(II)$ are bounded in size by a constant times $l_0^{-(1- \epsilon )}$, which converges to zero as $N \rightarrow \infty$, so we concentrate on $(III)$. Note that
$$
(Ml+j)^{ \epsilon } = (Ml)^{ \epsilon } + A_{l,j} \; , \qquad |A_{l,j}| \le \epsilon (Ml)^{ \epsilon } l^{-1}
$$
We have
\begin{align}
\sum_{l=l_0+1}^{l_1-1} \sum_{j=1}^M a_{M l +j} (M l +j)^{ \epsilon } &= \sum_{l=l_0+1}^{l_1-1} l^{-1} \sum_{j=1}^M \rho^{-j} e^{i \theta _{Ml+j} } ( (Ml)^{ \epsilon } + A_{l,j} ) \nonumber \\
&= \sum_{l=l_0+1}^{l_1-1} l^{-1} \sum_{j=1}^M \rho^{-j} e^{i \theta _{Ml+j} } A_{l,j} + \sum_{l=l_0+1}^{l_1-1} l^{-1} (Ml)^{ \epsilon } \sum_{j=1}^M \rho^{-j} e^{i \theta _{Ml+j} } \nonumber \\
&= (IIIa) + (IIIb) \label{E:split_subsum_anepsilon}
\end{align}
The first sum, $(IIIa)$, is bounded in size by
$$
\sum_{l=l_0+1}^{l_1-1} l^{-1} \epsilon (Ml)^{ \epsilon } l^{-1} \sum_{j=1}^M \rho^{-j} \le \epsilon M^{ \epsilon } \left( \sum_{j=1}^M \rho^{-j} \right) \sum_{l=l_0+1}^{l_1-1} l^{-(2- \epsilon )}
$$
and $\sum_{l=l_0+1}^{l_1-1} l^{-(2- \epsilon )}$ is (part of) the tail of a convergent sum, so it converges to $0$ as $N \rightarrow \infty$.
We have
$$
(IIIb) = \sum_{l=l_0+1}^{l_1-1} l^{-1} (Ml)^{ \epsilon } \sum_{j=1}^M \rho^{-j} \left[ \cos( \theta _{Ml+j}) + i \sin( \theta _{Ml+j}) \right]
$$
and since $\cos( \theta _{Ml+j})$ depends only on $j$, this can be written
\begin{align}
(IIIb) &= M^{ \epsilon } \left( \sum_{j=1}^M \rho^{-j} \cos_j \right) \sum_{l=l_0+1}^{l_1-1} l^{-(1- \epsilon )} \nonumber \\
& \;\;\; + i M^{ \epsilon } \left( \sum_{j=1}^M \rho^{-j} \sqrt{ 1 - \cos_j^2 } \right) \sum_{l=l_0+1}^{l_1-1} (-1)^l l^{-(1- \epsilon )} \nonumber \\
& = (IIIb1) + (IIIb2) \label{E:split_subsubsum_anepsilon}
\end{align}
We see that $ \sum_{l=l_0+1}^{l_1-1} (-1)^{l} l^{-(1- \epsilon )}$ is (part of) the tail of an alternating series, so $(IIIb2)$ converges to $0$ as $N \rightarrow \infty$. Therefore, if we have $\sum_{j=1}^M \rho^{-j} \cos_j = 0$, i.e.
\begin{equation}\label{E:cauchy_suff_condition}
\lambda \sum_{j=1}^M \rho^{-j} \delta_j + \gamma \left( \sum_{j=1}^M \rho^{-j} \right) = 0
\end{equation}
then we will have $\sum_{n=N}^J a_n n^{ \epsilon } = o(1)$ as $N \rightarrow \infty$; this is the sufficient condition under which our construction will also satisfy
\begin{itemize}
\item{ $\sum a_n n^{-s}$ has a holomorphic extension past $s=0$}
\end{itemize}
\subsection*{Proposition \ref{P:sharpness} part $(I)$}
Here, we want to find $\{a_n\}$ which satisfy
\begin{itemize}
\item{$\sum a_n n^{-s}$ has $\sigma_a=0$}
\item{ $( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) \in B^{\rho}$ }
\item{ $( \cos( \theta _{Ml+1}), \cos( \theta _{Ml+2}), \ldots, \cos( \theta _{Ml+M}) \; ) \in -\left( B^{\rho} \right)^{\sharp} $ \text{ [this is (\ref{E:psi_in_Brhosharp}) with $\gamma=0$]} }
\item{ $\sum a_n n^{-s}$ has a holomorphic extension past $s=0$}
\end{itemize}
We choose $\{a_n\}$ as in (\ref{E:counterexamples}), and furthermore we set $\gamma = 0$. In view of the discussion above, it only remains to prove that we can choose $\lambda, \delta_j \in [-1,1]$ such that the following three properties hold:
\begin{align*}
\lambda \sum_{j=1}^M \rho^{-j} \delta_j &= 0 \qquad \qquad \text{ [this is (\ref{E:cauchy_suff_condition}) with $\gamma=0$]} \\
\lambda ( \delta_1 , \ldots , \delta_M ) &\in - (B^{\rho})^{\sharp} \qquad \qquad \text{ [this is (\ref{E:psi_in_Brhosharp}) with $\gamma=0$]} \\
\lambda \delta_j &\in [-1,1] \qquad \qquad \text{[this is the requirement $\cos \theta \in [-1,1]$ ]}
\end{align*}
Evidently, $\lambda$ is irrelevant to the first two properties, so we set it to $1$ (and then the third property is satisfied). Writing $\delta = (\delta_1 , \ldots , \delta_M)$, and recalling (\ref{E:B_rho_sharp_equations}), the remaining requirements are that there exists $\delta \in [-1,1]^M$ such that
\begin{align*}
\delta \cdot (\rho^{-1} , \ldots , \rho^{-M}) &= 0 \\
\delta \cdot (0 , 0 , \ldots , \rho^{-r} , \ldots , \rho^{-M}) &\ge 0 \;\;\; \forall r = 1 , \ldots , M \\
\end{align*}
This is nothing more than the statement that, in the system of inequalities
$$
x \cdot (0 , 0 , \ldots , \rho^{-r} , \ldots , \rho^{-M}) \ge 0 \;\;\; : \; r = 1 , \ldots , M
$$
the inequality corresponding to $r=1$ binds at some point. This is true because the vectors
$$
(0 , 0 , \ldots , \rho^{-r} , \ldots , \rho^{-M}) \;\;\; : \; r = 1 , \ldots , M
$$
are linearly independent. For a specific example, we could choose
\begin{align*}
\delta_j &= c (-1)^{M-j} \rho^j \qquad &\text{[$M$ even]} \\
\delta_1=0 , \;\;\;\; \delta_j &= c (-1)^{M-j} \rho^j \;\; j > 1 \qquad &\text{[$M$ odd]} \\
\end{align*}
\subsection*{Proposition \ref{P:sharpness} part $(II)$}
Fix $M \ge 2$ and $0 < \rho' < \rho$. Here, we want to find $\{a_n\}$ and $\gamma>0$ which satisfy
\begin{itemize}
\item{$\sum a_n n^{-s}$ has $\sigma_a=0$}
\item{ $( |a_{Ml+1}|, |a_{Ml+2}|, \ldots, |a_{Ml+M}| ) \in B^{\rho} $ }
\item{ $( \cos( \theta _{Ml+1}), \cos( \theta _{Ml+2}), \ldots, \cos( \theta _{Ml+M}) \; ) \in -\left( B^{\rho'} \right)^{\sharp} + \gamma (1,1, \ldots , 1)$ }
\item{ $\sum a_n n^{-s}$ has a holomorphic extension past $s=0$}
\end{itemize}
We choose $\{a_n\}$ as in (\ref{E:counterexamples}). In view of the discussion above, it only remains to prove that we can choose $\lambda, \delta_j \in [-1,1], \gamma>0$ such that the following three properties hold:
\begin{align*}
\lambda \sum_{j=1}^M \rho^{-j} \delta_j + \gamma \left( \sum_{j=1}^M \rho^{-j} \right) &= 0 \\
\lambda ( \delta_1 , \ldots , \delta_M ) &\in - (B^{\rho'})^{\sharp} \\
\lambda \delta_j + \gamma &\in [-1,1]
\end{align*}
Evidently, if we find $\delta \in - (B^{\rho'})^{\sharp}$ such that
$$
\sum_{j=1}^M \rho^{-j} \delta_j < 0
$$
then we can find arbitrarily small values of $\lambda, \gamma$ such that the first property is satisfied, and therefore we can simultaneously satisfy the third property as well. Therefore, it suffices to find $\delta$ such that
\begin{align}
\delta \in - (B^{\rho'})^{\sharp} \nonumber \\
\delta \cdot (\rho^{-1} , \ldots , \rho^{-M}) &< 0 \label{E:counterexample_delta}
\end{align}
Intuitively, this is stating a certain ``properness'' of the containment relations
$$
\rho_1 < \rho_2 \;\; \implies \;\; B^{\rho_1} \subset B^{\rho_2} \;\; \implies \;\; (B^{\rho_1})^{\sharp} \supset (B^{\rho_2})^{\sharp}
$$
The following example suffices: Define
$$
x = ( - \rho^{-(M-1)} , 0 , 0 , \ldots , 0 , 1)
$$
We have
$$
x \cdot ( \; (\rho')^{-1} , \ldots , (\rho')^{-M}) = (\rho')^{-M} - (\rho')^{-1} \rho^{-(M-1)}
$$
which is greater than $0$, and clearly
$$
x \cdot ( 0 , \ldots , 0 ,\; (\rho')^{-r} , \ldots , (\rho')^{-M}) >0 \qquad \forall r>1
$$
so we have
$$
\sum_{j=r}^M x_j (\rho')^{-j} > 0 \qquad \forall r \ge 1
$$
Therefore, there exists $ \epsilon $ such that for any $y$, $|y-x|< \epsilon $, we have
$$
\sum_{j=r}^M y_j (\rho')^{-j} > 0 \qquad \forall r \ge 1
$$
which implies $y \in -(B^{\rho'})^{\sharp}$. We selected $x$ to satisfy
$$
x \cdot (\rho^{-1} , \ldots , \rho^{-M}) = 0
$$
This means (since a non-zero linear functional on $\mathbb{R}^M$ is an open mapping) we have $\delta$, $|\delta - x|< \epsilon $ such that
$$
\delta \cdot (\rho^{-1} , \ldots , \rho^{-M}) < 0
$$
and thus $\delta$ satisfies (\ref{E:counterexample_delta}), concluding the proof.
\end{document}
|
\begin{document}
\title{Quantum Error Correction Alleviates Detection-Induced Coherent Errors}
\author{Qinghong Yang$^{1}$}
\author{Dong E. Liu$^{1,2,3}$}\email{Corresponding to: [email protected]}
\affiliation{$^{1}$State Key Laboratory of Low Dimensional Quantum Physics, Department of Physics, Tsinghua University, Beijing, 100084, China}
\affiliation{$^{2}$Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\affiliation{$^{3}$Frontier Science Center for Quantum Information, Beijing 100184, China}
\date{\today}
\begin{abstract}
We study the performance of quantum error correction codes (QECCs) under the {\em detection-induced coherent error} due to the imperfectness of practical implementations of stabilizer measurements, after running a quantum circuit. Considering the most promising surface code, we find that the detection-induced coherent error will result in undetected error terms, which will accumulate and evolve into logical errors. However, we show that such errors will be alleviated by increasing the code size, akin to eliminating other types of errors discussed previously. We also find that with detection-induced coherent errors, the exact surface code becomes an approximate QECC.
\end{abstract}
\maketitle
\section{Introduction}
The advent of programmable quantum computers~\cite{arute,Arute-Google2020Science,Gong-USTC2021-Science,Pino-TrapIon2021-Science,wu2021strong,ryananderson2021} using noisy intermediate-scale quantum (NISQ)~\cite{Preskill2018NISQ} processors has recently shown strong evidence of quantum ``supremacy'' or quantum advantage~\cite{arute,Zhong-USTC2020-Science,wu2021strong}. Those quantum hardware achievements along with the software development~\cite{QuantumSoftware-Nature2017-Review} pave the way for the potential fault-tolerant universal quantum computation. It is believed that the next milestone will be the experimental demonstrations of fault-tolerant quantum error corrections (QEC)~\cite{shor,stean,Calderbank96}, and some important progress~\cite{linke,andersen,mcewen,googleai} has been reported recently.
In the past twenty years, people have invented many QECCs, such as the $9$-qubit Shor code~\cite{shor}, the $7$-qubit Steane code~\cite{stean}, and the most promising surface code~\cite{kitaev,dennis,fowler}. A remarkable result of QEC is the threshold theorem, which states that if the physical error rate is below a critical value, QECCs can suppress the logical error rate to an arbitrarily low level~\cite{knill2,aharonov2,aliferis,nielsen}. The threshold theorem is proved based on stochastic error models~\cite{dennis,fowler,bombin,stephens,tomita,tuckett}, and the effectiveness of QECCs is also confirmed later from discussions of the correlated noise model~\cite{aharonov,novais,novais2,benaroya,chubb,staudt,chao} and the coherent error model~\cite{barnes,beale,bravyi,ehuang,cai,ouyang}, assuming perfect syndrome measurements. However, due to the imperfectness of experimental implementations of quantum gates, syndrome measurements cannot be perfect, and this will also have some influence on QECCs. Previously, people have partially considered the faulty measurement problem, and treated it using the stochastic error model. In this case, one can repeat several rounds of syndrome measurements, roughly of order $d$ times (where $d$ is the code size of the surface code)~\cite{dennis,Shigeo}, to ensure fault-tolerance.
Here, we focus on another type of stabilizer measurement error, dubbed {\em detection-induced coherent error}. For a measurement of a stabilizer $U$, one can implement it using a controlled-$U$ circuit, see Fig.~\ref{fig:code_illustration}(a). However, the practical implementations may not be accurate, and the actual operation generally measures another operator $V\equiv U-\delta U$, where $\delta U$ is the deviation from the ideal one. Note that for the ion trap quantum computing architecture, in order to combat such coherent errors during stabilizer measurements, Ref.~\cite{debroy} introduces an extra technique, which works efficiently for the same magnitude of errors in implementations of each stabilizer. Since the connectivity of other quantum computing architectures is not as good as that of the trapped ion architecture, and coherent errors in each stabilizer can not perfectly be of the same order of magnitude, in reality, such coherent errors will always exist. Thus, here comes a natural question: with the existence of $\delta U$, can the final state after the QEC procedure be used for quantum computation, or in other words, can the computation using this state reach the accuracy we want?
In this paper, through the most promising surface code (see Fig.~\ref{fig:code_illustration}(b)), we show that the detection-induced coherent error coming from stabilizer measurements of QECCs will result in accumulated logical errors, but those errors can be alleviated by QECCs themselves without extra techniques. Our discussions of the surface code are mainly concentrated on the superconducting qubit architecture \cite{arute,wu2021strong}. In addition, we also find that under the detection-induced coherent error, the exact surface code becomes an approximate QECC, thus our results imply that some approximate QECCs may also be possible for fault-tolerant quantum computation.
\begin{figure}
\caption{Illustrations of the controlled-$U$ circuit and the surface code. (a) The controlled-$U$ circuit for measuring operator $U$. (b) The $d=3$ surface code with $X_L=X_1X_2X_3$ and $Z_L=Z_1Z_6Z_{11}$.}
\label{fig:code_illustration}
\end{figure}
\section{Preliminary of detection-induced coherent errors}
Initially, the state of data qubits is supposed to be the eigenstate of all stabilizers with eigenvalue $+1$, denoted as $|\psi\rangle$. After implementing the quantum circuit shown in Fig.~\ref{fig:code_illustration}(a), we have
\begin{equation}
|0\rangle|\psi\rangle\rightarrow |\Psi_1\rangle\equiv \frac{1}{2}|0\rangle(|\psi\rangle+V_1|\psi\rangle)+\frac{1}{2}|1\rangle(|\psi\rangle-V_1|\psi\rangle),
\end{equation}
where $V_1=U_1-\delta U_1$ is the first stabilizer operator to be measured. Note that if there is no deviation, i.e., $V_1=U_1$, we have $|\Psi_1\rangle=|0\rangle|\psi\rangle$, as expected. For simplicity, we suppose that $V_1$ is still unitary, but not Hermitian. Since now $|\psi\rangle-V_1|\psi\rangle\neq 0$, after measuring the ancillary qubit, one can get $-1$ with probability $p(|1-\rangle)=\frac{1}{2}\operatorname{Re}\langle\psi|\delta U_1|\psi\rangle$,
and $+1$ with probability $p(|1+\rangle)=1-p(|1-\rangle)$, where $|1\pm\rangle\propto|\psi\rangle\pm V_1|\psi\rangle$, up to a normalization factor.
We'd like to know what will result from $\delta U$ in consecutive measurements, thus we consider another stabilizer measurement. Practically, it will become $V_2\equiv U_2-\delta U_2$. Suppose after the measurement of $U_1$, the state of data qubits collapses to $|1+\rangle$ (it is more likely for data qubits to collapse into this state), then the $V_2$ measurement will lead to
\begin{equation}
|0\rangle|1+\rangle\rightarrow\frac{1}{2}|0\rangle(|1+\rangle+V_2|1+\rangle)+\frac{1}{2}|1\rangle(|1+\rangle-V_2|1+\rangle).
\end{equation}
Let $|2\pm\rangle\propto|1+\rangle\pm V_2|1+\rangle$, up to a normalization factor.
Considering the fidelity $F(| n\pm\rangle)=|\langle \psi|n\pm\rangle|$, we have
\begin{equation}
\Delta F^2=[F(|1+\rangle)]^2-[F(|2+\rangle)]^2\sim\frac{2a(a+1)}{(a-2)(5a-4)}>0
\end{equation}
for small $a$, where we have assumed that $\operatorname{Re}\langle\psi|\delta U_1|\psi\rangle=\operatorname{Re}\langle\psi|\delta U_2|\psi\rangle=a$ for the same status of $U_1$ and $U_2$. This fact shows that after two controlled-$V$ gates, the state becomes less accurate, even though syndromes show no error. Thus, one can conclude that with the proceeding of syndrome measurements, the state will become less and less accurate. We now have a concrete discussion using the surface code.
\begin{comment}
$F(|2+\rangle)=|\langle\psi|2+\rangle|^2\sim(1-\operatorname{Re}\langle\psi|(\mathrm{d}lta U_1+\mathrm{d}lta U_2)|\psi\rangle)/(1-1/4\langle\psi|(3\mathrm{d}lta U_1+2\mathrm{d}lta U_2)|\psi\rangle)$, and $F(|1+\rangle)=(1-\operatorname{Re}\langle\psi|\mathrm{d}lta U_1|\psi\rangle)/(1-1/2\operatorname{Re}\langle\psi|\mathrm{d}lta U_1|\psi\rangle)$. Suppose $\operatorname{Re}\langle\psi|\mathrm{d}lta U_1|\psi\rangle=\operatorname{Re}\langle\psi|\mathrm{d}lta U_2|\psi\rangle=a$ is small, we have
\end{comment}
\section{Detection-Induced Coherent Errors for Surface Codes}
\subsection{Introduction to Surface Codes}\label{sub:sc}
The surface code is an example of the stabilizer codes \cite{nielsen}, and due to its locality and the high threshold \cite{dennis,fowler}, it is believed to be one of the most promising quantum error correction codes for fault-tolerant quantum computation. We briefly review the idea of the surface code~\cite{kitaev,dennis,fowler} in this subsection.
As shown in Fig. \ref{fig:code_illustration}(b), the surface code is implemented on a 2D array of physical qubits. Those physical qubits can be classified into two classes: data qubits ($1,\cdots,13$) residing on those round rectangular edges in Fig. \ref{fig:code_illustration}(b) and ancillary qubits ($a_1,\cdots,a_{12}$) residing on centers of vertices and plaquettes in Fig. \ref{fig:code_illustration}(b). Data qubits are used to encode quantum states, while ancillary qubits are used to detect the information, known as the error syndrome, of errors occurring on the encoded quantum state. For surface codes, we use two types of stabilizers to detect errors: $X$-stabilizers ($X^{\otimes 4}$) for detecting $Z$ errors and $Z$-stabilizers ($Z^{\otimes 4}$) for detecting $X$ errors. $X$-stabilizers appear as vertices on the 2D array, such as $X_2X_4X_7X_5$ and $X_1X_6X_4$ in Fig. \ref{fig:code_illustration}(b). If one $X$ error appears in data qubit $7$, $X_2X_4X_7X_5$ and $X_7X_9X_{12}X_{10}$ will detect it. This information is stored in ancillary qubits $a_4$ and $a_9$ through changing the states of $a_4$ and $a_9$ from $|0\rangle$ to $|1\rangle$. $Z$-stabilizers appear as plaquettes on the 2D array, such as $Z_4Z_6Z_9Z_7$ and $Z_1Z_4Z_2$. Information of $X$ errors from implementing $Z$-stabilizers is encoded on those ancillary qubits residing on the center of each plaquette. Due to the appearances of those stabilizers on the 2D array, in the following, we will call $X$-stabilizers and $Z$-stabilizers as site operators and plaquette operators, respectively.
After implementing all stabilizers to detect errors, one should measure those ancillary qubits to obtain the error syndrome, and then use decoding algorithms, such as the minimum-weight perfect-matching algorithm \cite{fowler}, to decode the error syndrome. The error chain can be fixed after decoding. To realize the error correction procedure, one just needs to act this error chain back on the quantum state. There will be two types of errors that can not be detected: the logical $X$ error and the logical $Z$ error. Note that these two logical errors are nothing but logical $X$ and $Z$ operators for the surface code, see $X_L$ and $Z_L$ in Fig. \ref{fig:code_illustration}(b). The reason that these logical errors can not be detected is that $X_L$ and $Z_L$ commute with all stabilizers. Note that error chains differing up to some stabilizers are equivalent and can be regarded as the same error. For a complete and thorough introduction to surface codes, we refer interested readers to Ref. \cite{fowler}.
\subsection{Imperfect CNOT and Modified Stabilizer Measurements}
From the discussion in Subsec. \ref{sub:sc}, we know that for a surface code, in order to detect errors, we need to implement two types of stabilizer operators --- $Z^{\otimes4}$ and $X^{\otimes4}$. Experimentally, each stabilizer can be realized through four $\operatorname{CNOT}$ gates (see Fig.~\ref{fig:code_illustration}(c) and \ref{fig:code_illustration}(d)), which can be constructed from two single-qubit Hadamard gates and a two-qubit controlled-Z gate denoted as $\Phi_{ct}$ (c and t are abbreviations for controlled and target qubits, respectively), that is $\operatorname{CNOT}_{ct}=\operatorname{H}_t\Phi_{ct}\operatorname{H}_t$. This strategy is widely used in superconducting quantum computation~\cite{krantz}. The controlled-Z gate can be realized through two-body interactions, which produce a time evolution operator $U(t)=\operatorname{diag}(1,1,1,\operatorname{exp}(-iJt/\hbar))$; and when $Jt_0/\hbar=\pi$, $U(t)$ reduces to the ideal controlled-Z gate $\Phi_{ct}$~\cite{krantz}. However, the fluctuation of the interaction constant $J$ (or the time interval) will produce an imperfect controlled-Z gate and then an imperfect $\operatorname{CNOT}$ gate. The actual evolution operator can be written as $U^{\prime}(t_0)=\Phi_{ct}^{\kappa}\Phi_{ct}$, where $\kappa=\delta J/J$ is the {\em imperfect rate} and $\delta J$ is the fluctuation of $J$. We assume that the fluctuation is constant (see
App. \ref{sec:IR&MGF} for its connection with the experimentally measurable quantity). We assume the single-qubit gate is much better than the two-qubit gate~\cite{arute} and the imperfectness of $\operatorname{CNOT}$ gates only results from the imperfect controlled-Z gate, that is $\operatorname{CNOT}^{\prime}=\operatorname{H}_t\Phi_{ct}^{\kappa}\Phi_{ct}\operatorname{H}_t$. Note that the imperfect controlled-$Z$ gate was also considered in Ref.~\cite{brien,huang} for different motivations.
Using the above error model, we can derive the modified measurement operators of stabilizers. We assume the surface code is prepared in $|\Psi\rangle$, where ancillary qubits are all in $|0\rangle$. Then, the measurements of plaquette operators (see Fig.~\ref{fig:code_illustration}(c)) can be expressed as
\begin{equation}
\begin{split}
&\quad\operatorname{CNOT}^{\prime}_{d_4a}\operatorname{CNOT}^{\prime}_{d_3a}\operatorname{CNOT}^{\prime}_{d_2a}\operatorname{CNOT}^{\prime}_{d_1a}|\Psi\rangle\\
&=\left(\operatorname{H}_a\Phi_{d_4a}^{\kappa}\Phi_{d_3a}^{\kappa}\Phi_{d_2a}^{\kappa}\Phi_{d_1a}^{\kappa}\operatorname{H}_a\right)\\
&\quad\times\operatorname{CNOT}_{d_4a}\operatorname{CNOT}_{d_3a}\operatorname{CNOT}_{d_2a}\operatorname{CNOT}_{d_1a}|\Psi\rangle,
\end{split}
\end{equation}
which means the imperfect $\operatorname{CNOT}$ introduces an extra term for measuring a plaquette operator. Keeping terms up to the first order of $\kappa$, the extra term yields
\begin{equation}
\begin{split}
&\quad\operatorname{H}_a\Phi_{d_4a}^{\kappa}\Phi_{d_3a}^{\kappa}\Phi_{d_2a}^{\kappa}\Phi_{d_1a}^{\kappa}\operatorname{H}_a\\
&\sim\quad\left[(1-i\pi\kappa)\mathbf{I}^{\otimes 4}+\frac{i\pi\kappa}{4}(Z_{d_1}+Z_{d_2}+Z_{d_3}+Z_{d_4})\right]\mathbf{I}_a\\
&\quad-\left[(0-i\pi\kappa)\mathbf{I}^{\otimes 4}+\frac{i\pi\kappa}{4}(Z_{d_1}+Z_{d_2}+Z_{d_3}+Z_{d_4})\right]X_a,
\end{split}
\end{equation}
which shows that for plaquette operator measurements, imperfect $\operatorname{CNOT}$ gates will introduce coherent $Z$ errors for data qubits, as well as the coherent bit-flip error for the ancillary qubit. As one will see in later parts, these detection-induced coherent errors will result in undetected errors, which will accumulate into logical errors after a couple of rounds of syndrome measurements.
Similarly, for site operators, the extra term yields
\begin{equation}
\begin{split}
&\quad\operatorname{H}_a\operatorname{CNOT}^{\prime}_{ad_4}\operatorname{CNOT}^{\prime}_{ad_3}\operatorname{CNOT}^{\prime}_{ad_2}\operatorname{CNOT}^{\prime}_{ad_1}\operatorname{H}_a|\Psi\rangle\\
&\sim\left\{\mathbf{I}_a\left[(1-i\pi\kappa)\mathbf{I}^{\otimes 4}+\frac{i\pi\kappa}{4}(X_{d_1}+X_{d_2}+X_{d_3}+X_{d_4})\right]\right.\\
&\quad-\left.X_a\left[-i\pi\kappa\mathbf{I}^{\otimes 4}+\frac{i\pi\kappa}{4}(X_{d_1}+X_{d_2}+X_{d_3}+X_{d_4})\right]\vphantom{\frac{i\pi\kappa}{4}}\right\}\\
&\quad\times\operatorname{H}_a\operatorname{CNOT}_{ad_4}\operatorname{CNOT}_{ad_3}\operatorname{CNOT}_{ad_2}\operatorname{CNOT}_{ad_1}\operatorname{H}_a|\Psi\rangle.
\end{split}
\end{equation}
Thus, for site operator measurements, imperfect $\operatorname{CNOT}$ gates will introduce coherent $X$ errors for data qubits.
\section{Effect of modified stabilizer measurements and its alleviation}
Now we discuss the effect of those modified stabilizer measurements for quantum computation. Suppose the initial state is prepared perfectly in the logical code space, and then experiences error $E$. We further apply stabilizer measurements to detect errors.
\begin{comment}
Previous studies~\cite{dennis,wang,fowler} rearrange and incorporate the $\operatorname{CNOT}$ gates from both plaquette and site measurements into the same round to reduce the measurement time. However, a $\operatorname{CNOT}$ gate of plaquette measurements and a $\operatorname{CNOT}$ gate of site measurements sharing the same data qubit do not commute, thus the correct stabilizer measurements require a two-round strategy
\end{comment}
Here, we analyze effects of the detection-induced coherent error using the two-round strategy~\cite{versluis,brien}---first measuring plaquette (site) operators and followed by measuring site (plaquette) operators. Assume that the plaquette measurement is applied first, and direct calculations yield (see App.~\ref{sec:MS} for details)
\begin{equation}
\begin{split}
\mathcal{G}_Z^1(\operatorname{CNOT}^{\prime})E|\Psi\rangle&=\mathcal{D}^1(Z)\mathcal{G}_Z^1(\operatorname{CNOT})E|\Psi\rangle\\
&=E^Z\left[\mathcal{D}^1(Z)E^X\mathcal{A}(E^X)\right]|\Psi\rangle,
\end{split}
\end{equation}
where $\mathcal{G}_{Z}^{1}\left(\operatorname{CNOT}^{\prime}\right)$ denotes measurements of {\em all} plaquette operators in the first round based on imperfect $\operatorname{CNOT}$ gates, $E^{X}$ is the $X$ error acting on data qubits, the operator acting on ancillary qubits and containing the information of $E^X$ is denoted by $\mathcal{A}\left(E^{X}\right)$, and $\mathcal{D}^1(Z)$ is the total deviation from ideal plaquette measurements caused by imperfect $\operatorname{CNOT}$ gates in the first round. After the plaquette measurement, one should measure ancillary qubits so as to get the syndrome for $X$ errors. We use $\sum_{j}\mathcal{D}_{ij}^{1}\left(Z\right)$ to denote the term that survives after measurements of ancillary qubits, where $i$ stands for the $i$th independent ancilla configuration in $\mathcal{D}^{1}(Z)$, and the summation (over $j$) contains all coherent terms that share the same ancilla configuration. Thus, after the ancilla measurement, we have
\begin{equation}
\begin{split}
\mathcal{G}_Z^1(\operatorname{CNOT}^{\prime})E|\Psi\rangle\rightarrow \left[E^Z\sum_j\mathcal{D}_{ij}^1(Z)\right]E^X\mathcal{A}(E^X)|\Psi\rangle.
\end{split}
\end{equation}
$E^Z\sum_j\mathcal{D}_{ij}^1(Z)$ contains $Z$ errors that need to be detected in the next round of site operator measurements, which leads to the following expression
\begin{equation}
\begin{split}
&\mathcal{G}_X^2(\operatorname{CNOT}^{\prime})\left[E^Z\sum_j\mathcal{D}_{ij}^1(Z)\right]E^X\mathcal{A}(E^X)|\Psi\rangle=\mathcal{D}^2(X)\\
&\times\left[E^Z\mathcal{A}(E^Z)\right]\left[\sum_j\mathcal{D}_{ij}^1(Z)\mathcal{A}(\mathcal{D}_{ij}^1(Z))\right]\left[E^X\mathcal{A}(E^X)\right]|\Psi\rangle.
\end{split}
\end{equation}
Since each $\mathcal{D}_{ij}^{1}\left(Z\right)$ is an independent operator, each $\mathcal{A}\left(\mathcal{D}_{ij}^{1}\left(Z\right)\right)$ is also independent, which uniquely reflects the $Z$ error configuration in $\mathcal{D}_{ij}^{1}\left(Z\right)$. After implementing this round of site operator measurements, we also need to measure ancillary qubits of site operators. Since $\mathcal{D}^{2}\left(X\right)$ also contains several ancilla configurations, $\mathcal{D}^{2}\left(X\right)\sum_{j}\mathcal{D}_{ij}^{1}\left(Z\right)\mathcal{A}\left(\mathcal{D}_{ij}^{1}\left(Z\right)\right)$ will result in some different $Z$ errors sharing the same ancilla configuration, and this means the error correction can not exactly correct errors, and may even introduce more errors.
Therefore, the final state will be a coherent state with some correct terms and some incorrect terms. Those incorrect terms get worse due to the error correction procedure and will accumulate errors. Here, we suppose that all ancilla measurements are error-free, and also ignore long error chains which are roughly longer than one half of the code size.
By doing so, error $E$ can be perfectly corrected, and we can concentrate on effects of the detection-induced coherent error just as there was no error $E$. In order to have a quick and clear interpretation of this effect, we repeatedly execute the quantum error correction cycles (including error detections and error corrections). Then, we use Mathematica based on the package {QUANTUM}~\cite{muoz} to simulate detection, decoding and error-correction procedures for the $d=3$ surface code according to the previous discussion (see App.~\ref{sec:MS} for details), we find undetected error terms accumulate errors and evolve to logical operators with a certain probability after several QEC cycles. For simplicity, we again keep terms up to the first order of imperfect rate $\kappa$. Similar analyses can be applied to other code sizes and a quantum circuit with logical operations.
Calculating the probability of undetected error terms, that evolve to logical errors, is very hard for arbitrary code sizes, thus we can not exactly measure the performance of the surface code.
Fortunately, we can obtain the worst case performance. Note that the total deviation $\mathcal{D}(Z)$ contains several different configurations of operators acting on data qubits and ancillary qubits, such as the error-free configuration $\mathbf{I}^{\otimes (n+n_a^z)}$ with $n$ being the number of data qubits and $n_a^z$ being the number of ancillary qubits for plaquette operator measurements, and other configurations represent $Z$ errors on data qubits and $X$ errors on ancillary qubits induced by imperfect $\operatorname{CNOT}$ gates. For those configurations containing $\mathbf{I}^{\otimes n_a^z}$, there are $n+1$ possibilities, where one possibility corresponds to $\mathbf{I}^{\otimes(n+n_a^z)}$ and $n$ possibilities correspond to $\mathbf{I}^{\otimes(n-1+n_a^z)}\otimes Z_i$ with $i$ being the qubit acted by $Z$. The structure of $\mathcal{D}(X)$ is similar to that of $\mathcal{D}(Z)$. In order to simplify the analysis, we consider a worst-case scenario: only the case, in which each round of ancilla measurements projects ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$, is free of accumulated logical errors. For other cases (projecting to other ancilla configurations), we assume that the data qubits will finally experience accumulated logical errors (not all cases in reality).
According to the structure of $\mathcal{D}$, we find the probability of projecting ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$ in $k$th round stabilizer measurements, under the condition that the $(k-1)$th round of ancilla measurements projects ancilla into the state acted by $\mathbf{I}^{\otimes n_a}$, is (see App.~\ref{sec:WCA} for details)
\begin{equation}
\begin{split}
&\quad P_{k|k-1}(\mathbf{I}^{\otimes n_a})\\
&=\mathcal{N}_k\left[1+\frac{k^2(2d^2-3d+1)^2}{4}\pi^2\kappa^2+\frac{4d^2-7d+2}{8}\pi^2\kappa^2\right],
\end{split}
\end{equation}
where $P_{1|0}\equiv P_1$, $\mathcal{N}_k$ is the normalized factor, and $k\ge1$ is the round count of stabilizer measurements (for the first plaquette round, $k=1$, and for the first site round, $k=2$, etc.). Combining with those cases that are not $\mathbf{I}^{\otimes n_a}$ configurations, we can derive the normalized factor (see App. \ref{sec:WCA} for the explicit expression).
\begin{comment}
\begin{widetext}
\begin{equation}
\mathcal{N}_k=\frac{1}{1+\frac{k^2(2d^2-3d+1)^2}{4}\pi^2\kappa^2+(1+\theta(k-2))\frac{4d^2-7d+2}{8}\pi^2\kappa^2+\frac{5d^2-9d+4}{4}\pi^2\kappa^2},
\end{equation}
\end{widetext}
where $\theta(x)$ is the unit step function with $\theta(x\ge 0)=1$ and $\theta(x<0)=0$.
\end{comment}
After $2m$ rounds of syndromes measurements, the probability of the case, where each round of ancilla measurements projects ancilla into the state acted by $\mathbf{I}^{\otimes n_a}$, is
\begin{equation}
P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})=P_{1}(\mathbf{I}^{\otimes n_a}) P_{2|1}(\mathbf{I}^{\otimes n_a})\cdots P_{2m|2m-1}(\mathbf{I}^{\otimes n_a}).
\end{equation}
For the worst case, the fidelity of the final state satisfies (see App. \ref{sec:WCA} for details)
\begin{equation}\label{eq:fidelity}
\begin{split}
F&\ge P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})|\langle\Psi|\Psi_{f_1}\rangle|\\
&= P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})|\alpha|,
\end{split}
\end{equation}
where $|\Psi_{f_1}\rangle$ is the final state without logical errors, and $\alpha$ is the amplitude of the correct state in $|\Psi_{f_1}\rangle$ with
\begin{equation}
|\alpha|=\sqrt{\frac{1+m^2(2d^2-3d+1)^2\pi^2\kappa^2}{1+m^2(2d^2-3d+1)^2\pi^2\kappa^2+\frac{4d^2-7d+2}{8}\pi^2\kappa^2}}.
\end{equation}
In Eq.~\eqref{eq:fidelity}, we can focus on the minimum fidelity $F_{min}=P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})|\alpha|$, which characterizes the share of correct terms in the final state, to measure the performance of the surface code under detection-induced coherent errors.
\begin{figure}
\caption{Log-log plot of the infidelity $r$ changing with the code size $d$. The infidelity is defined as $r=1-F_{min}$.}
\label{fig:infidelity}
\end{figure}
We plot the infidelity $r=1-F_{min}$ as a function of the code size $d$, at different imperfect rate $\kappa$s, shown in Fig.~\ref{fig:infidelity}. We can find that the infidelity in the worst case decreases with the increasing of the code size $d$ and decays in the form of power law functions in the large code size regime. Thus, the detection-induced coherent error can be alleviated by increasing $d$ (see App. \ref{sec:WCA} for discussions). We also find that with the increasing of $\kappa$ (still in the small $\kappa$ regime to ensure the small parameter expansion), the decay behavior saturates to $r\sim10^{0.47}d^{-2.1}$ (the top straight line in Fig.~\ref{fig:infidelity}). In Fig.~\ref{fig:infidelity}, we take $m=3$. For larger $m$s, $P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})$ will be smaller and $r$ will be larger. However, for large $k$s, $P_{k|k-1}(\mathbf{I}^{\otimes n_a})$ will approach $1$, and then there will be fewer differences between large-$m$ cases and small-$m$ cases (see App.~\ref{sec:WCA} for details). One may notice that for small code sizes, there is a bending behavior in the $r\sim d$ curve, indicating that in the error model under consideration, some larger-size codes have worse performance than smaller-size codes. What's more, one may also notice large values of the infidelity of such errors. However, we emphasize that since we just focus on the worst case, negative results can not be used to judge QECCs. On the contrary, the positiveness shown here again indicates the corresponding effectiveness of QECCs.
\section{Connection with approximate QECCs}\label{sec:CwAQECCs}
Before we discuss the connection between surface codes under the detection-induced coherent error and {\em approximate} QECCs, we provide a brief introduction to the Knill-Laflamme condition for the approximate QECCs, which have attracted much interest~\cite{crepeau,beny,wangPRL18,faist,brandao}. For details, we refer to Refs. \cite{beny,brandao}.
Briefly speaking, approximate QECCs are QECCs which can not correct errors perfectly. Rigorously speaking, a QECC is called an approximate QECC, if it satisfies the Knill-Laflamme condition \cite{brandao}:
\begin{equation}\label{eq:KLCfAQECC}
\langle\psi_i|O|\psi_j\rangle=C_{O}\delta_{ij}+\varepsilon_{ij}(O),
\end{equation}
where $\{|\psi_1\rangle,\cdots,|\psi_{2^k}\rangle\}$ are codewords that span the code space $\mathcal{C}$, and $O=E_a^{\dagger}E_b$ with $\{E_a\}$ being the operation elements of correctable local error channels $\mathcal{E}$. Note that the Knill-Laflamme condition for the approximate QECCs has an extra term $\varepsilon_{ij}(O)$ compared with that of the exact QECCs. Intuitively, this tail implies that even local error operators can result in logical errors. If we let $i\neq j$, we have $\langle\psi_i|O|\psi_j\rangle$=$\varepsilon_{ij}(O)\neq 0$, and this tells us that the transition probability (transit through local operator $O$) from one logical state $|\psi_i\rangle$ to another logical state $|\psi_j\rangle$ is not zero. Note that in previous works, stabilizer measurements are assumed to be perfect, thus the encoding process using stabilizer operators is also perfect. Then, in those cases, correctable errors can be perfectly corrected, and different logical states can not be converted through local operators. Therefore, those cases are within the category of {\em exact} QECCs. However, as we will see in the following, if stabilizer measurements are assumed to be imperfect due to the experimental limitation, situations will change, and approximate QECCs will come out naturally.
We now return to our focus -- surface codes. The logical ``0'' state of the surface code can be written as \cite{kitaev,pachos}
\begin{equation}\label{0state}
|\bar{0}\rangle=\prod_{s}\frac{1}{\sqrt{2}}(1+A(s))|0\rangle^{\otimes n},
\end{equation}
where $A(s)$ stands for site operator $X^{\otimes 4}$ and the logical ``1'' state can be derived from $|\bar{1}\rangle=X_L|\bar{0}\rangle$. For preparing $|\bar{0}\rangle$, we just need to implement one round of site operator measurements, then select an arbitrary open chain whose boundary consists of positions of all ancillary qubits with outcome $-1$ and apply $Z$ to each link of this chain \cite{dennis}. Previously, we assumed that the initial state can be prepared perfectly in the code space, but with the detection-induced coherent error, the initial state can not be perfect anymore. For example, practically, the logical ``0'' state and the logical ``1'' state will be dressed by $\mathcal{D}(X)$. Ancilla measurements will be more likely to project ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$ coming from $\mathcal{D}(X)$. Thus, the logical states in reality will be more likely to be $|\bar{0}^{\prime}\rangle=\mathcal{Y}(X)|\bar{0}\rangle$ and $|\bar{1}^{\prime}\rangle=\mathcal{Y}(X)|\bar{1}\rangle$, where for the $d=3$ surface code and keeping terms up to the first order of $\kappa$,
\begin{equation}
\begin{split}
\mathcal{Y}(X)&\propto\left(1-5i\pi\kappa\right)\mathbf{I}^{\otimes13}\\
&\quad+\frac{i\pi\kappa}{4}\left(X_{1}+X_{2}+X_{3}+X_{11}+X_{12}+X_{13}\right)\\
&\quad+\frac{i\pi\kappa}{2}\left(X_{4}+X_{5}+X_{6}+X_{7}+X_{8}+X_{9}+X_{10}\right),
\end{split}
\end{equation}
where we omit the normalization factor. For a general $d$, $\mathcal{Y}(X)$ has the same structure. Note that for the $d=3$ surface code, the correctable error $\mathcal{E}$ can be constructed from the operation elements $E_a=\{I,X_1\cdots,X_{13},Z_1\cdots,Z_{13},Y_1,\cdots,Y_{13}\}$. Therefore, taking $O=X_2$ as an example, we have $\langle\bar{1}^{\prime}|X_2|\bar{0}^{\prime}\rangle\propto\langle\bar{1}|\mathcal{Y}^{\dagger}(X)X_2\mathcal{Y}(X)|\bar{0}\rangle=(1/8)\pi^2\kappa^2\neq 0$. Note that the $d=3$ surface code $\left[\left[ 13,1,3\right]\right]$ is an exact QECC, satisfying the Knill-Laflamme condition $\langle\phi_i|O|\phi_j\rangle=C_O\delta_{ij}$ with $\left\{|\phi_i\rangle\right\}$ forming the code space of a QECC and $E$ being the correctable local errors \cite{knill}. However, the above discussions show that due to the detection-induced coherent error, $\langle\phi_i^{\prime}|O|\phi_j^{\prime}\rangle\neq 0$ for $i\neq j$, which means the Knill-Laflamme condition becomes $\langle\phi_i^{\prime}|O|\phi_j^{\prime}\rangle=C_O\delta_{ij}+\varepsilon_{ij}(O)$. One finds that this is the Knill-Laflamme condition for approximate QECCs, Eq. \eqref{eq:KLCfAQECC}. For the $d=3$ surface code case, the exact structure of $\varepsilon_{ij}(O)$ can be found in App. \ref{sec:DSV}. Note that for a general $d$, the structure of $\varepsilon_{ij}(O)$ has the same form with modifications of values, and the derivation is also similar to that of the $d=3$ case.
It is easy to check that considering larger code sizes, some local errors still satisfy the exact Knill-Laflamme condition, if we just keep terms up to the first order of $\kappa$.
However, the detection-induced coherent error can also introduce higher order terms, thus in general surface codes become approximate QECCs. In order to justify the whole QEC procedure, we need to include QEC cycles (as shown in the last section) along with the initial encoding step. Since this encoding step just needs one round of site operator measurements, our former discussions can be applied immediately and imply that the residual errors of approximate QECCs
can be alleviated. Previous studies show that allowing negligible errors existing in the recovery procedure can lead to better QECCs \cite{benaroya,leung,crepeau}, thus together with our result, one can anticipate that there may be good approximate QECCs suitable for fault-tolerant quantum computation.
\section{Summary and discussion}
Based on the surface code, we show that the detection-induced coherent error will result in accumulated logical errors after running a quantum circuit, and this impact can be alleviated by increasing the code size. Effects of the detection-induced coherent error may be more serious for a nontrivial quantum circuit beyond QEC-only circuits, and we leave it to further investigations. We also show that the actual logical states are imperfect under the detection-induced coherent error, and then the exact surface code becomes an approximate QECC. Therefore, our results imply that some approximate QECCs may also be possible for fault-tolerant quantum computation.
\begin{comment}
Due to the structure of the surface code, we can classify those data qubits into $5$ groups, that is there are
\begin{enumerate}
\item $4$ data qubits, each of which only belongs to one three-operator stabilizer;
\item $2(d-2)$ data qubits, each of which only belongs to one four-operator stabilizer;
\item $2(d-2)$ data qubits, each of which is shared by two three-operator stabilizers;
\item $2(d-1)$ data qubits, each of which is shared by one three-operator stabilizer and one four-operator stabilizer;
\item $(d-2)^2+(d-3)(d-1)$ data qubits, each of which is shared by two four-operator stabilizers.
\end{enumerate}
For those cases that ancillary qubits are not acted by $\mathbf{I}^{\otimes n_a}$, the total probability is
\begin{equation}
p_{k=1}(\cancel{\mathbf{I}^{\otimes n_a}})=\mathcal{N}_1\frac{5d^2-9d+4}{4}\pi^2\kappa^2
\end{equation}
for $k=1$ and
\begin{equation}
p_{k\neq1}(\cancel{\mathbf{I}^{\otimes n_a}})=\mathcal{N}_{k\neq1}\frac{14d^2-25d+10}{8}\pi^2\kappa^2
\end{equation}
\begin{equation}
\begin{split}
&\quad p_k(\mathbf{I}^{\otimes n_a})\\
&=\mathcal{N}_k\left[1+\frac{k^2(2d^2-3d+1)^2}{4}\pi^2\kappa^2+\frac{4d^2-7d+2}{8}\pi^2\kappa^2\right],
\end{split}
\end{equation}
\end{comment}
\begin{acknowledgments}
The authors thank Xuegang Li, Yuanchen Zhao, Li Rao, Rui Chao, and Xiaosi Xu for helpful discussions. The work is supported by NSF-China (Grant No.~11974198) and the startup grant from State Key Laboratory of Low-Dimensional Quantum Physics of Tsinghua University.
\end{acknowledgments}
\begin{appendix}
\begin{widetext}
\section{Connection between imperfect rate and minimum gate fidelity}\label{sec:IR&MGF}
As mentioned in the main text, the imperfect CNOT gate can be modeled by $\operatorname{CNOT}=\operatorname{H}_t\Phi^{\kappa}_{ct}\Phi_{ct}\operatorname{H}_t$. Since the imperfect rate $\kappa$ is a theoretical quantity and cannot be directly measured experimentally, we need to connect it with an experimentally measurable quantity, and then we can know the performance of a CNOT gate with a specific imperfect rate. Here we show the connection between the imperfect rate and the minimum gate fidelity, defined as~\cite{nielsen}
\begin{equation}
F_G(U,\mathcal{E})\equiv \min_{|\psi\rangle}F(U|\psi\rangle,\mathcal{E}(|\psi\rangle\langle\psi|)),
\end{equation}
where $U$ stands for the operation of an ideal quantum gate, $\mathcal{E}$ stands for the quantum channel representing an imperfect quantum gate, $F$ is the fidelity between two quantum states, and the minimization is over all possible two-qubit states. Here we rewrite the ideal CNOT as $U$ and the imperfect CNOT as $U^{\prime}$, then the minimum gate fidelity is
\begin{equation}\label{eq:MGF}
\begin{split}
F_G(U,U^{\prime})&=\min_{|\psi\rangle}F(U|\psi\rangle,U^{\prime}|\psi\rangle)\\
&=\min_{|\psi\rangle}\operatorname{Tr}\sqrt{\left(U|\psi\rangle\langle\psi|U^{\dagger}\right)^{1/2}\left(U^{\prime}|\psi\rangle\langle\psi|U^{\prime\dagger}\right)\left(U|\psi\rangle\langle\psi|U^{\dagger}\right)^{1/2}}\\
&=\min_{|\psi\rangle}|\langle\psi|U^{\dagger}U^{\prime}|\psi\rangle|\\
&=\min_{|\psi\rangle}|a+be^{-i\pi\kappa}|\\
&=\min_{|\psi\rangle}\left(a\sqrt{1+2\frac{b}{a}\cos\left(\pi\kappa\right)+\left(\frac{b}{a}\right)^{2}}\right)\\
&=\frac{1}{2}\sqrt{2+2\cos\left(\pi\kappa\right)},
\end{split}
\end{equation}
where
\begin{equation}
\begin{split}
a&=\frac{3}{4}+\frac{1}{4}\left[\left(|\alpha|^{2}+|\beta|^{2}-|\gamma|^{2}-|\kappa|^{2}\right)+\left(2\gamma^{*}\kappa+2\gamma\kappa^{*}\right)\right],\\b&=\frac{1}{4}-\frac{1}{4}\left[\left(|\alpha|^{2}+|\beta|^{2}-|\gamma|^{2}-|\kappa|^{2}\right)+\left(2\gamma^{*}\kappa+2\gamma\kappa^{*}\right)\right],
\end{split}
\end{equation}
we have let $|\psi\rangle=\alpha|0\rangle_{c}|0\rangle_{t}+\beta|0\rangle_{c}|1\rangle_{t}+\gamma|1\rangle_{c}|0\rangle_{t}+\kappa|1\rangle_{c}|1\rangle_{t}$ with $\alpha,\beta,\gamma$ and $\kappa$ being complex numbers, and the minimum is obtained when $\left(|\alpha|^{2}+|\beta|^{2}-|\gamma|^{2}-|\kappa|^{2}\right)+\left(2\gamma^{*}\kappa+2\gamma\kappa^{*}\right)=-1$. For this minimum condition, $\alpha=\beta=\gamma=0,\kappa=1$ and $\alpha=\beta=\kappa=0,\gamma=1$ are possible configurations, and the corresponding states are $|\psi\rangle=|1\rangle_c|1\rangle_t$ and $|\psi\rangle=|1\rangle_c|0\rangle_t$. Since they are both direct product states, they can be easily prepared experimentally (if we measure the minimum gate fidelity of the imperfect controlled-Z gate, the state satisfying the minimum condition is a coherent state, which will be difficult to prepare experimentally), and then one can measure the minimum gate fidelity instead of the average gate fidelity through randomized benchmarking. According to Eq.~\eqref{eq:MGF}, one can know that if $\kappa=0.01,0.02,0.05,0.1$ and $0.4$ (values shown in Fig.~2 in the main text), then the minimum gate fidelity will be $0.99988,0.99951,0.9969,0.9877$ and $0.809$, respectively.
\section{Notes on the simulation of $d=3$ surface code }\label{sec:MS}
As mentioned in the main text, we use the two-round strategy to implement the simulation. We first do the plaquette measurement, whose operation can be expressed as
\begin{equation}
\begin{split}
\mathcal{G}_Z^1(\operatorname{CNOT}^{\prime})E|\Psi\rangle&=\mathcal{D}^1(Z)\mathcal{G}_Z^1(\operatorname{CNOT})E|\Psi\rangle\\
&=\mathcal{D}^1(Z)\mathcal{G}_Z^1(\operatorname{CNOT})E^ZE^X|\Psi\rangle\\
&=E^Z\mathcal{D}^1(Z)\left[\mathcal{G}_Z^1(\operatorname{CNOT})E^X\mathcal{G}_Z^1(\operatorname{CNOT})\right]\mathcal{G}_Z^1(\operatorname{CNOT})|\Psi\rangle\\
&=E^Z\left[\mathcal{D}^1(Z)E^X\mathcal{A}(E^X)\right]|\Psi\rangle,
\end{split}
\end{equation}
in the second equality, we split $E$ into $Z$ errors and $X$ errors, and in the fourth equality, we have used the fact that $|\Psi\rangle$ is a perfect state in the code space and is invariant under the action of ideal stabilizers. $\mathcal{A}(E^X)$ results from the error propagation nature of $\operatorname{CNOT}$ gates, which will propagate $X$ errors from data qubits to ancillary qubits. Attributed to this feature, one can figure out errors in data qubits from syndromes shown in ancillary qubits. $\mathcal{D}^1(Z)$ is the total deviation from ideal plaquette measurements caused by imperfect $\operatorname{CNOT}$ gates in the first round, and can be represented as $\mathcal{D}^1(Z)=\sum_{i}\sum_j\mathcal{D}^1_{ij}(Z)$, where $i$ stands for the $i$th independent ancilla configuration, and $j$ stands for the $j$th term that shares the same ancilla configuration. The expression of $\mathcal{D}^{1}(Z)$, which is the product of the deviations of all plaquette stabilizers (Eq.~(5) in the main text), reads as
\begin{equation}
\begin{split}\label{eq:d1z}
\mathcal{D}^{1}(Z)=&\left[(1-5i\pi\kappa)\mathbf{I}^{\otimes 13} +\frac{1}{4}i\pi\kappa\left(Z_{1}+Z_{3}+Z_{6}+Z_{8}+Z_{11}+Z_{13}\right)
+\frac{1}{2}i\pi\kappa\left(Z_{2}+Z_{4}+Z_{5}+Z_{7}+Z_{9}+Z_{10}+Z_{12}\right)\right]\mathbf{I}^{\otimes 6}_a\\
&\quad+\frac{1}{4}i\pi\kappa(3\mathbf{I}^{\otimes 13}+Z_1+Z_2+Z_4)X_{a1}+\frac{1}{4}i\pi\kappa(3\mathbf{I}^{\otimes 13}+Z_2+Z_3+Z_5)X_{a2}\\
&\quad +\frac{1}{4}i\pi\kappa(3\mathbf{I}^{\otimes 13}+Z_9+Z_{11}+Z_{12})X_{a11}+\frac{1}{4}i\pi\kappa(3\mathbf{I}^{\otimes 13}+Z_{10}+Z_{12}+Z_{13})X_{a12}\\
&\quad+\frac{1}{4}i\pi\kappa(4\mathbf{I}^{\otimes 13}+Z_4+Z_6+Z_7+Z_9)X_{a6}+\frac{1}{4}i\pi\kappa(4\mathbf{I}^{\otimes 13}+Z_5+Z_7+Z_8+Z_{10})X_{a7},
\end{split}
\end{equation}
where $a1,a2,a6,a7,a11$ and $a12$ stand for the ancillary qubits (as shown in Fig. \ref{fig:code_illustration} (b) of the main text) used for implementing plaquette operator measurements, $i(i=1,\cdots,13)$ stands for data qubits, and we have kept terms up to the first order of $\kappa$,
and used the fact that for the three-operator stabilizers, such as $Z_{d_1}Z_{d_2}Z_{d_3}$, the deviation (Eq.~(5) in the main text) reads as
\begin{equation}
\begin{split}
\operatorname{H}_a\Phi_{d_3a}^{\kappa}\Phi_{d_2a}^{\kappa}\Phi_{d_1a}^{\kappa}\operatorname{H}_a\sim\left[(1-\frac{3i\pi\kappa}{4})\mathbf{I}^{\otimes 4}+\frac{i\pi\kappa}{4}(Z_{d_1}+Z_{d_2}+Z_{d_3})\right]\mathbf{I}_a
+\left[\frac{3i\pi\kappa}{4}\mathbf{I}^{\otimes 4}-\frac{i\pi\kappa}{4}(Z_{d_1}+Z_{d_2}+Z_{d_3})\right]X_a.
\end{split}
\end{equation}
The same holds for site operators.
For a particular ancilla configuration, say $X_{a6}$, $\sum_j\mathcal{D}^1_{ij}(Z)$ reads as
\begin{equation}
\begin{split}
\sum_j\mathcal{D}^1_{ij}(Z)=\frac{1}{4}i\pi\kappa(4\mathbf{I}^{\otimes 13}+Z_4+Z_6+Z_7+Z_9)X_{a6},
\end{split}
\end{equation}
where $\mathbf{I}^{\otimes 13},Z_4,Z_6,Z_7$ and $Z_9$ are five different error configurations sharing the same ancilla configuration, which resulted from the imperfect implementation of $Z_4Z_6Z_7Z_9$ stabilizer. After measuring the ancillary qubits, only one ancilla configuration survives. Thus, we have
\begin{equation}
\begin{split}
\mathcal{G}_Z^1(\operatorname{CNOT}^{\prime})E|\Psi\rangle\rightarrow \left[E^Z\sum_j\mathcal{D}_{ij}^1(Z)\right]E^X\mathcal{A}(E^X)|\Psi\rangle.
\end{split}
\end{equation}
For the next site operator measurements, by similar analysis, we have
\begin{equation}
\begin{split}
&\mathcal{G}_X^2(\operatorname{CNOT}^{\prime})\left[E^Z\sum_j\mathcal{D}_{ij}^1(Z)\right]E^X\mathcal{A}(E^X)|\Psi\rangle=\mathcal{D}^2(X)\left[E^Z\mathcal{A}(E^Z)\right]\left[\sum_j\mathcal{D}_{ij}^1(Z)\mathcal{A}(\mathcal{D}_{ij}^1(Z))\right]\left[E^X\mathcal{A}(E^X)\right]|\Psi\rangle.
\end{split}
\end{equation}
After measuring site operators, we also need to measure the ancillary qubits to get the syndrome. Since there are also several ancilla configurations in $\mathcal{D}^2(X)$, $\mathcal{D}^{2}\left(X\right)\sum_{j}\mathcal{D}_{ij}^{1}\left(Z\right)\mathcal{A}\left(\mathcal{D}_{ij}^{1}\left(Z\right)\right)$ will result in some different $Z$ error configurations sharing the same ancilla configuration, such as $1/4i\pi\kappa(3\mathbf{I}^{\otimes 13}+X_3+Z_3+X_5+X_8)X_{a5}$, where $a5$ is the ancilla for site operators used to detect $Z$ errors, and $X_3,X_5$ and $X_8$ errors are introduced from the imperfect implementation of the $X_3X_5X_8$ stabilizer. If we correct $Z$ errors according to the syndrome, then we will apply $Z_3$ to the state of data qubits, and then what we will get is not a corrected state (without $Z$ errors), but a state acted by $(3Z_3+Y_3+\mathbf{I}^{\otimes 13}+Z_3X_5+Z_3X_8)$, which is a state with more $Z$ errors. The above analyses are based on keeping terms up to the first order of $\kappa$; and for higher order terms, we have the same picture. Similar analyses show that with the proceeding of detection-correction processes, those incorrect terms will get worse and accumulate more errors, and will evolve into logical errors.
\section{Notes on the worst-case analysis}\label{sec:WCA}
Since there are too many possibilities, it is impractical to calculate the probability of undetected error terms evolving to logical errors. Therefore, we cannot exactly measure the performance of the surface code under such errors. Thus, we turn to the discussion of the worst case. We consider the worst case: only for the case that each round of ancilla measurements projects ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$, there will be a chance with no accumulated logical errors. For other cases (projecting to other ancilla configurations), we assume that final data qubits will always experience accumulated logical errors (not all cases in reality). Thus, for measuring the performance of the surface code under the detection-induced coherent error, we just need the probability of projecting ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$ in each round of stabilizer measurements---$P_{k|k-1}(\mathbf{I}^{\otimes n_a})$. We now explain how to derive $P_{k|k-1}(\mathbf{I}^{\otimes n_a})$.
\begin{figure}
\caption{Log-log plot of the infidelity $r$ changing with the code size $d$ for different $m$'s. $\kappa$ is chosen to be $0.4$ here. One can find that the infidelity (for the worst case) is insensitive to $m$.}
\label{fig:infidelity_for_ms}
\end{figure}
From the configuration of the surface code, we know that for one specific round of stabilizer measurements, there are
\begin{enumerate}
\item $4$ data qubits, each of which only belongs to one three-operator stabilizer;
\item $2\times(d-2)$ data qubits, each of which only belongs to one four-operator stabilizer;
\item $2\times(d-2)$ data qubits, each of which is shared by two three-operator stabilizers;
\item $2\times(d-1)$ data qubits, each of which is shared by one three-operator stabilizer and one four-operator stabilizer;
\item $(d-2)^2+(d-3)(d-1)$ data qubits, each of which is shared by two four-operator stabilizers.
\end{enumerate}
Since $\mathcal{D}^k$ is the product of the deviation of each stabilizer measurement, one can get the amplitude of each independent term in $\mathcal{D}^{k}$ (see Eq.~\eqref{eq:d1z} for $\mathcal{D}^{1}(Z)$ of the $d=3$ case). For example, the amplitude of the configuration $\mathbf{I}^{\otimes (n+n_a)}$ is $1-k(2d^2-3d+1)i\pi\kappa/2$ with $k$ being the round count of stabilizer measurements, and the amplitude of the configuration $\mathbf{I}^{\otimes (n-1+n_a^z)}\otimes Z_1$ is $i\pi\kappa/4$ (since qubit-$1$ only belongs to one three-operator stabilizer, $Z$ errors occurring on it can only come from the deviation of this stabilizer, and then only one $i\pi\kappa/4$ factor will contribute to the amplitude). Amplitudes of other configurations can also be derived from the same strategy. Thus, from those amplitudes, one can get the probability of projecting ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$ in the $k$th round of stabilizer measurements under the condition that the $(k-1)$th round of ancilla measurements projects ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$:
\begin{equation}
\begin{split}
P_{k|k-1}(\mathbf{I}^{\otimes n_a})=\mathcal{N}_k\left[1+\frac{k^2(2d^2-3d+1)^2}{4}\pi^2\kappa^2+\frac{4d^2-7d+2}{8}\pi^2\kappa^2\right],
\end{split}
\end{equation}
where
\begin{equation}
\mathcal{N}_k=\frac{1}{1+\frac{k^2(2d^2-3d+1)^2}{4}\pi^2\kappa^2+(1+\theta(k-2))\frac{4d^2-7d+2}{8}\pi^2\kappa^2+\frac{5d^2-9d+4}{4}\pi^2\kappa^2}
\end{equation}
with $\theta(x)$ being the unit step function, and $\theta(x\ge 0)=1$ while $\theta(x<0)=0$. The $\theta$ function comes from the fact that the first round of stabilizer measurements is different from the subsequent ones, as the first round measures a perfect state. The fourth term in the denominator of $\mathcal{N}_k$ comes from those configurations in which ancillary qubits are not acted by $\mathbf{I}^{\otimes n_a}$. One should note that for calculating the amplitude, we keep terms up to the first order of $\kappa$, and then we should keep terms up to $\kappa^2$ for the probability.
For the fidelity of the final state after $2m$ rounds of stabilizer measurements in the worst case, we know that there will be three possibilities of logical errors for the surface code, that is $X_L,Z_L$ and $X_LZ_L$. For a general logical state $|\psi\rangle=a|\bar{0}\rangle+b|\bar{1}\rangle$, if it experiences logical errors, then the fidelity will be $F=|\langle \Psi|E_L|\Psi\rangle|\ge 0$, where $E_L$ denotes the logical error. For example, if $|\Psi\rangle=|\bar{0}\rangle$ and $E_L=X_L$, then $F=0$. Since there will exist three possibilities of logical errors with different probabilities in the worst case, the fidelity of the final state satisfies $F\ge P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})|\langle\Psi|\Psi_{f_1}\rangle|$, where we have used the fact that the minimum of the fidelity between the original state and the final state with logical errors is $0$, and $|\Psi_{f_1}\rangle$ is the final state without logical errors and its probability is $P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})$ in the worst case. Note that even though each round of ancilla measurements projects ancillary qubits into the state acted by $\mathbf{I}^{\otimes n_a}$, data qubits can still suffer from errors (such as there will be $\mathbf{I}^{\otimes (n-1+n_a^z)}\otimes Z_i$ in $\mathcal{D}(Z)$). Thus, $|\Psi_{f_1}\rangle$ can be expressed as $|\Psi_{f_1}\rangle=\alpha|\Psi\rangle+|\Psi^{\prime}\rangle$, where $\alpha$ is the amplitude of the correct state in $|\Psi_{f_1}\rangle$ and $|\Psi^{\prime}\rangle$ is a state with non-logical errors. Then, $|\langle\Psi|\Psi_{f_1}\rangle|=|\alpha+\langle\Psi|\Psi^{\prime}\rangle|=|\alpha|$. Therefore, $F\ge P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})|\langle\Psi|\Psi_{f_1}\rangle|=P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})|\alpha|$, and
\begin{equation}
|\alpha|=\sqrt{\frac{1+m^2(2d^2-3d+1)^2\pi^2\kappa^2}{1+m^2(2d^2-3d+1)^2\pi^2\kappa^2+\frac{4d^2-7d+2}{8}\pi^2\kappa^2}}.
\end{equation}
As mentioned in the main text, the infidelity $r=1-F_{min}$ decays with the increasing of code sizes. If we do not focus on the worst case, then the final state will be a coherent state containing correct terms, non-logical error terms and logical error terms. Since the actual fidelity of the final state $F$ is larger than the minimum fidelity $F_{min}$, which is contributed by the correct terms in the final state, the extra fidelity $F-F_{min}$ must come from the logical error terms (those non-logical error terms are orthogonal to the perfect state). If we focus on the worst case, then the final state will just be a coherent state containing correct terms and logical error terms (since $|\alpha|\rightarrow 1$, it is almost accurate to say this). Then, suppose $F=1$ (taking this maximum will facilitate the analysis, and the corresponding case can be that the initial state is $|\bar{0}\rangle$ with the logical error being $Z_L$), and $1-F_{min}$ is the contribution from logical error terms. In the main text, we show that $F_{min}$ will increase by increasing code sizes ($r$ decays), thus the share of the correct terms in the final state will get higher with the increasing of code sizes, and then the computation accuracy will be higher. Thus, we can say that the effect of the detection-induced coherent error can be alleviated by QECCs.
In Fig.~\ref{fig:infidelity} in the main text, we choose $m=3$. For larger $m$, $P_{1\rightarrow 2m}(\mathbf{I}^{\otimes n_a})$ will be smaller and $r$ will be larger. However, since for large $k$, $P_{k|k-1}(\mathbf{I}^{\otimes n_a})$ will approach $1$, there will be fewer differences between large-$m$ cases and small-$m$ cases (see Fig.~\ref{fig:infidelity_for_ms}). One should note that this does not mean that the repeating round of stabilizer measurements does not have any effect. In reality (not focusing on the worst case), logical error terms are more likely to appear in the final state for larger $m$.
Therefore, at a fixed code size, even though the final state may have a high fidelity in the larger-$m$ case, we cannot conclude that the state is of good quality, as the logical error terms can also contribute to the fidelity. Thus, the final computing results may not be reliable, even though the states used have a high fidelity.
\section{Derivation of the exact structure of $\varepsilon_{ij}(O)$}\label{sec:DSV}
In Sec. \ref{sec:CwAQECCs} of the main text, based on an explicit example, we show that the surface code under the DICE becomes an approximate QECC, which satisfies the modified Knill-Laflamme condition
\begin{equation}
\langle\phi_{i}^{\prime}|O|\phi_{j}^{\prime}\rangle=C_{O}\delta_{ij}+\varepsilon_{ij}\left(O\right).
\end{equation}
Since the surface code is a degenerate code, $C_{O}=1$ for $O=I$, and $C_{O}=0$ for $O\neq I$. We now show the exact structure of $\varepsilon_{ij}\left(O\right)$.
Note that for the $d=3$ surface code, the correctable error set is $\left\{ I,X_{1},\cdots,X_{13},Z_{1},\cdots,Z_{13},Y_{1},\cdots,Y_{13}\right\}$. Since $O=E_{a}^{\dagger}E_{b}$,
\begin{equation}
\begin{split}
O&\in\left\{ I,X_{1},\cdots,X_{13},X_{a}X_{b}\left(a,b=1,\cdots,13|a\neq b\right),Z_{1},\cdots,Z_{13},Z_{a}Z_{b}\left(a,b=1,\cdots,13|a\neq b\right),\right.\\
&\quad\;\left.X_{a}Z_{b}\left(a,b=1,\cdots,13\right),X_{a}Y_{b}\left(a,b=1,\cdots,13\right),Y_{a}Z_{b}\left(a,b=1,\cdots,13\right)\right\} .
\end{split}
\end{equation}
Those $O$s can be classified into four classes:
\begin{enumerate}
\item $O=I$;
\item The second one is constructed from $O$s containing $Z$ operators;
\item The third one is constructed from $O$s containing only one $X$ operator;
\item The last one is constructed from $O$s containing two $X$ operators.
\end{enumerate}
To avoid tedious algebraic calculations of $\langle\phi_{i}^{\prime}|O|\phi_{j}^{\prime}\rangle$, where $|\phi_{i}^{\prime}\rangle=|\bar{0}^{\prime}\rangle,|\bar{1}^{\prime}\rangle$, we provide some intuitive analyses. Note that $\langle\phi_{i}^{\prime}|O|\phi_{j}^{\prime}\rangle\propto\langle\phi_{i}|\mathcal{Y}^{\dagger}\left(X\right)O\mathcal{Y}\left(X\right)|\phi_{j}\rangle$, where $|\phi_{i}\rangle=|\bar{0}\rangle,|\bar{1}\rangle$. Thus, for $i=j$, in order that $\langle\phi_{i}|\mathcal{Y}^{\dagger}\left(X\right)O\mathcal{Y}\left(X\right)|\phi_{i}\rangle$ is not zero, $\mathcal{Y}^{\dagger}\left(X\right)O\mathcal{Y}\left(X\right)$ should be proportional to the identity or stabilizers; for $i\neq j$, in order that $\langle\phi_{i}|\mathcal{Y}^{\dagger}\left(X\right)O\mathcal{Y}\left(X\right)|\phi_{j}\rangle$ is not zero, $\mathcal{Y}^{\dagger}\left(X\right)O\mathcal{Y}\left(X\right)$ should be logical operators. Based on those intuitive analyses, $\langle\phi_{i}^{\prime}|O|\phi_{j}^{\prime}\rangle$ can be calculated very quickly, and we show the results in the following.
For $O=I$, we have
\begin{equation}
\langle\bar{0}^{\prime}|I|\bar{0}^{\prime}\rangle=\langle\bar{1}^{\prime}|I|\bar{1}^{\prime}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)I\mathcal{Y}\left(X\right)|\bar{0}\rangle=1,
\end{equation}
and
\begin{equation}
\langle\bar{0}^{\prime}|I|\bar{1}^{\prime}\rangle=\langle\bar{1}^{\prime}|I|\bar{0}^{\prime}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)I\mathcal{Y}\left(X\right)|\bar{1}\rangle=0.
\end{equation}
Therefore, for $O=I$, we have $\varepsilon_{ij}\left(O\right)=0$.
We now consider those $O$s which contain $Z$ operators. Simple algebraic calculations lead to
\begin{equation}
\langle\bar{0}^{\prime}|Z_{a}Z_{b}|\bar{0}^{\prime}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)Z_{a}Z_{b}\mathcal{Y}\left(X\right)|\bar{0}\rangle=0,
\end{equation}
\begin{equation}
\langle\bar{1}^{\prime}|Z_{a}Z_{b}|\bar{1}^{\prime}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)X_{L}Z_{a}Z_{b}X_{L}\mathcal{Y}\left(X\right)|\bar{0}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)Z_{a}Z_{b}\mathcal{Y}\left(X\right)|\bar{0}\rangle=0,
\end{equation}
\begin{equation}
\begin{split}
\langle\bar{0}^{\prime}|Z_{a}Z_{b}|\bar{1}^{\prime}\rangle&=\langle\bar{1}^{\prime}|Z_{a}Z_{b}|\bar{0}^{\prime}\rangle\\&\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)Z_{a}Z_{b}X_{L}\mathcal{Y}\left(X\right)|\bar{0}\rangle\\&=\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)Z_{a}Z_{b}\mathcal{Y}\left(X\right)X_{L}|\bar{0}\rangle\\&=\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)Z_{a}Z_{b}\mathcal{Y}\left(X\right)|\bar{1}\rangle\\&=0,
\end{split}
\end{equation}
\begin{equation}
\langle\bar{0}^{\prime}|X_{a}Z_{b}|\bar{0}^{\prime}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)X_{a}Z_{b}\mathcal{Y}\left(X\right)|\bar{0}\rangle=0,
\end{equation}
\begin{equation}
\langle\bar{1}^{\prime}|X_{a}Z_{b}|\bar{1}^{\prime}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)X_{L}X_{a}Z_{b}X_{L}\mathcal{Y}\left(X\right)|\bar{0}\rangle\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)X_{a}Z_{b}\mathcal{Y}\left(X\right)|\bar{0}\rangle=0,
\end{equation}
and
\begin{equation}
\begin{split}
\langle\bar{0}^{\prime}|X_{a}Z_{b}|\bar{1}^{\prime}\rangle&=\langle\bar{1}^{\prime}|X_{a}Z_{b}|\bar{0}^{\prime}\rangle\\&\propto\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)X_{a}Z_{b}X_{L}\mathcal{Y}\left(X\right)|\bar{0}\rangle\\&=\langle\bar{0}|\mathcal{Y}^{\dagger}\left(X\right)X_{a}Z_{b}\mathcal{Y}\left(X\right)|\bar{1}\rangle\\&=0.
\end{split}
\end{equation}
Similarly, we have $\varepsilon_{ij}\left(Z_{a}\right)=0$. Therefore, we find that for those $O$s containing $Z$ operators, $\varepsilon_{ij}\left(O\right)$ always equals $0$.
According to similar calculations, for the case that $O$ only contains one $X$ operator, we have
\begin{itemize}
\item $i\neq j$:\\
\begin{equation}
\begin{split}
\varepsilon_{ij}(X_a) \propto\begin{cases}
\frac{1}{8}\pi^{2}\kappa^{2} & a=1,2,3,11,12,13\\
0 & a=4,5,9,10\\
\frac{1}{2}\pi^{2}\kappa^{2} & a=6,7,8
\end{cases};
\end{split}
\end{equation}
\item $i=j$:
\begin{equation}
\begin{split}
\varepsilon_{ij}\left(X_{a}\right)\propto\begin{cases}
-2\pi^{2}\kappa^{2} & a=1,3,11,13\\
-\frac{19}{4}\pi^{2}\kappa^{2} & a=4,5,9,10\\
-\frac{5}{2}\pi^{2}\kappa^{2} & a=2,12\\
-\frac{9}{2}\pi^{2}\kappa^{2} & a=6,8\\
-5\pi^{2}\kappa^{2} & a=7
\end{cases}.
\end{split}
\end{equation}
\end{itemize}
And for the case that $O$ contains two $X$ operators, we have
\begin{itemize}
\item $i\neq j$:
\begin{equation}
\varepsilon_{ij}\left(O=X_{a}X_{b}\right)\\
\;
\propto\begin{cases}
-\frac{5}{2}\pi^{2}\kappa^{2} & a,b\in\left\{ 1,2,3,11,12,13\right\} \\
& \cup\left(a,b\;\text{in the same row}\right)\\
-5\pi^{2}\kappa^{2} & a,b\in\left\{ 6,7,8\right\} \\
0 & a,b\in\left\{ 1,2,3,6,7,8,11,12,13\right\} \\
& \cup\left(a,b\;\text{not in the same row}\right)\\
0 & a,b\in\left\{ 4,5,9,10\right\}\\ 0&a\left(b\right)\in\left\{ 1,2,3,6,7,8,11,12,13\right\} \\&\cup b\left(a\right)\in\left\{ 4,5,9,10\right\}
\end{cases};
\end{equation}
\item $i=j$:
\begin{equation}
\varepsilon_{ij}\left(O=X_{a}X_{b}\right)\propto\begin{cases}
\frac{1}{8}\pi^{2}\kappa^{2} & a,b\in\left\{ 1,2,3,11,12,13\right\} \\
\frac{1}{2}\pi^{2}\kappa^{2} & a,b\in\left\{ 6,7,8\right\} \\
\frac{1}{4}\pi^{2}\kappa^{2} & a\left(b\right)\in\left\{ 1,2,3,11,12,13\right\} \\
& \cup b\left(a\right)\in\left\{ 6,7,8\right\} \\
& \cup\left(a,b\;\text{not in the same column}\right)\\
-\frac{19}{4}\pi^{2}\kappa^{2} & \left(a,b\right)\in\left\{ \left(1,6\right),\left(1,4\right),\left(3,5\right),\left(3,8\right),\left(11,9\right),\left(11,6\right),\left(13,8\right),\left(13,10\right)\right\} \\
\frac{1}{4}\pi^{2}\kappa^{2} & \left(a,b\right)\in\{\left(1,5\right),\left(1,9\right),\left(1,10\right),\left(2,7\right),\left(2,9\right),\left(2,10\right),\left(3,4\right),\left(3,9\right),\left(3,10\right),\\
& \qquad\left(11,4\right),\left(11,5\right),\left(11,10\right),\left(12,4\right),\left(12,5\right),\left(12,7\right),\left(13,4\right),\left(13,5\right),\left(13,9\right)\}\\
\frac{3}{4}\pi^{2}\kappa^{2} & a,b\in\left\{ 4,5,9,10\right\} \\
& \cup\left(a,b\;\text{in the same column}\right)\\
\frac{1}{2}\pi^{2}\kappa^{2} & a,b\in\left\{ 4,5,9,10\right\} \\
& \cup\left(a,b\;\text{not in the same column}\right)\\
\frac{3}{4}\pi^{2}\kappa^{2} & \left(a,b\right)\in\left\{ \left(2,4\right),\left(2,5\right),\left(12,9\right),\left(12,10\right),\left(7,4\right),\left(7,5\right),\left(7,9\right),\left(7,10\right)\right\} \\
-2\pi^{2}\kappa^{2} & \left(a,b\right)\in\left\{ \left(6,4\right),\left(6,9\right),\left(8,5\right),\left(8,10\right)\right\} \\
\frac{1}{2}\pi^{2}\kappa^{2} & \left(a,b\right)\in\left\{ \left(6,5\right),\left(6,10\right),\left(8,4\right),\left(8,9\right)\right\}
\end{cases}.
\end{equation}
\end{itemize}
\end{widetext}
\end{appendix}
\end{document}
|
\begin{document}
\title{Enriques diagrams and adjacency of planar curve singularities}
\maketitle
\begin{abstract}
We study adjacency of equisingularity types of planar
curve singularities in terms of their Enriques diagrams.
The goal is, given two equisingularity types,
to determine whether one of them is adjacent to the other.
For linear adjacency a complete answer is
obtained, whereas for arbitrary (analytic) adjacency
a necessary condition and a sufficient condition
are proved.
We also show an example
of a singular curve of type $\mathcal{D}'$
that can be deformed to a curve of type
$\mathcal{D}$ without $\mathcal{D}'$
being adjacent to $\mathcal{D}$.
This suggests that
\emph{analytical} rather than topological
equivalence should be considered when
studying adjacency of singularity types.
\end{abstract}
\section*{Introduction}
A class of reduced (germs of) planar curve singularities
$\mathcal{D}'$ is said to be \emph{adjacent}
to the class $\mathcal{D}$ when every member of the
class $\mathcal{D}'$ can be deformed
into a member of the class $\mathcal{D}$ by an arbitrarily
small deformation. If this can be done with a linear deformation,
then we say that $\mathcal{D}'$ is \emph{linearly adjacent}
to $\mathcal{D}$.
It is well known that equisingularity and topological equivalence of
reduced germs of curves on smooth surfaces are equivalent, and that
analytical equivalence implies topological equivalence (see for
instance \cite{Zar32}, \cite{Zar65I} or \cite{Cas00}).
We shall focus on the
equisingularity (or topological equivalence) classes,
and we will call them simply \emph{types}. The Enriques diagrams
introduced by Enriques in \cite[IV.I]{EC15} represent the types:
two reduced curves are equisingular at $O$ if and only if
their associated Enriques diagrams are isomorphic
(see \cite[3.9]{Cas00}).
In \cite{Arn76} Arnold classified critical points of
functions with modality at most two; this implies the
classification of types (of planar curve singularities)
with multiplicity at most four. He also described some
adjacencies between them, introducing the so-called
\emph{series} of types $A$, $D$, $E$ and $J$. The
construction of series was generalized by Siersma in
\cite{Sie77} using a kind of Enriques diagrams;
in particular, he classified types of multiplicity
at most five. As we shall see below, all adjacencies
within one series are linear.
Apart from series, only some particular cases of adjacency
are known, obtained using explicit deformations.
On the other hand, the semicontinuity of some numerical
invariants such as the genus discrepancy $\delta$ or
the Milnor number $\mu$ provide necessary conditions
for adjacency, but these invariants are far from
determining the type of a singularity, and so it
is not to be expected that they give a complete answer
to the adjacency question.
Here, instead of numerical invariants, the Enriques
diagram (which does determine the type) is used,
providing a necessary condition and a sufficient
condition for adjacency.
In the case of \emph{linear} adjacency
a complete answer follows, namely, we determine all
linear adjacencies in terms of Enriques diagrams.
Non-linear adjacencies are a much subtler subject, as
shown by the fact that the types do not form a stratification
of $\mathbb{C}[[x,y]]$ (see example \ref{contrex}); this
suggests that a complete understanding
of analytic adjacencies can only be achieved by
considering analytic moduli of singularities,
rather than equisingularity classes alone.
We present a definition of Enriques diagrams in a purely combinatorial
way, that was used by Kleiman and Piene (\cite{KP99})
to list all equisingularity types with codimension up to 8, which
is needed for the enumeration of 8--nodal curves
(see also \cite{GSG92}).
A \emph{tree} is a finite directed graph, without
loops; it has a single initial vertex, or \emph{root}, and every other
vertex has a unique immediate predecessor.
If $p$ is the immediate predecessor of the vertex
$q$, we say that $q$ is a successor of $p$. If $p$
has no successors then it is an extremal vertex.
An \emph{Enriques diagram} is a tree
with a binary relation between vertices, called \emph{proximity},
which satisfies:
\begin{enumerate}
\item The root is proximate to no vertex.
\item Every vertex that is not the root is proximate to
its immediate predecessor.
\item No vertex is proximate to more than two vertices.
\item If a vertex $q$ is proximate to two vertices
then one of them is the immediate predecessor of $q$,
and it is proximate to the other.
\item Given two vertices $p$, $q$ with $q$ proximate to $p$,
there is at most one vertex proximate to both of them.
\end{enumerate}
The vertices which are proximate to two points are called
\emph{satellite}, the other vertices are called \emph{free}.
We usually denote the set of vertices of an Enriques diagram
${\bf D}$ with the same letter ${\bf D}$.
To show graphically the proximity relation, Enriques diagrams are
drawn according to the following rules:
\begin{enumerate}
\item If $q$ is a free successor of $p$
then the edge going from $p$ to $q$ is smooth and curved and,
if $p$ is not the root, it has at $p$ the same tangent
as the edge joining $p$ to its predecessor.
\item The sequence of edges connecting a maximal
succession of vertices proximate to the same vertex $p$
are shaped into a line segment, orthogonal to the edge joining $p$
to the first vertex of the sequence.
\end{enumerate}
An \emph{isomorphism} of Enriques diagrams is a bijection $i$
between the sets of vertices of the two diagrams so that
$q$ is proximate to $p$ if and only if $i(q)$ is proximate to
$i(p)$; two Enriques diagrams are \emph{isomorphic} if there
is an isomorphism between them.
\begin{Exa}
\label{exa1}
Figure \ref{dia1} shows an Enriques diagram with
nine vertices. $p_1$ is the root of the
diagram, $p_4, p_5$ are satellites proximate to $p_2$,
$p_6$ is a satellite proximate to $p_3$ and the
remaining vertices are free.
\begin{figure}
\caption{Enriques diagram of example \ref{exa1}}
\label{dia1}
\end{figure}
\end{Exa}
A \emph{subdiagram} of an Enriques diagram ${\bf D}$ is a subtree
${\bf D}_0 \subset {\bf D}$
together with the induced proximity relation, such that the
predecessors of every vertex $q \in {\bf D}_0$ belong to ${\bf D}_0$.
An \emph{admissible ordering} for an Enriques diagram
${\bf D}$ is a total ordering $\preceq$ for its set of vertices
refining the natural ordering of ${\bf D}$, i.e., such that for
every vertex $p$, and every successor $q$ of $p$, $p \preceq q$.
Given an Enriques diagram ${\bf D}$ of $n$ vertices with an admissible
ordering $\preceq$, let $p_1$, $p_2$, \dots, $p_n$ denote its vertices,
numbered according to $\preceq$.
The \emph{proximity matrix} of ${\bf D}$ is a square matrix $P=(p_{i,j})$
of order $n$, with
$$
p_{i,j}=
\begin{cases}
1 & \text{if }i=j,\\
-1 & \text{if $p_i$ is proximate to $p_j$},\\
0 & \text{otherwise.}
\end{cases}
$$
A \emph{system of multiplicities} for (the vertices of) an Enriques diagram
${\bf D}$ is any map $\nu:{\bf D} \rightarrow \mathbb{Z}$. We will usually write
$\nu_p=\nu(p)$. A pair $({\bf D},\nu)$, where ${\bf D}$ is an Enriques
diagram and $\nu$ a system of multiplicities for it, will be called a
\emph{weighted Enriques diagram}. The \emph{degree} of a
weighted Enriques diagram is
$\deg ({\bf D},\nu)= \sum_{p \in {\bf D}} \nu_p(\nu_p+1)/2$.
A \emph{consistent Enriques diagram}
is a weighted Enriques diagram such that, for all $p\in {\bf D}$,
\[ \nu_p \geq \sum_{q \text{ prox. to } p} \nu_q.\]
Note that if $({\bf D},\preceq)$ is an Enriques diagram of $n$ vertices
with an admissible ordering, then a system of multiplicities for ${\bf D}$
may be identified with a vector ${\boldsymbol \nu}=(\nu_1, \nu_2, \dots, \nu_n)\in
\mathbb{Z}^n$, taking $\nu_i=\nu_{p_i}$, $i=1, \dots, n$; we shall
use the notation $({\bf D},\preceq, {\boldsymbol \nu})$ for a weighted ordered
Enriques diagram, where ${\boldsymbol \nu} \in \mathbb{Z}^n$.
To every system of multiplicities $\nu$ for a diagram ${\bf D}$ we
associate a \emph{system of values}, which is another map
$v:{\bf D} \rightarrow \mathbb{Z}$, defined recursively as
$$
v_p=
\begin{cases}
\nu_p & \text{if $p$ is the root,} \\
\nu_p + \sum_{p \text{ prox. to } q} v_q & \text{otherwise.}
\end{cases}
$$
Observe that any map
$v:{\bf D} \rightarrow \mathbb{Z}$ is the system of values associated to the system of
multiplicities $\nu:{\bf D} \rightarrow \mathbb{Z}$ defined recursively as
$$
\nu_p=
\begin{cases}
v_p & \text{if $p$ is the root,} \\
v_p - \sum_{p \text{ prox. to } q} v_q & \text{otherwise.}
\end{cases}
$$
Hence giving a system of multiplicities for an Enriques diagram is
equivalent to giving a system of values.
Figure~\ref{dia2} shows the system of values
associated to a consistent system of multiplicities.
\begin{figure}
% NOTE(review): the \ref argument of this caption was lost in conversion;
% restore the correct example label.
\caption{A consistent system of multiplicities (in bold shape)
for the Enriques diagram of the example.}
\label{dia2}
\end{figure}
The relationship between the combinatorial properties of Enriques
diagrams
and the topology of planar curve singularities is explained next.
Assume that $O$ is a smooth point on a complex surface $S$,
whose local ring is isomorphic to $\mathbb{C}[[x,y]]$, and
let $f \in \mathbb{C}[[x,y]]$ be the equation of a (germ of)
curve with an isolated singularity at $O$.
Let $K$ be a finite set of points equal or infinitely near to
the smooth point $O$, such that for each $p\in K$, $K$ contains
all points to which $p$ is infinitely near. Such a set is
called a \emph{cluster} of points infinitely near to $O$.
A point $p\in K$ is said to be \emph{proximate} to another
$q \in K$ if it is infinitely near to $q$ and lies on the \emph{strict}
transform of the exceptional divisor of blowing up $q$. Thus
being proximate to is a binary relation between points of a cluster
which satisfies the same conditions as proximity between vertices of an
Enriques diagram.
Therefore for every cluster
there is an associated Enriques diagram which encodes all the
information on proximities between points of $K$.
The \emph{value} of a germ of curve at a point
$p$ of a cluster $K$ is the multiplicity at $p$ of the
pullback of the germ of curve in the blown up surface containing
$p$; in the case $p=O$ the value is just the multiplicity
of the curve at $O$.
Given a cluster $K$ and a system of values $v: K \rightarrow \mathbb{Z}$
(associated to the system of multiplicities $\nu$)
there is a complete ideal $H_{K,\nu} \subset \mathbb{C}[[x,y]]$ containing
all equations of the germs of curve which have at every point $p\in K$
\emph{value} at least $v_p$ (see \cite[4.4.4]{Cas00}).
\begin{Exa}
If $C \subset S$ is a reduced curve going through $O$, then the
set of singular points of $C$ equal or infinitely near to $O$
is a cluster $K$. The Enriques diagram of $K$, weighted
with the multiplicities of $C$ at the points of $K$, is a consistent
Enriques diagram, which we call the Enriques diagram associated
to the singularity of $C$. Such a diagram
has no extremal free vertices of multiplicity
$\nu_p \le 1$ (because $K$ consists only of singular points of $C$,
which either have multiplicity bigger than 1, are satellites
or precede some satellite point on $C$).
Conversely, if ${\bf D}$ is a consistent
Enriques diagram with no extremal free vertices of multiplicity
$\nu_p \le 1$ then there are germs of curve at $O$ whose cluster
of singular
points has Enriques diagram isomorphic to ${\bf D}$ (see \cite{Cas00}).
\end{Exa}
It is well known that two reduced curves are equisingular at $O$
if and only if
their associated Enriques diagrams
are isomorphic (see \cite[3.8]{Cas00}, for instance).
Therefore the equisingularity types (types for short) of reduced
germs of curves on smooth surfaces are identified
with the isomorphism classes of consistent
Enriques diagrams with no extremal free vertices of multiplicity
$\nu_p \le 1$.
\section{Linear adjacency}
Let $I \subset \mathbb{C}[[x,y]]$ be an ideal.
According to \cite[7.2.13]{Cas00}, general members (by the Zariski
topology of the coefficients of
the series) of $I$ define
equisingular germs.
\begin{Lem}
$({\bf D}',\nu')$ is linearly adjacent
to $({\bf D},\nu)$ if and only if for every $f \in \mathbb{C}[[x,y]]$
defining a reduced germ of curve of type $({\bf D}',\nu')$,
there exists an ideal $I \subset \mathbb{C}[[x,y]]$ with
$f \in I$ and whose general member defines a reduced germ of type
$({\bf D},\nu)$.
\end{Lem}
\begin{proof}
The if part of the claim is evident. To see the only if
part, assume that $f$ defines a reduced germ of type $({\bf D}',\nu')$
that can be deformed to a reduced germ of type $({\bf D},\nu)$
by a linear deformation $f+tg$, $g \in \mathbb{C}[[x,y]]$.
This means that general members of the pencil $f+tg$
define reduced germs of type $({\bf D},\nu)$.
Hence
general members of the ideal $I=(f,g)$
define germs of type $({\bf D},\nu)$ as well
(\cite[7.2]{Cas00}).
\end{proof}
\begin{Pro}
\label{cideal}
Let $({\bf D}, \mu)$ and $({\bf D}',\mu')$ be
weighted Enriques diagrams, with $({\bf D}',\mu')$
consistent. The following are
equivalent:
\begin{enumerate}
\item \label{ex}There are two clusters,
$K$ and $K'$, whose Enriques diagrams are
${\bf D}$ and ${\bf D}'$ respectively, such that
$H_{K',\mu'} \subseteq H_{K,\mu}$.
\item \label{bs}For every cluster $K$ with
Enriques diagram ${\bf D}$, there is a cluster
$K'$ with Enriques diagram ${\bf D}'$ such that
$H_{K',\mu'} \subseteq H_{K,\mu}$.
\item \label{sb}For every cluster $K'$ with
Enriques diagram ${\bf D}'$, there is a cluster
$K$ with Enriques diagram ${\bf D}$ such that
$H_{K',\mu'} \subseteq H_{K,\mu}$.
\item \label{comb}There exist
isomorphic subdiagrams ${\bf D}_0 \subset {\bf D}$,
${\bf D}_0' \subset {\bf D}'$ and an isomorphism
$$ i: {\bf D}_0 \longrightarrow {\bf D}_0'$$
such that the system of multiplicities
$\nu$ for ${\bf D}$ defined as
$$
\nu(p)=
\begin{cases}
\mu'(i(p)) & \text{ if } p \in {\bf D}_0 , \\
0 & \text{ otherwise}
\end{cases}
$$
has the property that the values $v$ and $v'$ associated
to the multiplicities $\mu$ and $\nu$ respectively
satisfy $v(p) \le v'(p) \ \forall p \in {\bf D}$.
\end{enumerate}
\end{Pro}
\begin{proof}
Clearly both \ref{sb} and \ref{bs} imply
\ref{ex}. We shall prove that \ref{ex}
implies \ref{comb} and that \ref{comb}
implies both \ref{bs} and \ref{sb}.
Let us first prove that \ref{ex} implies \ref{comb}.
So assume there are two clusters,
$K$ and $K'$, whose Enriques diagrams are
${\bf D}$ and ${\bf D}'$ respectively, such that
$H_{K',\mu'} \subseteq H_{K,\mu}$.
The points common to $K$ and $K'$ clearly
form a cluster, which we call $K_0$.
The vertices in ${\bf D}$ and ${\bf D}'$
corresponding to points in $K_0$ form
subdiagrams ${\bf D}_0$ and ${\bf D}_0'$, and
the coincidence of points in $K_0$
determines an isomorphism
$ i: {\bf D}_0 \longrightarrow {\bf D}_0'$.
It only remains to be seen that
the values $v$ and $v'$ associated
to the multiplicities $\mu$ and $\nu$ respectively
(with $\nu$ as in the claim)
satisfy $v(p) \le v'(p) \ \forall p \in {\bf D}$.
Now choose a germ $f \in H_{K',\mu'}$ having
multiplicity exactly $\mu_p'$ at each point
$p \in K'$ (such an $f$ exists because
$({\bf D}',\mu')$ is consistent, see \cite[4.2.7]{Cas00}).
This implies that $f$ has value exactly
$v'(p)$ at each point $p\in K$.
Then $f \in H_{K,\mu}$ because
$H_{K',\mu'} \subseteq H_{K,\mu}$,
and the claim follows by the
definition of $H_{K,\mu}$.
Let us now prove that \ref{comb} implies \ref{sb}.
Assume that \ref{comb} holds, and let $K'$
be a cluster whose Enriques diagram is ${\bf D}'$. We
must prove the existence of a cluster $K$ with
Enriques diagram ${\bf D}$ such that
$H_{K',\mu'} \subseteq H_{K,\mu}$.
Let $K_0$ be the cluster
formed by the points corresponding to vertices in ${\bf D}_0'$.
Add to $K_0$ the points necessary to get a cluster $K$ with
Enriques diagram ${\bf D}$. Because of the
hypothesis on the values $v$ and $v'$
and the characterization of $H_{K,\mu}$ (see for
instance \cite[4.5.4]{Cas00}), $H_{K',\mu'} \subseteq H_{K,\mu}$.
In the same way it is proved that
\ref{comb} implies \ref{bs}.
\end{proof}
If the conditions of proposition \ref{cideal}
are satisfied, we shall write
$({\bf D}', \mu') \ge({\bf D}, \mu)$.
Now we can prove our main result.
The interest of proposition \ref{cideal}
and theorem \ref{linesp}
lies in the fact that condition \ref{comb} of \ref{cideal}
can be checked directly on the Enriques diagrams,
using their combinatorial properties, thus giving a practical
means to decide whether a type is
or is not linearly adjacent to another.
\begin{Teo}
\label{linesp}
Let $({\bf D},\mu)$, $(\tilde {\bf D},\tilde \mu)$ be types.
$(\tilde {\bf D},\tilde \mu)$ is linearly adjacent to $({\bf D},\mu)$ if
and only if there exists a weighted consistent Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ at most
in some free vertices of multiplicity one,
satisfying $({\bf D}',\mu') \ge ({\bf D},\mu)$.
\end{Teo}
\begin{proof}
To prove the if part,
given a reduced germ $f \in \mathbb{C}[[x,y]]$
defining a curve singularity of type $(\tilde {\bf D}, \tilde \mu)$
we have to show the existence of
an ideal $I \subset \mathbb{C}[[x,y]]$ containing $f$ and whose general
member defines a reduced germ of type $({\bf D}, \mu)$,
provided that $({\bf D}',\mu') \ge ({\bf D},\mu)$.
Let $\tilde K$ be the cluster of singular points of $f$ (whose
Enriques diagram is $\tilde {\bf D}$). For each vertex $p$
of ${\bf D}'$ not in $\tilde {\bf D}$, whose predecessor is denoted by $q$,
choose a point on $f=0$ on the first neighbourhood
of the point corresponding to the vertex $q$.
$\tilde K$ together with all these additional points
(which are nonsingular, therefore free of multiplicity 1)
form a cluster $K'$ with Enriques diagram ${\bf D}'$,
with $f \in H_{K',\mu'}$. As
$({\bf D}',\mu') \ge ({\bf D},\mu)$, proposition \ref{cideal}
says that there is a cluster $K$ with
Enriques diagram ${\bf D}$ such that
$f \in H_{K',\mu'} \subseteq H_{K,\mu}$.
On the other hand,
\cite[4.2.7]{Cas00} says that the general
member of $H_{K,\mu}$ defines a germ of type $({\bf D}, \mu)$,
so we are done.
Let us now prove the only if part, so assume that,
for every $f\in \mathbb{C}[[x,y]]$ defining a reduced germ of
type $(\tilde {\bf D}, \tilde \mu)$, there exists an ideal
$I\subset \mathbb{C}[[x,y]]$, with $f \in I$,
whose general member defines a reduced germ of type $({\bf D}, \mu)$.
We first reduce to the case that $I$ has no fixed part.
Indeed, for $n$ big enough and $h \in (x,y)^n$, the
types of $f$ and $f+h$ coincide (see for instance
\cite[7.4.2]{Cas00}), and also the types of $g$ and
$g+h$ for $g$ general in $I$, so we can take $I+ (x,y)^n$
instead of $I$, and this has no fixed part.
Then by \cite[7.2.13]{Cas00} the Enriques diagram of the weighted
cluster $BP(I)$ of base points of $I$ is $({\bf D}, \mu)$ plus some free
vertices of multiplicity one; let $K$ be the subcluster of $BP(I)$
whose Enriques diagram is ${\bf D}$. As $f \in I$, $f$ goes through the
weighted cluster $BP(I)$, and therefore $f \in H_{K, \mu}$.
By \cite[4.5.4]{Cas00} this means that
the \emph{value} of $f$ at each point $p \in K$ is at
least $v_p$. Add to the cluster $(\tilde K, \tilde \mu)$
of singular points of $f$ all points on $f=0$ which
belong to $K$, weighted with multiplicity 1
(these are all infinitely near points at which $f=0$
is smooth). Then the resulting cluster $(K',\mu')$
satisfies $H_{K',\mu'} \subseteq H_{K,\mu}$
and by \ref{cideal} we obtain $({\bf D}',\mu') \ge ({\bf D},\mu)$,
where ${\bf D}'$ is the Enriques diagram of $K'$.
\end{proof}
\begin{figure}
% NOTE(review): the \ref argument of this caption was truncated in the source;
% from context it should point to proposition \ref{cideal} — confirm.
\caption{In white, the vertices of the isomorphic subdiagrams,
in bold shape, the systems of multiplicities and, in italics,
the systems of values.
With notations as in proposition \ref{cideal}.}
\label{incidA}
\end{figure}
\begin{Exa}
Let $A_k$, $D_k$, $E_k$, $J_{k,p}$ and so on denote the
types of germs of curve of Arnold's lists (cf. \cite{Arn76}).
Then for every $k, d>0$, $A_{k+d}$ is linearly adjacent to $A_k$,
$D_{k+d}$ is linearly adjacent to $D_k$,
$E_{k+d}$ is linearly adjacent to $E_k$,
$J_{k+d,p+d}$ is linearly adjacent to $J_{k+d,p}$
and to $J_{k,p+d}$ and so on.
To see this, just take the weighted Enriques
diagrams corresponding to each type, and apply
theorem \ref{linesp}. For instance, figure~\ref{incidA}
shows the Enriques diagrams corresponding to types $A_{2k}$
and $A_{2k+1}$ with the corresponding isomorphic subdiagrams,
the multiplicities and the values involved.
All other cases are handled similarly.
\end{Exa}
\begin{Exa}
\label{ex3}
The simplest example in which one needs to consider
$({\bf D}',\mu') \ne (\tilde {\bf D}, \tilde \mu)$ is to
prove that a triple point ($D_4$ in Arnold's notation)
is linearly adjacent to the tacnode of type $A_3$ in Arnold's
notation. Indeed, in this case $({\bf D}', \mu')$ is obtained
from the triple point by adding a free point with multiplicity
1 to it (see figure~\ref{AD}).
\begin{figure}
% NOTE(review): the \ref argument of this caption was truncated in the source;
% from context it should point to example \ref{ex3} — confirm.
\caption{Diagrams corresponding to example \ref{ex3}.}
\label{AD}
\end{figure}
\end{Exa}
\begin{Rem}
In the proof of the only if part of \ref{linesp} one just needs to
assume that there exists an $f\in \mathbb{C}[[x,y]]$
defining a reduced germ of type $(\tilde {\bf D}, \tilde \mu)$
and an ideal $I\subset \mathbb{C}[[x,y]]$, with $f \in I$,
whose general member defines a reduced germ of type $({\bf D}, \mu)$.
It follows therefore that if a germ of curve of type
$(\tilde {\bf D}, \tilde \mu)$ can be deformed linearly to
a germ of type $({\bf D}, \mu)$, then $(\tilde {\bf D}, \tilde \mu)$
is linearly adjacent to $({\bf D}, \mu)$.
\end{Rem}
\section{Non-linear adjacency}
We have shown in the preceding section a criterion
to decide whether a type
is or is not linearly adjacent to another.
Non-linear adjacencies are a much subtler subject,
as shown by example \ref{contrex} below,
and we cannot give a criterion to decide in all cases.
However, we are able to give a necessary condition
and a sufficient condition.
We say that a weighted Enriques diagram $({\bf D},\mu)$
is \emph{tame} whenever it is consistent or the
sequence of unloadings that it determines, leading to
a consistent Enriques diagram $({\bf D}, \mu')$, is
tame (see \cite[4.7]{Cas00}); the fact that
$({\bf D},\mu)$ is tame or not depends on the multiplicities
and the proximities between vertices of ${\bf D}$, i.e.
on the combinatorial properties of the weighted cluster.
Moreover, $({\bf D},\mu)$ is \emph{tame} if and only
if for every cluster $K$ with Enriques diagram ${\bf D}$ the condition
$\dim \mathbb{C}[[x,y]]/H_{K,\mu}=\deg ({\bf D}, \mu)$ holds
(see \cite[4.7.3]{Cas00}).
For every Enriques diagram ${\bf D}$, endowed with an
admissible ordering $\preceq$ of the vertices,
there is a variety ${\mathit{Cl}}({\bf D},\preceq)$ parameterizing all
ordered clusters with ordered Enriques diagram $({\bf D}, \preceq)$
(see \cite{Roe?4}). In the sequel we shall
make use of these spaces and the results on
their relative positions in the variety of all clusters
obtained in \cite{Roe?4}. In particular,
we write $({\bf D}, \preceq) \rightsquigarrow ({\bf D}', \preceq')$ to mean
${\mathit{Cl}}({\bf D}',\preceq')\subset \overline{{\mathit{Cl}}({\bf D}, \preceq)}$.
We begin with a sufficient condition for adjacency.
\begin{Pro}
\label{suf}
Let $({\bf D},\mu)$, $(\tilde {\bf D},\tilde \mu)$ be types, and
assume that there exist a weighted consistent Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ at most
in some free vertices of multiplicity one,
an Enriques diagram ${\bf D}_0$ with the same
number of vertices as ${\bf D}$, and admissible orderings $\preceq$
and $\preceq_0$ of ${\bf D}$ and ${\bf D}_0$ respectively satisfying
\begin{enumerate}
\item $({\bf D},\preceq) \rightsquigarrow ({\bf D}_0,\preceq_0)$,
\item $({\bf D}_0,\preceq_0,{\boldsymbol \mu})$ is tame, and
\item $({\bf D}', \mu') \ge ({\bf D}_0,\preceq_0,{\boldsymbol \mu})$,
\end{enumerate}
where $\boldsymbol \mu$ is the vector of multiplicities
of $({\bf D},\mu)$ for the ordering $\preceq$ of ${\bf D}$.
Then the type $(\tilde {\bf D},\tilde \mu)$ is
adjacent to the type $({\bf D},\mu)$.
\end{Pro}
\begin{proof}
Let $C$ be a germ of curve of type $(\tilde {\bf D},\tilde \mu)$; we
have to see that there is a family of germs containing $C$ whose
general member is of type $({\bf D},\mu)$. Let $f \in \mathbb{C}[[x,y]]$ be
an equation of $C$, and let $\tilde K$ be the cluster
of singular points of $C$. For each vertex $p$
of ${\bf D}'$ not in $\tilde {\bf D}$, whose predecessor is denoted by $q$,
choose a point on $C$ on the first neighbourhood
of the point corresponding to the vertex $q$.
$\tilde K$, together with all these additional points
(which are nonsingular, therefore free of multiplicity 1)
form a cluster $K'$ with Enriques diagram ${\bf D}'$,
with $f \in H_{K',\mu'}$. As
$({\bf D}',\mu') \ge ({\bf D}_0,\preceq_0,{\boldsymbol \mu})$, proposition \ref{cideal}
says that there is a cluster $K_0$ with
Enriques diagram ${\bf D}_0$ such that
$f \in H_{K',\mu'} \subseteq H_{K_0,\preceq_0,{\boldsymbol\mu}}$.
The hypothesis ${\bf D} \rightsquigarrow {\bf D}_0$ says that we can
deform $K_0$ to a family $K_t$ of clusters, $t \in \Delta \subset \mathbb{C}$,
where $\Delta$ is a suitably small disc, such that for $t \ne 0$
the cluster $K_t$ has Enriques diagram ${\bf D}$. Now the
$H_{K_t, \mu}$ form a family of linear subspaces of $\mathbb{C}[[x,y]]$
with constant codimension (because $({\bf D}_0,\preceq_0,{\boldsymbol \mu})$ is tame
and $({\bf D},\mu)$ is consistent) and therefore determine
a family of germs which contain $f$
and whose general member has type $({\bf D},\mu)$, as wanted.
\end{proof}
If needed, it is not hard to obtain from the
family described in the proof of proposition \ref{suf}
a one-dimensional family $C_t$ with the desired properties
and $C_0=C$, even explicitly.
For the particular case when ${\bf D}$ is unibranched,
the reader may find details on the family
$H_{K_t, \mu}$, with explicit equations,
in \cite[3]{Roe01a}.
Note that, as in the linear case, the interest of proposition
\ref{suf} lies in the fact that the conditions can be
checked directly on the Enriques diagrams, using their
combinatorial properties. This is always true for the conditions
that $({\bf D}_0,\preceq_0,{\boldsymbol \mu})$ is tame, and
$(\tilde {\bf D},\tilde \mu) \ge ({\bf D}_0,\preceq_0,{\boldsymbol \mu})$.
The condition $({\bf D},\preceq) \rightsquigarrow ({\bf D}_0,\preceq_0)$
is more difficult to handle, but in some cases (such as when
${\bf D}$ has no satellite points, or when it is unibranched)
it can also be determined from the combinatorial properties of
${\bf D}$ and ${\bf D}_0$ (see \cite{Roe?4}) using proximity matrices.
Next we prove a necessary condition
for adjacency (other necessary conditions,
involving invariants such as the codimension
or the Milnor number, are also known).
\begin{Pro}
Let $({\bf D},\mu)$, $(\tilde {\bf D},\tilde \mu)$ be types such that
there exists a family of curves $C_t$, $t \in \Delta \subset \mathbb{C}$,
whose general members are of type
$({\bf D},\mu)$ and with $C_0$ of type $(\tilde {\bf D},\tilde \mu)$.
Then there exist a weighted consistent Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ at most
in some free vertices of multiplicity one,
an Enriques diagram ${\bf D}_0$ with the same
number of vertices as ${\bf D}$ and admissible orderings
$\preceq$ and $\preceq_0$ of ${\bf D}$ and ${\bf D}_0$
respectively such that
\begin{enumerate}
\item $({\bf D}',\mu') \ge ({\bf D}_0,\preceq_0, {\boldsymbol \mu})$,
and
\item the matrix $P_0^{-1}P$, where $P$ and $P_0$ are the proximity
matrices of $({\bf D},\preceq)$ and $({\bf D}_0,\preceq_0)$ respectively, has
no negative entries.
\end{enumerate}
\end{Pro}
\begin{proof}
Let $S_t \longrightarrow \operatorname{Spec} \mathbb{C}[[x,y]]$ be a desingularization
of the family $C_t$, $t\ne 0$ (\cite{Zar65II}, see also
\cite{Wah74}). Because of the universal
property of the space $X_{n-1}$ of all ordered clusters of $n$ points (see
\cite{Har85} or \cite{Roe?4}) this induces a family of clusters $K_t$
(parameterized by a possibly smaller punctured disc $\Delta' \setminus \{0\}$)
which can be uniquely extended taking $K_0=\lim_{t \rightarrow 0} K_t$
($X_{n-1}$ is projective and therefore complete). All
clusters of this family except maybe $K_0$ have type ${\bf D}$,
and for all $t \in \Delta'$, it is easy to see that
$C_t$ goes through the weighted cluster $(K_t,\mu)$.
Taking ${\bf D}_0$ to be the Enriques diagram of $K_0$, both
claims follow (see \cite{Roe?4} for the second claim).
\end{proof}
Obviously this implies
\begin{Cor}
\label{nec}
Let $({\bf D},\mu)$, $(\tilde {\bf D},\tilde \mu)$ be types such that
$(\tilde {\bf D},\tilde \mu)$ is adjacent to $({\bf D},\mu)$.
Then there exist a weighted consistent Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ at most
in some free vertices of multiplicity one,
an Enriques diagram ${\bf D}_0$ with the same
number of vertices as ${\bf D}$, and admissible orderings $\preceq$
and $\preceq_0$ of ${\bf D}$ and ${\bf D}_0$ respectively such that
\begin{enumerate}
\item $({\bf D}', \mu') \ge ({\bf D}_0,\preceq_0,{\boldsymbol\mu})$, and
\item the matrix $P_0^{-1}P$, where $P$ and $P_0$ are the proximity
matrices of $({\bf D},\preceq)$ and $({\bf D}_0,\preceq_0)$ respectively,
has no negative entries.
\end{enumerate}
\end{Cor}
Again, the interest of
\ref{nec} lies in the fact that the conditions can be
checked directly on the Enriques diagrams, using their
combinatorial properties. Thus we prove, for example, that some
types (including all irreducible curve
singularities with a single characteristic exponent
$m/n$ with $n<m<2n$) allow only linear adjacencies:
\begin{Cor}
Let $({\bf D},\mu)$, $(\tilde {\bf D},\tilde \mu)$ be types such that
$(\tilde {\bf D},\tilde \mu)$ is adjacent to $({\bf D},\mu)$, and suppose that
${\bf D}$ has at most two free vertices. Then $(\tilde {\bf D},\tilde \mu)$ is
linearly adjacent to $({\bf D},\mu)$.
\end{Cor}
\begin{proof}
If $p$ is a satellite vertex of ${\bf D}$ then there are
at least two vertices in ${\bf D}$ preceding it (namely,
the two vertices to which $p$ is proximate). Therefore,
if ${\bf D}$ has only one free vertex then it consists of the
root alone, and if it has two free vertices they must
be the root and another vertex which is the unique one
which has the root as immediate predecessor. Under
these conditions, it is not hard to see
that, given any admissible ordering $\preceq$ on ${\bf D}$, if
$({\bf D}_0,\preceq_0)$ is an ordered Enriques diagram
such that the matrix $P_0^{-1}P$ has no negative
entries, where $P$ and $P_0$ are the proximity
matrices of $({\bf D},\preceq)$ and $({\bf D}_0,\preceq_0)$ respectively,
then $({\bf D},\preceq)=({\bf D}_0,\preceq_0)$. Now the claim
follows from \ref{nec} and \ref{linesp}.
\end{proof}
The fact that the varieties ${\mathit{Cl}}({\bf D})$ do not form a stratification
of the space of all clusters (i.e. there exist ${\bf D}$, ${\bf D}'$
with ${\mathit{Cl}}({\bf D}') \cap \overline {{\mathit{Cl}}({\bf D})} \ne \emptyset$ and
${\bf D} \not\rightsquigarrow {\bf D}'$), which is proved in \cite{Roe?4},
implies that the equisingularity classes do not form a stratification
of $\mathbb{C}[[x,y]]$ (i.e. there exist types
$({\bf D}, \mu)$, $(\tilde {\bf D},\tilde \mu)$ and curves of
type $(\tilde {\bf D},\tilde \mu)$ that can be deformed to
curves of type $({\bf D}, \mu)$ without $(\tilde {\bf D},\tilde \mu)$
being adjacent to $({\bf D}, \mu)$). This is shown in the
following example:
\begin{Exa}
\label{contrex}
Let $({\bf D}, \mu)$, $({\bf D}', \mu')$, $(\tilde {\bf D},\tilde \mu)$ be the
Enriques diagrams of figure~\ref{figcontrex}.
In \cite{Roe?4} it is shown that there exist clusters
$K$ and $K'$ with Enriques diagram ${\bf D}'$ such that
$K'$ can be deformed to clusters with Enriques diagram
${\bf D}$ and $K$ can not.
If $C$ is a curve of type $(\tilde {\bf D},\tilde \mu)$,
and $(K', \mu')$ is the cluster (of type $({\bf D}' , \mu ')$) formed by the
singular points and the two first nonsingular points on each branch of
$C$, then it is not hard to deform
it to curves of type $({\bf D}, \mu)$, using the method
of the proof of proposition \ref{suf}.
On the other hand, $(\tilde {\bf D},\tilde \mu)$ is
not adjacent to $({\bf D}, \mu)$; this can be proved
using that $K$ cannot be deformed to clusters
with Enriques diagram ${\bf D}$ or, more easily, by
observing that both types have the
same codimension.
\begin{figure}
\caption{Enriques diagrams corresponding
to the types of example \ref{contrex}.}
\label{figcontrex}
\end{figure}
\end{Exa}
\section{Non-linear adjacency via Hilbert schemes}
Non-linear adjacency can be approached using Hilbert schemes
instead of varieties of clusters. In fact, it is possible to
give a characterization of all adjacencies in terms of
the relative positions of some subschemes of the Hilbert
scheme of points on a surface. However, these relative
positions are in general not known, so the answer obtained
using Hilbert schemes is theoretical and not easy to put in practice,
in contrast with the criteria given above, which are
combinatorial and can be effectively applied.
As customary, $\operatorname{Hilb}^n R$ will denote the Hilbert
scheme parameterizing ideals of colength $n$ in
$R= \mathbb{C}[[x,y]]$. We consider also the ``nested Hilbert
scheme''
$Z_{n_1, n_2}R \subset (\operatorname{Hilb}^{n_1} R) \times (\operatorname{Hilb}^{n_2} R)$
studied by J. Cheah, which parameterizes pairs of ideals
$(I_1, I_2)$ with $I_1 \supset I_2$
(see \cite{Cheah}, \cite{Che98a}).
For every type $({\bf D},\mu)$, let $\operatorname{Hilb}_{\bf D}^{\mu} R$ be
the subset of $\operatorname{Hilb}^n R$ parameterizing the
ideals $H_{K,\mu}$ where $K$ are clusters
with Enriques diagram ${\bf D}$, and $n=\deg ({\bf D}, \mu)$. It is known that
$\operatorname{Hilb}_{\bf D}^{\mu} R$ is a locally closed irreducible
subscheme of $\operatorname{Hilb}^n R$
(see \cite{KP99}, \cite{NV97}, \cite{Lossen}, for example);
$\overline {\operatorname{Hilb}_{\bf D}^{\mu} R}$ will denote its closure
in $\operatorname{Hilb}^n R$.
\begin{Teo}
\label{hilbesp}
Let $({\bf D},\mu)$, $(\tilde {\bf D},\tilde \mu)$ be types.
$(\tilde {\bf D},\tilde \mu)$ is adjacent to $({\bf D},\mu)$ if
and only if there exists a weighted consistent Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ at most
in some free vertices of multiplicity one, satisfying
$\operatorname{Hilb}^{\mu'}_{{\bf D}'} R \subset \pi' \pi^{-1}
\left(\overline {\operatorname{Hilb}^{\mu}_{{\bf D}}R}\right)$,
where $\pi$ and $\pi'$ are the projections of
$Z_{n,n'}R$ onto $\operatorname{Hilb}^{n}R$ and $\operatorname{Hilb}^{n'} R$
respectively, and $n=\deg ({\bf D}, \mu)$,
$n'=\deg ({\bf D}', \mu')$.
\end{Teo}
To prove theorem \ref{hilbesp} we shall use the following
lemma:
\begin{Lem}
\label{unicf}
Let $({\bf D},\mu)$, $({\bf D}', \mu')$ be types such that
$({\bf D}', \mu')$ is adjacent to $({\bf D},\mu)$. Then
for every $f \in \mathbb{C}[[x,y]]$
defining a reduced germ of curve of type $({\bf D}',\mu')$,
there exists an ideal $I \in \overline {\operatorname{Hilb}_{\bf D}^{\mu} R}$ with
$f \in I$.
\end{Lem}
\begin{proof}
Let $f \in \mathbb{C}[[x,y]]$ be a germ of equation
of a curve of type $({\bf D}',\mu')$. Because of the adjacency,
there exists a family of germs $f_t$, $t \in \Delta \subset \mathbb{C}$,
whose general members are of type
$({\bf D},\mu)$ and with $f_0=f$.
Let $S_t \longrightarrow \operatorname{Spec} \mathbb{C}[[x,y]]$ be a desingularization
of the family $f_t$, $t\ne 0$ (\cite{Zar65II}, see also
\cite{Wah74}). Because of the universal
property of the space of all clusters (see \cite{Har85} or
\cite{Roe?4}) this induces a family of clusters $K_t$
(parameterized by a possibly smaller punctured disc
$\Delta' \setminus \{0\}$). Now the
$I_t=H_{K_t, \mu}$ form a (complex) one-dimensional family
% NOTE(review): source read "Hilb_D^mu S"; the scheme is denoted with R
% everywhere else — confirm this correction.
inside $\operatorname{Hilb}_{\bf D}^{\mu} R$
which can be uniquely extended with
$I_0=\lim_{t \rightarrow 0} I_t$.
It is easy to see that, for all $t \in \Delta'$,
$f_t \in I_t$, so the claim follows for $I=I_0$.
\end{proof}
\begin{proof}[Proof of theorem \ref{hilbesp}]
The \emph{if} part of the claim is proved in a similar
way to the proof of proposition \ref{suf};
we leave the details to check for the reader.
For the \emph{only if} part of the claim
we shall prove that assuming that
$(\tilde {\bf D},\tilde \mu)$ is adjacent to $({\bf D},\mu)$
and that there exists no consistent weighted Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ only
in free vertices of multiplicity one, in the conditions
of the claim, leads to a contradiction.
The second assumption means that, for every
consistent Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ only
in free vertices of multiplicity one, there are clusters $K'$
with $H_{K',\mu'} \in \operatorname{Hilb}^{\mu'}_{{\bf D}'} R \setminus
\pi' \pi^{-1} \left(\overline {\operatorname{Hilb}^{\mu}_{{\bf D}}R}\right)$.
Consider the sequence of weighted Enriques diagrams
defined as follows. $({\bf D}_1, \mu_1)$ is obtained
from $(\tilde {\bf D}, \tilde \mu)$ by adding
$$\tilde \mu_p- \sum_{q \text{ prox. to } p} \tilde \mu_q$$
free successors of multiplicity 1 to each $p \in \tilde {\bf D}$,
and for $k>1$, $({\bf D}_k, \mu_k)$ is obtained from
$({\bf D}_{k-1}, \mu_{k-1})$ by adding a free successor
of multiplicity 1 to
each extremal vertex (which will be free of multiplicity 1).
Obviously $({\bf D}_{k-1}, \mu_{k-1})$ is a subdiagram
of $({\bf D}_k, \mu_k)$ for all $k>1$, and
it is not hard to see that the
map $F_k:\operatorname{Hilb}^{\mu_k}_{{\bf D}_k}R \longrightarrow
\operatorname{Hilb}^{\mu_{k-1}}_{{\bf D}_{k-1}}R$
defined by sending $H_{K,\mu_k}$ to $H_{\breve K, \mu_{k-1}}$,
where $\breve K$ is the subcluster of $K$ with diagram
${\bf D}_{k-1}$, satisfies
$$F_k\left(\operatorname{Hilb}^{\mu_k}_{{\bf D}_k} R \setminus
\pi_k \pi^{-1} \left(\overline {\operatorname{Hilb}^{\mu}_{{\bf D}}R}\right)\right)=
\operatorname{Hilb}^{\mu_{k-1}}_{{\bf D}_{k-1}} R \setminus
\pi_{k-1}\pi^{-1} \left(\overline {\operatorname{Hilb}^{\mu}_{{\bf D}}R}\right).$$
Therefore we can construct a sequence of clusters
$K_1, K_2, \dots$ such that
$H_{K_k, \mu_k} \in \operatorname{Hilb}^{\mu_k}_{{\bf D}_k}R \setminus
\pi_k \pi^{-1} \left(\overline {\operatorname{Hilb}^{\mu}_{{\bf D}}R}\right)$
and each $K_k$ is obtained from
$K_{k-1}$ by adding in the first neighbourhood of each extremal point
a free point of multiplicity one. But then there exists
a reduced germ $f$ of type $(\tilde {\bf D}, \tilde \mu)$
belonging to all $H_{K_k, \mu_k}$ (see \cite[5.7]{Cas00}).
Now by lemma \ref{unicf}, there exists an ideal
$I \in \overline {\operatorname{Hilb}_{\bf D}^{\mu} R}$ with
$f \in I$; as $\dim_{\mathbb{C}} \mathbb{C}[[x,y]]/I = n$,
we must have $I \supset (x,y)^n$ also.
On the other hand, applying \cite[5.7.1]{Cas00}
and \cite[7.2.16]{Cas00},
for $k$ big enough we infer that
$H_{K_k, {\bf m}u_k} \subset (f)+(x,y)^n$,
which implies $H_{K_k, {\bf m}u_k} \subset I$, a contradiction.
\end{proof}
\begin{Rem}
Linear adjacencies may also be dealt with using Hilbert
schemes; indeed, with notations as above,
$(\tilde {\bf D},\tilde \mu)$ is linearly adjacent to $({\bf D},\mu)$ if
and only if there exists a weighted consistent Enriques diagram
$({\bf D}',\mu')$, differing from $(\tilde {\bf D},\tilde \mu)$ at most
in some free vertices of multiplicity one, satisfying
$\operatorname{Hilb}^{\mu'}_{{\bf D}'} R \subset \pi' \pi^{-1}
\operatorname{Hilb}^{\mu}_{{\bf D}}R$. Again this criterion is hard to
apply, in contrast to the purely combinatorial one we gave
before. We skip the proof, which adds no new ideas
to what we did before.
\end{Rem}
\begin{Rem}
For types $({\bf D},\mu)$ where ${\bf D}$ has three vertices or
less, the closure of $\operatorname{Hilb}^{\mu}_{\bf D} R$ is known, due to
the works \cite{Evain} and \cite{eva?1} of \'Evain; so in this case
the Hilbert scheme method does give a characterization of adjacencies.
Very few other particular situations can be handled
explicitly; we would like to mention an example due
to Russell (see \cite{Rus??}) in which the study of
the Hilbert scheme provides an example (like \ref{contrex})
showing that types do not stratify $\mathbb{C}[[x,y]]$.
\end{Rem}
\end{document}
|
\begin{document}
\thispagestyle{empty}
\title[Wave equation with singular Kelvin-Voigt damping]{Stabilization for the wave equation with singular Kelvin-Voigt damping}
\author{Ka\"{\i}s AMMARI}
\address{UR Analysis and Control of Pde, UR 13ES64, Department of Mathematics, Faculty of Sciences of Monastir, University of Monastir, 5019 Monastir, Tunisia}
\email{[email protected]}
\author{Fathi HASSINE}
\address{UR Analysis and Control of Pde, UR 13ES64, Department of Mathematics, Faculty of Sciences of Monastir, University of Monastir, 5019 Monastir, Tunisia}
\email{[email protected]}
\author{Luc ROBBIANO}
\address{Laboratoire de Math\'ematiques, Universit\'e de Versailles Saint-Quentin en Yvelines, 78035 Versailles, France}
\email{[email protected]}
\begin{abstract}
We consider the wave equation with Kelvin-Voigt damping in a bounded domain. The exponential stability result proposed by Liu and Rao \cite{liu-rao2} or T\'ebou \cite{tebou} for that system assumes that the damping is localized in a neighborhood of the whole or a part of the boundary under some consideration. In this paper we propose to deal with this geometrical condition by considering a singular Kelvin-Voigt damping which is localized far away from the boundary. In this particular case the lack of uniform decay of the energy was proved by Liu and Liu \cite{liu-liu}. However, we show that the energy of the wave equation decreases logarithmically to zero as time goes to infinity. Our method is based on the frequency domain method. The main feature of our contribution is to write the resolvent problem as a transmission system to which we apply a specific Carleman estimate.
\end{abstract}
\subjclass[2010] {35A01, 35A02, 35M33, 93D20}
\keywords{Carleman estimate, stabilization, wave equation, singular Kelvin-Voigt damping}
\maketitle
\tableofcontents
\section{Introduction and main results}
\setcounter{equation}{0}
There are several mathematical models representing physical damping. The most often encountered types of damping in vibration studies are linear viscous damping \cite{ammari-niciase, blr, lebeau, lebeau-robbiano2} and Kelvin-Voigt damping \cite{hassine1,liu-liu, liu-rao1,liu-rao2}, which are special cases of proportional damping. Viscous damping usually models external friction forces such as air resistance acting on the vibrating structures and is thus called ``external damping'', while Kelvin-Voigt damping originates from the internal friction of the material of the vibrating structures and is thus called ``internal damping'' or ``material damping''. This type of material is encountered in real life when one uses patches to suppress vibrations, the modeling aspect of which may be found in \cite{banks-smith-wang}. This type of question was examined in the one-dimensional setting in \cite{liu-liu}, where it was shown that the longitudinal motion of an Euler-Bernoulli beam modeled by a locally damped wave equation with Kelvin-Voigt damping is not exponentially stable when the junction between the elastic part and the viscoelastic part of the beam is not smooth enough. Later on, the wave equation with Kelvin-Voigt damping in the multidimensional setting was examined in \cite{liu-rao2}; in particular, those authors showed the exponential decay of the energy by assuming that the damping region is a neighborhood of the whole boundary. Later on, it was shown that the exponential decay of the energy could be obtained by just imposing that the damping region is a neighborhood of part of the boundary \cite{tebou}.
Let $\Omega \subset \mathbb{R}^{n}$, $n \geq 2,$ be a bounded domain with a sufficiently smooth boundary $\Gamma=\partial \Omega$. Let $\omega$ be a nonempty open subset of $\Omega$ with smooth boundary $\mathcal{I}=\partial\omega$ (see Figure \ref{fig1}).
Consider the damped wave system
\begin{equation}
\label{wave1}
\partial_t^2 u - \Delta u - \, \mathrm{div}(a(x) \, \nabla \partial_t u) = 0, \, \Omega \times (0,+\infty),
\end{equation}
\begin{equation}
\label{wave2}
u = 0, \, \partial \Omega \times (0,+\infty),
\end{equation}
\begin{equation}
\label{wave3}
u(x,0) = u^0, \, \partial_t u(x,0) = u^1 (x), \, \Omega,
\end{equation}
where $a(x)=d\,\mathbb{1}_{\omega}(x)$ and $d>0$ is a constant.
\begin{figure}[htbp]
\centering
\includegraphics[scale=0.8]{figure1}
\caption{The domain $\Omega$}
\label{fig1}
\end{figure}
System \eqref{wave1}-\eqref{wave3}, involving a constructive viscoelastic damping $\mathrm{div}(a(x)\nabla u_{t})$, models the vibrations of an elastic body which has one part made of viscoelastic material. In the case of global viscoelastic damping ($a>0$), the wave equation \eqref{wave1}-\eqref{wave3} generates an analytic semigroup, whose spectrum is contained in a sector of the left half complex plane (see \cite{chen-liu-liu}). The situation of local viscoelastic damping is more delicate, due to the unboundedness of the viscoelastic damping and the discontinuity of the materials.
In \cite{liu-liu}, it was proved that the energy of a one-dimensional wave equation with local viscoelastic damping does not decay uniformly if the damping coefficient $a$ is discontinuous across the interface of the materials. Because of the discontinuity of the materials across the interface, the dissipation is badly transmitted from the viscoelastic region to the elastic region, where the energy decays slowly. Nevertheless, this does not contradict the well-known ``geometric optics'' condition in \cite{blr}, since the viscoelastic damping is unbounded in the energy space. The loss of uniform stability is caused by the discontinuity of material properties across the interface and the unboundedness of the viscoelastic damping. In this paper, we prove a logarithmic decay of the energy. Our idea is to transform the resolvent problem of system \eqref{wave1}-\eqref{wave2} into a transmission system, to be able to quantify the discontinuity of the material properties across the interface through the so-called Carleman estimate. Note that recently the same problem was treated in \cite{hassine1}, where it was proved that the energy decreases polynomially in time, but only in the one-dimensional case (even for a transmission system).
We define the natural energy of a solution $u$ of \eqref{wave1}-\eqref{wave3} at instant $t$ by
\begin{equation*}
E(u,t)=
\frac{1}{2} \left\|(u(t),\partial_t u(t))\right\|_{H^1_0 (\Omega) \times L^2(\Omega)}^2, \, \forall \, t \geq 0.
\end{equation*}
Simple formal calculations give
\begin{equation*}
E(u,0)-E(u,t)= \, d \,\int_{0}^{t} \int_{\omega}\left|\nabla\partial_{t} u(x,s)\right|^2 \,\mathrm{d} x\,\mathrm{d} s,\quad\forall\, t\geq 0,
\end{equation*}
and therefore the energy is a non-increasing function of the time variable $t$.
\begin{thm}\label{LogStab}
For any $k\in\mathbb{N}^*$ there exists $C>0$ such that for any initial data $(u^{0},u^{1})\in\mathcal{D}(\mathcal{A}^{k})$ the solution $u(x,t)$ of \eqref{wave1} starting from $(u^{0},u^{1})$ satisfies
$$E(u,t)\leq\frac{C}{(\ln(2+t))^{2k}}\|(u^{0},u^{1})\|_{\mathcal{D}(\mathcal{A}^{k})}^{2},\quad\forall\,t>0,$$
where $(\mathcal{A}, \mathcal{D}(\mathcal{A}))$ is defined in Section \ref{wellposed}.
\end{thm}
This paper is organized as follows. In Section \ref{wellposed}, we give the proper functional setting for system \eqref{wave1}-\eqref{wave3}, and prove that this system is well-posed. In Section \ref{carleman}, we establish a Carleman estimate which corresponds to the system \eqref{wave1}-\eqref{wave3}. Finally, in Section \ref{stab}, we study the stabilization for \eqref{wave1}-\eqref{wave3} by the resolvent method and give the explicit decay rate of the energy of the solutions of \eqref{wave1}-\eqref{wave3}.
\section{Well-posedness and strong stability}\label{wellposed}
We define the energy space by $\mathcal{H}=H_{0}^{1}(\Omega)\times L^{2}(\Omega)$, which is endowed with the usual inner product
$$
\left\langle(u_{1},v_{1});(u_{2},v_{2})\right\rangle=\int_{\Omega}\nabla u_{1}(x)\cdot\nabla \overline{u}_{2}(x)\,\mathrm{d} x+\int_{\Omega}v_{1}(x)\overline{v}_{2}(x)\,\mathrm{d} x.
$$
We next define the linear unbounded operator $\mathcal{A}:\mathcal{D}(\mathcal{A})\subset\mathcal{H}\longrightarrow\mathcal{H}$ by
$$
\mathcal{D}(\mathcal{A})=\{(u,v)\in\mathcal{H}: v\in H_{0}^{1}(\Omega),\; \Delta u+\mathrm{div}(a\nabla v)\in L^{2}(\Omega)\}
$$
and
$$
\mathcal{A}(u,v)^{t}=(v,\Delta u+\mathrm{div}(a\nabla v))^{t}.
$$
Then, putting $v=\partial_{t} u$, we can write \eqref{wave1}-\eqref{wave3} as the following Cauchy problem
$$
\frac{d}{dt}(u(t),v(t))^{t}=\mathcal{A}(u(t),v(t))^{t},\;(u(0),v(0))=(u^{0}(x),u^{1}(x)).
$$
\begin{thm}
The operator $\mathcal{A}$ generates a $C_{0}$-semigroup of contractions on the energy space $\mathcal{H}$.
\end{thm}
\begin{proof}
Firstly, it is easy to see that for all $(u,v)\in\mathcal{D}(\mathcal{A})$, we have
$$
\mathrm{Re}\left\langle\mathcal{A}(u,v);(u,v)\right\rangle=-\int_{\Omega}a|\nabla v(x)|^{2}\,\mathrm{d} x,
$$
which shows that the operator $\mathcal{A}$ is dissipative.
Next, for any given $(f,g)\in\mathcal{H}$, we solve the equation $\mathcal{A}(u,v)=(f,g)$, which is recast in the following way
\begin{equation}\label{WPwave}
\left\{\begin{array}{l}
v=f,
\\
\Delta u+\mathrm{div}(a\nabla f)=g.
\end{array}\right.
\end{equation}
It is well known that by the Lax-Milgram theorem the system \eqref{WPwave} admits a unique solution $u\in H_{0}^{1}(\Omega)$. Moreover, by multiplying the second line of \eqref{WPwave} by $\overline{u}$, integrating over $\Omega$ and using the Poincar\'e inequality and the Cauchy-Schwarz inequality, we find that there exists a constant $C>0$ such that
$$
\int_{\Omega}|\nabla u(x)|^{2}\,\mathrm{d} x\leq C\left(\int_{\Omega}|\nabla f(x)|^{2}\,\mathrm{d} x+\int_{\Omega}|g(x)|^{2}\,\mathrm{d} x\right).
$$
It follows that for all $(u,v)\in\mathcal{D}(\mathcal{A})$ we have
$$
\|(u,v)\|_{\mathcal{H}}\leq C\|(f,g)\|_{\mathcal{H}}.
$$
This implies that $0\in\rho(\mathcal{A})$ and, by the contraction principle, we easily get $R(\lambda\mathrm{I}-\mathcal{A})=\mathcal{H}$ for sufficiently small $\lambda>0$. The density of the domain of $\mathcal{A}$ follows from \cite[Theorem 1.4.6]{Pazy}. Then, thanks to the Lumer-Phillips theorem (see \cite[Theorem 1.4.3]{Pazy}), the operator $\mathcal{A}$ generates a $C_{0}$-semigroup of contractions on the Hilbert space $\mathcal{H}$.
\end{proof}
\begin{thm}
The semigroup $e^{t\mathcal{A}}$ is strongly stable in the energy space $\mathcal{H}$, i.e.,
$$
\lim_{t\to+\infty}\|e^{t\mathcal{A}}(u_{0},v_{0})^{t}\|_{\mathcal{H}}=0,\;\forall\,(u_{0},v_{0})\in\mathcal{H}.
$$
\end{thm}
\begin{proof}
To show that the semigroup $(e^{t\mathcal{A}})_{t\geq 0}$ is strongly stable we only have to prove that the intersection of $\sigma(\mathcal{A})$ with $i\mathbb{R}$ is an empty set. Since the resolvent of the operator $\mathcal{A}$ is not compact (see \cite{liu-liu, liu-rao2}) but $0\in\rho(\mathcal{A})$, we only need to prove that $(i\mu I-\mathcal{A})$ is a one-to-one correspondence in the energy space $\mathcal{H}$ for all $\mu\in\mathbb{R}^{*}$. The proof will be done in two steps: in the first step we will prove the injectivity of $(i\mu I-\mathcal{A})$ and in the second step we will prove the surjectivity of the same operator.
i) Let $(u,v)\in\mathcal{D}(\mathcal{A})$ be such that
\begin{equation}\label{Iwave}
\mathcal{A}(u,v)^{t}=i\mu(u,v)^{t}.
\end{equation}
Then taking the real part of the scalar product of \eqref{Iwave} with $(u,v)$ we get
$$
\mathrm{Re}(i\mu\|(u,v)\|_{\mathcal{H}}^{2})=\mathrm{Re}\left\langle\mathcal{A}(u,v),(u,v)\right\rangle=-d\int_{\omega}|\nabla v|^{2}\,\mathrm{d} x=0,
$$
which implies that
\begin{equation}\label{Dwave}
\nabla v=0 \qquad \text{ in }\,\omega.
\end{equation}
Inserting \eqref{Dwave} into \eqref{Iwave}, we obtain
\begin{equation}\label{waveI1}
\left\{\begin{array}{ll}
\mu^{2}u+\Delta u=0&\text{in }\Omega\backslash\omega,
\\
\nabla u=0&\text{in }\omega,
\\
u=0&\text{on }\Gamma.
\end{array}\right.
\end{equation}
We denote by $w_{j}=\partial_{x_{j}}u$; differentiating the first and the second equations of \eqref{waveI1}, one gets
\begin{equation*}
\left\{\begin{array}{ll}
\mu^{2}w_{j}+\Delta w_{j}=0&\text{in }\Omega,
\\
w_{j}=0&\text{in }\omega.
\end{array}\right.
\end{equation*}
Hence, from the unique continuation theorem we deduce that $w_{j}=0$ in $\Omega$ and therefore $u$ is constant in $\Omega$, and since $u_{|\Gamma}=0$ it follows that $u\equiv 0$. We have thus proved that $\mathrm{Ker}(i\mu I-\mathcal{A})=0$.
ii) Now given $(f,g)\in\mathcal{H}$, we solve the equation
$$
(i\mu I-\mathcal{A})(u,v)=(f,g),
$$
or equivalently,
\begin{equation}\label{Swave}
\left\{\begin{array}{l}
v=i\mu u-f,
\\
\mu^{2}u+\Delta u+i\mu\,\mathrm{div}(a\nabla u)=\mathrm{div}(a\nabla f)-i\mu f-g.
\end{array}\right.
\end{equation}
Let us define the operator
$$
Au=-(\Delta u+i\mu\,\mathrm{div}(a\nabla u)),\quad \forall\, u\in H_{0}^{1}(\Omega).
$$
It is easy to show that $A$ is an isomorphism from $H_{0}^{1}(\Omega)$ onto $H^{-1}(\Omega)$. Then the second line of \eqref{Swave} can be written as follows
\begin{equation}\label{Eqwave}
u-\mu^{2}A^{-1}u=A^{-1}\left[g+i\mu f-\mathrm{div}(a\nabla f)\right].
\end{equation}
If $u\in\mathrm{Ker}(I-\mu^{2}A^{-1})$, then $\mu^{2}u-Au=0$. It follows that
\begin{equation}\label{Awave}
\mu^{2}u+\Delta u+i\mu\,\mathrm{div}(a\nabla u)=0.
\end{equation}
Multiplying \eqref{Awave} by $\overline{u}$ and integrating over $\Omega$, then by Green's formula we obtain
$$
\mu^{2}\int_{\Omega}|u(x)|^{2}\,\mathrm{d} x-\int_{\Omega}|\nabla u(x)|^{2}\,\mathrm{d} x-id\mu\int_{\omega}|\nabla u(x)|^{2}\,\mathrm{d} x=0.
$$
This shows that
$$
d\int_{\omega}|\nabla u(x)|^{2}\,\mathrm{d} x=0,
$$
which implies that $\nabla u=0$ in $\omega$.
\\
Inserting this last equation into~\eqref{Awave} we get
$$
\mu^{2}u+\Delta u=0,\qquad \text{in }\Omega.
$$
Once again, using the unique continuation theorem as in the first step, where we recall that $u_{|\Gamma}=0$, we get $u=0$ in $\Omega$. This implies that $\mathrm{Ker}(I-\mu^{2}A^{-1})=\{0\}$. On the other hand, thanks to the compact embeddings $H_{0}^{1}(\Omega)\hookrightarrow L^{2}(\Omega)$ and $L^{2}(\Omega)\hookrightarrow H^{-1}(\Omega)$, we see that $A^{-1}$ is a compact operator in $H_{0}^{1}(\Omega)$. Now thanks to Fredholm's alternative, the operator $(I-\mu^{2}A^{-1})$ is bijective in $H_{0}^{1}(\Omega)$, hence the equation \eqref{Eqwave} has a unique solution in $H_{0}^{1}(\Omega)$, which yields that the operator $(i\mu I-\mathcal{A})$ is surjective in the energy space $\mathcal{H}$. The proof is thus complete.
\end{proof}
\section{Carleman estimate}\label{carleman}
For any $s\in\mathbb{R}$ we define the Sobolev space with a parameter $\tau$, $H_{\tau}^{s}$, by
$$
u(x,\tau)\in H^{s}_{\tau}\,\Longleftrightarrow\,\left\langle\xi,\tau\right\rangle^{s}\hat{u}(\xi,\tau)\in L^{2};\qquad\langle\xi,\tau\rangle^{2}=|\xi|^{2}+\tau^{2},
$$
where $\hat{u}$ denotes the partial Fourier transform with respect to $x$.
The class of symbols of order $m$ is defined by
$$
\mathcal{S}_{\tau}^{m}=\left\{a(x,\xi,\tau)\in\mathcal{C}^{\infty};
\;|\partial_{x}^{\alpha}\partial_{\xi}^{\beta}a(x,\xi,\tau)|\leq C_{\alpha,\beta}\langle\xi,\tau\rangle ^{m-|\beta|} \right\}
$$
and the class of tangential symbols of order $m$ by
$$
\mathcal{TS}_{\tau}^{m}=\left\{a(x,\xi',\tau)\in\mathcal{C}^{\infty}
;\;|\partial_{x}^{\alpha}\partial_{\xi'}^{\beta}a(x,\xi',\tau)|\leq C_{\alpha,\beta}\langle\xi',\tau\rangle^{m-|\beta|} \right\}.
$$
We denote by $\mathcal{O}^{m}$ (resp. $\mathcal{TO}^{m}$) the set of pseudo-differential operators $A=\mathrm{op}(a)$, $a\in\mathcal{S}^{m}$ (resp. $a\in\mathcal{TS}^{m}$). We shall use the symbol $\Lambda=\langle\xi',\tau\rangle=(|\xi'|^{2}+\tau^{2})^{\frac{1}{2}}$.
Consider a bounded smooth open set $\mathcal{U}$ of $\mathbb{R}^{n}$ with boundary $\partial\mathcal{U}=\gamma$. We set $\mathcal{U}_{1}$ and $\mathcal{U}_{2}$ two smooth open subsets of $\mathcal{U}$ with boundaries $\partial\mathcal{U}_{1}=\gamma_{0}$ and $\partial\mathcal{U}_{2}=\gamma_{0}\cup\gamma$ such that $\overline{\gamma}_{0}\cap\overline{\gamma}=\emptyset$. We denote by $\nu(x)$ the unit outer normal to $\mathcal{U}_{2}$ if $x\in\gamma_{0}\cup\gamma$.\label{carl53}
For $\tau$ a large parameter and $\varphi_{1}$ and $\varphi_{2}$ two weight functions of class $\mathcal{C}^{\infty}$ in
$\overline{\mathcal{U}}_{1}$ and $\overline{\mathcal{U}}_{2}$ respectively, such that
$\varphi_{1|\gamma_{0}}=\varphi_{2|\gamma_{0}}$, we denote by $\varphi(x)=\mathrm{diag}(\varphi_{1}(x),\varphi_{2}(x))$
and let $\alpha$ be a non-null complex number.
We set the differential operator
$$
P=\mathrm{diag}(P_{1},P_{2})=\mathrm{diag}\left(-\Delta+\frac{\tau^{2}}{1+\alpha\tau},-\Delta-\tau^{2}\right),
$$
and its conjugate operator
$$
P(x,D,\tau)=\mathrm{e}^{\tau\varphi}P\mathrm{e}^{-\tau\varphi}=\mathrm{diag}(P_{1}(x,D,\tau),P_{2}(x,D,\tau)),
$$
with principal symbol $p(x,\xi,\tau)$ given by
\begin{align*}
p(x,\xi,\tau)&=\mathrm{diag}(p_{1}(x,\xi,\tau),p_{2}(x,\xi,\tau))
\\
&=\mathrm{diag}(|\xi|^{2}
+2i\tau \xi\cdot\nabla\varphi_{1}-\tau^{2}|\nabla\varphi_{1}|^{2},|\xi|^{2}+2i\tau\xi\cdot\nabla\varphi_{2}-\tau^{2}|\nabla\varphi_{2}|^{2}-\tau^{2}).
\end{align*}
In a small neighborhood $W$ of a point $x_{0}$ of $\gamma_{0}$, we place ourselves in normal geodesic
coordinates and we denote by $x_{n}$ the variable that is normal to the interface $\gamma_{0}$ and by $x'$
the remaining spatial variables, i.e., $x=(x',x_{n})$. The interface $\gamma_{0}$ is now given by
$\gamma_{0}=\{x\,;\;x_{n}=0\}$, where in particular we can assume that $x_{0}=(0,0)$ and that $W$ is symmetric with
respect to $x_{n}\longmapsto-x_{n}$. We denote by
$$
W_{1}=\{x\in\mathbb{R}^{n},\;x_{n}>0\}\cap W,\qquad \text{and}\qquad W_{2}=\{x\in\mathbb{R}^{n},\;x_{n}<0\}\cap W.
$$
Next we will proceed like Bellassoued in \cite{bellassoued} and reduce the transmission problem to only one side. The operator $P_{1}$ on $W_{1}$ is written in the form
$$
P_{1}(x,D)=D_{x_{n}}^{2}+R(+x_{n},x',D_{x'})+\frac{\tau^{2}}{1+\alpha\tau},
$$
and the operator $P_{2}$ on $W_{2}$ can be identified locally with an operator in $W_{1}$ given by
$$
P_{2}(x,D)=D_{x_{n}}^{2}+R(-x_{n},x',D_{x'})-\tau^{2}.
$$
We denote the tangential operator, with $\mathcal{C}^{\infty}$ coefficients defined in $W_{1}$, by
$$
R(x,D_{x'})=\mathrm{diag}(R(+x_{n},x',D_{x'}),R(-x_{n},x',D_{x'}))=\mathrm{diag}(R_{1}(x,D_{x'}),R_{2}(x,D_{x'}))
$$
with principal symbol $r(x,\xi')=\mathrm{diag}(r_{1}(x,\xi'),r_{2}(x,\xi'))$, where the quadratic form $r_{k}(x,\xi')$, $k=1,2$, satisfies
$$
\exists\,C>0,\quad\forall\,(x,\xi')\in W_{1}\times\mathbb{R}^{n-1},\quad r_{k}(x,\xi')\geq C|\xi'|^{2},\qquad k=1,2.
$$
We assume that $\varphi$ satisfies
\begin{eqnarray}
|\nabla\varphi_{k}(x)|> 0,\;\forall\,x\in\overline{W}_{1},\quad k=1,2,\label{carl2}
\\
\partial_{x_{n}}\varphi_{1}(x',0)<0\quad\text{and}\quad\partial_{x_{n}}\varphi_{2}(x',0)>0,
\\
\left(\partial_{x_{n}}\varphi_{1}(x',0)\right)^{2}-\left(\partial_{x_{n}}\varphi_{2}(x',0)\right)^{2}>1.
\end{eqnarray}
The principal symbol $p(x,\xi,\tau)$ of $P(x,D,\tau)$ is now given by
$$
p(x,\xi,\tau)=\mathrm{diag}(p_{1}(x,\xi,\tau),p_{2}(x,\xi,\tau))=\left(\xi_{n}+i\tau(\partial_{x_{n}}\varphi)\right)^{2}
+r(x,\xi'+i\tau(\partial_{x'}\varphi))-\mathrm{diag}(0,\tau^{2})\,\in\mathcal{S}_{\tau}^{2},
$$
where we assume that it satisfies the following sub-ellipticity condition
\begin{equation}\label{carl3}
\exists\,c>0,\;\forall\,(x,\xi)\in\overline{W}_{1}\times\mathbb{R}^{n},\;p_{k}(x,\xi,\tau)=0\,\Longrightarrow\,\left\{\mathrm{Re}(p_{k}),\mathrm{Im}(p_{k})\right\}(x,\xi,\tau)\geq c\langle \xi,\tau\rangle^3.
\end{equation}
We define on the boundary $\{x_{n}=0\}\cap W$ the operators
\begin{equation*}
\left\{\begin{array}{ll}
\mathrm{op}(b_{1})w=w_{1}-w_{2}&\text{on }\{x_{n}=0\}\cap W,
\\
\mathrm{op}(b_{2})w=\left(D_{x_{n}}+i\tau\partial_{x_{n}}\varphi_{1}\right)w_{1}+\left(D_{x_{n}}+i\tau\partial_{x_{n}}\varphi_{2}\right)w_{2}&\text{on }\{x_{n}=0\}\cap W.
\end{array}\right.
\end{equation*}
We denote by $\|v\|=\|v\|_{L^{2}(W_{2})}$, with the corresponding scalar product denoted by $(v_{1},v_{2})$. For $s\in\mathbb{R}$ we denote by $\|v\|_{s}^{2}=\|\mathrm{op}(\Lambda^{s})v\|^{2}$ and $|v|_{s}^{2}=\|v_{|x_{n}=0}\|_{s}^{2}$, so that when $s=0$ the norm $|v|_{0}$ with the scalar product $(v_{1},v_{2})_{0}=(v_{1|x_{n}=0},v_{2|x_{n}=0})$ will be denoted simply $|v|$. Finally, we denote by $|v|_{1,0,\tau}^{2}=|v|_{1}^{2}+|D_{n}v|^{2}$.
Before proving the Carleman estimate we recall the following result given by \cite[Theorem 2.3]{rousseau-robbiano}.
\begin{prop}
Let $\varphi$ satisfy \eqref{carl2}-\eqref{carl3}. Then there exist $C>0$ and $\tau_{0}>0$ such that for any $\tau\geq\tau_{0}$ we have the following estimates
\begin{equation}\label{carl6}
\tau^{3}\|w\|^{2}+\tau\|\nabla w\|^{2}\leq C\left(\|P(x,D,\tau)w\|^{2}+\tau|w|_{1,0,\tau}^{2}\right)
\end{equation}
and
\begin{equation}\label{carl7}
\tau^{3}\|w\|^{2}+\tau\|\nabla w\|^{2}+\tau|w|_{1,0,\tau}^{2}\leq C\left(\|P(x,D,\tau)w\|^{2}+\tau|\mathrm{op}(b_{1})w|_{1}^{2}+\tau|\mathrm{op}(b_{2})w|^{2}\right)
\end{equation}
for any $w\in\mathcal{C}_{0}^{\infty}(K)$, where $K\subset\overline{W}_{1}$ is a compact subset.
\end{prop}
Now we are ready to state our local Carleman estimate, whose main ingredients are estimates \eqref{carl6} and \eqref{carl7}. In fact, the Carleman estimate established here is an estimate analogous to the previous one but with another scale of Sobolev spaces.
\begin{thm}
Let $\varphi$ satisfy \eqref{carl2}-\eqref{carl3}. There exist $C>0$ and $\tau_{0}>0$ such that for any $\tau\geq\tau_{0}$ we have the following estimate
\begin{equation}\label{carl51}
\tau^{3}\|w\|^{2}+\tau\|\nabla w\|^{2}+\tau^{2}|w|_{\frac{1}{2}}^{2}+\tau^{2}|D_{x_{n}}w|_{-\frac{1}{2}}^{2}\leq C\left(\|P(x,D,\tau)w\|^{2}
+\tau^2 |\mathrm{op}(b_{1})w|_{\frac{1}{2}}^{2}+\tau|\mathrm{op}(b_{2})w|^{2}\right)
\end{equation}
for any $w\in\mathcal{C}_{0}^{\infty}(K)$, where $K\subset\overline{W}_{1}$ is a compact subset.
\end{thm}
\begin{proof}
We can write the operator $P(x,D,\tau)$ as follows
$$
P(x,D,\tau)=D_{x_{n}}^{2}+R+\tau c_{0}(x)D_{x_{n}}+\tau C_{1}(x)+\tau^{2} c_{0}'(x),
$$
where $c_{0},\,c_{0}'\in \mathcal{TO}^{0}$, $C_{1}\in\mathcal{TO}^{1}$ and $R\in \mathcal{TO}^{2}$ with $\displaystyle R=\sum_{j,k=1}^{n-1}a_{j,k}D_{x_{j}}D_{x_{k}}$. Let $v\in\mathcal{C}_{0}^{\infty}({W}_{1})$; then we have
\begin{equation}\label{carl8}
\begin{split}
\|(D_{x_{n}}^{2}+R)\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}&\leq C\Big(\|P\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}+\tau^{2}\|\mathrm{op}(\Lambda^{\frac{1}{2}})v\|^{2}
\\
&\quad +\tau^{2}\|D_{x_{n}}\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}+\tau^{4}\|\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}\Big).
\end{split}
\end{equation}
We can estimate the three last terms of the right hand side of \eqref{carl8} as follows
$$
\tau^{2}\|D_{x_{n}}\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}+\tau^{4}\|\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}\leq C(\tau\|D_{x_{n}}v\|^{2}+\tau^{3}\|v\|^{2}),
$$
and
\begin{equation}\label{carl28}
\tau^{2}\|\mathrm{op}(\Lambda^{\frac{1}{2}})v\|^{2}=\tau^{2}\left(\frac{1}{\sqrt{\tau}}\mathrm{op}(\Lambda)v,\sqrt{\tau}v\right)\leq C\left(\tau\|\mathrm{op}(\Lambda)v\|^{2}+\tau^{3}\|v\|^{2}\right)\leq C\tau\|\mathrm{op}(\Lambda)v\|^{2}.
\end{equation}
Then following \eqref{carl8} we obtain
\begin{equation}\label{carl9}
\|(D_{x_{n}}^{2}+R)\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}\leq C\left(
\|P\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}+\tau\|\mathrm{op}(\Lambda)v\|^{2}+\tau^{3}\|v\|^{2}
+\tau\| D_{x_{n}} v\|^{2}
\right).
\end{equation}
Combining \eqref{carl6} and \eqref{carl9} and using the fact that $\tau(\|\mathrm{op}(\Lambda)v\|^{2}+\|D_{x_{n}}v\|^{2})\sim\tau^{3}\|v\|^{2}+\tau\|\nabla v\|^{2}$, we obtain
\begin{equation}\label{carl10}
\|(D_{x_{n}}^{2}+R)\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}\leq C\left(\|P\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}+\|Pv\|^{2}+\tau|v|_{1,0,\tau}^{2}\right).
\end{equation}
We can write
\begin{equation}\label{carl11}
\begin{split}
P\mathrm{op}(\Lambda^{-\frac{1}{2}})v&=\mathrm{op}(\Lambda^{-\frac{1}{2}})Pv+[P,\mathrm{op}(\Lambda^{-\frac{1}{2}})]v=\mathrm{op}(\Lambda^{-\frac{1}{2}})Pv+[R,\mathrm{op}(\Lambda^{-\frac{1}{2}})]v
\\
&+\tau[c_{0}(x)D_{x_{n}},\mathrm{op}(\Lambda^{-\frac{1}{2}})]v+\tau[C_{1}(x),\mathrm{op}(\Lambda^{-\frac{1}{2}})]v+\tau^{2}[c_{0}'(x),\mathrm{op}(\Lambda^{-\frac{1}{2}})]v.
\end{split}
\end{equation}
Since $[R,\mathrm{op}(\Lambda^{-\frac{1}{2}})]\in\mathcal{TO}^{\frac{1}{2}}$, then according to \eqref{carl6} we have
\begin{equation}\label{carl12}
\left\|[R,\mathrm{op}(\Lambda^{-\frac{1}{2}})]v\right\|^{2}\leq C\|\mathrm{op}(\Lambda^{\frac{1}{2}})v\|^{2}\leq C\|\mathrm{op}(\Lambda)v\|^{2}\leq C\left(\|Pv\|^{2}+\tau|v|_{1,0,\tau}^{2}\right).
\end{equation}
Since $[c_{0}(x)D_{x_{n}},\mathrm{op}(\Lambda^{-\frac{1}{2}})]\in\mathcal{TO}^{-\frac{1}{2}}D_{x_{n}}$, then according to \eqref{carl6} we have
\begin{equation}\label{carl13}
\tau^{2}\left\|[c_{0}(x)D_{x_{n}},\mathrm{op}(\Lambda^{-\frac{1}{2}})]v\right\|^{2}\leq C\tau^{2}\|\mathrm{op}(\Lambda^{-\frac{1}{2}})D_{x_{n}}v\|^{2}\leq C\tau\|D_{x_{n}}v\|^{2}\leq C\left(\|Pv\|^{2}+\tau|v|_{1,0,\tau}^{2}\right).
\end{equation}
Since $[C_{1}(x),\mathrm{op}(\Lambda^{-\frac{1}{2}})]\in\mathcal{TO}^{-\frac{1}{2}}$, then according to \eqref{carl6} we have
\begin{equation}\label{carl14}
\tau^{2}\left\|[C_{1}(x),\mathrm{op}(\Lambda^{-\frac{1}{2}})]v\right\|^{2}\leq C\tau^{2}\|\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}\leq C\tau\|v\|^{2}\leq C\left(\|Pv\|^{2}+\tau|v|_{1,0,\tau}^{2}\right).
\end{equation}
Since $[c_{0}'(x),\mathrm{op}(\Lambda^{-\frac{1}{2}})]\in\mathcal{TO}^{-\frac{3}{2}}$, then according to \eqref{carl6} we have
\begin{equation}\label{carl15}
\tau^{4}\left\|[c_{0}'(x),\mathrm{op}(\Lambda^{-\frac{1}{2}})]v\right\|^{2}\leq C\tau^{4}\|\mathrm{op}(\Lambda^{-\frac{3}{2}})v\|^{2}\leq C\tau^{3}\|v\|^{2}\leq C\left(\|Pv\|^{2}+\tau|v|_{1,0,\tau}^{2}\right).
\end{equation}
From \eqref{carl11}-\eqref{carl15}, one gets
\begin{equation}\label{carl16}
\|P\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}\leq C\left(\|Pv\|^{2}+\tau|v|_{1,0,\tau}^{2}\right).
\end{equation}
Then the combination of \eqref{carl10} and \eqref{carl16} gives
\begin{equation}\label{carl17}
\|(D_{x_{n}}^{2}+R)\mathrm{op}(\Lambda^{-\frac{1}{2}})v\|^{2}\leq C\left(\|Pv\|^{2}+\tau|v|_{1,0,\tau}^{2}\right).
\end{equation}
On the other hand, integrating by parts we find
\betaegin{equation}gin{align}\label{carl18}
\|(D_{x_{n}}^{2}+R)\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}
&=\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2} \notag \\
&\quad +\|R\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}
\notag
+2\mathrm{Re}(D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v,R\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v) \notag \\
& =\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}+\|R\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}
\\
&\quad +2\mathrm{Re}\Big(i\betaig(D_{x_{n}}v,R\mathrm{op}(\Lambdaambda^{-1})v\betaig)_{0}
+i\betaig(D_{x_{n}}v,[\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}}),R]\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\betaig)_{0}\Big)
\notag \\
&\quad +2\mathrm{Re}\betaig(RD_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v,D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\betaig) \notag
\\
&\quad +2\mathrm{Re} \betaig(D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v,[D_{x_{n}},R]\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\betaig). \notag
\mathrm{e}nd{align}
Let $\chi_{0}\in\mathcal{C}_{0}^{\infty}(\omegaverline{\betal{R}_{+}^{n}})$ be a positive function such that $\chi_{0}\mathrm{e}quiv 1$ in the support of $v$ then by integration by parts and using the fact that $(1-\chi_{0})v\mathrm{e}quiv 0$ we obtain
\betaegin{equation}gin{align}\label{carl19}
\left\|\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v\rhoight\|^{2}& =(\mathrm{op}(\Lambdaambda^{2})\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v)
=\tauau^{2}\left\|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight\|^{2}+\sigmaum_{j=1}^{n-1}\left(D_{x_{j}}^{2}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)
\notag \\
&=\tauau^{2}\left\|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight\|^{2}
+\sigmaum_{j=1}^{n-1}\left(D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)
\notag \\
&=\tauau^{2}\left\|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight\|^{2}
+\sigmaum_{j=1}^{n-1}\left(\chi_{0} D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)
\\
&\quad +\sum_{j=1}^{n-1}\left([(1-\chi_{0}),D_{x_{j}}\mathrm{op}(\Lambda^{\frac{1}{2}})]v,D_{x_{j}}\mathrm{op}(\Lambda^{\frac{1}{2}})v\right). \notag
\mathrm{e}nd{align}
Since $[(1-\chi_{0}),D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})]\in\mathcal{TO}^{\varphirac{1}{2}}$ and $D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})\in\mathcal{TO}^{\varphirac{3}{2}}$ for $j=1,\ldots,n-1$, we show
\betaegin{equation}gin{equation}\label{carl20}
\left|\sigmaum_{j=1}^{n-1}\left([(1-\chi_{0}),D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})]v,D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)\rhoight|\leq C\|\mathrm{op}(\Lambdaambda)v\|^{2}.
\mathrm{e}nd{equation}
We recall that $\displaystyle\sigmaum_{j,k=1}^{n-1}\chi_{0} a_{j,k}D_{x_{j}}v\omegaverline{D_{x_{k}}v}\geq c\chi_{0}\sigmaum_{j=1}^{n-1}|D_{x_{j}}v|^{2}$, for some constant $c>0$ and using the fact that $[\chi_{0},a_{j,k}D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})]\in\mathcal{TO}^{\varphirac{1}{2}}$ and $D_{x_{k}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})\in\mathcal{TO}^{\varphirac{3}{2}}$, we obtain
\betaegin{equation}gin{align}\label{carl21}
\sum_{j=1}^{n-1}\left(\chi_{0} D_{x_{j}}\mathrm{op}(\Lambda^{\frac{1}{2}})v,D_{x_{j}}\mathrm{op}(\Lambda^{\frac{1}{2}})v\right)&\leq C\sum_{j,k=1}^{n-1}\left(\chi_{0} a_{j,k}D_{x_{j}}\mathrm{op}(\Lambda^{\frac{1}{2}})v,D_{x_{k}}\mathrm{op}(\Lambda^{\frac{1}{2}})v\right)
\\
&\leq C\sigmaum_{j,k=1}^{n-1}\left([\chi_{0},a_{j,k}D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})]v,D_{x_{k}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)\notag
\\
&+C\sigmaum_{j,k=1}^{n-1}\left(a_{j,k}D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,D_{x_{k}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)\notag
\\
&\leq C\sigmaum_{j,k=1}^{n-1}\left(a_{j,k}D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,D_{x_{k}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)+C\|\mathrm{op}(\Lambdaambda)v\|^{2}.\notag
\mathrm{e}nd{align}
Integrating by parts the first term of the right hand side of \mathrm{e}qref{carl21}, with
$ \displaystyle R=\sigmaum_{j,k=1}^{n-1}a_{j,k}D_{x_{j}}D_{x_{k}}$, one gets
\betaegin{equation}gin{align}\label{carl22}
\sigmaum_{j,k=1}^{n-1}\left(a_{j,k}D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,D_{x_{k}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)
&=(R\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v)
\\
&\quad +\sigmaum_{j,k=1}^{n-1}\left([D_{x_{k}},a_{j,k}]D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight). \notag
\mathrm{e}nd{align}
Since $[D_{x_{k}},a_{j,k}]D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})\in\mathcal{TO}^{\varphirac{3}{2}}$, then
\betaegin{equation}gin{equation}\label{carl23}
\left|\sigmaum_{j,k=1}^{n-1}\left([D_{x_{k}},a_{j,k}]D_{x_{j}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\rhoight)\rhoight|\leq C\|\mathrm{op}(\Lambdaambda)v\|^{2}.
\mathrm{e}nd{equation}
Since
\betaegin{equation}gin{equation}\label{carl24}
(R\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v)=(R\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v)+([\mathrm{op}(\Lambdaambda),R]\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v),
\mathrm{e}nd{equation}
and using the fact that $[\mathrm{op}(\Lambdaambda ),R]\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\in\mathcal{TO}^{\varphirac{3}{2}}$ and the Cauchy-Schwarz inequality, we obtain
\betaegin{equation}gin{equation}\label{carl25}
\left|(R\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v,\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v)\rhoight|\leq C\left(\mathrm{e}psilon\|\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v\|^{2}+\varphirac{1}{\mathrm{e}psilon}\|R\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}+\|\mathrm{op}(\Lambdaambda)v\|^{2}\rhoight).
\mathrm{e}nd{equation}
Combining \mathrm{e}qref{carl19}--\mathrm{e}qref{carl25}, we obtain for $\mathrm{e}psilon$ small enough
\betaegin{equation}gin{equation}\label{carl26}
\|R\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}\geq C\left(\|\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v\|^{2}-\tauau\|\mathrm{op}(\Lambdaambda)v\|^{2}\rhoight),
\mathrm{e}nd{equation}
where we have used again \mathrm{e}qref{carl28}. The same computation shows
\betaegin{equation}gin{equation}\label{carl27}
\mathrm{Re}\left(RD_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v,D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\rhoight)\geq C\left(\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\|^{2}-\tauau\|D_{x_{n}}v\|^{2}\rhoight).
\mathrm{e}nd{equation}
Since $[\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}}),R]\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\in\mathcal{TO}^{0}$ and $R\mathrm{op}(\Lambdaambda^{-1})\in\mathcal{TO}^{1}$, we have
\betaegin{equation}gin{equation}\label{carl29}
\left|\left(D_{x_{n}}v,R\mathrm{op}(\Lambdaambda^{-1})v\rhoight)_{0}\rhoight|+\left|\left(D_{x_{n}}v,[\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}}),R]\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\rhoight)_{0}\rhoight|\leq C\left(|D_{x_{n}}v|^{2}+|v|_1^{2}\rhoight)\leq C|v|_{1,0,\tauau}^{2},
\mathrm{e}nd{equation}
and
\betaegin{equation}gin{equation}\label{carl30}
\left|\left(D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v,[D_{x_{n}},R]\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\rhoight)\rhoight|\leq C\|v\|^{2}+\|\nabla v\|^{2}.
\mathrm{e}nd{equation}
Putting \mathrm{e}qref{carl17} and \mathrm{e}qref{carl26}--\mathrm{e}qref{carl30} into \mathrm{e}qref{carl18}, we find
\betaegin{equation}gin{multline}\label{carl31}
\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}+\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\|^{2}+\|\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v\|^{2}
\\
\leq C\left(\|Pv\|^{2}+\tauau^{3}\|v\|^{2}+\tauau\|\nabla v\|^{2}+\tauau|v|_{1,0,\tauau}^{2}\rhoight).
\mathrm{e}nd{multline}
In view of \eqref{carl7}, estimate \eqref{carl31} reduces to the following estimate
\betaegin{equation}gin{multline}\label{carl32}
\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}+\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\|^{2}+\|\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v\|^{2}+\tauau|v|_{1,0,\tauau}^{2}
\\
\leq C\left(\|Pv\|^{2}+\tauau|\mathrm{op}(b_{1})v|_{1}^{2}+\tauau|\mathrm{op}(b_{2})v|^{2}\rhoight).
\mathrm{e}nd{multline}
Let $\chi\in\mathcal{C}_{0}^{\infty}(\betal{R}^{n})$ such that $\chi\mathrm{e}quiv 1$ in the support of $w$. We set $v=\chi\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w$ and we write
\betaegin{equation}gin{equation}\label{carl33}
\betaegin{equation}gin{split}
Pv&=\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})Pw+[P,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w+P[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w
\\
&=\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})Pw+[P,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w+D_{x_{n}}^{2}[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w+R[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w
\\
&\quad +\tauau c_{0}(x)D_{x_{n}}[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w+\tauau C_{1}(x)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w+\tauau^{2}c_{0}'(x)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w.
\mathrm{e}nd{split}
\mathrm{e}nd{equation}
We have $[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{3}{2}}$, then
\betaegin{equation}gin{equation}\label{carl34}
\left\|D_{x_{n}}^{2}[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w\rhoight\|^{2}\leq C\left(\left\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{3}{2}})w\rhoight\|^{2}+\left\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{3}{2}})w\rhoight\|^{2}+\left\|\mathrm{op}(\Lambdaambda^{-\varphirac{3}{2}})w\rhoight\|^{2}\rhoight),
\mathrm{e}nd{equation}
and
\betaegin{equation}gin{equation}\label{carl35}
\tauau^{2}\left\|c_{0}(x)D_{x_{n}}[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w\rhoight\|^{2}\leq C\tauau^{2}\left(\left\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{3}{2}})w\rhoight\|^{2}+\left\|\mathrm{op}(\Lambdaambda^{-\varphirac{3}{2}})w\rhoight\|^{2}\rhoight) .
\mathrm{e}nd{equation}
Since $R[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{\varphirac{1}{2}}$, $C_{1}(x)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{1}{2}}$ and $c_{0}'(x)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{3}{2}}$, we obtain
\betaegin{equation}gin{equation}\label{carl36}
\left\|R[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w\rhoight\|^{2}+\tauau^{2}\left\|C_{1}(x)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w\rhoight\|^{2}+\tauau^{4}\left\|c_{0}'(x)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w\rhoight\|^{2}\leq C\left\|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w\rhoight\|^{2}.
\mathrm{e}nd{equation}
Since we can write
$$
[P,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]=[R,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]+\tauau[c_{0}(x)D_{x_{n}},\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]+\tauau[C_{1}(x),\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]+\tauau^{2}[c_{0}'(x),\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})],
$$
then by using \mathrm{e}qref{carl12}--\mathrm{e}qref{carl15}, we obtain
\betaegin{equation}gin{equation}\label{carl37}
\left\|[P,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w\rhoight\|^{2}\leq C\left(\left\|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w\rhoight\|^{2}+\tauau^{-1}\left\|D_{x_{n}}w\rhoight\|^{2}\rhoight).
\mathrm{e}nd{equation}
Inserting \mathrm{e}qref{carl34}-\mathrm{e}qref{carl37} into \mathrm{e}qref{carl33}, we find
\betaegin{equation}gin{equation}\label{carl38}
\|Pv\|^{2}\leq C\left(\tauau^{-1}\|Pw\|^{2}+\tauau^{-1}\|\mathrm{op}(\Lambdaambda)w\|^{2}+\tauau^{-1}\|D_{x_{n}}w\|^{2}+\tauau^{-1}\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-1})w\|^{2}\rhoight).
\mathrm{e}nd{equation}
We have
$$
\mathrm{op}(b_{1})v=\mathrm{op}(b_{1})\chi\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w=\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\mathrm{op}(b_{1})w+\mathrm{op}(b_{1})[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w+[\mathrm{op}(b_{1}),\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w.
$$
Since $\mathrm{op}(b_{1})\in\mathcal{TO}^{0}$ then $\mathrm{op}(b_{1})[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{3}{2}}$ and $[\mathrm{op}(b_{1}),\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{3}{2}}$ which gives
\betaegin{equation}gin{equation}\label{carl39}
\betaegin{equation}gin{split}
\tauau|\mathrm{op}(b_{1})v|_{1}^{2}=\tauau|\mathrm{op}(\Lambdaambda)\mathrm{op}(b_{1})v|^{2}&\leq C\left(\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})\mathrm{op}(b_{1})w|^{2}+|\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}\rhoight)
\\
&\leq C\left(\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})\mathrm{op}(b_{1})w|^{2}+\tauau^{-2}|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}\rhoight).
\mathrm{e}nd{split}
\mathrm{e}nd{equation}
We have
$$
\mathrm{op}(b_{2})v=\mathrm{op}(b_{2})\chi\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w=\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\mathrm{op}(b_{2})w+\mathrm{op}(b_{1})[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w+[\mathrm{op}(b_{2}),\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w.
$$
Since $\mathrm{op}(b_{2})\in D_{x_{n}}+\mathcal{TO}^{1}$ then it is clear that $\mathrm{op}(b_{2})[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{3}{2}}D_{x_{n}}+\mathcal{TO}^{-\varphirac{1}{2}}$ and $[\mathrm{op}(b_{2}),\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{3}{2}}D_{x_{n}}+\mathcal{TO}^{-\varphirac{1}{2}}$ hence
\betaegin{equation}gin{equation}\label{carl40}
\betaegin{equation}gin{split}
\tauau|\mathrm{op}(b_{2})v|^{2}&\leq C\tauau\left(|\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\mathrm{op}(b_{2})w|^{2}+|\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}+|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{3}{2}})w|^{2}\rhoight)
\\
&\leq C\left(\tauau|\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\mathrm{op}(b_{2})w|^{2}+\tauau^{-1}|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}+\tauau^{-1}|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}\rhoight).
\mathrm{e}nd{split}
\mathrm{e}nd{equation}
Moreover, we can write
$$
\mathrm{op}(\Lambdaambda)v=\mathrm{op}(\Lambdaambda)\chi\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w=\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w+\mathrm{op}(\Lambdaambda)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w,
$$
since $\mathrm{op}(\Lambdaambda)[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{-\varphirac{1}{2}}$ then we get
$$
\tauau|\mathrm{op}(\Lambdaambda)v|^{2}\geq\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}-C\tauau|\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}\geq\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}-C\tauau^{-1}|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2},
$$
and for $\tauau$ large enough we obtain
\betaegin{equation}gin{equation}\label{carl41}
\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}\leq C\tauau|\mathrm{op}(\Lambdaambda)v|^{2}.
\mathrm{e}nd{equation}
By using \mathrm{e}qref{carl41} similarly we can prove that for $\tauau$ large enough we have
\betaegin{equation}gin{equation}\label{carl42}
\tauau|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}\leq C\tauau|D_{x_{n}}v|^{2}+ C\tauau |v |_1^{2}.
\mathrm{e}nd{equation}
Recalling that
$$
\tauau|v|_{1,0,\tauau}^{2}=\tauau|v|_{1}^{2}+\tauau|D_{n}v|^{2}=\tauau|\mathrm{op}(\Lambdaambda)v|^{2}+\tauau|D_{n}v|^{2},
$$
and combining \mathrm{e}qref{carl41} and \mathrm{e}qref{carl42}, we obtain
\betaegin{equation}gin{equation}\label{carl43}
\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}+\tauau|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}\leq C\tauau|v|_{1,0,\tauau}^{2}.
\mathrm{e}nd{equation}
Since we have
$$
\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v=\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})\chi\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w=\mathrm{op}(\Lambdaambda)w+\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]w
$$
where $\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})[\chi,\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})]\in\mathcal{TO}^{0}$ we obtain
\betaegin{equation}gin{equation}\label{carl44}
\|\mathrm{op}(\Lambdaambda)w\|^{2}-C\|w\|^{2}\leq\|\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v\|^{2}.
\mathrm{e}nd{equation}
Similarly we can prove also that
\betaegin{equation}gin{equation}\label{carl45}
\|D_{x_{n}}w\|^{2}-C\left(\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-1})w\|^{2}+\|\mathrm{op}(\Lambdaambda^{-1})w\|^{2}\rhoight)\leq \|D_{x_{n}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\|^{2},
\mathrm{e}nd{equation}
and
\betaegin{equation}gin{multline}\label{carl46}
\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-1})w\|^{2}-C\betaig(\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-2})w\|^{2}+\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-2})w\|^{2}
+\|\mathrm{op}(\Lambdaambda^{-2})w\|^{2}\betaig)
\\ \leq \|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}.
\mathrm{e}nd{multline}
Combining \mathrm{e}qref{carl44}--\mathrm{e}qref{carl46} we find
\betaegin{equation}gin{multline}\label{carl47}
\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-1})w\|^{2}+\|D_{x_{n}}w\|^{2}+\|\mathrm{op}(\Lambdaambda)w\|^{2}
\\
\leq\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})v\|^{2}
+\|D_{x_{n}}\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})v\|^{2}+\|\mathrm{op}(\Lambdaambda^{\varphirac{3}{2}})v\|^{2}.
\mathrm{e}nd{multline}
Inserting \mathrm{e}qref{carl38}--\mathrm{e}qref{carl40}, \mathrm{e}qref{carl43} and \mathrm{e}qref{carl47} into \mathrm{e}qref{carl32}, we obtain
\betaegin{equation}gin{align*}
&\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-1})w\|^{2}+\|D_{x_{n}}w\|^{2}+\|\mathrm{op}(\Lambdaambda)w\|^{2}
+\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}+\tauau|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}
\\
&\leq C\Big(\tauau^{-1}\|Pw\|^{2}+\tauau^{-1}\|\mathrm{op}(\Lambdaambda)w\|^{2}+\tauau^{-1}\|D_{x_{n}}w\|^{2}+\tauau^{-1}\|D_{x_{n}}^{2}\mathrm{op}(\Lambdaambda^{-1})w\|^{2}+\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})\mathrm{op}(b_{1})w|^{2}
\\
&\qquad \quad +\tauau^{-2}|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}+\tauau|\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\mathrm{op}(b_{2})w|^{2}+\tauau^{-1}|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}+\tauau^{-1}|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}\Big).
\mathrm{e}nd{align*}
For $\tau$ large enough, we obtain
\betaegin{equation}gin{multline*}
\|D_{x_{n}}w\|^{2}+\|\mathrm{op}(\Lambdaambda)w\|^{2}+\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})w|^{2}+\tauau|D_{x_{n}}\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})w|^{2}
\\
\leq C\left(\tauau^{-1}\|Pw\|^{2}+\tauau|\mathrm{op}(\Lambdaambda^{\varphirac{1}{2}})\mathrm{op}(b_{1})w|^{2}+\tauau|\mathrm{op}(\Lambdaambda^{-\varphirac{1}{2}})\mathrm{op}(b_{2})w|^{2}\rhoight),
\mathrm{e}nd{multline*}
which leads to the Carleman estimate. This ends the proof.
\mathrm{e}nd{proof}
For $u=(u_{1},u_{2})\in H^{1}(\mathcal{U}_{1})\tauimes H^{1}(\mathcal{U}_{2})$ we define the tangential operators $\mathrm{op} (B_{1})$ and $\mathrm{op}(B_{2})$ by
\betaegin{equation}gin{equation}\label{carl1}
\mathrm{op}(B_{1})u=u_{1|\gamma_{0}}-u_{2|\gamma_{0}}\qquad\tauext{and}\qquad\mathrm{op}(B_{2})u=\partial_{\nu}u_{1|\gamma_{0}}-\partial_{\nu}u_{2|\gamma_{0}}.
\mathrm{e}nd{equation}
We note that $\mathrm{op}(B_{1})$ measures the continuity of the displacement of $u$ through the interface $\gamma_{0}$, while $\mathrm{op}(B_{2})$ describes the jump of the flux across $\gamma_{0}$ between the two sides of the interface.
\betaegin{equation}gin{cor}
Let $\varphi$ satisfy \eqref{carl2}--\eqref{carl3}. There exist $C>0$ and $\tau_{0}>0$ such that for any $\tau\geq\tau_{0}$ we have the following estimate
\begin{equation}\label{carl50}
\tau^{3}\|\mathrm{e}^{\tau\varphi}u\|^{2}+\tau\|\mathrm{e}^{\tau\varphi}\nabla u\|^{2}\leq C\left(\|\mathrm{e}^{\tau\varphi}P(x,D)u\|^{2}
+\tau^2 |\mathrm{e}^{\tau\varphi}\mathrm{op}(B_{1})u|_{\frac{1}{2}}^{2}+\tau|\mathrm{e}^{\tau\varphi}\mathrm{op}(B_{2})u|^{2}\right)
\end{equation}
for any $u\in\mathcal{C}_{0}^{\infty}(K)$ where $K\sigmaubset\omegaverline{W}_{1}$ is a compact subset.
\mathrm{e}nd{cor}
\betaegin{equation}gin{proof}
Let $w=\mathrm{e}^{\tau\varphi}u$. We recall that $P(x,D,\tau)w=\mathrm{e}^{\tau\varphi}P(x,D)u$, $\mathrm{op}(b_{1})w=\mathrm{e}^{\tau\varphi_{1}}\,\mathrm{op}(B_{1})u$ and $\mathrm{op}(b_{2})w=\mathrm{e}^{\tau\varphi_{1}}\,\mathrm{op}(B_{2})u$. Then, using the fact that $\varphi_{1}$ and $\varphi_{2}$ have the same trace on $\gamma_{0}$ together with estimate \eqref{carl51}, we obtain \eqref{carl50}.
\mathrm{e}nd{proof}
Now we can state the global Carleman estimate in $\mathcal{U}_{1}$ and $\mathcal{U}_{2}$ (defined in the beginning of this section page \pageref{carl53}) which is given by the following theorem.
\betaegin{equation}gin{thm}\label{carl5}
Assume that $\varphi$ satisfies
\betaegin{equation}gin{eqnarray}
|\nabla\varphi_{k}(x)|> 0,\;\varphiorall\,x\in\omegaverline{\mathcal{U}}_{k},\quad k=1,2,\label{carl48}
\\
\partial_{\nu}\varphi_{|\gamma}(x)< 0,\label{carl52}
\\
\partial_{\nu}\varphi_{k|\gamma_{0}}(x)>0,\quad k=1,2,
\\
\left(\partial_{\nu}\varphi_{1|\gamma_{0}}(x)\rhoight)^{2}-\left(\partial_{\nu}\varphi_{2|\gamma_{0}}(x)\rhoight)^{2}>1,
\mathrm{e}nd{eqnarray}
and the sub-ellipticity condition
\betaegin{equation}gin{equation}\label{carl49}
\mathrm{e}xists\,c>0,\;\varphiorall\,(x,\xi)\in\omegaverline{\mathcal{U}}_{k}\tauimes\betal{R}^{n},\;p_{k}(x,\xi)=0\,\Lambdaongrightarrow\,\left\{\mathrm{Re}(p_{k}),\mathrm{Im}(p_{k})\rhoight\}(x,\xi,\tauau)\geq c\langle\xi,\tauau\rhoangle^{3}.
\mathrm{e}nd{equation}
Then there exist $C>0$ and $\tauau_{0}>0$ such that we have the following estimate
\betaegin{equation}gin{align}\label{carl4}
&\tauau^{3}\|\mathrm{e}^{\tauau\varphi}u\|_{L^{2}(\mathcal{U})}^{2}+\tauau\|\mathrm{e}^{\tauau\varphi}\nabla u\|_{L^{2}(\mathcal{U})}^{2}
\\
&\qquad \leq C\Big(\|\mathrm{e}^{\tauau\varphi}Pu\|_{L^{2}(\mathcal{U})}^{2}+\tauau^{2}\|\mathrm{e}^{\tauau\varphi}\mathrm{op}(B_{1})u\|_{H^{\varphirac{1}{2}}(\gamma_{0})}^{2}
+\tauau\|\mathrm{e}^{\tauau\varphi}\mathrm{op}(B_{2})u\|_{L^{2}(\gamma_{0})}^{2}\Big) \notag
\mathrm{e}nd{align}
for all $\tauau\geq\tauau_{0}$ and $u=(u_{1},u_{2})\in H^{2}(\mathcal{U}_{1})\tauimes H^{2}(\mathcal{U}_{2})$ such that $u_{2|\gamma}=0$.
\mathrm{e}nd{thm}
Actually, weight functions satisfying assumptions \eqref{carl48}--\eqref{carl49} cannot exist. However, since the proof of the theorem is local, we can dispense with conditions \eqref{carl48} and \eqref{carl52} in a region where the data are assumed to vanish around the critical points of the weight functions and where the damping is active. The missing information is then recovered by means of new data which vanish far away from where the first ones do (see the next section).
\sigmaection{Stabilization result}\label{stab}
In this section, we will prove the logarithmic stability of the system \eqref{wave1}. To this end, we establish a particular resolvent estimate; more precisely, we will show that for some constant $C>0$ we have
\betaegin{equation}gin{equation}\label{Swave24}
\|(\mathcal{A}-i\mu\,I)^{-1}\|_{\mathcal{L}(\mathcal{H})}\leq C\mathrm{e}^{C|\mu|},\qquad \varphiorall\,|\mu|\gg 1,
\mathrm{e}nd{equation}
and then by Burq's result \cite{burq} and the remark of Duyckaerts \cite[section 7]{Duyckaerts} (see also \cite{batty,borichevtomilov}) we obtain the expected decay rate of the energy.
Let $\mu$ be a real number such that $|\mu|$ is large, and assume that
\betaegin{equation}gin{equation}\label{Swave1}
(\mathcal{A}-i\mu\,I)(u,v)^{t}=(f,g)^{t},\quad (u,v)\in\mathcal{D}(\mathcal{A}),\quad (f,g)\in\mathcal{H}.
\mathrm{e}nd{equation}
which can be written as follows
\betaegin{equation}gin{equation*}
\left\{\betaegin{equation}gin{array}{ll}
v-i\mu u=f&\tauext{in }\Omegamega
\\
\Deltaelta u+\mathrm{div}(a(x)\nabla v)-i\mu v=g&\tauext{in }\Omegamega,
\mathrm{e}nd{array}\rhoight.
\mathrm{e}nd{equation*}
or equivalently,
\betaegin{equation}gin{equation}\label{Swave2}
\left\{\betaegin{equation}gin{array}{ll}
v=f+i\mu u&\tauext{in }\Omegamega
\\
\Deltaelta u+i\mu\mathrm{div}(a(x)\nabla u)+\mu^{2}u=g+i\mu f-\mathrm{div}(a(x)\nabla f)&\tauext{in }\Omegamega.
\mathrm{e}nd{array}\rhoight.
\mathrm{e}nd{equation}
Multiplying the second line of \mathrm{e}qref{Swave2} by $\omegaverline{u}$ and integrating over $\Omegamega$ then by Green's formula we obtain
\betaegin{equation}gin{equation}\label{Swave3}
\int_{\Omegamega}(g-i\mu f)\omegaverline{u}\,\mathrm{d} x+d\int_{\omegamega}\nabla f.\nabla\omegaverline{u}\,\mathrm{d} x=\mu^{2}\int_{\Omegamega}|u|^{2}\,\mathrm{d} x-\int_{\Omegamega}|\nabla u|^{2}\,\mathrm{d} x-id\mu\int_{\omegamega}|\nabla u|^{2}\,\mathrm{d} x.
\mathrm{e}nd{equation}
Taking the imaginary part of \mathrm{e}qref{Swave3} and using the Cauchy-Schwarz inequality and Poincar\'e inequality we find
\betaegin{equation}gin{equation}\label{Swave4}
d|\mu|\int_{\omega}|\nabla u|^{2}\,\mathrm{d} x\leq C\left(\mu^{2}\int_{\Omega}|\nabla f|^{2}\,\mathrm{d} x+\int_{\Omega}|g|^{2}\,\mathrm{d} x\right).
\mathrm{e}nd{equation}
By setting $u=u_{1}\,\mathbb{1}_{\omegamega}+ u_{2}\, \mathbb{1}_{\Omegamega\sigmaetminus\betaar{\omegamega}}$, $v
=v_{1}\,\mathbb{1}_{\omegamega}+v_{2}\,\mathbb{1}_{\Omegamega\sigmaetminus\betaar{\omegamega}}$,
$f=f_{1}\,\mathbb{1}_{\omegamega}+f_{2}\, \mathbb{1}_{\Omegamega \sigmaetminus \betaar{\omegamega}}$ and
$g=g_{1}\,\mathbb{1}_{\omega}+g_{2}\,\mathbb{1}_{\Omega\setminus\bar{\omega}}$, system
\eqref{Swave2} is transformed into the following transmission problem
\betaegin{equation}gin{equation}\label{Swave5}
\left\{\betaegin{equation}gin{array}{ll}
v_{1}=i\mu u_{1}+f_{1}&\tauext{in }\omegamega
\\
v_{2}=i\mu u_{2}+f_{2}&\tauext{in }\Omegamega\betaackslash\omegaverline{\omegamega}
\\
\Deltaelta((1+id\mu) u_{1}+df_{1})+\mu^{2} u_{1}=g_{1}+i\mu f_{1}&\tauext{in }\omegamega
\\
\Deltaelta u_{2}+\mu^{2}u_{2}=g_{2}+i\mu f_{2}&\tauext{in }\Omegamega\betaackslash\omegaverline{\omegamega},
\mathrm{e}nd{array}\rhoight.
\mathrm{e}nd{equation}
with the transmission conditions
\betaegin{equation}gin{equation}\label{Swave6}
\left\{\betaegin{equation}gin{array}{ll}
u_{1}=u_{2}&\tauext{on }\mathcal{I}
\\
\partial_{\nu}((1+id\mu)u_{1}+df_{1})=\partial_{\nu}u_{2}&\tauext{on }\mathcal{I},
\mathrm{e}nd{array}\rhoight.
\mathrm{e}nd{equation}
and the boundary condition
\betaegin{equation}gin{equation}\label{Swave7}
u_{2}=0\quad\tauext{on }\Gammaamma,
\mathrm{e}nd{equation}
where $\nu(x)$ denotes the outer unit normal to $\Omega\setminus\omega$ on $\Gamma$ and on $\mathcal{I}$ (see Figure~\ref{fig1}).
To prove Theorem~\ref{LogStab} we need the following technical lemma.
\betaegin{equation}gin{lem}
\label{lem: Swave8}
Let $\mathcal{O}$ be a bounded open set of $\betal{R}^{n}$. Then there exist $C>0$ and $\mu_{0}>0$, such that for any $w$ and $F$ satisfying
$$
\Deltaelta w+\varphirac{\mu^{2}}{1+id\mu}w=F\quad\tauext{in }\mathcal{O}
$$
and for all $|\mu|>\mu_{0}$ we have the following estimate
\betaegin{equation}gin{equation}\label{Swave8}
\|w\|_{H^{1}}^{2}\leq C\left(\|\nabla w\|_{L^{2}(\mathcal{O})}^{2}+\|F\|_{L^{2}(\mathcal{O})}^{2}\rhoight).
\mathrm{e}nd{equation}
\mathrm{e}nd{lem}
\betaegin{equation}gin{proof}
We distinguish two cases.
\underline{Inside $\mathcal{O}$}: Let $\chi\in\mathcal{C}_{0}^{\infty}(\mathcal{O})$, we have by integration by parts
$$
\int_{\mathcal{O}}\left(\Deltaelta w+\varphirac{\mu^{2}}{1+id\mu}w\rhoight).\chi^{2}\omegaverline{w}\,\mathrm{d} x=\varphirac{\mu^{2}}{1+id\mu}\|\chi w\|_{L^{2}(\mathcal{O})}^{2}-\int_{\mathcal{O}}|\chi \nabla w|^{2}\,\mathrm{d} x-2\int_{\mathcal{O}}\nabla\chi.\nabla w\chi\omegaverline{w}\,\mathrm{d} x.
$$
Then we obtain
$$
\varphirac{\mu^{2}}{\sigmaqrt{1+d^{2}\mu^{2}}}\|\chi w\|_{L^{2}(\mathcal{O})}^{2}\leq C\left(\|F\|_{L^{2}(\mathcal{O})}.\|\chi^{2}w\|_{L^{2}(\mathcal{O})}+\|\nabla w\|_{L^{2}(\mathcal{O})}^{2}+\|\nabla w\|_{L^{2}(\mathcal{O})}.\|\chi w\|_{L^{2}(\mathcal{O})}\rhoight).
$$
Using Cauchy-Schwarz inequality and for $|\mu|$ large enough, one gets
\betaegin{equation}gin{equation}\label{Swave9}
\|\chi w\|_{L^{2}(\mathcal{O})}^{2}\leq C\left(\|\nabla w\|_{L^{2}(\mathcal{O})}^{2}+\|F\|_{L^{2}(\mathcal{O})}^{2}\rhoight).
\mathrm{e}nd{equation}
This gives the result in the interior of $\mathcal{O}$.
\underline{In a neighborhood of the boundary}: Let $x=(x',x_{n})\in\mathbb{R}^{n-1}\times\mathbb{R}$; in local coordinates we may assume that
$$
\partial\mathcal{O}=\{x\in\betal{R}^{n},\; x_{n}=0\}.
$$
Let $\varepsilon>0$ such that $0<x_{n}<\varepsilon$. Then we have
$$
w(x',\varepsilon)-w(x',x_{n})=\int_{x_{n}}^{\varepsilon}\partial_{x_{n}}w(x',t)\,\mathrm{d} t.
$$
It follows
$$
|w(x',x_{n})|^{2}\leq 2|w(x',\varepsilon)|^{2}+2\left(\int_{x_{n}}^{\varepsilon}|\partial_{x_{n}}w(x',t)|\,\mathrm{d} t\rhoight)^{2}.
$$
Using the Cauchy-Schwarz inequality, we obtain
$$
|w(x',x_{n})|^{2}\leq 2|w(x',\varepsilon)|^{2}+2\varepsilon \int_{x_{n}}^{\varepsilon}|\partial_{x_{n}}w(x',t)|^{2}\,\mathrm{d} t.
$$
Integrating with respect to $x'$, we obtain
\begin{equation}\label{Swave10}
\int_{|x'|<\varepsilon}|w(x',x_{n})|^{2}\,\mathrm{d} x'\leq 2\int_{|x'|<\varepsilon}|w(x',\varepsilon)|^{2}\,\mathrm{d} x'+2\varepsilon\int_{|x'|<\varepsilon}\int_{0}^{\varepsilon}|\partial_{x_{n}}w(x',t)|^{2}\,\mathrm{d} t\,\mathrm{d} x'.
\end{equation}
Using the trace theorem, we have
\betaegin{equation}gin{equation}\label{Swave11}
\int_{|x'|<\varepsilon}|w(x',\varepsilon)|^{2}\,\mathrm{d} x'\leq C\int_{|x'|<2\varepsilon,|x_{n}-\varepsilon|<\varphirac{\varepsilon}{2}}\left(|w(x)|^{2}+|\nabla w(x)|^{2}\rhoight)\,\mathrm{d} x.
\mathrm{e}nd{equation}
We introduce the following cut-off functions
$$
\chi_{1}(x)=\left\{\betaegin{equation}gin{array}{lll}
1&\tauext{if}&\displaystyle 0<x_{n}<\varphirac{\varepsilon}{2}
\\
0&\tauext{if}&\displaystyle x_{n}>\varepsilon,
\mathrm{e}nd{array}\rhoight.
$$
and
$$
\chi_{2}(x)=\left\{\betaegin{equation}gin{array}{lll}
1&\tauext{if}&\displaystyle\varphirac{\varepsilon}{2}<x_{n}<\varphirac{3\varepsilon}{2}
\\
0&\tauext{if}&\displaystyle x_{n}<\varphirac{\varepsilon}{4},\quad x_{n}>2\varepsilon.
\mathrm{e}nd{array}\rhoight.
$$
Combining \eqref{Swave10} and \eqref{Swave11}, we obtain for $\varepsilon$ small enough
\begin{equation}\label{Swave12}
\|\chi_{1} w\|^{2}\leq C\left(\|\chi_{2}w\|^{2}+\|\nabla w\|^{2}\right).
\end{equation}
From \eqref{Swave9}, we have
\begin{equation}\label{Swave13}
\|\chi_{2} w\|^{2}\leq C\left(\|\nabla w\|^{2}+\|F\|^{2}\right).
\end{equation}
Inserting \eqref{Swave13} into \eqref{Swave12} we find
\begin{equation}\label{Swave14}
\|\chi_{1} w\|^{2}\leq C\left(\|\nabla w\|^{2}+\|F\|^{2}\right),
\end{equation}
hence the result in a neighborhood of the boundary.
Similarly, by \eqref{Swave9}, we can write
\begin{equation}\label{Swave15}
\|(1-\chi_{1})w\|^{2}\leq C\left(\|\nabla w\|^{2}+\|F\|^{2}\right).
\end{equation}
Adding \eqref{Swave14} and \eqref{Swave15} we obtain \eqref{Swave8}.
\end{proof}
Now we can prove Theorem \ref{LogStab}. We set $w_{1}=(1+id\mu)u_{1}+df_{1}$ and $w_{2}=u_{2}$; then the system \eqref{Swave5}--\eqref{Swave7} can be recast as follows
\begin{equation}\label{Swave16}
\left\{\begin{array}{ll}
\displaystyle\Delta w_{1}+\frac{\mu^{2}}{1+id\mu}w_{1}=\Phi_{1}&\text{in }\omega
\\
\displaystyle\Delta w_{2}+\mu^{2}w_{2}=\Phi_{2}&\text{in }\Omega\setminus\omega,
\end{array}\right.
\end{equation}
with the transmission conditions
\begin{equation}\label{Swave17}
\left\{\begin{array}{ll}
w_{1}=w_{2}+\phi&\text{on }\mathcal{I}
\\
\partial_{\nu}w_{1}=\partial_{\nu}w_{2}&\text{on }\mathcal{I},
\end{array}\right.
\end{equation}
and the boundary condition
\begin{equation}\label{Swave18}
\begin{array}{ll}
w_{2}=0&\text{on }\Gamma,
\end{array}
\end{equation}
where we have denoted by $\displaystyle\Phi_{1}=g_{1}+\frac{i\mu}{1+id\mu}f_{1}$, $\displaystyle\Phi_{2}=g_{2}+i\mu f_{2}$ and $\displaystyle\phi=df_{1}+id\mu u_{1}$.
We denote by $B_{r}$ a ball of radius $r>0$ in $\omega$ and by $B_{r}^{c}$ its complement, such that $B_{4r}\subset\omega$. Let us introduce the cut-off function $\chi\in\mathcal{C}^{\infty}(\omega)$ given by
$$
\chi(x)=\left\{\begin{array}{ll}
1&\text{in } B_{3r}^{c}
\\
0&\text{in } B_{2r}.
\end{array}\right.
$$
Next, we denote by $\widetilde{w}_{1}=\chi w_{1}$; then from the first line of \eqref{Swave16}, one sees that
\begin{equation}\label{Swave19}
\begin{array}{ll}
\displaystyle\Delta\widetilde{w}_{1}+\frac{\mu^{2}}{1+id\mu}\widetilde{w}_{1}=\widetilde{\Phi}_{1}&\text{in }\omega,
\end{array}
\end{equation}
where $\widetilde{\Phi}_{1}=\chi\Phi_{1}-[\Delta,\chi]w_{1}$. We denote by $\Omega_{1}=\omega\setminus\overline{B}_{r}$ and $\Omega_{2}=\Omega\setminus\overline{\omega}$.
According to \cite{burq}, \cite{hassine2} or \cite{hassine3} we can find four weight functions $\varphi_{1,1}$, $\varphi_{1,2}$, $\varphi_{2,1}$ and $\varphi_{2,2}$, and a finite number of points $x_{j,k}^{i}$ with $\overline{B(x_{j,k}^{i},2\epsilon)}\subset\Omega_{j}$ for all $j,k=1,2$ and $i=1,\ldots,N_{j,k}$ such that, denoting $U_{j,k}=\displaystyle\Omega_{k}\bigcap\left(\bigcup_{i=1}^{N_{j,k}}\overline{B(x_{j,k}^{i},\epsilon)}\right)^{c}$, the weight function $\varphi_{k}=\mathrm{diag}(\varphi_{1,k},\varphi_{2,k})$ satisfies the assumptions \eqref{carl48}-\eqref{carl49} in $U_{1,k}\cup U_{2,k}$ with $\gamma_{1}=\partial B_{r}$, $\gamma_{2}=\Gamma$ and $\gamma=\mathcal{I}$. Moreover, $\varphi_{j,k}<\varphi_{j,k+1}$ in $\displaystyle\bigcup_{i=1}^{N_{j,k}}B(x_{j,k}^{i},2\epsilon)$ for all $j,k=1,2$, where we denoted $\varphi_{j,3}=\varphi_{j,1}$.
Let $\chi_{j,k}$ (for $j,k=1,2$) be four cut-off functions equal to $1$ in $\displaystyle\left(\bigcup_{i=1}^{N_{j,k}}B(x_{j,k}^{i},2\epsilon)\right)^{c}$ and supported in $\displaystyle\left(\bigcup_{i=1}^{N_{j,k}}B(x_{j,k}^{i},\epsilon)\right)^{c}$ (in order to eliminate the critical points of the weight functions $\varphi_{j,k}$). We set $w_{1,1}=\chi_{1,1}\widetilde{w}_{1}$, $w_{1,2}=\chi_{1,2}\widetilde{w}_{1}$, $w_{2,1}=\chi_{2,1}w_{2}$ and $w_{2,2}=\chi_{2,2}w_{2}$. Then from system \eqref{Swave17} and equations \eqref{Swave7} and \eqref{Swave19}, for $k=1,2$ we obtain
\begin{equation}\label{Swave20}
\left\{\begin{array}{ll}
\displaystyle\Delta w_{1,k}+\frac{\mu^{2}}{1+id\mu}w_{1,k}=\Psi_{1,k}&\text{in }\omega
\\
\Delta w_{2,k}+\mu^{2}w_{2,k}=\Psi_{2,k}&\text{in }\Omega\setminus\omega
\\
w_{1,k}=w_{2,k}+\phi&\text{on }\mathcal{I}
\\
\partial_{\nu}w_{1,k}= \partial_{\nu} w_{2,k}&\text{on }\mathcal{I}
\\
w_{2,k}=0&\text{on }\Gamma,
\end{array}\right.
\end{equation}
where
\begin{equation}\label{Swave21}
\left\{\begin{array}{l}
\Psi_{1,k}=\chi_{1,k}\widetilde{\Phi}_{1}-[\Delta,\chi_{1,k}]\widetilde{w}_{1}
\\
\Psi_{2,k}=\chi_{2,k}\Phi_{2}-[\Delta,\chi_{2,k}]w_{2}.
\end{array}\right.
\end{equation}
Applying now the Carleman estimate \eqref{carl4} to the system \eqref{Swave20} with $\tau=|\mu|$, for $k=1,2$ we have
\begin{multline*}
\tau^{3}\sum_{j=1,2}\|\mathrm{e}^{\tau\varphi_{j,k}}w_{j,k}\|_{L^{2}(U_{j,k})}^{2}
+\tau\sum_{j=1,2}\|\mathrm{e}^{\tau\varphi_{j,k}}\nabla w_{j,k}\|_{L^{2}(U_{j,k})}^{2}
\\
\leq C\Big(\|\mathrm{e}^{\tau\varphi_{1,k}}\Psi_{1,k}\|_{L^{2}(U_{1,k})}^{2}
+\|\mathrm{e}^{\tau\varphi_{2,k}}\Psi_{2,k}\|_{L^{2}(U_{2,k})}^{2}
+\tau^{2}\|\mathrm{e}^{\tau\varphi_{1,k}}\phi\|_{H^{\frac{1}{2}}(\mathcal{I})}^{2}\Big).
\end{multline*}
We recall the expressions of $\Psi_{1,k}$ and $\Psi_{2,k}$ in \eqref{Swave21}; then we can write
\begin{multline*}
\tau^{3}\sum_{j=1,2}\|\mathrm{e}^{\tau\varphi_{j,k}}w_{j,k}\|_{L^{2}(U_{j,k})}^{2}
+\tau\sum_{j=1,2}\|\mathrm{e}^{\tau\varphi_{j,k}}\nabla w_{j,k}\|_{L^{2}(U_{j,k})}^{2}
\\
\leq C\Big(\|\mathrm{e}^{\tau\varphi_{1,k}}\Phi_{1}\|_{L^{2}(U_{1,k})}^{2}+\|\mathrm{e}^{\tau\varphi_{2,k}}\Phi_{2}\|_{L^{2}(U_{2,k})}^{2}
+\|\mathrm{e}^{\tau\varphi_{1,k}}[\Delta,\chi_{1,k}]\widetilde{w}_{1}\|_{L^{2}(U_{1,k})}^{2} \\
+\|\mathrm{e}^{\tau\varphi_{1,k}}[\Delta,\chi]w_{1}\|_{L^{2}(U_{1,k})}^{2}
+\|\mathrm{e}^{\tau\varphi_{2,k}}[\Delta,\chi_{2,k}]w_{2,k}\|_{L^{2}(U_{2,k})}^{2}+\tau^{2}\|\mathrm{e}^{\tau\varphi_{1,k}}\phi\|_{H^{\frac{1}{2}}(\mathcal{I})}^{2}\Big).
\end{multline*}
Adding the two last estimates and using the property of the weight functions $\varphi_{j,1}<\varphi_{j,2}$ in
$\displaystyle\bigcup_{i=1}^{N_{j,1}}B(x_{j,1}^{i},2\epsilon)$ and $\varphi_{j,2}<\varphi_{j,1}$ in $\displaystyle\bigcup_{i=1}^{N_{j,2}}B(x_{j,2}^{i},2\epsilon)$
for all $j=1,2$, we can absorb the first-order terms $[\Delta,\chi_{1,k}]\widetilde{w}_{1}$ and $[\Delta,\chi_{2,k}]w_{2}$ on the right-hand
side into the left-hand side for $\tau>0$ sufficiently large; namely, we obtain
\begin{multline*}
\tau\int_{\Omega_{1}}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|\nabla w_{1}|^{2}\,\mathrm{d} x+\tau\int_{\Omega_{2}}\left(\mathrm{e}^{2\tau\varphi_{2,1}}+\mathrm{e}^{2\tau\varphi_{2,2}}\right)|\nabla w_{2}|^{2}\,\mathrm{d} x
\\
\leq C\bigg(\int_{\Omega_{1}}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|\Phi_{1}|^{2}\,\mathrm{d} x+\tau\int_{\Omega_{2}}\left(\mathrm{e}^{2\tau\varphi_{2,1}}+\mathrm{e}^{2\tau\varphi_{2,2}}\right)|\Phi_{2}|^{2}\,\mathrm{d} x
\\
+\int_{\Omega_{1}}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|[\Delta,\chi]w_{1}|^{2}\,\mathrm{d} x+\tau^{2}\left(\|\mathrm{e}^{\tau\varphi_{1,1}}\phi\|_{H^{\frac{1}{2}}(\mathcal{I})}^{2}+\|\mathrm{e}^{\tau\varphi_{1,2}}\phi\|_{H^{\frac{1}{2}}(\mathcal{I})}^{2}\right)\bigg).
\end{multline*}
Since we can write $\displaystyle\phi=\frac{id\mu}{1+id\mu}w_{1}+\frac{d}{1+id\mu}f_{1}$, using the trace theorem, Green's formula and the fact that the operator $[\Delta,\chi]$ is of first order with support in $\omega$, we find
\begin{multline}\label{Swave22}
\tau\int_{\omega}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|\nabla w_{1}|^{2}\,\mathrm{d} x+\tau\int_{\Omega\setminus\omega}\left(\mathrm{e}^{2\tau\varphi_{2,1}}+\mathrm{e}^{2\tau\varphi_{2,2}}\right)|\nabla w_{2}|^{2}\,\mathrm{d} x
\\
\leq C\bigg(\int_{\omega}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|\Phi_{1}|^{2}\,\mathrm{d} x+\tau\int_{\Omega\setminus\omega}\left(\mathrm{e}^{2\tau\varphi_{2,1}}+\mathrm{e}^{2\tau\varphi_{2,2}}\right)|\Phi_{2}|^{2}\,\mathrm{d} x
\\
+\tau^{4}\int_{\omega}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|w_{1}|^{2}\,\mathrm{d} x+\tau^{2}\int_{\omega}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|\nabla w_{1}|^{2}\,\mathrm{d} x
\\
+\tau^{4}\int_{\omega}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|f_{1}|^{2}\,\mathrm{d} x+\tau^{2}\int_{\omega}\left(\mathrm{e}^{2\tau\varphi_{1,1}}+\mathrm{e}^{2\tau\varphi_{1,2}}\right)|\nabla f_{1}|^{2}\,\mathrm{d} x\bigg).
\end{multline}
Using the expressions of $\Phi_{1}$ and $\Phi_{2}$, taking the maximum of $\varphi_{1,1}$, $\varphi_{1,2}$, $\varphi_{2,1}$ and $\varphi_{2,2}$ on the right-hand side of \eqref{Swave22} and their minimum on the left-hand side, and using Lemma \ref{lem: Swave8}, we obtain
\begin{equation*}
\begin{split}
\|\nabla w_{1}\|_{L^{2}(\omega)}^{2}+\|\nabla w_{2}\|_{L^{2}(\Omega\setminus\omega)}^{2}\leq C\mathrm{e}^{C\tau}\Big(\|f_{1}\|_{L^{2}(\omega)}^{2}+\|\nabla f_{1}\|_{L^{2}(\omega)}^{2}+\|f_{2}\|_{L^{2}(\Omega\setminus\omega)}^{2}
\\
+\|g_{1}\|_{L^{2}(\omega)}^{2}+\|g_{2}\|_{L^{2}(\Omega\setminus\omega)}^{2}+\|\nabla w_{1}\|_{L^{2}(\omega)}^{2}\Big).
\end{split}
\end{equation*}
Recovering $u_{1}$ and $u_{2}$ from the expressions of $w_{1}$ and $w_{2}$, one gets
\begin{equation*}
\begin{split}
\|\nabla u_{1}\|_{L^{2}(\omega)}^{2}+\|\nabla u_{2}\|_{L^{2}(\Omega\setminus\omega)}^{2}\leq C\mathrm{e}^{C\tau}\Big(\|f_{1}\|_{L^{2}(\omega)}^{2}+\|\nabla f_{1}\|_{L^{2}(\omega)}^{2}+\|f_{2}\|_{L^{2}(\Omega\setminus\omega)}^{2}
\\
+\|g_{1}\|_{L^{2}(\omega)}^{2}+\|g_{2}\|_{L^{2}(\Omega\setminus\omega)}^{2}+\|\nabla u_{1}\|_{L^{2}(\omega)}^{2}\Big).
\end{split}
\end{equation*}
Using the Poincar\'e inequality, we then have
\begin{equation}\label{Swave23}
\|\nabla u\|_{L^{2}(\Omega)}^{2}\leq C\mathrm{e}^{c|\mu|}\left(\|\nabla f\|_{L^{2}(\Omega)}^{2}+\|g\|_{L^{2}(\Omega)}^{2}+\|\nabla u\|_{L^{2}(\omega)}^{2}\right).
\end{equation}
The combination of the two estimates \eqref{Swave4} and \eqref{Swave23} leads to
\begin{equation}\label{Swave25}
\|\nabla u\|_{L^{2}(\Omega)}^{2}\leq C\mathrm{e}^{c|\mu|}\left(\|\nabla f\|_{L^{2}(\Omega)}^{2}+\|g\|_{L^{2}(\Omega)}^{2}\right).
\end{equation}
We can obtain the same estimate as \eqref{Swave25} for the variable $v$, with the $L^{2}$ norm instead, by using again the Poincar\'e inequality and recalling the expression of $v$ in the first line of \eqref{Swave2}; namely, we have
\begin{equation}\label{Swave26}
\|v\|_{L^{2}(\Omega)}^{2}\leq C\mathrm{e}^{c|\mu|}\left(\|\nabla f\|_{L^{2}(\Omega)}^{2}+\|g\|_{L^{2}(\Omega)}^{2}\right).
\end{equation}
Thus, the estimate \eqref{Swave24} is obtained by combining the two estimates \eqref{Swave25} and \eqref{Swave26}.
\begin{thebibliography}{99}
\bibitem{ammari-niciase} {\sc K.~Ammari and S.~Nicaise,} {\em Stabilization of elastic systems by collocated feedback,} Lecture Notes in Mathematics, 2124, Springer, Cham, 2015.
\bibitem{banks-smith-wang} {\sc H.T.~Banks, R.C.~Smith and Y.~Wang,} Modeling aspects for piezoelectric patch actuation of shells, plates and beams, {\em Quart. Appl. Math.,} LIII (1995), 353--381.
\bibitem{blr} {\sc C.~Bardos, G.~Lebeau and J.~Rauch,} Sharp sufficient conditions for the observation, control, and stabilization of waves from the boundary, {\em SIAM J. Control Optim.,} {\bf 30} (1992), 1024--1065.
\bibitem{batty} {\sc C. J. K.~Batty and T.~Duyckaerts,} Non-uniform stability for bounded semi-groups on Banach spaces, {\em J. Evol. Equ.,} {\bf 8} (2008), 765--780.
\bibitem{bellassoued} {\sc M.~Bellassoued,} Carleman estimates and distribution of resonances for the transparent obstacle and application to the stabilization, {\em Asymptot. Anal.,} {\bf 35} (2003), 257--279.
\bibitem{borichevtomilov}{\sc A.~Borichev and Y.~Tomilov,} Optimal polynomial decay of functions and operator semigroups, {\em Math. Ann.,} {\bf 347}(2) (2010), 455--478.
\bibitem{burq} {\sc N.~Burq,} D\'ecroissance de l'\'energie locale de l'\'equation des ondes pour le probl\`eme ext\'erieur et absence de r\'esonance au voisinage du r\'eel, {\em Acta Math.,} {\bf 180} (1998), 1--29.
\bibitem{chen-liu-liu} {\sc S.~Chen, K.~Liu and Z.~Liu,} Spectrum and stability for elastic systems with global or local Kelvin-Voigt damping, {\em SIAM J. Appl. Math.,} {\bf 59} (1999), 651--668.
\bibitem{Duyckaerts} {\sc T.~Duyckaerts,} Optimal decay rates of the energy of a hyperbolic-parabolic system coupled by an interface, {\em Asymptot. Anal.,} {\bf 51} (2007), 17--45.
\bibitem{hassine1}{\sc F.~Hassine,} Stability of elastic transmission systems with a local Kelvin-Voigt damping, {\em European Journal of Control,} {\bf 23} (2015), 84--93.
\bibitem{hassine2}{\sc F.~Hassine,} Asymptotic behavior of the transmission Euler-Bernoulli plate and wave equation with a localized Kelvin-Voigt damping, {\em Discrete and Continuous Dynamical Systems - Series B,} {\bf 21} (2016), 1757--1774.
\bibitem{hassine3}{\sc F.~Hassine,} Logarithmic stabilization of the Euler-Bernoulli plate equation with locally distributed Kelvin-Voigt damping, {\em Evolution Equations and Control Theory,} in press, 2017.
\bibitem{lebeau} {\sc G.~Lebeau,} \'Equations des ondes amorties, Algebraic and geometric methods in mathematical physics (Kaciveli, 1993), 73--109, Math. Phys. Stud., 19, Kluwer Acad. Publ., Dordrecht, 1996.
\bibitem{lebeau-robbiano1} {\sc G.~Lebeau and L.~Robbiano,} Contr\^ole exact de l'\'equation de la chaleur, {\em Comm. Partial Differential Equations,} {\bf 20} (1995), 335--356.
\bibitem{lebeau-robbiano2} {\sc G.~Lebeau and L.~Robbiano,} Stabilisation de l'\'equation des ondes par le bord, {\em Duke Math. J.,} {\bf 86} (1997), 465--491.
\bibitem{liu-liu} {\sc K. Liu and Z.~Liu,} Exponential decay of energy of the Euler--Bernoulli beam with locally distributed Kelvin--Voigt damping, {\em SIAM Journal on Control and Optimization,} {\bf 36} (1998), 1086--1098.
\bibitem{liu-rao1} {\sc K. S.~Liu and B.~Rao,} Characterization of polynomial decay rate for the solution of linear evolution equation, {\em Zeitschrift f\"ur Angewandte Mathematik und Physik (ZAMP),} {\bf 56} (2005), 630--644.
\bibitem{liu-rao2} {\sc K. S.~Liu and B.~Rao,} Exponential stability for wave equations with local Kelvin-Voigt damping, {\em Zeitschrift f\"ur Angewandte Mathematik und Physik (ZAMP),} {\bf 57} (2006), 419--432.
\bibitem{Pazy} {\sc A.~Pazy,} {\em Semigroups of linear operators and applications to partial differential equations}, Springer, New York, 1983.
\bibitem{rousseau-robbiano} {\sc J.~Le Rousseau and L.~Robbiano,} Carleman estimate for elliptic operators with coefficients with jumps at an interface in arbitrary dimension and application to the null controllability of linear parabolic equations, {\em Arch. Rational Mech. Anal.,} {\bf 195} (2010), 953--990.
\bibitem{tebou} {\sc L.~Tebou,} A constructive method for the stabilization of the wave equation with
localized Kelvin--Voigt damping, {\em C. R. Acad. Sci. Paris}, Ser. I, {\bf 350} (2012), 603--608.
\end{thebibliography}
\end{document}
|
\begin{document}
\begin{titlepage}
\title{Finding the Right Curve:\\ Optimal Design of Constant Function Market Makers}
\end{titlepage}
\section{Introduction}
Agents in any economic system need to be able to exchange one asset for another efficiently.
Some assets are frequently traded by many market participants, and for these assets,
a seller offering a reasonable price can likely find a buyer quickly and vice versa. However, not every pair of assets
is traded frequently, and sellers in these markets might have to wait a long time to find a buyer or accept a
highly unfavourable price.
The role of a \emph{market-maker} is to fill this gap --- to facilitate easy and rapid trading between pairs of assets
for which otherwise there is very little trading activity. Market-makers trade in both directions on the market, buying and selling
assets when traders arrive at the market \cite{amihud1980dealership}. In this sense, market-makers facilitate asynchronous trading
between buyers and sellers, thereby increasing the market \emph{liquidity} between two assets.
Our topic of study is a subclass of automated market-making strategies known as {\it Constant Function Market Makers} (CFMMs). A CFMM maintains reserves of two assets $X$ and $Y,$ provided by a so-called \emph{liquidity provider (LP),} and makes trades according to a predefined \emph{trading function} $f(x,y)$ of its asset reserves (the eponymous ``constant function''); specifically, a CFMM accepts a trade $(\Delta x, \Delta y)$ from reserves $(x,y)$ to $(x - \Delta x, y + \Delta y)$
if and only if $f(x - \Delta x, y + \Delta y)=f(x, y)$.
CFMMs earn revenue by charging a small commission on each trade (i.e. creating a bid-ask spread) but are subject to several associated
expenses \cite{amihud1986asset}, such as the costs of maintaining the asset inventory and adverse selection by arbitrageurs (i.e., stale quote sniping). The loss of the LP relative to the counterfactual strategy of ``buy-and-hold'' is referred to as the ``divergence loss'' \cite{milionis2022automated}.
Automated market-making has long been an important topic of study \cite{aoyagi2020liquidity,gerig2010automated,othman2013practical},
but CFMMs have recently become some of the most widely used exchanges
\cite{uniswapv2,uniswapv3,balancer,egorov2019stableswap} within the modern
Decentralized Finance (DeFi) ecosystem \cite{werner2021sok}. The success of CFMMs in DeFi is primarily due to their ability to run via smart contracts \cite{mohanta2018overview} with a fairly low computation requirement on blockchains. CFMMs also reduce the barrier to entering the liquidity provision business or ``market-making'' \cite{ammdemocratize}.
CFMMs have also been widely deployed in prediction markets
as a method for aggregating opinions \cite{hanson2007logarithmic,chen2010new}.
For completeness, we describe the precise translation from market scoring rules studied in the prediction markets literature to CFMMs in Appendix \S \ref{apx:cost_fn}.
\begin{example}[Real-world CFMMs]
~
\begin{enumerate}
\item
The decentralized exchange Uniswap \cite{uniswapv2} uses the product function $f(x,y) = xy$.
\item
The Logarithmic Market Scoring Rule (LMSR) \cite{hanson2007logarithmic}, used extensively to design prediction markets, corresponds to a CFMM with trading function
$f(x,y) = (1-e^{-x}) + (1 - e^{-y})$ \cite{univ3paradigm}.
\item
The trading function $f(x,y)=xe^y$ has powered automated storefronts in online games \cite{hyperconomy}.
\end{enumerate}
\end{example}
Despite facilitating billions of US dollars worth of trade volume per day, a complete formal understanding of CFMM design trade-offs is missing in the literature.
Our goal, therefore, is to explain what guides a CFMM designer to choose one
trading function over another. We provide an optimization framework which compiles a market-maker's beliefs on future prices into an optimal CFMM trading function, making substantial progress towards an important open problem \cite{timtweet}.
\input{our_results}
\subsection{Related Work}
The closest line of work \cite{fan2022differential,neuder2021strategic,cartea2022decentralised,heimbach2022risks,bar2023uniswap} to our paper is the one which considers profit-maximizing market-making strategies which can be implemented via the Uniswap v3 \cite{uniswapv3} protocol. Additionally, \cite{neuder2021strategic,cartea2022decentralised,bar2023uniswap} design ``rebalancing'' strategies for the LPs, wherein they effectively modify the CFMM trading function periodically. In contrast, we consider designing CFMM trading functions from the first principles and do not use the Uniswap v3 framework. We also do not consider rebalancing the CFMM trading function in this work. A non-exhaustive list of papers in this line is:
\begin{itemize}[leftmargin = 0.3cm]
\item Fan et al. \cite{fan2022differential}, study the question of maximizing risk-adjusted
profit for LPs while accounting for the gas fee for traders.
Their model assumes that all trading on a CFMM occurs only in response to price movements on an external market (i.e. arbitrageurs realigning the CFMM spot price to the external market).
Their model suggests that risk-neutral
LPs must allocate all of their capital at a single price point (\S 4.2, \cite{fan2022differential}), while ours better explains the choices of practitioners.
\item Neuder et al. \cite{neuder2021strategic}
study dynamic liquidity allocation strategies for risk-adjusted fee revenue maximization,
but do not consider the ``divergence loss'' incurred in the process.
\item Cartea et al. \cite{cartea2022decentralised} decompose the CFMM divergence loss into two components -- the convexity cost (loss due to arbitrage) and the opportunity cost (the cost of locking up capital).
They give a stochastic optimal control-based closed-form strategy for a profit-maximizing LP.
\item
Heimbach et al. \cite{heimbach2022risks} model liquidity positions on Uniswap V3 and perform a data-based analysis of the risks and returns of LPs as a function of the volatility of the underlying assets.
\end{itemize}
Similar to \cite{cartea2022decentralised}, Milionis et al. \cite{milionis2022automated} show that a part of the divergence loss corresponds to the market risk and can be hedged by a rebalancing strategy; the remainder of the divergence loss corresponds to the profit made by arbitrageurs trading against the CFMM -- they call this loss the LVR (loss-vs-rebalancing). When the variance of the price of $X$ relative to $Y$ is $\sigma^2,$ they show that the rate of accrual of LVR (what they call the instantaneous-LVR) is $\sigma^2 p^2 |x^{\prime}(p)|,$ where $x^{\prime}(p)$ denotes the rate of change of $x$ in the CFMM with respect to the price $p$ under perfect arbitrage. Since the LVR is a linear function of our notion of liquidity, our convex optimization framework can accommodate the LVR as a cost for the LP in the objective function.
Automated market-making has also been studied extensively in the context of prediction markets
\cite{hanson2007logarithmic,chen2010new,chen2012utility}.
The theory of CFMMs and the dynamics around trading with CFMMs
have been studied in DeFi \cite{angeris2020improved,angeris2019analysis,angeris2021replicating,capponi2021adoption,bartoletti2021theory,bergault2022automated},
and many different DeFi applications have been deployed or proposed using different CFMM trading functions
\cite{uniswapv2,uniswapv3,balancer,angeris2021replicatingwithoutoracles}.
\section{Preliminaries}
\label{sec:preliminaries}
\begin{definition}[CFMM]
\label{defn:cfmm}
A CFMM trades between two assets $X$ and $Y$, and has a set of asset reserves --- $x$ units of $X$ and $y$ units of $Y$.
Its trading rule is defined by its {\it trading function} $f(\cdot, \cdot)$ such that it accepts a trade of $\Delta_X$ units of $X$ in exchange for $\Delta_Y$ units of $Y$
if and only if $f(x, y)=f(x-\Delta_X, y + \Delta_Y)$.
\end{definition}
All of the CFMM trading functions discussed in this work have the following properties.
\begin{assumption}
\label{ass:fn_form}
A trading function $f(\cdot, \cdot): \mathbb{R}_+^2 \rightarrow \mathbb{R}$ is continuous, non-negative, increasing in both coordinates, and strictly quasi-concave. Further, it is defined only on the non-negative orthant.
\end{assumption}
The assumption that $f$ is increasing, quasi-concave, and never holds a short position in any asset (and is therefore only defined on the non-negative orthant) is standard in the literature (e.g. \cite{angeris2020improved}).
We assume strict quasi-concavity for clarity of exposition.
The CFMM's trading function implicitly defines a marginal exchange rate (the ``spot exchange rate'')
for a trade of infinitesimal size.
\begin{definition}[Spot Exchange Rate]
\label{defn:spot}
At asset reserves $(x_0, y_0),$ the spot exchange rate of a CFMM with trading function $f$ is
$-\frac{\partial f}{\partial X}/\frac{\partial f}{\partial Y}$ at $(x_0, y_0)$.
When $f$ is not differentiable, the spot exchange rate is any subgradient of $f$.
When $x_0=0$, the spot exchange rate is $[-\frac{\partial f}{\partial X}/\frac{\partial f}{\partial Y}, \infty)$,
and when $y_0=0$, the spot exchange rate is $[0,-\frac{\partial f}{\partial X}/\frac{\partial f}{\partial Y}]$.
\end{definition}
These definitions directly lead to some useful observations.
We give the proofs in Appendix \ref{apx:prelim_obs}.
\begin{observation}
\label{obs:y_fn_p}
If $f$ is strictly quasi-concave, then for any constant $K>0$ and spot exchange rate $p$,
there is a unique point $(x,y)$ where $f(x,y)=K$ and $p$ is a spot exchange rate at
$(x,y)$.
\end{observation}
\begin{observation}
\label{obs:y_fn_x}
Under Assumption~\ref{ass:fn_form}, for a given constant function value $K$,
the amount of $Y$ in the CFMM reserves
uniquely specifies the amount of $X$ in the reserves, and vice versa.
\end{observation}
Observations \ref{obs:y_fn_p} and \ref{obs:y_fn_x} imply that the amounts of $X$ and $Y$ in the CFMM reserves can be written as functions $\mathcal X(p)$ and $\mathcal Y(p)$ of its spot exchange rate for the trading function equals constant $K.$
In the rest of the discussion, we describe CFMM reserve states by the amount of $Y$ in the reserves.
\begin{observation}
\label{obs:y_nondecreasing}
$\mathcal Y(p)$ is monotone nondecreasing.
\end{observation}
\iffalse
\begin{example}
Some examples of CFMMs deployed in practice are as follows:
\begin{enumerate}
\item
Uniswap V2 \cite{uniswapv2} uses the constant product rule $f(x,y) = xy$, for a spot exchange rate of $y/x$.
\item
Balancer \cite{balancer} uses a weighted constant product rule $f(x,y) = x^\alpha y$ for a constant $\alpha>0$, for a spot
exchange rate of $\frac{\alpha y}{x}$.
\item
Uniswap V3 \cite{uniswapv3} uses a piecewise-defined function (over different ranges of spot prices) function, where
each piece is a constant product rule.
\item
The Logarithmic Market Scoring Rule \cite{hanson2007logarithmic} corresponds to a CFMM with trading function
$f(x,y) = -(e^{-x} + e^{-y}) + 2$ \cite{univ3paradigm} for a spot exchange rate of $e^{y-x}$.
\item
Curve \cite{egorov2019stableswap} uses a combination of a sum term $x+y$ and a product-like term, $\frac{1}{xy}$.
\item
The Minecraft \cite{minecraft} modification package \cite{hyperconomy},
uses $f(x,y)=xe^y$
for a spot exchange rate of $\frac{1}{x}$.
\end{enumerate}
\end{example}
\fi
\iffalse
Finally, (as in \cite{neuder2021strategic,fan2022differential}) we assume in \S \ref{sec:model} that there is an external ``reference market''
between $X$ and $Y$ that is liquid and gives a reference exchange
\begin{definition}[Arbitrage]
\label{def-arbitrage}
We define arbitrage as a sequence of trades after which the trader makes a sure profit without taking any risk.
\end{definition}
The presence of arbitrage trades in a market is detrimental to its performance in general.
However, for CFMMs, which have trades as the only way of aligning its spot exchange rate with the reference market's exchange rate,
arbitrage trades are useful.
It has been shown in previous work \cite{angeris2020improved} that for CFMMs satisfying Assumption~\ref{ass:fn_form}, rational arbitrage traders
leave the CFMM's spot exchange rate aligned with the reference market's exchange rate to maximize their profit.
In this paper we make the assumption that there are always traders present that exploit arbitrage opportunities.
\fi
\iffalse
\XXX{not sure where to make this point}
Our viewpoint here is on maximizing the functionality of the CFMM, in the sense of maximizing the amount of trading activity that it
can facilitate. This is in contrast to work of \XXX{cite parkes, angeris} which views the CFMM as a financial instrument directly.
\XXX{do this better, obviously}
We think that's a dumb way to do things, because it ignores the actual point of a CFMM.
Increased trading activity directly leads to increased fee revenue (more trades).
Parkes's paper just leads to depositing everything at
a fixed exchange rate, which is bad from point of view of actually providing a market.
Their model assumes prices evolve externally, without reacting to how much liquidity is present in the CFMM.
This means that all of the price movements are driven by arbitrageurs arbing the CFMM against the external market,
(which seems to be assumed to be infinitely liquid) -- so it's not actually facilitating trading,
and is really just acting as a strange financial position. Not capturing what makes a CFMM interesting.
\fi
\section{Model}
\label{sec:model}
As used in Definition~\ref{defn:spot}, we adopt the notation wherein exchange rates are given as the rate of a unit of $X$ in terms of $Y$ (i.e., a trade of $x$ units of X for $y$ units of $Y$ implies an exchange rate of $p^\prime = \frac{y}{x}$).
Unless specified otherwise, $p$ refers to the CFMM spot exchange rate, $\hat p$ denotes the exchange rate in an external market, and $p^\prime$ denotes the exchange rate of a particular trade.
We now turn to our trading model and our formulation of market liquidity.
\begin{definition}[System Model] \label{defn:system_model}
\begin{enumerate}
\item
There are two assets $X$ and $Y$, and a relatively liquid ``primary'' external market that provides a (public) reference exchange rate $\hat p$
between $X$ and $Y$.
\item
An LP creates a CFMM that trades between $X$ and $Y$
by providing an initial set of reserves and choosing a CFMM trading function.
\item Whenever the reference exchange rate $\hat p$ on the external market changes, arbitrageurs
immediately realign the CFMM's spot exchange rate $p$ with the reference exchange rate.
\footnote{
There is always a strictly profitable arbitrage trade to be made when the CFMM's spot exchange rate
differs from the reference exchange rate \cite{angeris2021optimal}; this phenomenon is akin to
``stale quote sniping'' in traditional exchanges \cite{baldauf2020high}.
} \label{item:arb}
\item At each time step, a trade request arrives with probability $q: 0<q<1$ (Definitions \ref{defn:trade} and \ref{defn:trademodel}).
\end{enumerate}
We assume, however, that for small fluctuations in the CFMM spot exchange rate resulting from small trades, arbitrageurs do not realign the CFMM spot exchange rate. This assumption is reasonable since the trading fee and other associated costs (e.g., gas fee in DeFi) make such an action unprofitable.
\end{definition}
Since the reference exchange rate is public knowledge, traders using the CFMM can compare the exchange rate that a CFMM offers
with the reference rate. This difference is the \emph{slippage} of a trade.
\begin{definition}[Slippage]
\label{defn:slippage}
The exchange rate of a trade of $y$ units of $Y$ for $x$ units of $X$ is $p^\prime=y/x$.
Relative to a reference exchange rate of $\hat p$ units of $Y$ per $X$,
the \emph{slippage} of this trade is $(p^\prime- \hat p)/ \hat p.$
\end{definition}
Traders in our model are willing to tolerate a fixed amount of maximum slippage $\varepsilon$.
\begin{definition}[Trade Request]
\label{defn:trade}
A \emph{Trade Request} with a CFMM is a request to SELL or BUY $k$ units of $X$ or $Y$,
on the condition that the slippage of the trade is at most $\varepsilon$ relative to the reference exchange rate $\hat p$ --- in other words,
a trade request is a tuple
(SELL or BUY, X or Y, k, $\hat p$, $\varepsilon$).
\end{definition}
\begin{definition}[Trade Success]
\label{defn:trade_success}
A trade request buying $X$ for $Y$ with maximum slippage $\varepsilon$
succeeds if and only if the CFMM can satisfy the entire trade with an exchange rate
$p^\prime$ and, for the reference exchange rate $\hat p$,
$p^\prime/\hat p \leq 1+\varepsilon$. Similarly, a trade request selling $X$ for $Y$ succeeds if and only if the CFMM can satisfy the entire trade with an exchange rate
$p^\prime$ such that
$p^\prime /\hat p \geq 1/(1+\varepsilon)$.
\end{definition}
Trade requests are not partially fulfilled. Failed requests are not retried and are deleted. If a request succeeds, the CFMM transfers assets accordingly.
Otherwise, the CFMM's reserves are unchanged.
This notion of trade success mirrors the operation of CFMMs in practice;
users supply a trade size, exchange rate, and slippage parameter when submitting a trade request
(e.g. \cite{uniswapinterface,balancerinterface}).
\iffalse
This is a typical web interface for the CFMMs deployed in a DeFi context.
Users know the value of an asset on an external market
(i.e. a centralized exchange). When orders are submitted, users typically specify a
slippage parameter along with an exchange rate and quantity.
The CFMM implementation converts this information to a minimum overall exchange rate
and then checks that the overall exchange rate offered by the protocol is sufficiently high
for the trader and declines the trade if the check fails.
With these definitions, we can now present the trade model we consider.
\fi
Putting these definitions together gives the trading model of our study.
\begin{definition}[Trade Model]
\label{defn:trademodel}
There exists a static (in the short term) reference exchange rate $\hat p$.
The size of the trade request is drawn from distribution $size(\cdot)$. The choice of X or Y is arbitrary, but the trade is for BUY or SELL with equal probability.
Each request has the same maximum slippage $\varepsilon$.
\end{definition}
Every successful trade changes the reserves of the CFMM -- this model induces a Markov chain on the state of the CFMM's reserves. We assume that the Markov chain, at a given reference exchange rate, has sufficient time to mix before the reference exchange rate changes.
Natural restrictions on the distribution of the trades (made explicit below) make this Markov chain ergodic. We study, therefore,
the expected fraction of trade requests that a CFMM can satisfy when its state is drawn from the stationary distribution of this Markov chain
(we formalize this notion in Definition \ref{defn:yfail}).
\input{liquidity}
\input{inefficiency}
\input{belief}
\iffalse
We make the following assumption to make the model amenable to simpler analysis.
\begin{assumption}[Small Trade Size]
\label{assumption:small_trade_size}
We assume that the trade sizes are small relative to $\vert L_\varepsilon(p)\vert $ such that the spot exchange rate of the CFMM after the trade
is approximately equal to the overall exchange rate of the trade.
\end{assumption}
Note that by the quasi-concavity of the CFMM trading function, the overall exchange rate of a trade must be
between the spot exchange rates before and after the trade.
Assumption~\ref{assumption:small_trade_size} is reasonable for most CFMMs with sufficient liquidity, such that a single trade does not change the
spot exchange rate by a large amount.
Larger trades can always be broken down to smaller parts and be sent in a sequence to the CFMM, thereby making it useful for all traders.
With Assumption~\ref{assumption:small_trade_size}, we use the spot exchange rate after the trade as a proxy for the overall exchange rate of the trade
in the analysis in the rest of the paper.
Specifically, we use the following modified definition of \emph{trade success} (Definition~\ref{defn:trade_success}).
\begin{definition}[Relaxed Trade Success]
\label{defn:modified_trade_success}
A trade selling $Y$ for $X$ with maximum slippage $\varepsilon$ succeeds if the CFMM can satisfy
the trade with the final spot exchange rate $p^\prime$ and, for the reference exchange rate $p$,
$\frac{p^\prime}{p} \leq 1+\varepsilon$.
Trades selling $X$ for $Y$ succeed if the analogous condition in the other direction is satisfied.
Otherwise, the trade fails, and the CFMM's state is unchanged.
\end{definition}
\begin{definition}
\label{defn:dist_cont}
Let $size(\cdot)$ be some distribution on $\mathbb{R}_{\geq 0}$ with support in a neighborhood of $0$.
A trader appears at every timestep. The trade has size $k$ units of $Y$, where $k$ is drawn from $size(\cdot)$.
A trade buys or sells from the CFMM with
equal probability.
\end{definition}
This definition implicitly encodes an assumption that the amount of trading from $X$ to $Y$ is balanced in expectation
against the amount of trading from $Y$ to $X$.
An additional assumption makes this setting analytically tractable.
\begin{assumption}[Strict Slippage]
\label{ass:modified_trade_success}
Trade requests measure slippage relative to the post-trade \emph{spot exchange rate} of the CFMM,
not the overall exchange rate of the trade.
\end{assumption}
In other words, a trade request succeeds if and only if it would move the CFMM's reserves to some state within $L_\varepsilon(\hat p)$.
Assumption~\ref{ass:modified_trade_success} implies a pessimistic view of the trade failure probability. However, the following result signifies that Assumption~\ref{ass:modified_trade_success} is reasonable.
\begin{lemma} \label{lemma:slippage}
At any state of the CFMMs asset reserves, the maximum size of a trade that will be successful under the model in Definition~\ref{defn:trade_success} is at most twice the maximum size of a trade that will be successful under Strict Slippage as in Assumption~\ref{ass:modified_trade_success}.
\end{lemma}
\begin{proof}
See that the strict-quasi concavity of the trading function $f$ implies a ``convex-pricing'' of any asset. For any buy trade, the marginal exchange rate received is non-decreasing in the size of the trade. Therefore the slippage of a buy trade of $2k$ units is at least as much as the strict slippage of a buy trade of $k$ units. A similar argument follows for sell trades.
\end{proof}
We now analyze the Markov chain over the CFMM's state, the stationary distribution of which gives us the trade failure probability under Assumption~\ref{ass:modified_trade_success}.
\begin{lemma} \label{lemma:uniform}
Let $M$ be the Markov chain defined by the state of $Y$ in the asset reserves of the CFMM with $ \mathcal Y \in L_\varepsilon(\hat p)$ and transitions
induced by trades drawn from the distribution in Definition \ref{defn:dist_cont}.
Under Assumption~\ref{ass:modified_trade_success}, the stationary distribution of $M$ is uniform over $L_\varepsilon(\hat p)$.
\end{lemma}
\begin{proof}
\input{proof_cont}
\iffalse
We first derive the transition probabilities of the Markov Chain $M.$ Given that $M$ is in state $y_i,$ it can reach a state $y_f$ either via a successful trade or a
failed trade. Let $s \sim size(\cdot).$ The trade fails if $y_i +s > y_2$ if the trade is a SELL Y. It fails if $y_i -s < y_1$ if the trade is a BUY Y.
Since trade size distribution $size(\cdot)$ has a support in a neighborhood of $0$, the chain $M$ is irreducible and hence has a unique stationary distribution.
Let the chain start in the uniform distribution on $L_\varepsilon(p)$.
It suffices to show that $M$ has equal probability of reaching any state in $L_\varepsilon(p)$ in the next time step.
The chain reaches state $y$ if $s~size(\cdot)$ is such that $s > y_2- y$, the trade is a SELL Y, and the initial state was $y$.
It also reaches $y$ if $ s > y - y_1$ , the trade is a BUY Y and the initial state was $y$. In the third case, it reaches $y$ if $s+y_i = y$, the initial state
is $y_i$ and the trade is a SELL Y, or if $y_i -s = y$, the initial state is $y_i$ and trade is a BUY Y. See that for any $s$ and $y$ there is exactly one unique state
in $L_\varepsilon(p)$ from which the chain reaches $y$. Since we started in a uniform distribution, the final distribution is also uniform.
\fi
\extraqed{}
\end{proof}
\begin{proposition}
The probability that a trade of size $k$ units of $Y$ fails is approximately $\min(1, \frac{k}{\vert L_\varepsilon(\hat p)\vert})$,
where the approximation error
is up to Assumption \ref{ass:modified_trade_success}.
\end{proposition}
\begin{proof}
The probability that a (without loss of generality) sell of size $k$ units of $Y$ fails is equal to the probability that a
state $y$, drawn uniformly from the range $L_\varepsilon(p)=[y_1, y_2]$, lies in the range $[y_2-k, y_2]$.
Lemma \ref{lemma:uniform} shows this probability is $\min(1, \frac{k}{y_2-y_1})$.
\extraqed{}
\end{proof}
We can relax Assumption 2 and still have direct analysis of the model, with qualitatively similar results,
if we restrict the class of trade distributions. One example of such a trade distribution with the associated analysis
is given in the following subsection.
\fi
\section{Optimizing for Liquidity Provision}
\label{sec:optimization}
How should LPs allocate capital to market-making at different exchange rates?
This question is the core topic of our work.
At any point in time, only the capital deployed near the reference exchange rate is useable for market-making.
Thus, the ``optimal'' CFMM design necessarily depends on an LP's belief on the distribution of future exchange rates.
We show here that an LP's beliefs on future asset valuations can be compiled into an optimal
CFMM design, which is the solution to a convex optimization problem (Theorem \ref{thm:problem}). Specifically,
the optimization framework outputs a capital allocation $L(\cdot)$ (as in Definition \ref{defn:liquidity}) that minimizes the expected CFMM inefficiency (Proposition \ref{prop:failchance}).
Ultimately, we show that this relationship goes both ways; a liquidity allocation uniquely specifies an equivalence class
of beliefs (Corollary \ref{cor:lp_to_equiv}).
Per Observation \ref{obs:alloc}, a liquidity allocation $L(\cdot)$ fully
specifies a CFMM trading function.
This section discusses ``optimality'' from a viewpoint of minimizing CFMM inefficiency; however, we show in \S \ref{sec:fees} that
this optimization framework, with a different objective function,
computes a CFMM that maximizes expected CFMM fee revenue. Furthermore,
we show in \S \ref{sec:loss} how to modify the objective of this program to account for losses incurred
during CFMM operation.
\subsection{A Convex Program for Optimal Liquidity Allocation}
\iffalse
The last assumption is purely for clarity of exposition; it rules out discrete distributions, which
would lead to large regions of space of feasible asset reserves where the trading function
would give the same spot exchange rate and where therefore we would not be able to talk about the notion of the amount
of $Y$ in the reserves as a function of the spot exchange rate. Observe that a discrete distribution
can be approximated arbitrarily closely by a function satisfying our assumption; informally, this corresponds
to approximating a linear function (with negative slope and positive y-intercept on the positive orthant)
arbitrarily closely by a strictly convex function.
\fi
\subsubsection{Objective: Minimize Expected CFMM Inefficiency}
\newcommand{\propfailchance}
{
Suppose every trade order on a CFMM is for one unit numeraire's worth of either $X$ or $Y$, and buys or sells the asset in question with equal probability.
The expected CFMM inefficiency is
$ \frac{1}{N_\psi}\iint_{p_X, p_Y}\frac{\psi(p_X, p_Y)}{p_YL(p_X/p_Y)}dp_X~dp_Y $.
We define the integral only where $\psi(p_X, p_Y) > 0$. Further, we define $\psi(p_X, p_Y)/L(p_X/p_Y)$ to be $\infty$
when $L(p_X/p_Y) = 0$. $N_\psi$ is as in Definition \ref{defn:belief}.
}
\begin{proposition}
\label{prop:failchance}
\propfailchance{}
\end{proposition}
\begin{proof}
Suppose that a trader order is for $1$ unit of numeraire's worth of $X$ with probability $\alpha$,
and for $1$ unit of numeraire's worth of $Y$ with probability $1 - \alpha$.
The size of a trade denominated in $X$ is therefore $1/p_X$, and the size of a trade denominated in $Y$ is
$1/p_Y$.
Recall from Definition ~\ref{defn:yfail} that at a given set of reference prices $p_X, p_Y$,
the CFMM inefficiency for a trade buying or selling $1$ numeraire's worth of $X$ is
$\frac{\hat p}{p_X}\frac{1}{L(p_X/p_Y)} = \frac{p_X}{p_Y p_X}\frac{1}{L(p_X/p_Y)} = \frac{1}{p_Y L(p_X/p_Y)}$.
Similarly, also from Definition ~\ref{defn:yfail},
the CFMM inefficiency corresponding to a trade of $1$ numeraire's worth of $Y$ is $\frac{1}{p_Y}\frac{1}{L(p_X/p_Y)}$.
Hence, the overall expected CFMM inefficiency is
\begin{equation*}
\frac{1}{N_\psi}\iint_{p_X, p_Y} \psi(p_X, p_Y)\left(\frac{\alpha}{p_YL(p_X/p_Y)} + \frac{1 - \alpha}{p_YL(p_X/p_Y)}\right) dp_X~dp_Y,
\end{equation*}
\begin{equation} \label{eq:failure_chance}
=\frac{1}{N_\psi} \iint_{p_X, p_Y} \frac{\psi(p_X, p_Y)}{p_YL(p_X/p_Y)} dp_X~dp_Y. \qedhere
\end{equation} \extraqed{}
\end{proof}
For clarity of exposition, we focus on the scenario where each order trades $1$ unit of the numeraire's worth of value.
Our model can study, however, scenarios with general trade sizes, including cases where the trade size is a function of $p_X$ and $p_Y$.
The CFMM inefficiency is a linear function of trade size. A distribution of trade sizes can be multiplied with the belief function.
\iffalse
\begin{example}
For some distribution on trade sizes $size(p_X, p_Y)$
and belief $\psi$, the expected failure chance at $(p_X, p_Y)$ is
$\frac{\mathop{\mathbb{E}}\left[size(p_X,p_Y)\right]\psi(p_X, p_Y)}{p_YL(p_X/p_Y)}$,
which is equal to the expected failure chance for another belief $\psi^\prime(p_X, p_Y)=\mathop{\mathbb{E}}\left[size(p_X,p_Y)\right]\psi(p_X, p_Y)$
that makes trades of size $1$ unit's worth of the numeraire.
\end{example}
\fi
Proposition \ref{prop:failchance} also implies that the trade failure chance is the same for a trader buying $X$ or $Y$. The $p_Y$ in the denominator of the integrand in equation~\eqref{eq:failure_chance} appears because the liquidity $L(\cdot)$ is defined with respect to the reserves of asset $Y$, i.e., $\mathcal Y(\cdot)$ (recall Lemma~\ref{lemma:y_diffable}). Overall, there is no distinction between $X$ and $Y$ for the purpose of the CFMM inefficiency.
\subsubsection{Constraints: A Finite Budget for Market-Making} ~
The asset reserves of a CFMM are finite. Clearly, the best CFMM to minimize expected inefficiency has liquidity $L(p)=\infty$ at every exchange rate $p$,
but this would require
an infinite amount of each asset (Observation \ref{obs:alloc}).
We model an LP with a fixed budget $B$ who creates a CFMM
when the reference exchange rates of $X$ and $Y$ in the numeraire are $P_X$ and $P_Y$, respectively.
With this budget, the LP can purchase (or borrow)
any amount of $X$ and $Y$, say, $X_0$ and $Y_0$, subject to the constraint that $P_X X_0 + P_Y Y_0 \leq B$.
With this intuition, we have the following technical lemmas:
\begin{lemma}
\label{lemma:supply_constraints}
Given a purchasing choice of $X_0$ and $Y_0$, the LP can choose $L(\cdot)$ and set the initial spot exchange rate of the CFMM
to be $p_0$, subject to the following asset conservation constraints.
\begin{enumerate}
\item
$\int_{0}^{p_0} \frac{L(p)}{p} dp\leq Y_0$
\item
$\int_{p_0}^{\infty} \frac{L(p)}{p^2} dp \leq X_0$
\end{enumerate}
\end{lemma}
\begin{proof}
Follows from Observation \ref{obs:alloc}.
\end{proof}
\begin{lemma}
For any two budgets $B,B^\prime$ with $B^\prime > B$ and any capital allocation $L_1(\cdot)$ satisfying the constraints of
Lemma \ref{lemma:supply_constraints} with budget $B$, there exists a capital allocation $L_2(\cdot)$ satisfying the constraints
of Lemma \ref{lemma:supply_constraints}
using the larger budget $B^\prime$ that gives a strictly lower expected CFMM inefficiency.
\end{lemma}
\begin{proof}
Duplicate $L_1(\cdot)$ and allocate the capital $B^\prime-B$ to any $p$ with $\psi(p, 1)>0$ to build $L_2(\cdot)$. \extraqed{}
\end{proof}
A rational LP sets the initial spot exchange rate of the CFMM to be equal to the current
reference exchange rate (i.e. $p_0=\frac{P_X}{P_Y}$). If not, a trader could arbitrage the CFMM against an external market. The arbitrage
profit of this trader is the LP's loss, which effectively reduces the LP's initial budget.
Our convex program combines the above objective and constraints to compute an optimal liquidity allocation $L(p)$.
The core of the rest of this work is in using this
program to understand the relationship between LP beliefs and optimal liquidity allocations.
\begin{theorem}
\label{thm:problem}
Suppose that the initial reference prices of assets $X$ and $Y$ are $P_X$ and $P_Y$, and that an LP has
initial budget $B > 0$ and belief function $\psi(\cdot, \cdot)$.
The optimal liquidity provision strategy, $L(\cdot)$, is the solution to the following convex optimization problem (COP). The decision
variables are $X_0, Y_0$, and $L(p)$ for each exchange rate $p>0.$
\footnote{
The optimization is over a Banach space with one dimension for each $p>0$; we elide this technicality when possible for clarity of exposition.
}
\begin{align}
\text{minimize} ~& \iint_{p_X, p_Y} \frac{\psi(p_X, p_Y)}{p_YL(p_X/p_Y)} dp_X~dp_Y \tag{COP}\label{eq:cop}\\
\text{subject to} ~& \int_{0}^{p_0} \frac{L(p)}{p} dp\leq Y_0 \tag{COP$1$} \label{eq:cop1}\\
~& \int_{p_0}^{\infty} \frac{L(p)}{p^2} dp \leq X_0 \tag{COP$2$} \label{eq:cop2}\\
~& X_0 P_X + Y_0 P_Y \leq B \tag{COP$3$} \label{eq:cop3}\\
~& L(p) \geq 0 ~& \forall ~p>0 \tag{COP$4$} \label{eq:cop4}
\end{align}
\end{theorem}
\begin{proof}
The $L(\cdot)$ that solves \ref{eq:cop} minimizes the expected
transaction failure chance (the expression in Proposition \ref{prop:failchance}),\footnote{The normalization term in the denominator is dropped for clarity since it doesn't change the solution of the problem.}
while satisfying the LP's budget constraint.
The objective and the constraints are integrals of convex functions and thus are convex.
This optimization problem is over a Banach space (there are uncountably many $L(p)$). Well-established results from the theory of optimization over Banach spaces show that optimal solutions exist (Theorem 47.C, \cite{zeidler1985})
and the KKT conditions
are well defined (\S 4.14, Proposition 1, \cite{zeidler1995}).
\extraqed{}
\end{proof}
A CFMM offers only a spot exchange rate ($X$ relative to $Y$), not a spot valuation for each asset (relative to the numeraire).
In this light, we find that the objective function of \ref{eq:cop} can be rearranged to one that depends only
on ratios of valuations.
\begin{lemma}
\label{lemma:polar}
Define $r,\theta$ to be the standard polar coordinates,
with $p_X=r\cos(\theta)$ and $p_Y=r\sin(\theta)$.
\begin{dmath*}
\iint_{p_X, p_Y} \frac{\psi(p_X, p_Y)}{p_YL(p_X/p_Y)} dp_X~dp_Y
= \int_\theta \left( \frac{1}{L(\cot(\theta))\sin(\theta)} \int_r \psi(r\cos(\theta), r\sin(\theta))dr\right) d\theta
\end{dmath*}
\end{lemma}
\begin{proof}
Follows by standard algebraic manipulations ($dp_X~dp_Y = r~dr~d\theta$). \extraqed{}
\end{proof}
This rearrangement reveals a useful equivalence class among LP beliefs.
\newcommand{\corequivclass}
{
Any two beliefs $\psi_1, \psi_2$ give the same optimal liquidity allocations if there exists a constant $\alpha>0$ such
that for every $\theta$,
\begin{equation*}
\int_r \psi_1(r\cos(\theta), r\sin(\theta))dr = \alpha \int_r \psi_2(r\cos(\theta), r\sin(\theta))dr
\end{equation*}
}
\begin{corollary}
\label{cor:equivclass}
\corequivclass{}
\end{corollary}
This corollary has important implications for the closed-form results we obtain in \S\ref{sec:beliefs} for commonly deployed CFMMs. The analysis of a belief defined on the square $p_X, p_Y \in (0, P_X] \times (0,P_Y]$ gives the results for all beliefs defined analogously on $p_X, p_Y \in (0, \alpha P_X] \times (0,\alpha P_Y]$ for any $\alpha > 0.$
\newcommand{\corvarphi}
{
Define $\varphi_\psi(\theta) = \int_r \psi(r\cos(\theta), r\sin(\theta))dr$.
Then
\begin{dmath*}
\iint_{p_X, p_Y} \frac{\psi(p_X, p_Y)}{p_YL(p_X/p_Y)} dp_X~dp_Y
= \int_p \frac{\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}{L(p)}dp
\end{dmath*}
}
\begin{corollary}
\label{cor:varphi}
\corvarphi{}
\end{corollary}
Corollary \ref{cor:varphi} enables a straightforward construction of a feasible solution to \ref{eq:cop}.
\begin{lemma}
\label{lemma:finite_soln}
\ref{eq:cop} always has a solution with finite objective value.
\end{lemma}
\begin{corollary}
\label{corolary:phi_and_L}
On any set of nonzero measure, we cannot have $\psi(p_X, p_Y) > 0$ and $L(p_X/p_Y) = 0.$
\end{corollary}
Proofs of Corollaries~\ref{cor:equivclass} and~\ref{cor:varphi} and Lemma~\ref{lemma:finite_soln} are in Appendices~\ref{sec:proof_equiv}, \ref{sec:proof_varphi}, and~\ref{sec:prooffinitesol}, respectively.
\subsection{Optimality Conditions}
We first give some lemmas about the structure of optimal solutions to \ref{eq:cop}.
\newcommand{\lemmabasicopt}
{
The following hold at any optimal solution.
\begin{enumerate}
\item
$\int_{0}^{p_0} \frac{L(p)}{p} dp = Y_0$
\item
$\int_{p_0}^{\infty} \frac{L(p)}{p^2} dp = X_0$
\item
$X_0 P_X + Y_0 P_Y = B$
\end{enumerate}
}
\begin{lemma}
\label{lemma:basicopt}
\lemmabasicopt{}
\end{lemma}
Lemma~\ref{lemma:basicopt} says that at optimum, the constraints of \ref{eq:cop} are tight. A full proof is in Appendix~\ref{proof_basicopt}.
Using the result of Lemma \ref{lemma:basicopt}, the KKT conditions (\S 5.5.3, \cite{boyd2004convex}) of \ref{eq:cop} are the following:
\begin{lemma}[KKT Conditions]
\label{lemma:kktconds}
Let $\lambda_Y, \lambda_X$, and $\lambda_B$ be the Lagrange multipliers for \ref{eq:cop1},~\ref{eq:cop2}, and~\ref{eq:cop3} respectively. Let $\lbrace \lambda_{L(p)} \rbrace $
be the Lagrange multipliers for each $L(p)\geq 0$ constraint.
When $\varphi_\psi(\cot^{-1}(p)) > 0 :$
\begin{enumerate}
\item For all $p$ with $p\geq p_0$,
$\frac{\lambda_X}{p^2} = \frac{1}{L(p)^2}\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p)) + \lambda_{L(p)}$.
\item For all $p$ with $p\leq p_0$,
$\frac{\lambda_Y}{p} = \frac{1}{L(p)^2}\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p)) + \lambda_{L(p)}$.
\item $\lambda_X = P_X \lambda_B$ and $\lambda_Y = P_Y \lambda_B$.
\end{enumerate}
When $\varphi_\psi(\cot^{-1}(p)) = 0 :$
\begin{enumerate}
\item
For all $p$ with $p\geq p_0$,
$\frac{\lambda_X}{p^2} = \lambda_{L(p)}$.
\item
For all $p$ with $p\leq p_0$,
$\frac{\lambda_Y}{p} = \lambda_{L(p)}$.
\item
$\lambda_X = P_X \lambda_B$ and $\lambda_Y = P_Y \lambda_B$.
\end{enumerate}
\end{lemma}
\begin{proof}
These are the KKT conditions of \ref{eq:cop}.
$\lbrace L(p) \rbrace$
is a functional over a Banach space. This functional exists for every optimal solution
by Proposition 1 of \S 4.14 of \cite{zeidler1995}. Note that that proposition requires the objective
to be continuously differentiable in a neighbourhood of the optimal solution; this does not hold
when the optimization problem is as written and there is some $p$ so that $\varphi_\psi(\cot^{-1}(p))$ goes continuously to $0$ at $p$ (but is nonzero
near $p$). In this case, one could replace $L(p)$ by $L(p)+\varepsilon$ in the denominator of the objective, for some arbitrarily small $\varepsilon$.
This would cause a small distortion in $L(p)$. We elide this technicality for clarity of exposition.
Continuous differentiability of the objective on a neighbourhood where $L(p)>0$ for all $p$ with $\varphi_\psi(\cot^{-1}(p))>0$ follows
from the assumption that $\psi$ is continuously differentiable on the set where $\psi(p_X, p_Y)>0$, and that this set is open (in Definition~\ref{defn:belief}).
\extraqed{}
\end{proof}
\newcommand{\coryhatdefined}
{
The integral $\mathcal Y(\tilde{p})=\int_0^{\tilde{p}} \frac{L(p)dp}{p}$ is well defined for every $\tilde{p}$
and $\mathcal Y(\cdot)$ is monotone nondecreasing and continuous.
}
\begin{corollary}
\label{cor:yhat}
\coryhatdefined{}
\end{corollary}
A proof is given in Appendix~\ref{sec:proof_int_y}. Lemma \ref{lemma:kktconds} and Corollary \ref{cor:yhat} together
imply that the behaviour of a CFMM that results from an optimal solution of \ref{eq:cop} is well-defined.
\subsubsection{Consequences of KKT Conditions} ~
The KKT conditions immediately imply the following facts about any optimal solution of \ref{eq:cop}.
\newcommand{\lemmakktobs}
{
\begin{enumerate}
\item
$\lambda_Y Y_0 = \int_0^{p_0} \frac{\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}{L(p)} dp$
and $\lambda_X X_0 = \int_{p_0}^\infty \frac{\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}{L(p)} dp$.
\item
$Y_0>0$ implies $\lambda_Y>0$. Similarly, $X_0 > 0$ implies $\lambda_X > 0$.
\item
$L(p)\neq 0$ if and only if $\lambda_{L(p)}=0$ (unless, for $p\leq p_0$, $\lambda_Y=0$ or for $p\geq p_0$, $\lambda_X=0$).
\item
The objective value is $\lambda_Y Y_0 + \lambda_X X_0$.
\item
$\frac{\lambda_X}{P_X} = \frac{\lambda_Y}{P_Y}$.
\end{enumerate}
}
\begin{lemma}
\label{lemma:kktobs}
\lemmakktobs{}
\end{lemma}
\begin{proof}
\begin{enumerate}
\item
Multiply each side of the first KKT condition in Lemma \ref{lemma:kktconds} by $L(p)$ (for $p$ with nonzero $\varphi_\psi(\cot^{-1}(p))$)
to get
$\frac{\lambda_X L(p)}{p^2} = \frac{1}{L(p)}\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))$, integrate from $p_0$ to $\infty$,
and apply the second item of Lemma \ref{lemma:basicopt}.
A similar argument (integrating from $0$ to $p_0$) gives the expression on $\lambda_Y Y_0$.
\item
If $Y_0>0$, then the right side of the equation in the previous part is nonzero, so $\lambda_Y$ must be nonzero.
The case of $\lambda_X$ is identical.
\item
Follows from points 1 and 2 of Lemma \ref{lemma:kktconds}.
\item
The right sides of the equations in the first statement add up to the objective.
\item
Follows from point 3 of Lemma \ref{lemma:kktconds}. \qedhere
\end{enumerate}
\extraqed{}
\end{proof}
Lemma \ref{lemma:kktobs} shows that the fraction of liquidity allocated to an exchange rate $p$ is a function only of the LP's
(relative) belief that the future exchange rate will be $p$. Specifically, except through an overall scalar,
there is no interaction between the values of $L(\cdot)$ at different relative exchange rates.
\begin{proposition}
\label{prop:lp_dependencies}
At an optimum,
$L(p)$ is a function of $\lambda_X, \lambda_Y, \varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))$, and $p$.
\end{proposition}
\begin{proof}
Follows from Lemma \ref{lemma:kktconds}. \extraqed{}
\end{proof}
Proposition \ref{prop:lp_dependencies} gives several important consequences. First, it shows that an optimal liquidity allocation
can be inverted to give a set of belief functions that lead to that liquidity allocation.
\newcommand{\corlpequiv}
{
A liquidity allocation $L(\cdot)$ and an initial spot exchange rate $p_0$ are sufficient to uniquely specify
an equivalence class of beliefs (as defined in Corollary \ref{cor:equivclass}) for which $L(\cdot)$ is optimal.
}
\begin{corollary}
\label{cor:lp_to_equiv}
\corlpequiv{}
\end{corollary}
Second, Proposition \ref{prop:lp_dependencies} actually enables an explicit construction of a belief
that leads to $L(\cdot)$.
\newcommand{\corinverse}
{
Let $P_X$ and $P_Y$ be initial reference valuations, and let $L(\cdot)$ denote a liquidity allocation.
Define the belief $\psi(p_X, p_Y)$ to be $\frac{(L(p_X/p_Y))^2}{p_X/p_Y}$ when $p_X\in (0, P_X]$ and
$p_Y\in (0, P_Y]$, and to be $0$ otherwise.
Then $L(\cdot)$ is the optimal allocation for $\psi(\cdot, \cdot)$.
}
\begin{corollary}
\label{cor:inverse_problem}
\corinverse{}
\end{corollary}
Finally, the KKT conditions (Lemma \ref{lemma:kktconds}) imply that linear combinations of beliefs result in predictable
combinations of liquidity allocations. Towards this, we have the following result, which will also be useful in further proofs. Proofs of Corollaries~\ref{cor:lp_to_equiv},~\ref{cor:inverse_problem}, and~\ref{cor:addbeliefs} are in Appendix~\S\ref{sec:corr_proofs}.
\newcommand{\thmaddbeliefs}
{
Let $\psi_1, \psi_2$ be any two belief functions (that give $\varphi_{\psi_1}$ and $\varphi_{\psi_2}$)
with optimal allocations $L_1(\cdot)$ and $L_2(\cdot)$, and let $L(\cdot)$ be the optimal allocation for
$\psi_1+\psi_2$.
Then $L^2(\cdot)$ is a linear combination of $L_1^2(\cdot)$ and $L_2^2(\cdot)$.
Further, when $\varphi_{\psi_1}$ and $\varphi_{\psi_2}$ have disjoint support,
$L(\cdot)$ is a linear combination of $L_1(\cdot)$ and $L_2(\cdot)$.
}
\begin{corollary}
\label{cor:addbeliefs}
\thmaddbeliefs{}
\end{corollary}
\section{Common CFMMs and Beliefs}
\label{sec:beliefs}
We turn now to the CFMMs deployed in practice. What do the choices of trading functions in large CFMMs reveal about practitioners' beliefs
about future asset prices? In fact, the optimal beliefs for several widely-used trading functions closely match
the widespread but informal intuition about these systems. Recall that $P_X$ and $P_Y$ are the initial reference exchange rates. Also, recall the assumption that all trades are for the worth of 1 unit of the base numeraire currency (Proposition~\ref{prop:failchance}).
\input{uniswap_v2}
\input{uniswap_v3}
\input{balancer}
\input{lmsr}
\section{Market-Maker Profit and Loss}
\label{sec:net_profit}
Deploying assets within a CFMM has a cost,
and LPs naturally want to understand the financial tradeoffs involved (instead of just minimising CFMM inefficiency).
Specifically, LPs in CFMMs trade off revenue from transaction fees against losses due to adverse selection.
\input{fees}
\input{loss}
\input{net_profit}
\input{bid_ask}
\section{Conclusion}
We develop in this work a convex program that, for any set of beliefs about future asset valuations, outputs a trading function
that maximizes the expected fraction of trade requests the CFMM can settle. Careful analysis of this program allows for study of the inverse relationship
as well; for any trading function, our program can construct a class of beliefs for which the trading function is optimal. Constructing this program
requires a new notion of the liquidity of a CFMM,
and the core technical challenge
involves careful analysis of the KKT conditions of this program.
Unlike prior work, this program is able to explain the diversity of CFMM trading curves observed in practice.
We analyze several CFMMs that are widely deployed in
the modern DeFi ecosystem, and show that the beliefs revealed by our model match the informal intuition of practitioners.
Furthermore, our program is versatile enough to cover the case of a profit-seeking LP that must trade off expected revenue from trading fees against loss due to arbitrage.
This program therefore can serve as a guide for practitioners when choosing a liquidity allocation in a real-world CFMM.
\appendix
\section{CFMMs and Market Scoring Rules}
\label{apx:cost_fn}
We highlight here for completeness the equivalence between market scoring rules \cite{hanson2003combinatorial}
and CFMMs. Chen and Pennock \cite{chen2012utility} show that every prediction market, based on a market scoring rule,
can be represented using some ``cost function.''
A prediction market trades $n$ types of shares, each of which pays out 1 unit of a numeraire if a particular future event occurs.
The cost function $C(q)$ of \cite{chen2012utility} is a map from the total number of issued shares of each event, $q\in\mathbb{R}^n$, to some number of units of
the numeraire. To make a trade $\delta\in\mathbb{R}^n$ with the prediction market (i.e. to change the total number of issued shares to $q+\delta$),
a user pays $z=C(q+\delta) - C(q)$ units of the numeraire to the market.
One discrepancy is that traditional formulations of prediction markets (e.g. \cite{chen2012utility,hanson2007logarithmic})
allow an arbitrary number of shares to be issued by the market maker, but the CFMMs described in this work trade in assets with finite supplies.
Suppose for the moment, however, that a CFMM could possess a negative quantity of shares (with the trading function $f$ defined
on the entirety of $\mathbb{R}^n$, instead of just the positive orthant).
This formulation of a prediction market directly gives a CFMM that trades the $n$ shares and the numeraire, with trading function
$f(r,z)=-C(-r)+z$ for $r\in \mathbb{R}^n$ the number of shares
owned by the CFMM, and $z$ the number of units of the numeraire owned by the CFMM. Observe that for any trade $\delta$ and
$dz=C(-(r + \delta))-C(-r)$, $f(r, z)=f(r+\delta, z+dz)$. This establishes the correspondence between prediction markets and CFMMs.
In our examples with the LMSR, we consider a CFMM for which $z = 0$
(i.e., it doesn't exchange shares for dollars, but only shares of one future event for shares of another future event).
The cost function $C(r)$ for the LMSR is $\log(\sum_{i=1}^n \exp(-r_i)).$ The CFMM representation with this cost function follows by setting it to a constant.
\section{Continuous trade size distribution} \label{sec:cont}
\begin{definition}
\label{defn:dist_cont}
Let $size(\cdot)$ be some distribution on $\mathbb{R}_{\geq 0}$ with support in a neighborhood of $0$.
A trader appears at every timestep. The trade has size $k$ units of $Y$, where $k$ is drawn from $size(\cdot)$.
A trade buys or sells from the CFMM with
equal probability.
\end{definition}
This definition implicitly encodes an assumption that the amount of trading from $X$ to $Y$ is balanced in expectation
against the amount of trading from $Y$ to $X$.
An additional assumption makes this setting analytically tractable.
\begin{assumption}[Strict Slippage]
\label{ass:modified_trade_success}
Trade requests measure slippage relative to the post-trade \emph{spot exchange rate} of the CFMM,
not the overall exchange rate of the trade.
\end{assumption}
In other words, a trade request succeeds if and only if it would move the CFMM's reserves to some state within $L_\varepsilon(\hat p)$.
Assumption~\ref{ass:modified_trade_success} implies a pessimistic view of the trade failure probability. However, the following result signifies that Assumption~\ref{ass:modified_trade_success} is reasonable.
\begin{lemma} \label{lemma:slippage}
At any state of the CFMM's asset reserves, the maximum size of a trade that will be successful under the model in Definition~\ref{defn:trade_success} is at most twice the maximum size of a trade that will be successful under Strict Slippage as in Assumption~\ref{ass:modified_trade_success}.
\end{lemma}
\begin{proof}
See that the strict quasi-concavity of the trading function $f$ implies a ``convex pricing'' of any asset. For any buy trade, the marginal exchange rate received is non-decreasing in the size of the trade. Therefore the slippage of a buy trade of $2k$ units is at least as much as the strict slippage of a buy trade of $k$ units. A similar argument follows for sell trades.
\end{proof}
We now analyze the Markov chain over the CFMM's state, the stationary distribution of which gives us the trade failure probability under Assumption~\ref{ass:modified_trade_success}.
\begin{lemma} \label{lemma:uniform}
Let $M$ be the Markov chain defined by the state of $Y$ in the asset reserves of the CFMM with $ \mathcal Y \in L_\varepsilon(\hat p)$ and transitions
induced by trades drawn from the distribution in Definition \ref{defn:dist_cont}.
Under Assumption~\ref{ass:modified_trade_success}, the stationary distribution of $M$ is uniform over $L_\varepsilon(\hat p)$.
\end{lemma}
\begin{proof}
\input{proof_cont}
\iffalse
We first derive the transition probabilities of the Markov Chain $M.$ Given that $M$ is in state $y_i,$ it can reach a state $y_f$ either via a successful trade or a
failed trade. Let $s \sim size(\cdot).$ The trade fails if $y_i +s > y_2$ if the trade is a SELL Y. It fails if $y_i -s < y_1$ if the trade is a BUY Y.
Since trade size distribution $size(\cdot)$ has a support in a neighborhood of $0$, the chain $M$ is irreducible and hence has a unique stationary distribution.
Let the chain start in the uniform distribution on $L_\varepsilon(p)$.
It suffices to show that $M$ has equal probability of reaching any state in $L_\varepsilon(p)$ in the next time step.
The chain reaches state $y$ if $s~size(\cdot)$ is such that $s > y_2- y$, the trade is a SELL Y, and the initial state was $y$.
It also reaches $y$ if $ s > y - y_1$ , the trade is a BUY Y and the initial state was $y$. In the third case, it reaches $y$ if $s+y_i = y$, the initial state
is $y_i$ and the trade is a SELL Y, or if $y_i -s = y$, the initial state is $y_i$ and trade is a BUY Y. See that for any $s$ and $y$ there is exactly one unique state
in $L_\varepsilon(p)$ from which the chain reaches $y$. Since we started in a uniform distribution, the final distribution is also uniform.
\fi
\extraqed{}
\end{proof}
\begin{proposition}
The probability that a trade of size $k$ units of $Y$ fails is approximately $\min(1, \frac{k}{\vert L_\varepsilon(\hat p)\vert})$,
where the approximation error
is up to Assumption \ref{ass:modified_trade_success}.
\end{proposition}
\begin{proof}
The probability that a (without loss of generality) sell of size $k$ units of $Y$ fails is equal to the probability that a
state $y$, drawn uniformly from the range $L_\varepsilon(p)=[y_1, y_2]$, lies in the range $[y_2-k, y_2]$.
Lemma \ref{lemma:uniform} shows this probability is $\min(1, \frac{k}{y_2-y_1})$.
\extraqed{}
\end{proof}
\section{Omitted Proofs} \label{sec:proofs}
\subsection{Omitted Proofs of \S \ref{sec:preliminaries} and \S \ref{sec:model}}
\label{apx:prelim_obs}
\iffalse
\begin{restatement*}[Observation \ref{obs:tangent}]
An infinitesimal trade at the spot exchange rate keeps $f$ constant.
\end{restatement*}
\begin{proof}
The plane normal to the vector $\left(\frac{\partial f}{\partial x}, \frac{\partial f} {\partial y}\right)$ at some point
is tangent to $f$ at that point. An infinitesimal trade that trades $\frac{\partial f}{\partial x}$ units
of $X$ for $\frac{\partial f} {\partial y}$ units of $Y$ moves the reserves to another point in the tangent plane.
\extraqed{}
\end{proof}
\fi
\begin{restatement*}[Observation \ref{obs:y_fn_p}]
If $f$ is strictly quasi-concave and differentiable, then for any constant $K$ and spot exchange rate $p$,
the point $(x,y)$ where $f(x,y)=K$ and $p$ is a spot exchange rate at
$(x,y)$ is unique.
\end{restatement*}
\begin{proof}
A constant $K=f(X_0, Y_0)$ defines a set $\lbrace x:f(x)\geq K\rbrace$. Because $f$ is strictly quasi-concave, this set is
strictly convex. Trades against the CFMM (starting from initial reserves $(X_0, Y_0)$) move along the boundaries of this set.
Because this set is strictly convex, no two points on the boundary can share a gradient (or subgradient).
\extraqed{}
\end{proof}
\begin{restatement*}[Observation \ref{obs:y_fn_x}]
If $f$ is strictly increasing in both $X$ and $Y$ at every point on the positive orthant, then for a given constant function value $K$,
the amount of $Y$ in the CFMM reserves
uniquely specifies the amount of $X$ in the reserves, and vice versa.
\end{restatement*}
\begin{proof}
If not, then $f$ would be constant on some line with either $X$ or $Y$ constant.
\extraqed{}
\end{proof}
\begin{restatement*}[Observation \ref{obs:y_nondecreasing}]
$\mathcal Y(p)$ is monotone nondecreasing.
\end{restatement*}
\begin{proof}
If $\mathcal Y(p)$ is decreasing, the level set of $f$, i.e., $\lbrace (x,y): f(x,y)\geq K\rbrace$ cannot be convex.
\extraqed{}
\end{proof}
\begin{restatement*}[Lemma \ref{lemma:y_differentiable}]
\lemmaydiffable{}
\end{restatement*}
\begin{proof}
Observation \ref{obs:y_fn_x} implies that the amount of $Y$ in the reserves can be represented as a function $\mathcal {\hat Y}(x)$ of the amount of $X$ in the reserves.
By assumption, the level sets of $f$ (other than for $f(\cdot)=0$) cannot touch the boundary of the nonnegative orthant.
Because $f$ is differentiable and increasing at every point in the positive orthant,
the map $g(x)$ from reserves $x$ to spot exchange rates at $(x, \mathcal {\hat Y}(x))$ must be a bijection from $(0, \infty)$ to $(0,\infty)$.
Because $f$ is twice-differentiable, $g(x)$ must be differentiable, and so the map $h(p)=g^{-1}(p)$ must also be differentiable.
The map $\mathcal Y(p)$ from spot exchange rates to reserves $Y$ is equal to $\mathcal{\hat{Y}}(h(p))$, and so $\mathcal Y(p)$ is differentiable
because $\mathcal{\hat{Y}}(\cdot)$ is differentiable and $h(\cdot)$ is differentiable.
\extraqed{}
\end{proof}
\begin{restatement*}[Lemma \ref{lemma:y_diffable}]
\lemmaypdiffable{}
\end{restatement*}
\begin{proof}
Follows from Definitions~\ref{defn:Leps} and~\ref{defn:liquidity}.
\end{proof}
\iffalse
\subsection{Omitted Proof of Lemma \ref{lemma:defn_independent}}
\begin{restatement*}[Lemma \ref{lemma:defn_independent}]
\lemmaindependent{}
\end{restatement*}
\begin{proof}
Recall from Observation \ref{obs:y_fn_x} that (for any fixed constant $K>0$) the amount of $Y$ in the reserves can be
represented by a function of the amount of $X$ in the reserves, $\hat{Y}(\cdot)$.
The gradient $p$ (or every subgradient) of $\hat{Y}$ necessarily corresponds to a gradient (resp. subgradient) of $f$.
In particular, define
the line tangent to $\hat{Y}(\cdot)$ at some point $x_0$ as the set $\lbrace (x,y)~\vert~ (y-\hat{Y}(x_0)) = -p(x-x_0)\rbrace$.
As the image of $\hat{Y}$ is a level set of $f$, a line parallel to
this line must be contained within the plane tangent to $f$ at $(x_0, \hat{Y}(x_0))$ (or contained
within some subgradient of $f$ at that point). Thus, $p$ must be a spot exchange rate of $f$ at $(x_0, \hat{Y}(x_0))$.
The lemma follows from substituting $p=-\frac{dY}{dX}$.
\extraqed{}
\end{proof}
\fi
\subsection{Omitted Proof of Corollary \ref{cor:equivclass}} \label{sec:proof_equiv}
\begin{restatement*}[Corollary \ref{cor:equivclass}]
\corequivclass{}
\end{restatement*}
\begin{proof}
Follows by substitution. $\alpha$ rescales the derivative of the objective with respect to every variable by the same
constant and thus does not affect whether an allocation is optimal. \extraqed{}
\end{proof}
\subsection{Omitted Proof of Corollary \ref{cor:varphi}} \label{sec:proof_varphi}
\begin{restatement*}[Corollary \ref{cor:varphi}]
\corvarphi{}
\end{restatement*}
\begin{proof}
\begin{align*}
\int_{p_X, p_Y} \frac{\psi(p_X, p_Y)}{p_YL(p_X/p_Y)} dp_X~dp_Y
& = \int_\theta \frac{\varphi_\psi(\theta)}{L(\cot(\theta)) \sin(\theta)} d\theta\\
~& = \int_p \frac{\varphi_\psi(\theta)\sin^2(\theta)}{L(\cot(\theta)) \sin(\theta)} dp \\
~& = \int_p \frac{\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}{L(p)}dp
\end{align*}
The first line follows by Lemma \ref{lemma:polar} (recall that $dp_X~dp_Y = r ~dr~ d\theta$),
the second by substitution of $p=\cot(\theta)$ and $d\theta=-\sin^2(\theta)dp$ (and
changing the direction of integration --- recall $\theta=0$ when $p=\infty$),
and the third by substitution.
\extraqed{}
\end{proof}
\subsection{Omitted Proof of Lemma \ref{lemma:finite_soln}} \label{sec:prooffinitesol}
\begin{restatement*}[Lemma \ref{lemma:finite_soln}]
The optimization problem of Theorem \ref{thm:problem} always has a solution with finite objective value.
\end{restatement*}
\begin{proof}
Set $L(p)=1$ for $p\leq 1$ and $L(p)=\varphi_\psi(\cot^{-1}(p))/p^2$ otherwise. Then
\begin{dmath*}
\int_p \frac{\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}{L(p)}dp \\
\leq \int_p \frac{\varphi_\psi(\cot^{-1}(p))}{L(p)}dp \\
\leq \int_0^1 \varphi_\psi(\cot^{-1}(p)) dp + \int_1^\infty \frac{dp}{p^2}
\end{dmath*}
The first term of the last line is finite, as per our assumption on trader beliefs.
Set $Y_0=\int_0^{p_0}\frac{L(p)dp}{p}$ and $X_0=\int_{p_0}^\infty \frac{L(p)dp}{p^2}$. Clearly both $X_0$ and $Y_0$ are finite.
Finally, rescale each $L(p)$, $X_0$, and $Y_0$ by a factor of $\frac{B}{P_X X_0 + P_Y Y_0}$ to get a new allocation $L^\prime(p)$,
$X^\prime_0$, and $Y^\prime_0$ satisfying the constraints and that still gives a finite objective value.
\extraqed{}
\end{proof}
\subsection{Omitted Proof of Lemma \ref{lemma:basicopt}} \label{proof_basicopt}
\begin{restatement*}
\lemmabasicopt{}
\end{restatement*}
\begin{proof}
The third equation
holds since the objective function is strictly decreasing in at least one $L(p)$ (where the belief puts a nonzero probability
on the exchange rate $p$), so any unallocated capital
could be allocated to increase this $L(\cdot)$ on a neighbourhood of $L(p)$ and reduce the objective.
The first equation holds because any unallocated units of $X$ could be allocated to $L(p^\prime)$ for a set of $p^\prime$
in some neighbourhood of some $p \leq p_0$
and thereby reduce the objective. If there is no $p\leq p_0$ where the belief puts a nonzero probability, then
all of the capital allocated by the third constraint to $X_0$ could be reallocated into increasing $Y_0$ and thereby decreasing
the objective.
The second equation follows by symmetry with the argument for the first.
\extraqed{}
\end{proof}
\subsection{Omitted Proof of Corollary \ref{cor:yhat}} \label{sec:proof_int_y}
\begin{restatement*}[Corollary \ref{cor:yhat}]
\coryhatdefined{}
\end{restatement*}
\begin{proof}
The last item of Lemma \ref{lemma:basicopt} shows that $L(p)\neq 0$ if and only if $\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))\neq 0$.
When $L(p)$ is nonzero, it is either $\sqrt{\frac{p^2}{\lambda_X}\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}$
or
$\sqrt{\frac{p}{\lambda_Y}\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}$
(depending on the value of $p$).
By our assumption on trader beliefs, $\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))$ is a well-defined function of $p$
and is integrable. Thus, both $\sqrt{\frac{p^2}{\lambda_X}\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}$
and $\sqrt{\frac{p}{\lambda_Y}\varphi_\psi(\cot^{-1}(p))\sin(\cot^{-1}(p))}$ are integrable.
Monotonicity follows from $L(p)\geq 0$ and continuity from basic facts about integrals. \extraqed{}
\end{proof}
\subsection{Omitted Proof of Corollaries \ref{cor:lp_to_equiv},~\ref{cor:inverse_problem}, and~\ref{cor:addbeliefs}} \label{sec:corr_proofs}
\begin{restatement*}[Corollary \ref{cor:lp_to_equiv}]
\corlpequiv{}
\end{restatement*}
\begin{proof}
It suffices to uniquely identify $\varphi_\psi(\cot^{-1}(p))$ for each $p$, up to some scalar.
Lemma \ref{lemma:kktobs} shows that $\varphi_\psi(\cot^{-1}(p))$ is a function of an optimal $L(p)$ and Lagrange multipliers
$\lambda_X$ or $\lambda_Y$,
and because $\lambda_X \frac{P_X}{P_Y} = \lambda_Y$, we must have that $\varphi_\psi$ is specified by $L(p)$ and $p_0$ up to some scalar
$\lambda_X$.
\extraqed{}
\end{proof}
\begin{restatement*}[Corollary \ref{cor:inverse_problem}]
\corinverse{}
\end{restatement*}
\begin{proof}
Recall the definition of $\varphi_\psi(\cdot)$ in Corollary~\ref{cor:varphi}.
For the given belief function $\psi,$ standard trigonometric arguments show that when
$p\geq p_0$, we have $\varphi_\psi(\cot^{-1}(p)) = \frac{P_X L(p)^2/p}{\cos(\cot^{-1}(p))}$
and that when $p\leq p_0$, we have $\varphi_\psi(\cot^{-1}(p)) = \frac{P_Y L(p)^2/p}{\sin(\cot^{-1}(p))}$.
Let $\hat{L}(p)$ be the allocation that results from solving the optimization problem for
minimising the expected CFMM inefficiency for belief $\psi$.
Lemma~\ref{lemma:kktobs} part 3, gives the complementary slackness condition of $\hat{L}(p)$ and its corresponding Lagrange multiplier.
With this, Lemma \ref{lemma:kktconds} gives the following:
when $p\geq p_0$, $\frac{\lambda_B P_X}{p^2}=\frac{1}{\hat{L}(p)^2}(P_X L(p)^2/p)/p$,
and when $p\leq p_0$,
$\frac{\lambda_B P_Y}{p}=\frac{1}{\hat{L}(p)^2}(P_Y L(p)^2/p)$.
In other words, for all $p$,
$\lambda_B = \frac{L(p)^2}{\hat{L}(p)^2}$, so $L(\cdot)$ and $\hat{L}(\cdot)$ differ by at most a constant multiplicative factor.
But both allocations use the same budget, so it must be that $\lambda_B=1$ and $\hat{L}(\cdot) = L(\cdot)$.
\extraqed{}
\end{proof}
\begin{restatement*}[Corollary \ref{cor:addbeliefs}]
\thmaddbeliefs{}
\end{restatement*}
\begin{proof}
Note that $\int_r \psi(r\cos(\theta), r\sin(\theta))dr$ is a linear function of each $\psi(p_X, p_Y)$,
and thus $\varphi_{\psi_1 + \psi_2}(\cdot) = \varphi_{\psi_1}(\cdot) + \varphi_{\psi_2}(\cdot)$.
For any $p$ with $p\geq p_0$ and nonzero $\varphi_{\psi_1}(\cot^{-1}(p))$,
$L_1(p)^2 = \frac{p^2}{\lambda_{1,X}} \varphi_{\psi_1}(\cot^{-1}(p))\sin(\cot^{-1}(p))$.
Similarly, for nonzero $\varphi_{\psi_2}(\cot^{-1}(p))$,
$L_2(p)^2 = \frac{p^2}{\lambda_{2,X}} \varphi_{\psi_2}(\cot^{-1}(p))\sin(\cot^{-1}(p))$.
If either $\varphi_{\psi_1}$ or $\varphi_{\psi_2}$ is nonzero at $\cot^{-1}(p)$,
then
\begin{equation*}
L(p)^2 = \frac{p^2}{\lambda_{X}} (\varphi_{\psi_1}(\cot^{-1}(p))\sin(\cot^{-1}(p)) + \varphi_{\psi_2}(\cot^{-1}(p))\sin(\cot^{-1}(p)))
\end{equation*}
Therefore,
\begin{equation*}
L(p)^2 = \frac{\lambda_{1,X}}{\lambda_X}L_1(p)^2 + \frac{\lambda_{2,X}}{\lambda_X}L_2(p)^2
\end{equation*}
An analogous argument holds for $p\leq p_0$.
The second statement follows from the fact that when only one of $L_1(p)$ or $L_2(p)$ is nonzero,
we must have that either
$L(p)=\sqrt{\frac{\lambda_{1,X}}{\lambda_X}}L_1(p)$ or $L(p)=\sqrt{\frac{\lambda_{2,X}}{\lambda_X}}L_2(p)$.
\extraqed{}
\end{proof}
\subsection{Omitted Proof of Proposition \ref{prop:uniswapv3}} \label{proof_v3}
\begin{restatement*}[Proposition \ref{prop:uniswapv3}]
\univthreethm{}
\end{restatement*}
\begin{proof}
A concentrated liquidity position trades exactly as a constant product market maker within its price bounds
$p_{min}$ and $p_{max}$, and makes no trades outside of that range.
By Lemma \ref{lemma:basicopt}, the optimal $L(p)$ is $0$ for $p$ outside of the range $[p_{min}, p_{max}]$.
Inside that range, by Proposition \ref{prop:lp_dependencies}, $L(p)$ differs from the optimal liquidity allocation for
the constant product market maker by a constant, multiplicative factor (the same factor for every $p$).
Thus, the resulting liquidity allocation has the same behavior as a concentrated liquidity position. \extraqed{}
\end{proof}
\subsection{Omitted Proof of Proposition \ref{prop:balancer}} \label{proof_balancer}
\begin{restatement*}[Proposition \ref{prop:balancer}]
\balancerthm{}
\end{restatement*}
\input{balancer_proof}
\subsection{Omitted Proof of Proposition \ref{prop:lmsr}} \label{proof_lmsr}
\begin{restatement*}[Proposition \ref{prop:lmsr}]
\lmsrthm{}
\end{restatement*}
\begin{proof}
This trading function implies the relationship $p=e^{y-x}.$
Combining this with the equation
$e^{-y} + e^{-x}=K$ (for some constant $K$) gives
$(1+p)e^{-y}=K$
and thus $\mathcal Y(p)=\ln(\frac{1+p}{K})$.
From the definition of liquidity, we obtain $L(p)=\frac{p}{1+p}$.
Corollary \ref{cor:inverse_problem} shows that a belief function that leads to this
liquidity allocation is (with $p=p_X/p_Y$)
\begin{equation*}
\frac{L(p)^2}{p}= \frac{p}{(1+p)^2} = \frac{p_X p_Y}{(p_X+p_Y)^2}
\end{equation*}
for $(p_X, p_Y)\in (0, P_X] \times (0, P_Y]$ and $0$ elsewhere.
The result follows by rescaling the belief function (Corollary \ref{cor:equivclass}).
\extraqed{}
\end{proof}
\subsection{Omitted Proof of Proposition \ref{prop:divergence}} \label{sec:expected_future_value}
\begin{restatement*}[Proposition \ref{prop:divergence}]
\thmdivergence{}
\end{restatement*}
\begin{proof}
\begin{dmath*}
\frac{1}{N_{\psi}} \int\limits_{p_X, p_Y}\psi(p_X, p_Y)\left(p_X \mathcal X(p_X/p_Y)) + p_Y \mathcal Y(p_X/p_Y)\right)dp_X~dp_Y \\
= \frac{1}{N_{\psi}} \int\limits_{p_X, p_Y}\psi(p_X, p_Y)\left(p_X \int\limits_{p_X/p_Y}^\infty\frac{L(p)}{p^2} dp + p_Y\int\limits_0^{p_X/p_Y}\frac{L(p)}{p} dp\right)dp_X~dp_Y \\
= \frac{1}{N_{\psi}} \int\limits_{0}^\infty \left( \frac{L(p)}{p^2} \int\limits_{p_X, p_Y}p_X\psi(p_X, p_Y)\mathds{1}_{\lbrace \frac{p_X}{p_Y} \leq p\rbrace}dp_X~dp_Y
+ \frac{L(p)}{p} \int\limits_{p_X, p_Y}p_Y\psi(p_X, p_Y)\mathds{1}_{\lbrace \frac{p_X}{p_Y} \geq p\rbrace}dp_X~dp_Y\right)dp
\end{dmath*}
The first equation follows by substitution of the equations in Observation \ref{obs:alloc}.
Note that for any $p_X, p_Y$, the term
$\frac{L(p)}{p^2}$ for any $p$ appears in the integral $\int_{p_X/p_Y}^\infty\frac{L(p)dp}{p^2}$ if and only if
$p \geq p_X/p_Y$. The result follows from rearranging the integral to group terms by $L(p)$.
\extraqed{}
\end{proof}
\subsection{Omitted Proof of Theorem \ref{thm:qual_divloss}} \label{sec:proof_div}
\begin{restatement*}[Theorem \ref{thm:qual_divloss}]
\thmdivloss{}
\end{restatement*}
\begin{proof}
Define $\varphi^\prime(\theta) = \delta\int_r rate_{\delta}(r\cos(\theta), r\sin(\theta))\psi(r\cos(\theta), r\sin(\theta))dr$.
The KKT conditions for the first problem give the following (nearly identically to those in Lemma \ref{lemma:kktconds}, just using $L_1(\cdot)$ in place of $L(\cdot)$):
\begin{enumerate}
\item
For all $p$ with $p\geq p_0$,
$\frac{\lambda_X}{p^2} = \frac{1}{L_1(p)^2}\varphi^\prime(\cot^{-1}(p))\sin(\cot^{-1}(p)) + \lambda_{L_1(p)}$.
\item
For all $p$ with $p\leq p_0$,
$\frac{\lambda_Y}{p} = \frac{1}{L_1(p)^2}\varphi^\prime(\cot^{-1}(p))\sin(\cot^{-1}(p)) + \lambda_{L_1(p)}$.
\item $ \lambda_X = P_X \lambda_B$ and $ \lambda_Y = P_Y \lambda_B$.
\end{enumerate}
Observe that the derivative, with respect to $L(p)$, of the divergence loss, is
\iffalse
\begin{equation*}
\kappa(p)
= \frac{1}{N_\psi} \int\limits_{0}^\infty \left( \frac{1}{p^2} \int\limits_{p_X, p_Y}p_X\psi(p_X, p_Y)\mathds{1}_{\lbrace \frac{p_X}{p_Y} \leq p\rbrace}dp_X~dp_Y
+ \frac{1}{p} \int\limits_{p_X, p_Y}p_Y\psi(p_X, p_Y)\mathds{1}_{\lbrace \frac{p_X}{p_Y} \geq p\rbrace}dp_X~dp_Y\right)dp
\end{equation*}
\fi
\begin{equation*}
\kappa(p)
= \frac{1}{N_\psi} \left( \frac{1}{p^2} \iint\limits_{p_X, p_Y}p_X\psi(p_X, p_Y)\mathds{1}_{\lbrace \frac{p_X}{p_Y} \leq p\rbrace}dp_X~dp_Y
+ \frac{1}{p} \iint\limits_{p_X, p_Y}p_Y\psi(p_X, p_Y)\mathds{1}_{\lbrace \frac{p_X}{p_Y} \geq p\rbrace}dp_X~dp_Y\right).
\end{equation*}
\iffalse
First, observe that
\begin{align*}
\frac{d\kappa}{dp} & =
\frac{1}{p^2} \int_{p_X, p_Y} p_X \psi(p_X, p_Y)\mathds{1}_{\frac{p_X}{p_Y}=p}dp_X ~ dp_Y
- \frac{1}{p} \int_{p_X, p_Y} p_Y \psi(p_X, p_Y)\mathds{1}_{\frac{p_X}{p_Y}=p}dp_X ~ dp_Y \\
~& = \int_{p_X, p_Y} \left(\psi(p_X, p_Y)\mathds{1}_{\frac{p_X}{p_Y}=p}\right) \left(\frac{p_X}{p^2} - \frac{p_Y}{p}\right)dp_X~dp_Y
\end{align*}
This implies that $\frac{d\kappa}{dp} \leq 0$ when $\frac{P_X}{p_2}-\frac{P_Y}{p} \leq 0$, and vice versa.
This last condition holds if and only if $p\geq \frac{P_X}{P_Y}=p_0$.
\fi
Computing the KKT conditions for the second problem gives the following:
\begin{enumerate}
\item
For all $p$ with $p\geq p_0$,
$\frac{\lambda_X}{p^2} = \frac{1}{L_2(p)^2}\varphi^\prime(\cot^{-1}(p))\sin(\cot^{-1}(p)) + \kappa(p) +\lambda_{L_2(p)}$.
\item
For all $p$ with $p\leq p_0$,
$\frac{\lambda_Y}{p} = \frac{1}{L_2(p)^2}\varphi^\prime(\cot^{-1}(p))\sin(\cot^{-1}(p)) + \kappa(p) + \lambda_{L_2(p)}$.
\end{enumerate}
As defined in the theorem statement, $X_1=\int_{p_0}^\infty \frac{L_1(p)}{p^2}dp$ and $X_2=\int_{p_0}^\infty \frac{L_2(p)}{p^2}dp$
are the optimal initial quantities of $X$.
Normalizing $L_1$ and $L_2$ by $X_1$ and $X_2$ respectively gives the equation
\begin{equation}
\label{eqn:normalized}
\int_{p_0}^\infty \frac{L_1(p)}{X_1p^2} dp=\int_{p_0}^\infty \frac{L_2(p)}{X_2p^2} dp
\end{equation}
This implies that, when normalized by $X_1$ and $X_2$, $p\geq p_0$, and $\varphi^\prime(\cot^{-1}(p))\neq 0$, we have that
\begin{dmath*}
L_2(p)
= \sqrt{\frac{\varphi^\prime(\cot^{-1}(p))\sin(\cot^{-1}(p))}{\frac{\lambda_X}{p^2} - \kappa(p)}} \\
= \sqrt{\frac{\varphi^\prime(\cot^{-1}(p))\sin(\cot^{-1}(p))}{\frac{\lambda_X}{p^2}}*\frac{\frac{\lambda_X}{p^2}}{\frac{\lambda_X}{p^2} - \kappa(p)}} \\
= L_1(p)\frac{X_2}{X_1} \sqrt{\frac{\lambda_X}{\lambda_X - p^2\kappa(p)}}
\end{dmath*}
A similar argument shows that when $p \leq p_0$,
\begin{dmath*}
L_2(p)
= L_1(p)\frac{Y_2}{Y_1} \sqrt{\frac{\lambda_Y}{\lambda_Y - p\kappa(p)}}
\end{dmath*}
\newcommand{\psitp}
{
\psi(r\cos(\theta^\prime), r\sin(\theta^\prime))
}
\newcommand{\drdt}
{
dr ~d\theta^\prime
}
Arithmetic calculation gives that
\begin{dmath*}
\kappa(p)p^2 = \frac{1}{N_{\psi}}\int_r \int_{\theta^\prime=\theta}^{\pi/2} r^2 \cos(\theta^\prime)\psitp{}\drdt{}
+ \cot(\theta) \int_r \int_{\theta^\prime=0}^\theta r^2\sin(\theta^\prime)\psitp{}\drdt{}
\end{dmath*}
and thus that
\begin{equation*}
\frac{d(\kappa(\theta)\cot(\theta)^2)}{d\theta} = - \frac{\csc^2(\theta)}{N_{\psi}}\int_r\int_{\theta^\prime=0}^\theta r^2\sin(\theta^\prime)\psitp{}\drdt{} \leq 0
\end{equation*}
$\kappa(\theta)\cot(\theta)^2$ is therefore decreasing in $\theta$, so $\kappa(p)p^2$ is increasing in $p$ (since $p = \cot(\theta)$).
Therefore, $\sqrt{\frac{\lambda_X}{\lambda_X - p^2\kappa(p)}}$ increases as $p$ goes to $\infty$.
By an analogous argument,
\begin{dmath*}
\kappa(p)p = \frac{\tan(\theta)}{N_{\psi}} \int_r \int_{\theta^\prime=\theta}^{\pi/2} r^2 \cos(\theta^\prime)\psitp{}\drdt{}
+ \int_r \int_{\theta^\prime=0}^\theta r^2\sin(\theta^\prime)\psitp{}\drdt{}
\end{dmath*}
and thus
\begin{equation*}
\frac{d(\kappa(\theta)\cot(\theta))}{d\theta} = \frac{\sec^2(\theta)}{N_{\psi}} \int_r\int_{\theta^\prime=\theta}^{\pi/2} r^2\sin(\theta^\prime)\psitp{}\drdt{} \geq 0
\end{equation*}
$\kappa(\theta)\cot(\theta)$ is therefore increasing in $\theta$, so $\kappa(p)p$ increases as $p$ decreases.
Therefore, $\sqrt{\frac{\lambda_Y}{\lambda_Y - p\kappa(p)}}$ increases as $p$ goes to $0$.
Equation
\ref{eqn:normalized} implies that the quantities $\frac{L_1(p)}{X_1p^2}$ and $\frac{L_2(p)}{X_2p^2}$ integrate to the same value, but $L_2(\cdot)$ increases
strictly more quickly than $L_1(\cdot)$, so there must be a point
$p_1>p_0$ beyond which $\frac{L_1(p)}{X_1} \leq \frac{L_2(p)}{X_2}$.
An analogous argument holds for $p<p_0$.
\extraqed{}
\end{proof}
\end{document}
|
\begin{document}
\title{Characterization of high-dimensional entangled systems\\via mutually unbiased measurements}
\author{D.~Giovannini}
\thanks{[email protected]}
\affiliation{School of Physics and Astronomy, SUPA, University of Glasgow, Glasgow G12 8QQ, United Kingdom}
\author{J.~Romero}
\affiliation{School of Physics and Astronomy, SUPA, University of Glasgow, Glasgow G12 8QQ, United Kingdom}
\affiliation{Department of Physics, SUPA, University of Strathclyde, Glasgow G4 ONG, United Kingdom}
\author{J.~Leach}
\affiliation{School of Engineering and Physical Sciences, SUPA, Heriot-Watt University, Edinburgh EH14 4AS, United Kingdom}
\author{A.~Dudley}
\affiliation{CSIR National Laser Centre, PO Box 395, Pretoria 0001, South Africa}
\author{A.~Forbes}
\affiliation{CSIR National Laser Centre, PO Box 395, Pretoria 0001, South Africa}
\affiliation{School of Physics, University of KwaZulu-Natal, Private Bag X54001, Durban 4000, South Africa}
\author{M.~J.~Padgett}
\affiliation{School of Physics and Astronomy, SUPA, University of Glasgow, Glasgow G12 8QQ, United Kingdom}
\begin{abstract}
Mutually unbiased bases (MUBs) play a key role in many protocols in quantum science, such as quantum key distribution. However, defining MUBs for arbitrary high-dimensional systems is theoretically difficult, and measurements in such bases can be hard to implement.
We show experimentally that efficient quantum state reconstruction of a high-dimensional multi-partite quantum system can be performed by considering only the MUBs of the individual parts.
The state spaces of the individual subsystems are always smaller than the state space of the composite system. Thus, the benefit of this method is that MUBs need to be defined for the small Hilbert spaces of the subsystems rather than for the large space of the overall system. This becomes especially relevant where the definition or measurement of MUBs for the overall system is challenging. We illustrate this approach by implementing measurements for a high-dimensional system consisting of two photons entangled in the orbital angular momentum (OAM) degree of freedom, and we reconstruct the state of this system for dimensions of the individual photons from \(d=2\) to \(5\).
\end{abstract}
\pacs{42.50.Tx, 03.65.Wj, 03.65.Ud, 03.67.Dd}
\maketitle
Mutually unbiased bases (MUBs) \cite{Wootters:1989, Ivanovic:1981} are a key concept in quantum science, as they are intimately related to the nature of quantum information \cite{Wehner:2010, Barnett:2009, Durt:2010a}. Measurements made in one of a set of MUBs provide no information about the state if this was prepared in another basis from the same set. In quantum mechanics, the amount of information that can be extracted from a physical system is fundamentally limited by the uncertainty relations \cite{Wehner:2010, Barnett:2009}. In this context, MUBs acquire a fundamental relevance because they serve as a test bed with which one can explore general uncertainty relations and, ultimately, complementarity \cite{Durt:2010a}. Some important questions related to MUBs remain open \cite{Durt:2010a, Wehner:2010}: what is the number of MUBs for an arbitrary dimension \(d\), and why is mutual unbiasedness not enough to guarantee a strong uncertainty relation? While we do not seek to answer these questions, we provide an accessible experimental platform for exploring these problems by demonstrating measurements in complete sets of MUBs.
Many quantum information protocols depend upon the use of MUBs. For example, quantum key distribution (QKD) relies on the fact that measurements in one basis preclude knowledge of the state in any of the others \cite{Bennett:1984, Jun-Lin:2010, Malik:2012a}. In addition, MUBs play an important role in the reconstruction of quantum states \cite{Wootters:1989, Filippov:2011, Fernandez-Perez:2011}, where they have been successfully used to enable the optimal reconstruction of entangled states of polarization \cite{Adamson:2010} and single-photon linear momentum states \cite{Lima:2011}.
It is known that a Hilbert space of dimension \(D\) will have at most \(D+1\) MUBs \cite{Ivanovic:1981, Wootters:1989, Bandyopadhyay:2002}. In 1989, Wootters showed that if one can find \(D+1\) mutually unbiased bases in dimension \(D\), these bases provide a set of measurements that can be used to optimally determine the density matrix of a \(D\)-dimensional system \cite{Wootters:1989}. However, this approach rapidly breaks down for large \(D\) for two reasons: first, defining MUBs in high dimensions becomes increasingly difficult \cite{Durt:2010a, Brierley:2010a}, and second, performing the measurements in a complete high-dimensional set of MUBs becomes experimentally challenging \cite{Adamson:2010, Nagali:2010a}. This is especially relevant for multi-level multi-particle systems, where the dimension of the overall system scales as \(D=d^N\), with \(d\) the dimension of the Hilbert spaces of the \(N\) individual particles.
We show experimentally that the alternative approach of performing local measurements in the MUBs of the single particles of a multi-particle system still allows a complete reconstruction of the overall density matrix with a minimum number of measurements \cite{Thew:2002a}. The significant benefit of our procedure is that it only requires the definition of MUBs in a Hilbert space of size \(d=D^{1/N}\); see Fig.~\ref{BipartiteSystem}. We illustrate this approach in the case of a photonic implementation of a bipartite multi-level entangled system (\(d=\sqrt{D}\)) using the orbital angular momentum of light.
In addition to the spin angular momentum, associated with polarization, light can also carry orbital angular momentum (OAM) \cite{Yao:2011a}. The OAM of light is associated with phase structures of the form \(e^{i\ell\phi}\), where \(\ell\hbar\) is the OAM carried by each photon and \(\phi\) the azimuthal angle \cite{Allen:1992}. The unbounded Hilbert space of OAM is one example of a scalable high-dimensional resource that can be used for quantum information science \cite{Mair:2001, Leach:2010, Groblacher:2006, Salakhutdinov:2012}. For example, the entanglement of high-dimensional states provides implementations of QKD that are more tolerant to eavesdropping and can improve the bit rate in other quantum communication protocols \cite{Bourennane:2002, Cerf:2002, Bechmann-Pasquinucci:2000, Walborn:2006, Dixon:2012, Gruneisen:2012}.
One of the advantages of OAM is the ability to access \(d\)-dimensional subspaces \cite{Dada:2011}, for each of which we can define all existing MUBs \cite{Gruneisen:2008}. In this work, we implement measurements in high-dimensional MUBs within the OAM degree of freedom, and we show that the MUBs corresponding to \(d\)-dimensional subspaces are readily accessible with simple laboratory procedures. Furthermore, we show that measurements in MUBs of these subspaces can be used for the complete tomographic reconstruction of multipartite entangled systems with the minimum number of measurements. We produce entangled photon pairs by means of spontaneous parametric down-conversion (SPDC) that we then measure in full sets of \(d+1\) MUBs for OAM, for dimensions ranging from \(d=2\) to \(5\). The states belonging to the MUBs are defined as superpositions of Laguerre-Gaussian (LG) modes.
\begin{figure}[t]
\includegraphics[width=0.7\linewidth]{Subsystems}
\caption{\label{BipartiteSystem}Illustration of the state spaces of a bipartite system, where the system has dimension \(D\) and each subsystem \(d\). Adamson and Steinberg \cite{Adamson:2010} performed measurements in the Hilbert space \(H_D\) of the composite system, while we perform joint local measurements in the spaces \(H_d\) of the individual subsystems (for \(d=2\) to \(5\)).}
\end{figure}
{\it Theory:} Consider two operators in a \(d\)-dimensional Hilbert space with orthonormal spectral decompositions. These operators, and their basis states, are said to be mutually unbiased \cite{Ivanovic:1981, Wootters:1989} if
\begin{equation}
\abs{\braket{\psi_{m,i}}{\psi_{n,j}}}^2 =
\begin{cases}
1/d & \text{for } m\neq n \\
\delta_{ij} & \text{for } m=n
\end{cases}
\end{equation}
for all \(i\) and \(j\). The indices \(i\) and \(j\) correspond to the basis states, and \(m\) and \(n\) indicate any two bases. Operators that are quantum-mechanical observables are sometimes called mutually complementary, or maximally noncommutative \cite{Schwinger:1960}. This is because, given any eigenstate of one, the eigenvalue resulting from a measurement of the other is completely undetermined. In other words, the state of a system described in one MUB provides no information about the state in another. It is known that the number of MUBs in dimension \(d\) cannot exceed \(d+1\) \cite{Wootters:1989, Durt:2010a}, and it is exactly \(d+1\) if \(d\) is prime or a prime power \cite{Wootters:1989, Klappenecker:2004}.
The simplest set of mutually unbiased observables can be found in dimension \(d=2\). For example, in the two-dimensional Hilbert space of polarization, the bases of horizontal/vertical, diagonal/anti-diagonal and left/right circular polarizations provide a set of three MUBs. Two states belonging to the same basis are orthonormal, while the square of the inner product of any two states belonging to different bases is always \(1/2\). Equivalent mutually unbiased states can be implemented using other two-dimensional state spaces, e.g. a subspace of OAM.
In our work, we choose to investigate the OAM degree of freedom of single photons. A general single-photon state in a \(d\)-dimensional subspace can be described by an orthonormal basis set of OAM modes \(\ket{\ell}\) as \(\ket{\psi} = \sum_{\brc{\ell}} c_\ell \ket{\ell}\).
The complex coefficients \(c_\ell\) are subject to the normalization condition \(\sum \abs{c_\ell}^2 = 1\). Defining MUBs in a general \(d\)-dimensional space is a difficult problem \cite{Brierley:2010a}; however, for a number of low-dimensional cases, it is possible to find complete sets of MUBs using simple procedures \cite{Brierley:2010}. For these cases, which include the dimensions 2 to 5, the states \(\brc{\ket{\ell}}\) can be chosen to be one of the MUBs. The states belonging to the remaining \(d\) MUBs are found to be superpositions of the basis states with coefficients of equal magnitude \(\abs{c_\ell}=1/\sqrt{d}\) but differing phases.
In general, it is possible for a system to include more than one particle. If one considers a \(d\)-dimensional state space for each particle, the dimension \(D\) of a system of \(N\) particles will be \(D=d^N\). Such a system will be unambiguously specified by its density matrix \(\rho\), a positive-semidefinite unit-trace Hermitian operator that includes \(d^{2N}-1\) independent real parameters (\(d^4-1\) for a bipartite system).
MUBs play an important role in quantum state tomography (QST) \cite{Wootters:1989, Klimov:2008}, the process of determining the density matrix of an unknown quantum system \cite{James:2001, Langford:2004, Altepeter:2005}. One approach to tomography is to perform measurements in the MUBs of the \(D\)-dimensional state space of the composite system \cite{Wootters:1989}. However, such measurements are very challenging as they require the definition of MUBs for Hilbert spaces of very high dimension and can require the implementation of entangled observables \cite{Adamson:2010}. Our approach is simpler as we use the MUBs of the state spaces of the single particles.
Let us consider for simplicity a bipartite system. An overcomplete set of measurements for the reconstruction of the \(D\)-dimensional system is provided by the pairwise combinations of all single-particle MUB states. The total number of independent measurements for this approach is equal to \((d(d+1))^2\), which is always greater than \(d^{4}-1\). We propose another suitable set of measurements, given by pairwise combinations of states from an appropriate subset of the overcomplete set. This subset contains all states in one MUB and all but one state in each of the remaining \(d\) MUBs. It can be shown that the conditions for the completeness of a set of tomographic measurements \cite{Altepeter:2005} are satisfied by this reconstruction strategy (see supplemental material).
This approach gives exactly the \(d^4\) independent measurements that can then be used for a tomographically complete reconstruction of the \(D\)-dimensional system. The number of measurements in our method scales favourably with the dimension of the system if compared with other methods (see supplemental material).
{\it Experimental methods:} A \(\SI{3}{mm}\)-thick \(\beta\)-barium borate (BBO) non-linear crystal cut for type-I collinear SPDC is pumped by a collimated \(\SI{1}{W}\) UV laser to produce frequency-degenerate entangled photon pairs at \(\SI{710}{nm}\). The co-propagating signal and idler photons are separated by a non-polarizing beam splitter and redirected to spatial light modulators (SLMs), onto which the output face of the crystal is imaged by a \(2\times\) telescope. In order for the crystal to produce two-photon states entangled over a wider range of OAM modes, we tune the phase-matching conditions of the BBO crystal to increase the OAM spectrum of the down-converted state \cite{Romero:2012b}. The SLMs act as reconfigurable computer-generated holograms (CGHs) that allow us to measure any arbitrary superposition of OAM modes. The SLMs are used to modulate the phase and introduce a spatially dependent attenuation to discard light into the zero diffraction order, allowing the manipulation of the complex amplitude of the incoming light \cite{Arrizon:2007, Davis:1999, Gruneisen:2008}.
We pump the crystal with a plane phase front. In order to observe correlations in all bases (instead of anti-correlations), the hologram displayed in one of the two detection arms is phase-conjugate with respect to the other \cite{Mair:2001}. The projected Gaussian mode is then imaged onto a single-mode fibre (SMFs) that is coupled to a single-photon photodiode detector. The detectors' outputs are routed to coincidence-counting electronics with a timing window of \(\SI{10}{ns}\). Narrow-band, \(\SI{10}{nm}\) interference filters are placed in front of the detectors to ensure that the frequency spread of the detected down-converted fields is small compared to the central frequencies.
\begin{figure}[t]
\includegraphics[width=\linewidth]{MU3Modes}
\caption{\label{MU3Modes}Mutually unbiased modes \(i\) for each of the 4 bases \(m\) in \(d=3\). The greyscale images represent the intensity; the colour images represent the phase. The first basis, \(m=1\), corresponds to Laguerre-Gaussian modes with OAM ranging from \(\ell = -1\) to \(+1\).}
\end{figure}
The combination of the two SLMs, single-mode fibres and coincidence-counting electronics allows us to perform projective measurements on the entangled state of photons \(A\) and \(B\) described by the operators
\begin{equation}
\Pi_{m, i; n, j} = \ket{\psi_{m,i}}_A\ket{\psi_{n,j}}_B^\ast \bra{\psi_{m,i}}_A\bra{\psi_{n,j}}_B^\ast.
\end{equation}
Here, the single-photon states \(\ket{\psi}_A\) and \(\ket{\psi}_B\) belong to MUBs in \(d\) dimensions and are given by
\begin{equation}
\ket{\psi_{m,i}} = \sum_{\brc{\ell}} c_{m,i,\ell} \, \ket{\ell},
\end{equation}
where \(c_{m,i,\ell}\) is a complex coefficient. The indices \(m\) and \(n\), which correspond to the basis indices, range from \(1\) to \(d+1\); the indices \(i\) and \(j\), which represent a state within a basis, range from \(1\) to \(d\). For each dimension \(d\), we choose one set of OAM states \(\brc{\ket{\ell}}\).
The OAM values used are \(\brc{\ell} = \brc{-2,\,+2}\) for \(d=2\), \(\brc{-2, \, -1,\, +1,\, +2}\) for \(d=4\), and \(\brc{-\lfloor d/2 \rfloor, \, \dots, \, +\lfloor d/2 \rfloor}\) for \(d=3\) and \(5\).
For each \(d\), we take the basis corresponding to \(m=1\) to be the orthonormal basis given above; the remaining bases are composed of superpositions of the \(m=1\) states with appropriate complex coefficients; see Fig.~\ref{MU3Modes}. For the dimensions considered, the magnitude of these complex coefficients is \(1/\sqrt{d}\) for all \(i\) and \(\ell\).
To determine the phase terms \(c_{m,i,\ell}\) that define the MUBs (for \(m = 2\) to \(d+1\)), we use the methods outlined by Refs.~\cite{Brierley:2010, Brierley:2009a}. The coefficients are given by the mutually unbiased vectors derived from \(d \times d\) dephased Hadamard matrices. These matrices are unique for \(d=2, 3,\,4\) and \(5\). For \(d=2\), the MUBs obtained are the familiar set of bases that one usually associates with polarization states. Consequently, the two-dimensional MUBs for OAM \cite{Padgett:1999} are the analogue of those for polarization \cite{Bruss:1998}. All the modes used for \(d=3\) are shown in Fig.~\ref{MU3Modes}.
\begin{figure}[t]
\includegraphics[width=0.49\linewidth]{Correlations2}
\includegraphics[width=0.49\linewidth]{Correlations3} \\[5pt]
\includegraphics[width=0.49\linewidth]{Correlations4}
\includegraphics[width=0.49\linewidth]{Correlations5}
\caption{\label{CorrelationsMatrices}Joint probabilities of detecting photon A in state \(\ket{\psi_{m, i}}_A\) and photon B in state \(\ket{\psi_{n, j}}_B\). The results are normalized such that the sum of the joint detection probabilities for measurements in any two bases \(m\) and \(n\) are unity. Therefore, the probabilities represented by the leading diagonal are expected to be \(1/d\), and all probabilities for \(m \neq n\) are expected to be \(1/d^2\). We also display the quantum contrast QC, which is given by the ratio of the measured coincidence rate to that of the expected accidental coincidences. The arrows indicate the rows and columns of measurements not required for the complete tomographic reconstruction of the density matrix.}
\end{figure}
An overcomplete set of measurements is obtained by scanning through all possible values of \(m\) and \(i\), for photon \(A\), and \(n\) and \(j\), for photon \(B\). For every combination of \(m, n, i\) and \(j\), we record the coincidence counts and both the single channel counts resulting from the projective measurement. From this set of data we extract the tomographically complete set of measurements previously described. These count rates are converted to detection probabilities through the following relationship:
\begin{equation}
p_k = \frac{d^2}{\sum{C_k}} \frac{C_k - U_k}{U_k},
\end{equation}
where the index \(k\) corresponds to a unique choice of measurement settings \(m, n, i\) and \(j\), \(C_k\) is the coincidence count rate and \(U_k\) is the anticipated uncorrelated coincidence rate, which is estimated by taking the product of the single-channel count rates and the gate time (Fig.~\ref{CorrelationsMatrices}). The normalization approach that we take accounts for different hologram efficiencies for different modes (see supplemental material).
The task of the fitting procedure is to find the optimal density matrix \(\rho\) of the \(D\)-dimensional system that best reproduces the experimental results. The parameters of the density matrix are established through numerical minimization of the Pearson's cumulative test statistic \cite{Opatrny:1997, Banaszek:1999}
\begin{equation}
\chi^2 = \sum_{k=1}^{d^{4}} \frac{(p_k-p_k^\prime)^2}{p_k^\prime},
\end{equation}
where \( p_k \) are the probabilities from the experiment, and \(p_k^\prime = \operatorname{Tr}[\rho \, \Pi_k]\) are those predicted from the reconstructed density matrix.
The reconstructed density matrices for dimensions \(2, 3, 4\) and \(5\) are shown in Fig.~\ref{ReconstructedMatrices}. For each reconstructed density matrix \(\rho\), we calculate the linear entropy \(S=1-\operatorname{Tr}(\rho^2)\) and the fidelity \(F=\operatorname{Tr}[\sqrt{\sqrt{\sigma}\rho\sqrt{\sigma}}]^2\), where \(\sigma \) is the \(D\)-dimensional maximally entangled density matrix associated with arbitrarily large spiral bandwidth \cite{Romero:2012b} and perfect detection. The uncertainties were calculated by repeating the reconstruction process for statistically equivalent copies of the original experimental data sets, each obtained by adding Poissonian fluctuations to the measured counts.
\begin{figure}[t]
\includegraphics[width=0.49\linewidth]{Rho2MUBs}
\includegraphics[width=0.49\linewidth]{Rho3MUBs} \\
\includegraphics[width=0.49\linewidth]{Rho4MUBs}
\includegraphics[width=0.49\linewidth]{Rho5MUBs}
\caption{\label{ReconstructedMatrices}Results of tomographic reconstructions using a complete set of single-photon mutually unbiased bases measurements. The real parts of the reconstructed density matrices \(\rho\) are shown. Imaginary parts are less than \(0.076\) for \(d=2\), \(0.059\) for \(d=3\), and \(0.050\) for \(d=4\) and \(5\). Also shown are the linear entropy \(S\) and fidelity \(F\) for the reconstructed density matrices. Insets: real parts of the theoretical density matrices for the maximally entangled states.}
\end{figure}
The reconstructed density matrices have low entropies, indicating pure states, and very high fidelities with respect to the maximally entangled state. Due to the finite spiral bandwidth of our generated state \cite{Torres:2003a, Romero:2012b} and limitations in our measurement system, one would anticipate the fidelities to decrease and the entropies to increase as the dimension increases. Indeed, we observe this characteristic in our results.
For comparison, we also implemented the approach described in \cite{Agnew:2011}. We find comparable entropies and fidelities whichever approach is used (see supplemental material). However, our method requires significantly fewer measurements. For example, for \(d=5\), the number of measurements required is \(d^4=625\) compared to \(2025\) for the procedure outlined in Ref.~\cite{Agnew:2011}. Both methods rely on projective measurements in appropriate superpositions of the basis states in the dimension of choice. Neither is more experimentally demanding, as they can both be performed using the same setup and only differ in the choice of projection states.
The MUBs reconstruction method is applied here to almost maximally entangled states. The density matrices of maximally entangled states have low rank, \(r<D\), and could thus be efficiently reconstructed through compressed sensing \cite{Gross:2010, Liu:2012}. In the general case, however, a complete quantum state reconstruction by means of appropriately selected projection operators may be more appropriate and produce results with higher fidelity.
{\it Conclusions:} In this work, we have demonstrated single-photon measurements for MUBs in the OAM degree of freedom and shown how these measurements can be used for efficient quantum state reconstruction. The procedure of measuring combinations of all single-photon states in one basis and all but one state in the remaining bases gives a minimal complete set of tomographic measurements. This experimental method can be readily applied to multi-level multi-partite systems.
The OAM degree of freedom is becoming an important resource for quantum information science. Therefore, the ability to measure states in MUBs is an important step for quantum protocols implemented in this degree of freedom. Measuring MUBs in high-dimensional spaces is not just of practical importance for QKD protocols, but it can also provide important insight into the nature of information in physical systems.
{\it Acknowledgements:} We thank Adetunmise Dada and Daniel Tasca for useful discussions. This work was supported by EPSRC. MJP thanks the Royal Society.
\appendix
\section{Supplemental material}
\subsection{Normalization of probabilities}
After recording the coincidence count rates \(C_k\) for each choice of \(n, i, m\) and \(j\), and the single-channel count rates \(A_k\) and \(B_k\), we convert the count rates to detection probabilities through
\begin{equation}
\label{ExpProbabilities}
p_k = \Upsilon \paren{\frac{C_k - A_k B_k \Delta t}{A_k B_k \Delta t}},
\end{equation}
where \(\Delta t\) is the gate time of our coincidence-counting electronics and \(\Upsilon\) an appropriate normalization factor. The term \(A_k B_k \Delta t\) corresponds to the uncorrelated accidental count rate \(U_k\).
The normalization factor
\begin{equation}
\Upsilon = Q/\sum_{k=1}^{d^2 Q} C_k
\end{equation}
depends on the type of tomographic reconstruction performed. The factor \(Q\) indicates the number of \(d \times d\) quadrants in the correlations matrix for the set of measurements of choice. The product \(d^2 Q\) corresponds to the total number of independent measurements. For an overcomplete tomography, where we set \(\sum_k^{d^2} p_k = 1\) for any given choice of \(m\) and \(n\), \(Q=(d+1)^2\); see Fig.~\ref{MeasMatrixOC}. For a tomographically complete reconstruction that uses the presented subset of MUBs measurements, \(Q=\brck{1+(d-1)}^2=d^2\); see Fig.~\ref{MeasMatrixC}.
\begin{figure}[t]
\subfigure[Overcomplete QST]{\label{MeasMatrixOC}\includegraphics[width=0.54\linewidth]{MeasMatrixOC}}
\subfigure[Complete QST]{\label{MeasMatrixC}\includegraphics[width=0.44\linewidth]{MeasMatrixC}}
\caption{Sets of measurements for \subref{MeasMatrixOC} overcomplete state tomography and \subref{MeasMatrixC} complete state tomography, for \(d=3\) (\(D=3^2\)). The shaded areas indicate quadrants of size \(d \times d\), for each of which we expect \(\sum_k p_k=1\).}
\end{figure}
\subsection{Completeness of tomographic reconstruction}
One can express the density matrix \(\rho\) as a linear combination of a complete basis of \(d^2 \times d^2\) matrices \(\Gamma_\mu\) with complex coefficients \(\gamma_\mu\) \cite{Thew:2002a}:
\begin{equation}
\rho = \frac{\Gamma_0}{D}+\sum_{\mu=1}^{D^2-1} \gamma_\mu \, \Gamma_\mu,
\end{equation}
where \(D=d^2\) is the dimension of our bipartite system.
The basis matrices \(\Gamma_\mu\) have the following properties:
\begin{subequations}
\begin{equation}
\operatorname{Tr}(\Gamma_\mu \cdot \Gamma_\nu) = \delta_{\mu,\nu}
\end{equation}
\begin{equation}
\kappa = \sum_\mu \Gamma_\mu \operatorname{Tr}(\Gamma_\mu \cdot \kappa),
\end{equation}
\end{subequations}
where \(\kappa\) is any \(d^2 \times d^2\) matrix. A suitable set of Hermitian matrices \(\Gamma_\mu\) for the decomposition of \(\rho\) is given by the generalized Gell-Mann matrices for dimension \(D\).
A necessary and sufficient condition for the completeness of the set of tomographic states \(\brc{\ket{\psi_\mu}}\) (associated with the two-qudit observables \(\Pi_\mu\)) is given by the invertibility of the matrix
\begin{equation}
\label{MatrixB}
B_{\mu \nu} = \bra{\psi_\mu} \Gamma_\nu \ket{\psi_\mu}
\end{equation}
which allows us to express the complex coefficients \(\gamma_\mu\) in terms of probabilities \(p_\mu=\bra{\psi_\mu}\rho\ket{\psi_\mu}\) \cite{Altepeter:2005}:
\begin{equation}
\gamma_\mu = d^2 \sum_{\nu=1}^{d^4} (B^{-1})_{\mu\nu} \, p_\nu.
\end{equation}
Let us define the orthonormal set of basis vectors \(u_i\) in dimension \(d\), whose elements are given by \((u_i)_j = \delta_{ij}\). For a choice of two single-particle MUBs vectors for the qu\(d\)it subsystems \(A\) and \(B\)
\begin{subequations}
\begin{equation}
u_A = (a_1, \, \dots \, a_d)
\end{equation}
\begin{equation}
u_B = (b_1, \, \dots \, b_d),
\end{equation}
\end{subequations}
we can express the elements \(j=1, \, \dots \, D\) of the corresponding vector for the \(D\)-dimensional state space of the bipartite system as
\begin{equation}
(v_{AB})_j = a_\alpha b_\beta,
\end{equation}
where \((\alpha, \, \beta)\) are all pairwise permutations of indices \(\brc{1, \, \dots \, d}\). From the \(D\)-dimensional vectors \(v_{AB}\) we then define the states \(\ket{\psi_\mu}\) that describe the measurements on the composite system.
After calculating the states \(\ket{\psi_\mu}\) for the subset of MUBs measurements for complete tomography defined previously, we find the invertible matrix \(B\) through Eq.~\eqref{MatrixB}.
\begin{figure}[t]
\includegraphics[width=0.55\linewidth]{MU2Modes}
\caption{\label{MU2Modes}Mutually unbiased modes for \(d=2\) in the \(\abs{\ell}=2\) OAM subspace. The brightness of the image corresponds to the intensity of the modes; the colour represents phase.}
\end{figure}
\subsection{Quantum state reconstruction via numerical optimization}
The number of joint measurements required to perform our reconstruction procedure is given by:
\begin{equation}
M_\text{MUBs} = d^2 + 2d^2(d-1)+d^2(d-1)^2 = d^4,
\end{equation}
which corresponds to the minimum number of parameters required to perform a complete quantum state tomography. The number of measurements required by the overcomplete quantum state reconstruction strategy outlined in Ref.~\cite{Agnew:2011} requires instead the following total number of measurements:
\begin{equation}
M_\text{QST} = \brck{4 \binom{d}{2}+d}^2.
\end{equation}
The numerical optimization to find the density matrix \(\rho\) that provides the best fit to the experimental probabilities from Eq.~\eqref{ExpProbabilities} is carried out by performing a random search over the parameter space of a complex lower-triangular matrix \(T\) \cite{Altepeter:2005}, from which a physical guessed density matrix is derived:
\begin{equation}
\rho^\prime = T^\dag T/\operatorname{Tr}(T^\dag T).
\end{equation}
We reconstructed the states from \(d=2\) to \(5\) using both methods. A quantitative comparison of the results is shown in Tab.~\ref{ReconstructionResults}.
\begin{figure}[t]
\includegraphics[width=\linewidth]{MU5Modes}
\caption{\label{MU5Modes}Mutually unbiased modes \(i\) for each of the 6 bases \(m\) in \(d=5\). The greyscale images represent the intensity; the colour images represent the phase. The first basis, \(m=1\), corresponds to Laguerre-Gaussian modes with OAM ranging from \(\ell = -2\) to \(+2\).}
\end{figure}
\begin{table}
\caption{\label{ReconstructionResults}Linear entropy \(S\) and fidelity \(F\) (with respect to a maximally entangled density matrix) for density matrices reconstructed from overcomplete quantum state tomography (QST) and measurements in mutually unbiased bases (MUBs). \(M\) represents the number of measurements needed for the indicated reconstruction method.}
\begin{ruledtabular}
\begin{tabular}{ccccc}
& Method & \(M\) & \(S\) & \(F\) \\ \hline
\multirow{2}{*}{\(d=2\)}
& MUBs & 16 & \(0.025 \pm 0.008\) & \(0.979 \pm 0.004\) \\
& QST & 36 & \(0.070 \pm 0.007\) & \(0.958 \pm 0.004\) \\
\hline
\multirow{2}{*}{\(d=3\)}
& MUBs & 81 & \(0.178 \pm 0.003\) & \(0.886 \pm 0.002\) \\
& QST & 225 & \(0.179 \pm 0.005\) & \(0.893 \pm 0.003\) \\
\hline
\multirow{2}{*}{\(d=4\)}
& MUBs & 256 & \(0.234 \pm 0.005\) & \(0.853 \pm 0.003\) \\
& QST & 784 & \(0.281 \pm 0.009\) & \(0.818 \pm 0.006\) \\
\hline
\multirow{2}{*}{\(d=5\)}
& MUBs & 625 & \(0.324 \pm 0.003\) & \(0.793 \pm 0.002\) \\
& QST & 2025 & \(0.364 \pm 0.008\) & \(0.764 \pm 0.006\)
\end{tabular}
\end{ruledtabular}
\end{table}
\subsection{Mutually unbiased vectors}
The complete sets of mutually unbiased vectors used in the quantum state tomography, for dimensions from \(d=2\) to \(5\), are reported in this section. For each vector \(v_{mi}\), \(m\) indicates the basis among the \(d+1\) available in dimension \(d\) and \(i\) the vector within the basis. Each vector provides the corresponding set of complex coefficients for the superposition of the basis modes of choice; see Fig.~\ref{MU2Modes} for \(d=2\) and Fig.~\ref{MU5Modes} for \(d=5\).
The experimental procedure and the reconstruction technique can be readily extended to higher dimensions. The existence of full sets of \(d+1\) MUBs has however only been proven for dimensions \(d\) that are prime numbers or powers of a prime. Finding MUBs in higher prime power dimensions, especially sets that may be suitable for practical implementations, remains challenging. It should also be noted that, despite MUBs being particularly advantageous to efficiently reconstruct the density matrix of an unknown state encoded in the spatial modes of a single photon, as \(d\) increases the complicated structures of the modes involved may negatively affect the detection efficiency.
\subsubsection{Coefficients for \(d=2\)}
\begin{ruledtabular}
\begin{tabular}{CCCC}
m & i & c_1 & c_2 \\ \hline
\multirow{2}{*}{1} & 1 & 1 & 0 \\
& 2 & 0 & 1 \\ \hline
\multirow{2}{*}{2} & 1 & \nicefrac{1}{\sqrt{2}} & \nicefrac{1}{\sqrt{2}} \\
& 2 & \nicefrac{1}{\sqrt{2}} & \nicefrac{-1}{\sqrt{2}} \\ \hline
\multirow{2}{*}{3} & 1 & \nicefrac{1}{\sqrt{2}} & \nicefrac{i}{\sqrt{2}} \\
& 2 & \nicefrac{1}{\sqrt{2}} & \nicefrac{-i}{\sqrt{2}}
\end{tabular}
\end{ruledtabular}
\subsubsection{Coefficients for \(d=3\)}
\begin{ruledtabular}
\begin{tabular}{CCCCC}
m & i & c_1 & c_2 & c_3 \\ \hline
\multirow{3}{*}{1} & 1 & 1 & 0 & 0 \\
& 2 & 0 & 1 & 0 \\
& 3 & 0 & 0 & 1 \\ \hline
\multirow{3}{*}{2} & 1 & \nicefrac{1}{\sqrt{3}} & \nicefrac{1}{\sqrt{3}} & \nicefrac{1}{\sqrt{3}} \\
& 2 & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{2 i \pi }{3}}}{\sqrt{3}} & \nicefrac{e^{\nicefrac{-2 i \pi }{3}}}{\sqrt{3}} \\
& 3 & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{-2 i \pi }{3}}}{\sqrt{3}} & \nicefrac{e^{\nicefrac{2 i \pi }{3}}}{\sqrt{3}} \\ \hline
\multirow{3}{*}{3} & 1 & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{2 i \pi }{3}}}{\sqrt{3}} & \nicefrac{e^{\nicefrac{2 i \pi }{3}}}{\sqrt{3}} \\
& 2 & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{-2 i \pi }{3}}}{\sqrt{3}} & \nicefrac{1}{\sqrt{3}} \\
& 3 & \nicefrac{1}{\sqrt{3}} & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{-2 i \pi }{3}}}{\sqrt{3}} \\ \hline
\multirow{3}{*}{4} & 1 & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{-2 i \pi }{3}}}{\sqrt{3}} & \nicefrac{e^{\nicefrac{-2 i \pi }{3}}}{\sqrt{3}} \\
& 2 & \nicefrac{1}{\sqrt{3}} & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{2 i \pi }{3}}}{\sqrt{3}} \\
& 3 & \nicefrac{1}{\sqrt{3}} & \nicefrac{e^{\nicefrac{2 i \pi }{3}}}{\sqrt{3}} & \nicefrac{1}{\sqrt{3}}
\end{tabular}
\end{ruledtabular}
\subsubsection{Coefficients for \(d=4\)}
\begin{ruledtabular}
\begin{tabular}{CCCCCC}
m & i & c_1 & c_2 & c_3 & c_4 \\ \hline
\multirow{4}{*}{1} & 1 & 1 & 0 & 0 & 0 \\
& 2 & 0 & 1 & 0 & 0 \\
& 3 & 0 & 0 & 1 & 0 \\
& 4 & 0 & 0 & 0 & 1 \\ \hline
\multirow{4}{*}{2} & 1 & \nicefrac{1}{2} & \nicefrac{1}{2} & \nicefrac{1}{2} & \nicefrac{1}{2} \\
& 2 & \nicefrac{1}{2} & \nicefrac{1}{2} & \nicefrac{-1}{2} & \nicefrac{-1}{2} \\
& 3 & \nicefrac{1}{2} & \nicefrac{-1}{2} & \nicefrac{-1}{2} & \nicefrac{1}{2} \\
& 4 & \nicefrac{1}{2} & \nicefrac{-1}{2} & \nicefrac{1}{2} & \nicefrac{-1}{2} \\ \hline
\multirow{4}{*}{3} & 1 & \nicefrac{1}{2} & \nicefrac{1}{2} & \nicefrac{-i}{2} & \nicefrac{i}{2} \\
& 2 & \nicefrac{1}{2} & \nicefrac{1}{2} & \nicefrac{i}{2} & \nicefrac{-i}{2} \\
& 3 & \nicefrac{1}{2} & \nicefrac{-1}{2} & \nicefrac{i}{2} & \nicefrac{i}{2} \\
& 4 & \nicefrac{1}{2} & \nicefrac{-1}{2} & \nicefrac{-i}{2} & \nicefrac{-i}{2} \\ \hline
\multirow{4}{*}{4} & 1 & \nicefrac{1}{2} & \nicefrac{i}{2} & \nicefrac{-1}{2} & \nicefrac{i}{2} \\
& 2 & \nicefrac{1}{2} & \nicefrac{-i}{2} & \nicefrac{-1}{2} & \nicefrac{-i}{2} \\
& 3 & \nicefrac{1}{2} & \nicefrac{i}{2} & \nicefrac{1}{2} & \nicefrac{-i}{2} \\
& 4 & \nicefrac{1}{2} & \nicefrac{-i}{2} & \nicefrac{1}{2} & \nicefrac{i}{2} \\ \hline
\multirow{4}{*}{5} & 1 & \nicefrac{1}{2} & \nicefrac{i}{2} & \nicefrac{i}{2} & \nicefrac{-1}{2} \\
& 2 & \nicefrac{1}{2} & \nicefrac{-i}{2} & \nicefrac{-i}{2} & \nicefrac{-1}{2} \\
& 3 & \nicefrac{1}{2} & \nicefrac{i}{2} & \nicefrac{-i}{2} & \nicefrac{1}{2} \\
& 4 & \nicefrac{1}{2} & \nicefrac{-i}{2} & \nicefrac{i}{2} & \nicefrac{1}{2}
\end{tabular}
\end{ruledtabular}
\subsubsection{Coefficients for \(d=5\)}
\begin{ruledtabular}
\begin{tabular}{CCCCCCC}
m & i & c_1 & c_2 & c_3 & c_4 & c_5 \\ \hline
\multirow{5}{*}{1} & 1 & 1 & 0 & 0 & 0 & 0 \\
& 2 & 0 & 1 & 0 & 0 & 0 \\
& 3 & 0 & 0 & 1 & 0 & 0 \\
& 4 & 0 & 0 & 0 & 1 & 0 \\
& 5 & 0 & 0 & 0 & 0 & 1 \\ \hline
\multirow{5}{*}{2} & 1 & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} \\
& 2 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} \\
& 3 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} \\
& 4 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} \\
& 5 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} \\ \hline
\multirow{5}{*}{3} & 1 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} \\
& 2 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} \\
& 3 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} \\
& 4 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} \\
& 5 & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} \\ \hline
\multirow{5}{*}{4} & 1 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} \\
 & 2 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} \\
& 3 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} \\
& 4 & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} \\
& 5 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} \\ \hline
\multirow{5}{*}{5} & 1 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} \\
& 2 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} \\
& 3 & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} \\
& 4 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} \\
& 5 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} \\ \hline
\multirow{5}{*}{6} & 1 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} \\
& 2 & \nicefrac{1}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} \\
& 3 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} \\
& 4 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{2 i \pi }{5}}}{\sqrt{5}} \\
& 5 & \nicefrac{1}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-2 i \pi }{5}}}{\sqrt{5}} & \nicefrac{e^{\nicefrac{-4 i \pi }{5}}}{\sqrt{5}} & \nicefrac{1}{\sqrt{5}}
\end{tabular}
\end{ruledtabular}
\end{document}
|
\begin{document}
\title{Demonstrating Quantum Zeno Effect on IBM Quantum Experience}
\author{Subhashish Barik}
\email{[email protected]}
\affiliation{Department of Physical Sciences,\\ Indian Institute of Science Education and Research Kolkata, Mohanpur 741246, West Bengal, India}
\author{Dhiman Kumar Kalita}
\email{[email protected]}
\affiliation{Department of Physical Sciences,\\ Indian Institute of Science Education and Research Kolkata, Mohanpur 741246, West Bengal, India}
\author{Bikash K. Behera}
\email{[email protected]}
\affiliation{Bikash's Quantum (OPC) Pvt. Ltd., Balindi, Mohanpur 741246, West Bengal, India}
\affiliation{Department of Physical Sciences,\\ Indian Institute of Science Education and Research Kolkata, Mohanpur 741246, West Bengal, India}
\author{Prasanta K. Panigrahi}
\email{[email protected]}
\affiliation{Department of Physical Sciences,\\ Indian Institute of Science Education and Research Kolkata, Mohanpur 741246, West Bengal, India}
\begin{abstract}
\textbf{Abstract:}
Quantum Zeno Effect (QZE) has been one of the most interesting phenomena in quantum mechanics ever since its discovery in 1977 by Misra and Sudarshan [J. Math. Phys. \textbf{18}, 756 (1977)]. There have been many attempts for experimental realization of the same. Here, we present the first ever simulation of QZE on IBM quantum experience platform. We simulate a two-level system for Rabi-driven oscillation and then disturb the time evolution by intermediate repetitive measurements using quantum gates to increase the survival probability of the qubit in the initial state. The circuits are designed along with the added intermediate measurements and executed on IBM quantum simulator, and the outcomes are shown to be consistent with the predictions. The increasing survival probability with the number of intermediate measurements demonstrates QZE. Furthermore, some alternative explanations for the obtained results are provided which leads to some ambiguity in giving the exact reasoning for the observed outcomes.
\end{abstract}
\begin{keywords}{Quantum Zeno effect, Deferred and implicit measurement, IBM quantum experience}\end{keywords}
\maketitle
\section{Introduction}
Quantum Zeno Effect (QZE) says that if we do repeated measurements on an unstable quantum system then we can slow down the quantum mechanical evolution of the system. This unusual effect after its discovery \cite{MSJMP1977} triggered many experimentalists to observe it practically. Many successful attempts \cite{IHBWPRA1990, NHCPRL1997, MDPRA2000, FGRPRL2001,NYKPRA2001, BHRWNTOC2002, HRBPKN2006, SMBCMKPPRL2006, SHCLCCSNC2014} have been reported so far in various experimental conditions. While the first attempt was to observe QZE in a two-level system with Rabi-driven oscillation \cite{IHBWPRA1990}, the later ones focussed on multi-level systems \cite{SHCLCCSNC2014}, superconducting qubits \cite{MSKSPRB2006,KBMNSSNJP2015} etc. However, no attempts have been reported so far to simulate this effect on a quantum computer. Here, we address this issue by simulating QZE on IBM Quantum Experience (IBM QE).
We have tried to suppress the evolution of the initial state to a final state in a two-level Rabi-driven oscillation by frequent intermediate `measurements'. In other words, we have attempted to increase the probability of finding the qubit in the initial state when a unitary operator tends to evolve it to the final state by using frequent intermediate `measurements'. To be more precise, the attempt is to increase the survival probability (probability of surviving in the ground state) of the qubit during the two-level transition. It would be apt to mention that we have used the term `measurement' here in a loose sense and its actual interpretation with relevance to this article is discussed later in Section \ref{sec iii C}. At some places, we have also used the word `disturbance' for `measurement' as the former seems more generic. On the IBM QE, we have used $U3$ gates and CNOT gates as the intermediate disturbances while the states $\ket{0}$ and $\ket{1}$ as the initial and final states of the desired two-level system respectively. From the knowledge of the Hamiltonian, H that would drive the two-level Rabi oscillation, we construct the time evolution operator, U and implement it using the $U3$ gate on IBM QE platform. To implement the frequent intermediate disturbances we change the parameters of the $U3$ gate and use them along with CNOT gates as shown in the Fig. \ref{qze_Fig3}.
\section{Theory\label{qnm_Sec2}}
The simplest non-trivial quantum mechanical example for explaining QZE would be the two-level system \cite{POSPD2014,GA2018}. Let us consider the well known Rabi oscillation for a two-level system caused by a generic Hamiltonian of the form $\hat{H}$ = $\Omega$($\ket{0}\bra{1}$ $+$ $\ket{1}\bra{0}$) where $\Omega$ is a time-independent constant. This Hamiltonian takes the qubit from state $\ket{0}$ to $\ket{1}$ and from state $\ket{1}$ to $\ket{0}$ and its matrix form looks like:
\begin{eqnarray}
\hat{H} ={\left( \begin{array}{cc} 0 & \Omega \\
\Omega & 0 \end{array} \right)}
\end{eqnarray}
Clearly then, $\hat{H} \ket{0}= \Omega \Ket{1}$ and $\hat{H} \ket{1}= \Omega \Ket{0}$. We can exponentiate this Hamiltonian to find the time evolution operator as:
\begin{eqnarray}
\hat{U}= e^{-i\hat{H}t}={\left( \begin{array}{cc} \cos(\Omega t) & -i\sin(\Omega t) \\
-i\sin(\Omega t) & \cos(\Omega t) \end{array} \right)}
\label{qze_Eq2}
\end{eqnarray}
It can be easily seen that if $\hat{U}$ acts on the initial state $\ket{0}$ then it would result in a superposition of states $\ket{0}$ and $\ket{1}$.
\begin{eqnarray}
\hat{U} \ket{0}=\cos(\Omega t) \ket{0} -i \sin(\Omega t) \ket{1}
\label{qze_Eq3}
\end{eqnarray}
Eq. \eqref{qze_Eq3} clearly shows that the survival probability, $P_s$ of staying in state $\ket{0}$ after some time `\textit{t}' is $\cos^2(\Omega t)$. We can further write:
\begin{eqnarray}
P_s =\cos^2(\Omega t) =\frac{1}{2} (1+\cos(2 \Omega t))\nonumber\\
=\frac{1}{2} \Big(1+1-\frac{(2 \Omega t)^2}{2!} + \frac{(2 \Omega t)^4}{4!} - \ldots \Big)
\end{eqnarray}
Neglecting the higher order terms by considering $t$ to be small we obtain:
\begin{eqnarray}
P_s = \frac{1}{2}\bigg (2-\frac{(2 \Omega t)^2}{2}\bigg)=1-\Omega^2 t^2
\end{eqnarray}
Now, if we divide $t$ to `$\textit{n}$' intervals and `measure' after each interval then after the final $n^{th}$ interval's `measurement' the survival probability becomes:
\begin{eqnarray}
P_s= \bigg(1- \Omega^2 \bigg(\frac{t}{n}\bigg)^2\bigg)^n
\label{qze_Eq6}
\end{eqnarray}
Considering $\frac{t}{n}$ to be very small as compared to 1 we can further write Eq. \eqref{qze_Eq6} as:
\begin{eqnarray}
P_s=1-\frac{\Omega ^2 t^2}{n}
\label{qze_Eq7}
\end{eqnarray}
Eq. \eqref{qze_Eq7} clearly shows that the survival probability increases with the number of intervals or the number of intermediate `measurements' $n$ and with increasing value of $n$ the survival probability tends towards unity.
\begin{equation}
P_s \propto n \quad \text{and} \quad \lim_{n \to \infty} P_s = 1
\end{equation}
\section{Implementation on IBM Quantum Experience \label{qnm_3}}
\subsection{Setting up the basic circuit}
When we think of implementing the above theoretical formulation on IBM QE the primary task is to prepare the desired initial state and the unitary time evolution operator. IBM QE initializes all the qubits in the state $\ket{0}$, hence the first task is done. For the second task, we use the $U3$ gate provided on IBM QE; we set the parameters $\theta$, $\phi$ and $\lambda$ as per our requirement to simulate $\hat{U}$ of Eq. \eqref{qze_Eq2}. The $U3$ gate on IBM QE has the following form:
\begin{eqnarray}
U3(\theta,\phi,\lambda)={\left( \begin{array}{cc} \cos(\theta/2) & -e^{i\lambda}\sin(\theta/2) \\
e^{i\phi}\sin(\theta/2) & e^{i(\lambda+\phi)}\cos(\theta/2) \end{array} \right)}
\end{eqnarray}
Thus, by choosing the parameters $\phi=-\pi/2, \lambda=\pi/2$ and $\theta=2\Omega t$, we make $U3$ equal to $\hat{U}$. Now, we are ready to operate $\hat{U}$ on state $\ket{0}$ i.e., to implement $U3$ on qubit $q[0]$ as shown in Fig. \ref{qze_Fig1}.
\begin{figure}
\caption{Circuit describing the operation of $U3$ on $q[0]$}
\label{qze_Fig1}
\end{figure}
The measurement gate added to the circuit after $U3$ gate in Fig. \ref{qze_Fig1} measures the qubit $q[0]$. If we choose $\theta=\pi/2$ then $q[0]$ is measured to be found in $\ket{0}$ and $\ket{1}$ with roughly equal probability of 50\% as shown in the Fig. \ref{qze_Fig2} which is in fact what we would expect theoretically from Eq. \eqref{qze_Eq3}.
\begin{figure}
\caption{Measurement outcomes of $q[0]$ after the operation of $U3$ on ibmq qasm simulator for 8192 shots, which gives precisely 50.464\% probability for state $\ket0$ and 49.536\% for state $\ket1$.}
\label{qze_Fig2}
\end{figure}
\subsection{Adding the intermediate measurements}
\label{seciiib}
In the next step, we proceed towards observing the QZE. Hence, we add the intermediate disturbances dividing the `measurement' interval to equal halves. For $n$=2, we use two $U3$ gates each having $\theta$=$\pi$/4. The values of $\phi$ and $\lambda$ are kept unchanged. Further, we add one $CNOT$ gate right after each $U3$ gate and at the end we put the measurement box in the qubit line of $q[0]$. For making the above circuit, we use the fact that the use of two $U3$ gates having $\theta=\pi/4$ is equivalent to the use of a single $U3$ gate with $\theta=\pi/2$; this concept is further explained in the Appendix \ref{Appendix A}. The resulting circuit is described in Fig. \ref{qze_Fig3} and the outcomes of the measurement of this circuit are shown in Fig. \ref{qze_Fig4}.
\begin{figure}
\caption{Quantum circuit for \textit{n}
\label{qze_Fig3}
\end{figure}
We have used here the result that operating one $U3$ gate with $\theta$=$\pi/2$ is equivalent to operating two $U3$ gates with $\theta$=$\pi/4$ and to generalize it we can say that operating one $U3$ gate with $\theta$=$\pi/m$ is equal to operating $n$ $U3$ gates sequentially with $\theta$=$(\pi/m)/n$=$\pi/mn$.
\begin{figure}
\caption{Measurement outcomes of $q[0]$ after the operation of two $U3$ gates with parameters $\theta=\pi/4$, $\phi=-\pi/2$ and $\lambda=\pi/2$ and two $CNOT$ gates on ibmq qasm simulator for 8192 shots, which gives precisely 74.927\% probability for state $\ket0$ and 25.073\% for state $\ket1$.}
\label{qze_Fig4}
\end{figure}
We can clearly see the effect of adding one intermediate measurement; the survival probability goes to roughly 75\% from 50\%. Next, we keep increasing the value of $n$ in our quantum circuit gradually and with each increment of $n$ we put one extra $U3$ gate along with one extra $CNOT$ gate by taking $\theta$=$\pi/2n$. We put the measurement gate at the end. For example, for $n$=4, we take four $U3$ gates each having $\theta$=$\pi/8$ and followed by a CNOT as shown in Fig. \ref{qze_Fig5}.
\begin{figure}
\caption{Quantum circuit for $n$=4. The circuit describes the operation of four $U3$ gates and four $CNOT$ gates on $q[0]$ where each of the $U3$ gates has the value of $\theta=\pi/8, \phi=-\pi/2$ and $\lambda=\pi/2$. The measurement gate measures only the first qubit in the computational ($\ket0$, $\ket1$) basis.}
\label{qze_Fig5}
\end{figure}
We prepare different quantum circuits for each $n$ and take measurements for $n$=2 to 14. We then plot the obtained survival probabilities against their corresponding $n$. We do the above process for five different sets of $\theta$ values; $\theta$= $\pi/2$, $\pi/3$, $\pi/4$, $\pi/5$, $\pi/6$ and then plot the curves of survival probability for each set of $\theta$ value. The resulting plot is given in Fig. \ref{qze_Fig6}.
\begin{figure}
\caption{The plot of survival probability vs number of intermediate measurements $n$. The series 1, 2, 3, 4 and 5 correspond to the $\theta$ values $\pi/2$, $\pi/3$, $\pi/4$, $\pi/5$ and $\pi/6$ respectively. The curve for a higher $\theta$ value saturates faster as its initial survival probability (for $n$=1) is higher.}
\label{qze_Fig6}
\end{figure}
It is very conspicuous from Fig. \ref{qze_Fig6} that the survival probability increases continuously with $n$ and then saturates close to 100\% for higher values of $n$. This demonstrates the QZE.
\subsection{The meaning of `measurement' here}
\label{sec iii C}
The question of what defines a `measurement' in the context of QZE has been debated vigorously in the literature \cite{IHBWPRA1990, ICS2019, HWAP1997, SWAJP1982, KSPR2005, MA2005, SRMP2005}. Different researchers have used different methods to show measurements in the context of QZE \cite{HDWCPRA2019, BPA2019}. Sometimes using the term `intermediate disturbance' seems more convenient while sometimes the term `projective measurement' makes more sense. To avoid going into the controversial interpretation of its actual meaning we would like to call it \textbf{deferred and implicit measurement} \cite{C2019}. The principle of deferred and implicit measurement says that if we leave some quantum wires untouched we can assume they are measured \cite{NCQCQI2010,CTQC2012}. Based on this principle, we can treat each intermediate $CNOT$ gate as a valid intermediate measurement and thus it is these intermediate $CNOT$ gates which causes the increase in the survival probability.
\section{Conclusion \label{qnm_Sec6}}
We have shown here that the survival probability of staying in state $\ket{0}$ increases with the number of intermediate measurements; more particularly `deferred and implicit measurements'. In other words, we have suppressed the transition of the qubit from state $\ket{0}$ to $\ket{1}$. From this point of view, the observed behaviour in Fig. \ref{qze_Fig6} seems to demonstrate QZE. However, looking at the theoretical details of the quantum circuits as done in Appendix \ref{Appendix B}, the observed behaviour just looks like the outcomes of the trivial calculations for the operation of $U3$ and $CNOT$ gates. This creates an ambiguity in whether we can call it as a valid demonstration of QZE or not. Thus, we would like to conclude here with this open question and suggest a deeper look into the matter in future work. Moreover, we propose extending this methodology of adding `deferred and implicit measurements' to higher level systems. That would require us to come up with clever ways to simulate the time evolution operator using the gates of the IBM QE library and the multi-energy levels using multiple-qubit states. One particular implementation could be of a three level system's dynamics where we would like to confine the transitions to only the lower energy levels by suppressing the transition to the highest energy level. In fact, this could be the simulation of a possible resolution to the leakage problem in superconducting quantum computing architectures \cite{GA2018}.
\section*{Acknowledgments}
\label{qlock_acknowledgments}
S.~B. and D.~K.~K. would like to thank Bikash's Quantum (OPC) Pvt. Ltd. and IISER Kolkata respectively for providing hospitality during the course of the project work. S.~B. would also like to thank Prof. I.~S. Tyagi of Indian Institute of Technology Ropar for motivating and giving him the opportunity to explore the Quantum Zeno Effect during his Master thesis. B.~K.~B. acknowledges the support of IISER-K Institute fellowship. The authors acknowledge the support of IBM Quantum Experience. The views expressed are those of the authors and do not reflect the official policy or position of IBM or the IBM Quantum Experience team.
\section*{Competing interests}
The authors declare no competing financial as well as non-financial interests.
\begin{thebibliography}{10}
\expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi
\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\providecommand{\bibinfo}[2]{#2}
\providecommand{\eprint}[2][]{\url{#2}}
\section*{References}
\bibitem{MSJMP1977}B. Misra and E.~C.~G. Sudarshan, The Zeno's paradox in quantum theory, J. Math. Phys. \textbf{18}, 756 (1977).
\bibitem{IHBWPRA1990}W.~M. Itano, D.~J. Heinzen, J.~J. Bollinger, and D.~J. Wineland, Quantum Zeno Effect, Phys. Rev. A \textbf{41}, 2295 (1990).
\bibitem{NHCPRL1997}B. Nagels, L.~J.~F. Hermans, and P.~L. Chapovsky, Quantum Zeno Effect Induced by Collisions, Phys. Rev. Lett. \textbf{79}, 3097 (1997).
\bibitem{MDPRA2000}K. Molhave, and M. Drewsen, Formation of translationally cold MgH+ and MgD+ molecules in an ion trap, Phys. Rev. A \textbf{62}, 011401(R) (2000).
\bibitem{FGRPRL2001} M.~C. Fischer, B. Gutierrez-Medina, and M.~G. Raizen, Observation of the quantum Zeno and anti-Zeno effects in an unstable system, Phys. Rev. Lett. \textbf{87}, 040402 (2001).
\bibitem{NYKPRA2001}T. Nakanishi, K. Yamane, and M. Kitano, Absorption-free optical control of spin systems: The quantum Zeno effect in optical pumping, Phys. Rev. A \textbf{65}, 013404 (2001).
\bibitem{BHRWNTOC2002}C. Balzer, T.Hannemann, D. ReiB, C. Wunderlich, W. Neuhauser, and P.~E. Toschek, A relaxationless demonstration of the Quantum Zeno paradox on an individual atom, Opt. Commun. \textbf{211}, 235 (2002).
\bibitem{HRBPKN2006}O. Hosten, M.~T. Rakher, J.~T. Barreiro, N.~A. Peters, and P.~G. Kwiat, Counterfactual quantum computation through quantum interrogation, Nature \textbf{439}, 949 (2006).
\bibitem{SMBCMKPPRL2006}E.~W. Streed, J. Mun, M. Boyd, G.~K. Campbell, P. Medley, W. Ketterle, and D.~E. Pritchard, Large atom number Bose-Einstein condensate machines, Phys. Rev. Lett. \textbf{97}, 260402 (2006).
\bibitem{SHCLCCSNC2014}F. Schafer, I. Herrera, S. Cherukattil, C. Lovecchio, F.~S. Cataliotti, F. Caruso, and A. Smerzi, Experimental realization of quantum zeno dynamics, Nat. Commun. \textbf{5}, 3194 (2014).
\bibitem{MSKSPRB2006}Y. Matsuzaki, S. Saito, K. Kakuyanagi, and K. Semba, Quantum Zeno effect with a superconducting qubit, Phys. Rev. B \textbf{82}, 180518(R) (2010).
\bibitem{KBMNSSNJP2015}K. Kakuyanagi, T. Baba, Y. Matsuzaki, H. Nakano, S. Saito, and K. Semba, Observation of quantum Zeno effect in a superconducting flux qubit, New J. Phys. \textbf{17}, 063035 (2015).
\bibitem{POSPD2014}S. Pascazio, All You Ever Wanted to Know About the Quantum Zeno Effect in 70 Minutes, Open Sys. Inf. Dyn. \textbf{21}, 1440007 (2014).
\bibitem{GA2018}A.~A. Galiautdinov, Quantum Zeno effect: A possible resolution to the leakage problem in superconducting quantum computing architectures, arXiv:1805.06877, (2018).
\bibitem{ICS2019}W.~M. Itano, The quantum Zeno paradox, 42 years on*, SPECIAL SECTION: E.~C.~G. SUDARSHAN, Current Science \textbf{116}, 2 (2019).
\bibitem{HWAP1997}D. Home, M.~A.~B. Whitaker, A Conceptual Analysis of Quantum Zeno; Paradox, Measurement, and Experiment, Annal. Phys. \textbf{258}, 237 (1997).
\bibitem{SWAJP1982}I. Singh and M.~A.~B. Whitaker, Role of the observer in quantum mechanics and the Zeno paradox, Am. J. Phys. \textbf{50}, 882 (1982).
\bibitem{KSPR2005}K. Koshino, and A. Shimizu, Quantum Zeno effect by general measurements, Phys. Rep. \textbf{412}, 191 (2005).
\bibitem{MA2005}A.~N. Mitra, Foundations Of Quantum Theory Revisited, arXiv:quant-ph/0510223, (2005).
\bibitem{SRMP2005}M. Schlosshauer, Decoherence, the measurement problem, and interpretations of quantum mechanics, Rev. Mod. Phys. \textbf{76}, 1267 (2005).
\bibitem{HDWCPRA2019}S. He, L.W. Duan, C. Wang, Q.H. Chen, Quantum Zeno Effect in a circuit-QED system, Phys. Rev. A, \textbf{99}, 052101, (2019)
\bibitem{BPA2019}S. Belan, V. Parfenyev, Optimal Measurement Protocols in Quantum Zeno Effect, arXiv:1909.03226 [cond-mat.stat-mech],
(2019)
\bibitem{C2019}C. Calcluth, \url{https://phys.cam/2019/07/quantum-zeno-effect/}, (2019).
\bibitem{NCQCQI2010}M. Nielsen, and I.~L. Chuang, ``4.4 Measurement". Quantum Computation and Quantum Information: 10th Anniversary Edition, Cambridge University Press, 186, (2010).
\bibitem{CTQC2012}O.~A. Cross, ``5.2.2 Deferred Measurement" Topics in Quantum Computing, 348, (2012).
\end{thebibliography}
\appendix
\section{The reasoning using which we decompose a single $U3$ to multiple $U3$.}
\label{Appendix A}
In Section \ref{seciiib}, we have used the result that operating one $U3$ gate with $\theta=\pi/2$ is equivalent to operating two $U3$ gates with $\theta=\pi/4$ and to generalize it we can say that operating one $U3$ gate with $\theta=\pi/m$ is equal to operating $n$ $U3$ gates sequentially with $\theta=(\pi/m)/n=\pi/mn$.
To give specific examples, we start with $m$=2 and $n$=8. This particularly refers to using 8 \textit{U3} gates sequentially, each having $\theta=\pi/16$. The circuit and its measurement outcomes are shown in Fig. \ref{qze_Fig7}. It can be seen that the survival probability is found to be 50.122\% which is in close agreement with the result obtained in Fig. \ref{qze_Fig2} i.e., for a single \textit{U3} gate with $\theta=\pi/2$.
\begin{figure}
\caption{Plot showing the outcomes of measurement for eight \textit{U3}
\label{qze_Fig7}
\end{figure}
Next, we take the example for $m$=5 and $n$=14. This particularly refers to using 14 \textit{U3} gates sequentially, each having $\theta=\pi/70$. The circuit and its measurement outcomes are shown in Fig. \ref{qze_Fig9}. It can be seen that the survival probability is found to be 90.381\% which is in close agreement with the result obtained in Fig. \ref{qze_Fig8} i.e., for a single \textit{U3} gate with $\theta=\pi/5$ which gives the survival probability as 90.485\%. In this way we can verify this for all different values of $m$ and $n$ on IBM QE.
\begin{figure}
\caption{Plot showing the outcomes of measurement for one \textit{U3}
\label{qze_Fig8}
\end{figure}
\begin{figure}
\caption{Plot showing the outcomes of measurement for fourteen \textit{U3}
\label{qze_Fig9}
\end{figure}
\section{The theory behind the operation of intermediate $U3$ and $CNOT$ gates}
\label{Appendix B}
From Eq. \eqref{qze_Eq3} we can find the form of $U3$ matrix for $\phi=-\pi/2$ and $\lambda=\pi/2$ to be:
\begin{eqnarray}
U3(\theta,-\pi/2,\pi/2)={\left( \begin{array}{cc} \cos(\theta/2) & -i\sin(\theta/2) \\
-i\sin(\theta/2) & \cos(\theta/2) \end{array} \right)}
\label{qze_EqB1}
\end{eqnarray}
From Eq. \eqref{qze_EqB1} we can infer that:
\begin{eqnarray}
U3\ket{0}= cos(\theta/2)\ket{0}-isin(\theta/2)\ket{1}\\
U3\ket{1}= cos(\theta/2)\ket{1}-isin(\theta/2)\ket{0}
\end{eqnarray}
For some simplicity let us write,
\begin{eqnarray}
U3\ket{0}= \alpha\ket{0}+\beta\ket{1}\\
U3\ket{1}= \beta\ket{0}+\alpha\ket{1}
\end{eqnarray}
That is to say, we have:
\begin{eqnarray}
\alpha=\cos(\theta/2)\\
\beta=-i\sin(\theta/2)
\end{eqnarray}
with $|\alpha|^2+|\beta|^2=1$. Using this formalism, for Fig. \ref{qze_Fig3} i.e., for quantum circuit with two $U3$ gates, we can analytically obtain the result for the survival probability of obtaining state $\ket{0}$ as explained below:
\textbf{Step 1}: $U3$ operates on $\ket{0}$
\begin{eqnarray}
\ket{\psi_1}=U3\Ket{0}=\alpha\ket{0}+\beta\ket{1}
\end{eqnarray}
\textbf{Step 2}: $CNOT$ acts on $\ket{\psi_1}\otimes\ket{0}$
\begin{eqnarray}
\ket{\psi_2}&=&CNOT(\alpha\ket{0}+\beta\ket{1})\otimes\Ket{0}\nonumber\\
&=&CNOT(\alpha\ket{00}+\beta\ket{10})\nonumber\\
&=&\alpha\ket{00}+\beta\ket{11}
\end{eqnarray}
\textbf{Step 3}: $U3$ operates on $\ket{\psi_2}$
\begin{eqnarray}
\ket{\psi_3}&=&U3(\alpha\ket{00}+\beta\ket{11})\nonumber\\
&=&\alpha(U3\ket{0})\otimes\ket{0}+\beta(U3\ket{1})\otimes\ket{1}\nonumber\\
&=&\alpha(\alpha\ket{0}+\beta\ket{1})\otimes\ket{0}+\beta(\alpha\ket{1}+\beta\ket{0})\otimes\ket{1}\nonumber\\
&=&\alpha^2\ket{00}+\alpha\beta\ket{10}+\alpha\beta\ket{11}+\beta^2\ket{01}
\end{eqnarray}
\textbf{Step 4}: $CNOT$ acts on $\ket{\psi_3}\otimes\ket{0}$
\begin{eqnarray}
\Ket{\psi_4}&=&CNOT(\ket{\psi_3}\otimes\ket{0})\nonumber
\\
\ket{\psi_4}&=&CNOT(\alpha^2\ket{00}+\alpha\beta\ket{10}+\alpha\beta\ket{11}\nonumber\\
&&+ \beta^2\ket{01})\otimes\ket{0}\nonumber\\
&=&CNOT(\alpha^2\ket{000}+\alpha\beta\ket{100}+\alpha\beta\ket{110}\nonumber\\&&
+\beta^2\ket{010})\nonumber\\
&=&\alpha^2\ket{000}+\alpha\beta\ket{101}+\alpha\beta\ket{111}\nonumber\\
&&+\beta^2\ket{010}
\end{eqnarray}
\textbf{Step 5}: Measurement on first qubit of $\ket{\psi_4}$
This gives the probability of getting $\ket{0}$ for the first qubit as:
\begin{eqnarray}
P_s=|\alpha^2|^2+|\beta^2|^2= \alpha^4+\beta^4
\end{eqnarray}
For Fig. \ref{qze_Fig3}, where each $U3$ gate has $\theta=\pi/4$, this result comes out to be:
$P_s=\cos^4(\theta/2)+\sin^4(\theta/2)=\cos^4(\pi/8)+\sin^4(\pi/8)$=0.7500 theoretically and 0.7475 from IBM QE (as plotted in Fig. \ref{qze_Fig6}). A similar treatment for three $U3$ gates of $\theta=\pi/6$ yields the probability of obtaining state $\ket{0}$ to be $|\alpha|^6 +3|\alpha|^2|\beta|^4=\cos^6(\pi/12)+3\cos^2(\pi/12)\sin^4(\pi/12)$. This result comes out to be 0.8247 theoretically and 0.8223 from IBM QE (as plotted in Fig. \ref{qze_Fig6}). So, we can see a close agreement between the theoretically predicted outcomes and the ones given by IBM QE. We can extend this argument to all other values of $\theta$ and $n$ as well.
\end{document}
|
\begin{document}
\title{Harmonic G-structures}
\begin{abstract}{\indent}
For closed and connected subgroups $G$ of $\Lie{SO}(n)$, we study the
energy functional on the space of $G$-structures of a (compact)
Riemannian manifold $(M,\langle \cdot, \cdot \rangle),$ where
$G$-structures are considered as sections of the quotient bundle
${\mathcal S\mathcal O}(M)/G.$ Then, we deduce the corresponding
first and second variation formulae and the characterising
conditions for critical points by means of tools closely related
with the study of $G$-structures. In this direction, we show the
r\^{o}le in the energy functional played by the intrinsic torsion
of the $G$-structure. Moreover, we analyse the particular case
$G=\Lie{U}(n)$ for even-dimensional manifolds. This leads to the
study of harmonic almost Hermitian manifolds and harmonic maps
from $M$ into ${\mathcal S\mathcal O}(M)/U(n).$
\noindent {\footnotesize \emph{Keywords and phrases:} $G$-structure,
intrinsic torsion, minimal connection, almost
Hermitian manifold, harmonic $G$-structure, harmonic almost
Hermitian structure, harmonic map }
\noindent {\footnotesize \emph{2000 MSC}: 53C10, 53C15, 53C25 }
\end{abstract}
\tableofcontents
\section{Introduction}{\indent}
The energy of a map between Riemannian manifolds is a functional
which has been widely studied by diverse authors
\cite{EeLe1,EeLe2,Ur}. Critical points for the energy functional
are called \emph{harmonic maps} and have been characterised by
Eells and Sampson \cite{EeSa} as maps with vanishing \emph{tension
field}.
For a Riemannian manifold $(M,\langle \cdot , \cdot \rangle)$, we
denote by $(T_1 M, \langle \cdot , \cdot \rangle^S)$ its unit
tangent bundle equipped with the Sasaki metric $\langle \cdot ,
\cdot \rangle^S$ (see \cite{Sak}). Looking at unit vector fields
as maps $M \to T_1 M$, if $M$ is compact and oriented,
one can consider the energy functional as defined on the set
$\mathfrak X_1(M)$ of unit vector fields. Critical points for this
functional give rise to the notion of \emph{harmonic unit vector
field}. The condition characterising harmonic unit vector fields
has been obtained by Wiegmink \cite{Wie1} (see also Wood's paper
\cite{Wood}). This has been also extended in a natural way to
sections of sphere bundles (see \cite{GMS}, \cite{Salvai}) and to
oriented distributions, considered as sections of the
corresponding Grassmann bundle \cite{GGV}.
In \cite{Wood2}, for principal $G$-bundles $Q \to M$ over a
Riemannian manifold $(M, \langle \cdot , \cdot \rangle)$, Wood
considers global sections $\sigma \,: \; M \to Q/H$ of the
quotient bundle $\pi : Q/H \to M$, where $H$ is a closed subgroup
of $G$ such that $G/H$ is reductive. Note that such global
sections are in one-to-one correspondence with the $H$-reductions
of the $G$-bundle $Q \to M$. Likewise, a connection on $Q \to M$
and a $G$-invariant metric on $G/H$ are fixed. Thus, $Q/H$ can be
equipped in a natural way with a metric $\langle \cdot , \cdot
\rangle_{Q/H},$ defined by using the metrics on $M$ and $G/H$. For
such a metric on $Q/H$, the submersion $\pi : Q/H \to M$ is
Riemannian and has totally geodesic fibres. In such conditions,
harmonic sections are characterised as those with vanishing
vertical tension field. This situation arises when the Riemannian
manifold $M$ is equipped with some additional geometric structure,
viewed as reduction of the structure group of the tangent bundle.
In this paper, we consider the particular situation for
$\Lie{G}$-structures defined on an oriented Riemannian
$n$-manifold $(M ,\langle \cdot , \cdot \rangle)$, where
$\Lie{G}$ is a closed and connected subgroup of $\Lie{SO}(n).$ The
manifold $M$ is said to be equipped with a $\Lie{G}$-structure if
its oriented orthogonal frame bundle $\mathcal{SO}(M)$ admits a
reduction $\mathcal{G}(M)$ to the subgroup $\Lie{G}$. Moreover, if
$\mathcal{SO}(M)/G = \mathcal{SO}(M) \times_G \Lie{SO}(n)/ G$ is
the quotient bundle under the action of $G$ on $\mathcal{SO}(M),$
the existence of a $G$-structure on $M$ is equivalent to the
existence of a global section $\sigma : M \to \mathcal{SO}(M)/G.$
In the present work, we analyse the energy functional defined on
the space of sections $\Gamma^{\infty} (\mathcal{SO}(M)/G)$ of the
quotient bundle. Thus, if $\xi^G$ denotes the intrinsic torsion
of the $G$-structure, we clearly show the central r\^{o}le played
by $\xi^G$ in the energy functional
(Theorem \ref{siginttor}). Furthermore, the first
variation formula is deduced (Theorem \ref{firstvar}). Then, we
show several equivalent characterising conditions of critical
points for the energy functional on the space of $G$-structures
defined on $(M ,\langle \cdot , \cdot \rangle)$ (Theorem
\ref{carharm}). This gives rise to the notion of \emph{harmonic
$G$-structure} for general Riemannian manifolds, not necessarily
compact and oriented. It is worthwhile to note that harmonic
$G$-structures are not necessarily critical for the energy
functional on all maps from $(M,\langle\cdot,\cdot\rangle)$ to
$(\mathcal{SO}(M)/G,\langle\cdot,\cdot\rangle_{\mathcal{SO}(M)/G})
.$ They are harmonic maps when the corresponding harmonic
$G$-structures satisfy a condition involving the curvature of the
Riemannian manifold. Additionally, we deduce the second variation
formula (Theorem \ref{secondvar}).
We point out that because the intrinsic torsion of the
$G$-structures is involved in all results above mentioned, this
makes possible going further in the study of relations between
harmonicity and classes of $G$-structures. This will be
illustrated in Section \ref{sect:almherm}, where we focus
attention on the study of harmonic almost Hermitian structures
initiated by Wood in \cite{Wood1,Wood2}. Thus, we study
harmonicity of almost Hermitian structures by using the tools
developed in Section \ref{charactgstruc}, recovering Wood's
results and proving some additional ones. In Theorem
\ref{characharmherm1}, several equivalent characterising
conditions for harmonic almost Hermitian structures are shown.
The relation of harmonicity with Gray-Hervella's classes of almost
Hermitian structures is studied in Theorem \ref{classhermharm}.
Note that the results contained there characterise harmonic
almost Hermitian structures by means of conditions on the
Riemannian curvature; concretely, in terms of the particular
Ricci tensor $\Ric^*$ determined by the almost Hermitian
structure.
Finally, we point out that Theorem \ref{classhermharm}, in some sense, generalises the
results
proved by Bor et al.
\cite{BHLS} (see Theorem \ref{bor-Hlam-Salva}). In fact,
note that the results in \cite{BHLS} are stated
for conformally flat manifolds, i.e., manifolds with vanishing Weyl curvature tensor.
After these remarks, we focus attention on the study of
harmonicity as a map of almost Hermitian structures. Results in
that direction were already obtained by Wood \cite{Wood2}. Here
we complete such results by using tools here presented.
For completeness, we
finish this paper by briefly giving a detailed and self-contained
explanation of the situation for nearly K\"{a}hler manifolds.
Thus, we will recover already known results, some of them
originally proved by Gray and others by Wood. However, we will
display alternative proofs in terms of the intrinsic torsion.
Additionally, we also show a result of Kirichenko \cite{Kir}
saying that, for nearly K\"{a}hler manifolds, the intrinsic
torsion is parallel with respect to the minimal connection.
% \end{ack}  % spurious: no matching \begin{ack}
\section{Preliminaries}{\indent} First we recall some notions relative to
$\Lie{G}$-structures, where $\Lie{G}$ is a subgroup of the linear
group $\Lie{GL}(n , \mathbb R)$. The Lie algebra of $G$ will be
denoted by $\mathfrak{g}$. An $n$-dimensional manifold $M$ is
equipped with a $\Lie{G}$-structure if its frame bundle admits a
reduction $\mathcal{G}(M)$ to the subgroup $\Lie{G}$. Moreover,
if $(M ,\langle \cdot , \cdot \rangle)$ is an $n$-dimensional
oriented Riemannian manifold, we can consider the principal
$SO(n)$-bundle $\pi_{\Lie{SO}(n)} : \mathcal{SO}(M) \to M$ of the
oriented orthonormal frames with respect to the metric $\langle
\cdot, \cdot \rangle.$ A $G$-structure on $(M ,\langle \cdot ,
\cdot \rangle)$ is a reduction $\mathcal{G}(M)\subset
\mathcal{SO}(M)$ to a subgroup $\Lie{G}$ of $\Lie{SO}(n).$
In what follows, we always assume that $G$ is closed and
connected. Then the quotient space $SO(n)/G$ is a homogeneous
manifold and it becomes a normal homogeneous Riemannian
manifold with bi-invariant metric induced by the inner product
$\langle \cdot,\cdot \rangle$ on ${\mathfrak s \mathfrak o}(n)$
given by $\langle X,Y\rangle = -\mathop{\rm trace} XY,$ the natural extension
of the usual Euclidean product $\langle \cdot , \cdot \rangle$ on
$\mathbb{R}^n$ to $\mbox{End}(\mathbb{R}^n).$ Let ${\mathcal
S\mathcal O}(M)/G$ be the orbit space under the action of $G$ on
${\mathcal S\mathcal O}(M)$ on the right as subgroup of $SO(n).$
Then the $G$-orbit map $\pi_{G}:{\mathcal S\mathcal O}(M)\to
{\mathcal S\mathcal O}(M)/G$ is a principal $G$-bundle and we have
$\pi_{SO(n)} = \pi\comp \pi_{G},$ where $\pi: {\mathcal S\mathcal
O}(M)/G\to M$ is a fibre bundle with fibre $SO(n)/G,$ which is
naturally isomorphic to the associated bundle ${\mathcal S\mathcal
O}(M)\times_{SO(n)}SO(n)/G.$ The map $\sigma:M\to {\mathcal
S\mathcal O}(M)/G$ given by $\sigma(m) = \pi_{G}(p),$ for all
$p\in {\mathcal G}(M)$ with $\pi_{SO(n)}(p) = m,$ is well-defined
because $\pi_{G}$ is constant on each fiber of the reduced bundle.
It is a smooth section and we have ${\mathcal G}(M) =
\pi^{-1}_{G}(\sigma(M)).$ Hence, there is a one-to-one
correspondence between the totality of $G$-structures and the
manifold $\Gamma^{\infty}({\mathcal S\mathcal O}(M)/G)$ of all
global sections of ${\mathcal S\mathcal O}(M)/G.$ In the sequel,
we shall also denote by $\sigma$ the $G$-structure determined by
the section $\sigma.$
If $u_{1}= (1,0,\dots,0),\dots ,u_{n} = (0 ,\dots ,0,1)$ is the
canonical orthonormal frame on $\mathbb{R}^n$, then an oriented
frame $p \in \mathcal{SO}(M)$ can be viewed as an isomorphism $p :
\mathbb{R}^n \to \mbox{\rm T}_{\pi_{\Lie{SO}(n)}(p)}M$ such that
$\{ p(u_1), \dots , p(u_n)\}$ is a positive oriented basis of
$T_{\pi_{\Lie{SO}(n)}(p)}M$. From now on, we will make reiterated
use of the {\it musical isomorphisms} $\flat : \mbox{\rm T}M \to
\mbox{\rm T}^* M$ and $\sharp : \mbox{\rm T}^* M \to \mbox{\rm T}
M$, induced by the metric $\langle \cdot , \cdot \rangle$ on $M$,
respectively defined by $X^{\flat} = \langle X , \cdot \rangle$
and $\langle \theta^{\sharp} , \cdot \rangle = \theta $.
In the presence of a $G$-structure determined by a section
$\sigma : M \to \mathcal{SO}(M)/G$, a frame $p \in
\mathcal{SO}(M)$ is said to be an {\it adapted frame} to the
$G$-structure, if $p \in \sigma \circ \pi_{\Lie{SO}(n)}(p)$ or,
equivalently, if $p \in \mathcal{G}(M) \subseteq
\mathcal{SO}(M)$. Note also that, in a first instance, the bundle
of endomorphisms $\mbox{End} (\mbox{T} M )$ on the fibers in the
tangent bundle $\mbox{T} M$ coincides with the associated vector
bundle $\mathcal{SO}(M) \times_{\Lie{SO}(n)}
\mbox{End}(\mathbb{R}^n)$, where $\Lie{SO}(n)$ acts on
$\mbox{End}(\mathbb{R}^n)$ in the usual way $(g \cdot \varphi)(x)
= g \varphi (g^{-1} x)= (\mbox{Ad}_{\Lie{SO}(n)}(g)\varphi)(x)$.
Thus, it is identified
\begin{equation}
\label{matrix1}
\varphi_{m} = a_{ji} \, p(u_i)^{\flat} \otimes p(u_j) \cong [(p, a_{ji} \, u_i^{\flat}
\otimes u_j)],
\end{equation}
where $m \in M$ and $p \in \pi^{-1}_{\Lie{SO}(n)}(m)$ and the
summation convention is used. Such a convention will be followed
in the sequel. When a risk of confusion appears, the sum will be
written in detail.
In our context, we have also a reduced subbundle
$\mathcal{G}(M)$. So that we can do the identification $\mbox{End}
(\mbox{T} M ) = \mathcal{G}(M) \times_{G}
\mbox{End}(\mathbb{R}^n)$ because any $\varphi_m$ can be
identified with an element in $\mathcal{G}(M) \times_{G}
\mbox{End}(\mathbb{R}^n)$ as in Equation \eqref{matrix1}, but in
this case must be $p \in \sigma(m)$.
Now we restrict our attention to the
subbundle $\lie{so}(M)$ of $\mbox{End} (\mbox{T} M )$ of
skew-symmetric endomorphisms $\varphi_m$, for all $m \in M$, i.e.,
$\langle \varphi_m X , Y \rangle= -\langle \varphi_m Y , X
\rangle$. Note that this subbundle $\lie{so}(M)$ is expressed as $
\lie{so}(M) = \mathcal{SO}(M) \times_{\Lie{SO}(n)} \lie{so}(n) =
\mathcal{G}(M) \times_{G} \lie{so}(n) $. The corresponding
matrices $(a_{ij})$ for $\lie{so}(M)$, given by Equation
\eqref{matrix1}, are such that $a_{ij} =- a_{ji}$. Furthermore,
because $\lie{so}(n)$ is decomposed into the $G$-modules $\lie{g}$
and the orthogonal complement $\lie{m}$ on $\lie{so}(n)$ with
respect to the inner product $\langle\cdot,\cdot\rangle,$ the
bundle $\lie{so}(M)$ is also decomposed into $\lie{so}(M) =
\lie{g}_{\sigma}(M) \oplus \lie{m}_{\sigma}(M) $, where
$\lie{g}_{\sigma}(M) = \mathcal{G}(M) \times_G \lie{g}$ and
$\lie{m}_{\sigma}(M) = \mathcal{G}(M) \times_G \lie{m}$. The
matrices $(a_{ij})$ in Equation \eqref{matrix1} corresponding to
$\lie{g}_{\sigma}(M)$ and $\lie{m}_{\sigma}(M) $ are such that
they are in $\lie{g}$ and $\lie{m}$, respectively. The subindex
$\sigma$ in $\lie{g}_{\sigma}(M)$ and $\lie{m}_{\sigma}(M) $ is to
point out that these bundles are determined by the $G$-structure
$\sigma$. From now on, we will merely write $\lie{g}_{\sigma}$ and
$\lie{m}_{\sigma}$.
Under the conditions above fixed, if $M$ is equipped with a
$G$-structure, then there exists a $G$-connection
$\widetilde{\nabla}$ defined on $M$. Doing the difference
$\widetilde{\xi}_X = \widetilde{\nabla}_X - \nabla_X$, where
$\nabla_X$ is the Levi-Civita connection of $\langle \cdot , \cdot
\rangle$, a tensor $\widetilde{\xi}_X \in \lie{so}(M)$ is
obtained. Because $\nabla$ is torsion-free, $\widetilde{\xi}$ is
an alternative way of giving the torsion of $\widetilde{\nabla}$.
In fact, if $\widetilde{T}$ is the usual torsion tensor of
$\widetilde{\nabla}$ given by $\widetilde{T}(X,Y) =
\widetilde{\nabla}_X Y - \widetilde{\nabla}_Y X -[X,Y]$, then it
is satisfied
\begin{equation} \label{tortor}
\begin{array}{l}
\widetilde{T}(X,Y) = \widetilde{\xi}_X Y -
\widetilde{\xi}_Y X, \\[1mm]
2 \langle \widetilde{\xi}_X Y , Z \rangle = \langle
\widetilde{T}(X,Y), Z \rangle - \langle \widetilde{T}(Y,Z), X
\rangle + \langle \widetilde{T}(Z,X), Y \rangle.
\end{array}
\end{equation}
Decomposing $\widetilde{\xi}_X = (
\widetilde{\xi}_X )_{\lie{g}_{\sigma}} + ( \widetilde{\xi}_X
)_{\lie{m}_{\sigma}}$, $( \widetilde{\xi}_X )_{\lie{g}_{\sigma}}
\in \lie{g}_{\sigma}$ and $( \widetilde{\xi}_X
)_{\lie{m}_{\sigma}} \in \lie{m}_{\sigma}$, a new $G$-connection
$\nabla^G$, defined by $\nabla^G_X = \widetilde{\nabla}_X -
(\tilde{\xi}_X )_{\lie{g}_{\sigma}}$, can be considered. Because
the difference between two $G$-connections must be in
$\lie{g}_{\sigma}$, $\nabla^G$ is the unique $G$-connection on $M$
such that its torsion satisfies the condition $\xi^G_X = (
\widetilde{\xi}_X )_{\lie{m}_{\sigma}} = \nabla^{G}_X - \nabla_X
\in \lie{m}_{\sigma}$. $\nabla^G$ is called the {\it minimal
connection} and $\xi^G$ is referred as the {\it intrinsic torsion}
of the $G$-structure $\sigma$ \cite{CleytonSwann:torsion,Salamon}.
A natural way of classifying $G$-structures arises by decomposing
of the space $\mbox{T}^* M \otimes \lie{m}_{\sigma}$ of possible
intrinsic torsions into irreducible $G$-modules. If $\xi^G=0$, the
$G$-structure is usually referred as a {\it parallel} (or {\it
integrable}) $G$-structure. In such a case, the Riemannian
holonomy group of $M$ is contained in $G$.
Associated to the metric connections $\nabla$ and $\nabla^G$ there
are connections one-forms $\omega$ and $\omega^G$ defined on
$\mathcal{SO}(M)$ and $\mathcal{G}(M)$ with values in
$\lie{so}(n)$ and $\lie{g}$, respectively. Note that the
projection $\mathcal{G}(M) \to M$ of the reduced bundle is
$\pi_{\Lie{SO}(n)}$ restricted to $\mathcal{G}(M)$. Therefore, if
$\wp=\{e_1, \dots e_n\} \, : \, U \to \mathcal{G}(M) $ is a local
frame field adapted to the $G$-structure, then
$$
\langle \xi^G_{X} e_i , e_j \rangle_{m} = \langle \nabla^G_{X} e_i
, e_j \rangle_{m} - \langle \nabla_{X} e_i , e_j \rangle_{m} =
\omega^G_{\wp(m)} ( \wp_{\ast m} X )_{ji} - \omega_{\wp(m)} (
\wp_{\ast m} X )_{ji}.
$$
Since the matrices $(\langle \xi^G_{X} e_i , e_j \rangle_{m}) \in
\lie{m}$ and $(\omega^G_{\wp(m)} ( \wp_{\ast m} X )_{ji})\in
\lie{g}$, the following identities for matrices are obtained
$$
(\omega_{\wp(m)}( \wp_{\ast m} X)_{ji} )_{\lie{g}} =
(\omega^G_{\wp(m)} ( \wp_{\ast m} X )_{ji}), \qquad
(\omega_{\wp(m)}( \wp_{\ast m} X )_{ji})_{\lie{m}} = - \left(
\langle \xi^G_{X} e_{i} , e_{j}
\rangle_{m}\right).
$$
Therefore, the intrinsic torsion is expressed as
\begin{equation} \label{inttorome}
\xi_X^G = - (\omega ( \wp_{\ast } X )_{ji})_{\lie{m}} \,
e_{i}^{\flat} \otimes e_j,
\end{equation}
where $\wp = \{ e_1 , \dots e_n \}$ is a local frame field
adapted to the $G$-structure.
Finally, we need to point out that, along the present paper, we
will consider the natural extension of the metric $\langle \cdot
, \cdot \rangle$ to $(r,s)$-tensor fields on $M$. Such an
extension is defined by
\begin{equation} \label{extendedmetric}
\langle \Psi,\Phi \rangle = \Psi^{i_{1}\dots i_{r}}_{j_{1}\dots
j_{s}} \Phi^{i_{1}\dots
i_{r}}_{j_{1}\dots j_{s}},
\end{equation}
where $\Psi^{i_{1}\dots i_{r}}_{j_{1}\dots j_{s}}$ and
$\Phi^{i_{1}\dots i_{r}}_{j_{1}\dots j_{s}}$ are the components of
$\Psi$ and $\Phi$ with respect to an orthonormal local frame.
\section{Characterising harmonic $G$-structures via the intrinsic
torsion}{\indent} \label{charactgstruc}
Now we consider the bundle $\pi_{G} \, : \,
\mathcal{SO}(M) \to \mathcal{SO}(M)/G$. Because we have $\mbox{T}
\mathcal{SO}(M) = \ker \pi_{\Lie{SO}(n)\ast} \oplus \ker \omega$,
the tangent bundle of $\mathcal{SO}(M)/G$ is decomposed into
$\mbox{T} \mathcal{SO}(M)/G = \mathcal{V} \oplus \mathcal{H}$,
where $\mathcal{V} = \pi_{G\ast} (\ker \pi_{\Lie{SO}(n)\ast} )$
and $\mathcal{H} = \pi_{G\ast} (\ker \omega )$. Then the {\it
vertical} and {\it horizontal} distributions $\mathcal{V}$ and
$\mathcal{H}$ are such that $\pi_{\ast} \mathcal{V} =0$ and
$\pi_{\ast} \mathcal{H} = \mbox{\rm T}M$.
Moreover, we consider the pullback or induced bundle $\pi^*
\lie{so}(M)$ of $\lie{so}(M)$ by $\pi,$ that is, the vector bundle
over $\mathcal{SO}(M)/G$ consisting of those pairs $(pG,
\varphi_m)$, where $\pi(pG)=m$ and $\varphi_m \in \lie{so}(M)_m$.
Alternatively, $\pi^* \lie{so}(M)$ is also described as the
associated bundle $\mathcal{SO}(M) \times_G \lie{so}(n)$ to
$\pi_{G}.$ Then $\pi^* \lie{so}(M)$ is decomposed into $\pi^*
\lie{so}(M) = \lie{g}_{\mathcal{SO}(M)} \oplus
\lie{m}_{\mathcal{SO}(M)}$, where $\lie{g}_{\mathcal{SO}(M)}=
\mathcal{SO}(M) \times_G \lie{g} $ and
$\lie{m}_{\mathcal{SO}(M)}= \mathcal{SO}(M) \times_G \lie{m}$.
A metric on each fiber of $\pi^* \lie{so}(M)$ is defined by
$$
\langle (pG , \varphi_m) , (pG ,\psi_m) \rangle = \langle
\varphi_m , \psi_m \rangle,
$$
where $\langle \cdot , \cdot \rangle$ in the right side is the
extension to $(1,1)$-tensors of the metric on $M$ given by
\eqref{extendedmetric}. With respect to this metric, the
decomposition $\pi^* \lie{so}(M) = \lie{g}_{\mathcal{SO}(M)}
\oplus \lie{m}_{\mathcal{SO}(M)}$ is orthogonal.
Additionally, we have a covariant derivative $\nabla$ on $\pi^*
\lie{so}(M)$ induced by the Levi-Civita connection
associated to the metric $\langle \cdot , \cdot
\rangle$ on $M$ and given by
\begin{equation} \label{inducedLC}
\left(\nabla_A \tilde{\varphi} \right)_{pG} = \left(pG \, , \,
\frac{\nabla}{ds}_{|s=0} \mbox{pr}_2^{\pi}
\tilde{\varphi}_{\tilde{\gamma}(s)}\right),
\end{equation}
for all $A \in
\mathfrak{X}(\mathcal{SO}(M)/G)=\Gamma^{\infty}(\mbox{T}\mathcal{SO}(M)/G)$
and $\tilde{\varphi} \in \Gamma^{\infty}(\pi^* \lie{so}(M)),$
where $s \to \tilde{\gamma}(s)$ is a curve in $\mathcal{SO}(M)/G$
such that $\tilde{\gamma}(0)=pG$ and $\tilde{\gamma}'(0) = A_{pG}$
and $ \mbox{pr}_2^{\pi}$ is the projection $ \mbox{pr}_2^{\pi}(pG,
\varphi_{m}) = \varphi_{m}$ on $\lie{so}(M).$ Note that, in the
right side, the covariant derivative is along the curve $\gamma(s)
=\pi \circ \tilde{\gamma}(s).$
There is a canonical isomorphism between $\mathcal{V}$ and the
bundle $\lie{m}_{\mathcal{SO}(M) }$. For describing such an
isomorphism, let us firstly say that the elements in
$\lie{m}_{\mathcal{SO}(M)}$ can be seen as pairs $(pG ,
\varphi_m)$ such that if $\varphi_m$ with respect to $p$ is
expressed as in Equation \eqref{matrix1}, then $( a_{ji}) \in
\lie{m}$.
Now, let us describe the mentioned
canonical isomorphism $\phi_{| \mathcal{V}_{pG}} :
\mathcal{V}_{pG} \to \left( \lie{m}_{\mathcal{SO}(M)}
\right)_{pG}$. For all $a \in \lie{m}$, we have the fundamental
vector field $a^*$ on $\mathcal{SO}(M)$ given by
$$
a^*_p = \frac{d}{dt}_{|t=0} p . \exp t a \in \ker
\pi_{\Lie{SO}(n)*p} \subseteq \mbox{T}_p \mathcal{SO}(M).
$$
Any vector in $\mathcal{V}_{pG}$ is given by $\pi_{G*p} (a^*_p)$,
for some $a =(a_{ji}) \in \lie{m}$. The isomorphism $\phi_{|
\mathcal{V}_{pG}}$ is defined by
$$
\phi_{| \mathcal{V}_{pG}} ( \pi_{G*p} (a^*_p)) = (pG, a_{ji} \,
p(u_i)^{\flat} \otimes p(u_j)).
$$
Next, the map $\phi_{|\mathcal{V}} : \mathcal{V} \to
\lie{m}_{\mathcal{SO}(M)}$ is extended to $\phi : \mbox{T} \,
\mathcal{SO}(M)/G \to \lie{m}_{\mathcal{SO}(M)}$ by saying that
$\phi (A) =0$, for all $A \in \mathcal{H}$, and $\phi (V) =
\phi_{|\mathcal{V}}(V)$, for all $V \in \mathcal{V}$. This is used
to define a metric $\langle \cdot , \cdot
\rangle_{\mathcal{SO}(M)/G}$ on $\mathcal{SO}(M)/G$ by
\begin{equation} \label{metricquo}
\langle A , B \rangle_{\mathcal{SO}(M)/G} = \langle \pi_{\ast} A ,
\pi_{\ast} B \rangle + \langle \phi (A) , \phi (B) \rangle.
\end{equation}
For this metric, the projection $\pi \, : \, \mathcal{SO}(M)/G \to
M$ is a Riemannian submersion with totally geodesic fibres (see
\cite{Vilms} and \cite[page 249]{Besse:Einstein}). That is, if
${\sf v} \,: \, \mbox{\rm T} \mathcal{SO}(M)/G \to \mathcal{V}$
and ${\sf h} \,: \, \mbox{\rm T} \mathcal{SO}(M)/G \to
\mathcal{H}$ are respectively the vertical and horizontal
projections and $\nabla^q$ is the Levi-Civita connection of
$\langle \cdot , \cdot \rangle_{\mathcal{SO}(M)/G}$, then $
\nabla^q_{V} W = {\sf v} \nabla^q_{V} W$ and $ \nabla^q_{V}H
= {\sf h} \nabla^q_{V} H$, for all $H \in \Gamma^{\infty}(\mathcal{H})$ and
$V, W \in \Gamma^{\infty}(\mathcal{V})$.
Because $\lie{so}(n) = \lie{g} \oplus \lie{m}$ is a reductive
decomposition, that is, it satisfies ${Ad}_{\Lie{SO}(n)} (G)
\lie{m} \subseteq \lie{m},$ the component $\omega_{\lie{g}}$ in
$\lie{g}$ of the connection-form $\omega$ is a connection-form
for the bundle $\pi_G \,: \, \mathcal{SO}(M) \to
\mathcal{SO}(M)/G$ which is referred as {\it canonical
connection}. This connection provides a covariant derivative
$\nabla^c$ on $\lie{m}_{\mathcal{SO}(M)}$, with respect to which
the fibre metric is holonomy invariant. The Levi-Civita connection
$\nabla^q$ is related with $\nabla^c$ on
$\lie{m}_{\mathcal{SO}(M)}$ via the projection of the
$\lie{m}$-component of the curvature form $\Omega$ of the
Levi-Civita connection $\nabla$ of $M$. Thus, it is considered the
two-form $\Phi$ on $\mathcal{SO}(M)/G$, with values in
$\lie{m}_{\mathcal{SO}(M)}$, defined by
$$
\Phi (A,B) = \phi \pi_{G*} \Omega ( \tilde{A},
\tilde{B})^*_{\lie{m}} = \phi \pi_{G*} d \omega ( \tilde{A},
\tilde{B})^*_{\lie{m}} + \phi \pi_{G*} [\omega (\tilde{A}) ,
\omega ( \tilde{B}) ]^*_{\lie{m}},
$$
where $\tilde{A}, \tilde{B} \in \mbox{T} \mathcal{SO}(M)$ such
that $\pi_{G*} \tilde{A} = A$, $\pi_{G*} \tilde{B} = B$.
Therefore, if on $\mathcal{SO}(M)/G$ we consider the vertical
vectors $U$ and $V$ and the horizontal vectors $H$ and $K$,
then
$$
\Phi (U,V) = 0, \qquad \Phi (U,H) =0, \qquad \Phi (H,K) = \phi \pi_{G*} \Omega (
\tilde{H}, \tilde{K})^*_{\lie{m}} = \phi \pi_{G*} d \omega (
\tilde{H}, \tilde{K})^*_{\lie{m}}.
$$
Next, we recall some useful facts proved in \cite[Corollary 2.4
and Proposition 2.7]{Wood2}.
\begin{lemma}[\cite{Wood2}] $\;$ \label{wood:lemma} We have
\begin{enumerate}
\item[{\rm (i)}]
$
\nabla^c_A \tilde{V} = \nabla_A \tilde{V} - [ \phi \, A ,
\tilde{V}].
$
\item[{\rm (ii)}]
$ \phi(\nabla^q_{A}B) - \nabla^c_{A}\phi B = \frac{\textstyle
1}{\textstyle 2}\left \{ [\phi A,\phi B]_{\lie{m}} -
\Phi(A,B)\right \}, $
\end{enumerate}
for all $A,B \in \mathfrak{X}(\mathcal{SO}(M)/G)$ and $\tilde{V}
\in \Gamma^{\infty}(\lie{m}_{\mathcal{SO}(M)}).$
\end{lemma}
From here, we obtain
\begin{equation}\label{nn}
\phi ( \nabla^q_A V) = \nabla^c_A\phi \, V + \frac12 [ \phi A ,
\phi V]_{\lie{m}} = \nabla_A \phi \, V - \frac12 [ \phi A , \phi
V]_{\lie{m}} - [ \phi A , \phi V]_{\lie{g}},
\end{equation}
for all $A \in \mathfrak{X}(\mathcal{SO}(M)/G)$ and $V \in
\Gamma^{\infty}(\mathcal{V}).$
\begin{remark} \label{id:mvert}
{\rm (1) The Lie bracket on $\pi^{*} \lie{so}(M)$ is defined by
$$
[(pG,\varphi_m), (pG,\psi_m)] = (pG , [\varphi_m, \psi_m]) = (pG ,
\varphi_m \circ \psi_m - \psi_m \circ \varphi_m).
$$
(2) Given a $G$-structure $\sigma \, : \, M \to
\mathcal{SO}(M)/G$, the bundle $\sigma^* \pi^* \lie{so}(M)$ is
identified with $\lie{so}(M)$ by the bijection map $\mbox{\rm
pr}_2^{\pi} \circ \mbox{\rm pr}_2^{\sigma}:
(m,(\sigma(m),\varphi_{m}))\mapsto \varphi_{m}$ and likewise,
$\sigma^* \lie{g}_{\mathcal{SO}(M)} \cong \lie{g}_{\sigma}$ and
$\sigma^* \lie{m}_{\mathcal{SO}(M)} \cong \lie{m}_{\sigma}$. With
respect to sections, if $\varphi \in \Gamma^{\infty}(\lie{so}(M))$
then $pG \to (pG , \varphi_{\pi(pG)})$ belongs to
$\Gamma^{\infty}(\pi^* \lie{so}(M))$ and conversely, if
$\tilde{\varphi} \in \Gamma^{\infty} (\lie{m}_{\mathcal{SO}(M)})$
(respectively, $\tilde{\varphi} \in \Gamma^{\infty}
(\lie{g}_{\mathcal{SO}(M)})$), then $m \to \mbox{pr}_2^\pi
\tilde{\varphi}_{\sigma(m)}$ is in $\Gamma^{\infty}
(\lie{m}_{\sigma})$ (respectively, in $\Gamma^{\infty}
(\lie{g}_{\sigma})$).
}
\end{remark}
Now, we consider the set of all possible $G$-structures on a
closed and oriented Riemannian manifold $(M, \langle \cdot , \cdot
\rangle).$ As it has been already mentioned, this set is
identified with the manifold $\Gamma^{\infty}(\mathcal{SO}(M)/G)$
of all global sections $\sigma \, : \, M \to \mathcal{SO}(M)/G$.
Then the {\it energy} of the $G$-structure is defined as the
energy of the corresponding section $\sigma,$ given by the
integral
\begin{equation}\label{1}
{\mathcal E}(\sigma)=\frac{\textstyle 1}{\textstyle2}\int_{M}\|\sigma_{\ast}\|^{2}dv,
\end{equation}
where $\|\sigma_{\ast}\|^{2}$ is the norm of the differential
$\sigma_{\ast}$ of $\sigma$ with respect to the metrics $\langle
\cdot , \cdot \rangle$ and $ \langle \cdot , \cdot
\rangle_{\mathcal{SO}(M)/G}$, and $dv$ denotes the volume form on
$(M,\langle \cdot , \cdot \rangle)$. On the domain of a local
orthonormal frame field $\{e_1, \dots , e_n \}$ on $M$,
$\|\sigma_{*}\|^{2}$ can be expressed as $\|\sigma_{*}\|^{2} =
\langle
\sigma_{*}e_{i},\sigma_{*}e_{i}\rangle_{\mathcal{SO}(M)/G}$.
Furthermore, from \eqref{1} and using \eqref{metricquo}, it is
obtained that the energy $\mathcal{E}(\sigma)$ of $\sigma$ is
given by
\[
{\mathcal E}(\sigma) = \frac{\textstyle n}{\textstyle 2} {\rm
Vol}(M) + \frac{\textstyle 1}{\textstyle 2}\int_{M}\| \phi \,
\sigma_*\|^{2}dv.
\]
We will call the relevant part of this formula,
$B(\sigma)=\frac{\textstyle 1}{\textstyle 2}\int_{M}\| \phi \,
\sigma_*\|^{2}dv$, the {\it total bending} of the $G$-structure
$\sigma$. Because we will show that $ \phi \,
\sigma_{\ast} = - \xi^G$, the total bending provides a measure of
how the $G$-structure $\sigma$ fails to be parallel. Here, we are
doing the identification $\sigma^* \lie{m}_{\mathcal{SO}(M)} \cong
\lie{m}_{\sigma}$ pointed out in Remark \ref{id:mvert}.
\begin{theorem} \label{siginttor}
If $\sigma$ is a global section of $\mathcal{SO}(M)/G$ then $\phi
\, \sigma_{\ast} = - \xi^G$, where $\xi^G$ is the intrinsic
torsion of the $G$-structure determined by $\sigma,$ and the total
bending of the $G$-structure $\sigma$ is given by
$$
B(\sigma)=\frac{\textstyle 1}{\textstyle 2}\int_{M}\| \xi^G\|^{2}
\, dv.
$$
\end{theorem}
\begin{proof} For $X \in \mbox{T}_m M$, we will compute
$\phi \, \sigma_{\ast} X$. If $\wp = \{ e_1 , \dots , e_n \} : U
\to \mathcal{G}(M)$ is a local frame field adapted to the
$G$-structure $\sigma$ with $m\in U,$ then $\pi_{\Lie{SO}(n)}\comp
\wp = Id_{U}$ and taking $\pi_{G|\mathcal{G}(M)} = \sigma \comp
\pi_{\Lie{SO}(n)}$ into account, we have $\sigma_{*} =
\pi_{G*}\comp \wp_{\ast}.$ Therefore, we get
$$
\begin{array}{lcl}
\phi(\sigma_{*} X) & = & \phi({\sf v}(\sigma_{*}X)) = \phi({\sf
v}(\pi_{G*}\wp_{\ast} X)) = \phi \left(\pi_{G*}(\omega(\wp_{\ast}
X)_{ji} u_i^{\flat} \otimes u_j)^*\right)\\[0.5pc]
& = &
\phi\left((\pi_{G*}(\omega(\wp_{\ast} X)_{ji})_{\lie{m}}
u_i^{\flat} \otimes u_j)^*\right) = \left(\sigma(m),
(\omega(\wp_{\ast} X)_{ji})_{\lie{m}}e_i^{\flat} \otimes
e_j\right).
\end{array}
$$
Thus, from \eqref{inttorome}, we have
$$
\phi \sigma_{*} X = (\sigma(m) , -\xi^G_X)
$$
and
$$
\|\phi\sigma_{*}\|^{2} = \langle \phi \, \sigma_* (e_{i}) , \phi
\, \sigma_* (e_{i}) \rangle = \langle (\sigma , \xi^G_{e_i}) ,
(\sigma, \xi^G_{e_i}) \rangle = \langle \xi^G_{e_i} , \xi^G_{e_i }
\rangle =\|\xi^{G}\|^{2}.
$$
Now, the theorem follows using the above identification $\sigma^*
\lie{m}_{\mathcal{SO}(M)} \cong \lie{m}_{\sigma}.$
\end{proof}
Some immediate consequences of the last theorem, most of them already
proved in \cite{Wood2}, are given in the following corollary.
\begin{corollary} The following conditions are equivalent:
\begin{enumerate}
\item[{\rm (i)}] $\sigma_{\ast} X$ is horizontal, for all $X \in
\mbox{\rm T}M$.
\item[{\rm (ii)}] $\sigma$ is a parallel $G$-structure, i.e., $\xi^G=0$, or $\nabla^G$ is torsion-free.
\item[{\rm (iii)}] $\sigma$ is an isometric immersion.
\item[{\rm (iv)}] $\nabla$ can be reduced to a
$G$-connection.
\end{enumerate}
\end{corollary}
Next, we determine the Euler-Lagrange equation or the critical
point condition for the energy functional $\mathcal{E}$ on closed
and oriented Riemannian manifolds. If we consider a smooth
variation $\sigma_t \in \Gamma^{\infty} (\mathcal{SO}(M)/G)$ of
$\sigma=\sigma_0$, then the corresponding {\it variation field} $m
\to \varphi(m) = \frac{d}{dt}_{|t=0} \sigma_t(m)$ is a section of the
pullback bundle $\sigma^{*}\mathcal{V}$ over $M$. Thus, the
tangent space $\mbox{T}_{\sigma} \Gamma^{\infty}
(\mathcal{SO}(M)/G)$ is identified with the space $\Gamma^{\infty}
(\sigma^{*} \mathcal{V})$ of global sections of $\sigma^{*}
\mathcal{V}$ \cite{Ur}. Because $\phi$ determines also an
identification $\sigma^{*}\mathcal{V}\cong \lie{m}_{\sigma}$ by
the bijection
\[
(m,\pi_{G*\sigma(m)}a^{*}_{\sigma(m)})\mapsto \varphi_{m} = a_{ji}
\, p(u_i)^{\flat} \otimes p(u_j),
\]
where $a = (a_{ij})\in \lie{m}$ and $p\in \mathcal{G}(M)$ with
$\pi_{\Lie{SO}(n)}(p) = m,$ we can identify the tangent space
$\mbox{T}_{\sigma} \Gamma^{\infty} (\mathcal{SO}(M)/G)$ with
$\Gamma^{\infty}(\lie{m}_{\sigma})$.
In the following results, we will consider the coderivative $d^*
\xi^G$ of the intrinsic torsion $\xi^G$, which is defined by
$$
(d^* \xi^G)_m (X) = - (\nabla_{e_i} \xi^G)_{e_i} X,
$$
where $\{ e_1, \dots , e_n \}$ is any orthonormal frame on $m\in
M$. In a first instance, $d^* \xi^G$ is a global section of
$\lie{so}(M)= \lie{g}_{\sigma} \oplus \lie{m}_{\sigma}$.
\begin{lemma} \label{coderxilem} The coderivative $d^* \xi^G$ is a global section
of $\lie{m}_{\sigma}$ and is given by
\begin{equation} \label{coderxi}
d^* \xi^G = - (\nabla^{G}_{e_i} \xi^G)_{e_i} - \xi^G_{\xi^G_{e_i}
e_i}.
\end{equation}
\end{lemma}
\begin{proof} Because $\nabla^G = \nabla + \xi^G$, it follows
that $ d^* \xi^G = - (\nabla^{G}_{e_i} \xi^G)_{e_i} + (\xi^G_{e_i}
\xi^G)_{e_i}. $ But one can check that $(\xi^G_{e_i} \xi)_{e_i} =
- \xi^G_{\xi^G_{e_i} e_i}$. Thus, Equation \eqref{coderxi} is
obtained. It is obvious that $\xi^G_{\xi^G_{e_i} e_i}$ is in
$\lie{m}_{\sigma}$. Since $\nabla^G$ is a $G$-connection,
$\nabla^G$ preserves the $G$-type of a tensor. Therefore, from
$\xi^G_X \in \lie{m}_{\sigma}$, it follows that $(\nabla^{G}_{e_i}
\xi^G)_{e_i} \in \lie{m}_{\sigma}$.
\end{proof}
\begin{theorem}[The first variation formula] \label{firstvar}
If $(M,\langle \cdot , \cdot\rangle)$ is a closed and oriented
Riemannian manifold and $\sigma$ is a global section of
$\mathcal{SO}(M)/G$, then, for all $\varphi \in \Gamma^{\infty}
(\lie{m}_{\sigma}) \cong \mbox{\rm T}_{\sigma} \Gamma^{\infty}
(\mathcal{SO}(M)/G)$, we have
$$
d \mathcal{E}_{\sigma} (\varphi) = - \int_M \langle \xi^G , \nabla
\varphi \rangle dv = - \int_M \langle d^* \xi^G , \varphi \rangle
dv,
$$
where $\xi^G$ is the intrinsic torsion of $\sigma$.
\end{theorem}
\begin{proof} We will also denote by $\varphi$ the section in
$\Gamma^{\infty}(\sigma^{*}\mathcal{V})$ which is identified with
$\varphi \in \Gamma^{\infty}(\lie{m}_{\sigma})$, i.e.,
$\mbox{pr}_2^\pi \phi \varphi = \varphi$. If $I_{\varepsilon_1} =
]-\varepsilon_1, \varepsilon_1[ \to \Gamma^{\infty}
(\mathcal{SO}(M)/G)$, $t \to \sigma_t$, is a curve such that
$\sigma_0 = \sigma$, and $ (\sigma_t)'(0) = \varphi$, then we
obtain
\begin{eqnarray*}
d{\mathcal E}_{\sigma}(\varphi) = \frac{\textstyle d}{\textstyle
dt}_{\mid t=0}{\mathcal E}(\sigma_t) & = &
\frac{1}{2}\int_{M}\frac{d}{dt}_{\mid t=0} \langle {\sf v} \,
\sigma_{t*} , {\sf v} \, \sigma_{t*} \rangle_{\mathcal{SO}(M)/G}
dv \\
& = & \int_{M} \langle {\sf v} \, \sigma_{*} ,
\frac{\nabla^q}{dt}_{\mid t=0} {\sf v} \, \sigma_{t*}
\rangle_{\mathcal{SO}(M)/G} dv.
\end{eqnarray*}
Now, since $\pi$ has totally geodesic fibres and the tangent
vector $(\sigma_t(m))'_{t=0} = \varphi(m)$ of the curve $t \to
\sigma_t(m)$ is vertical, it follows
$$
\langle {\sf v} \, \sigma_{*} , \frac{\nabla^q}{dt}_{\mid t=0}
{\sf v} \, \sigma_{t*} \rangle_{\mathcal{SO}(M)/G} = \langle {\sf
v} \, \sigma_{*} , \frac{\nabla^q}{dt}_{\mid t=0} \sigma_{t*}
\rangle_{\mathcal{SO}(M)/G}.
$$
Next, if $I_{\varepsilon_2} = ]-\varepsilon_2, \varepsilon_2[ \to
M$, $s \to \gamma (s)$, is a curve such that $\gamma(0)=m$ and
$\gamma'(0)=X$ and we consider the smooth map $I_{\varepsilon_1}
\times I_{\varepsilon_2} \to \mathcal{SO}(M)/G$ defined by $(t,s)
\to \sigma_t(\gamma (s))$, then we obtain
\begin{equation*}
\frac{\nabla^q }{\partial t}_{|t=0} \frac{\partial}{\partial
s}_{|s =0} (\sigma_t (\gamma(s))) = \frac{\nabla^q}{dt}_{|t=0}
(\sigma_{t \ast m} X ) = \frac{\nabla^q}{\partial s}_{|s=0}
\frac{\partial}{\partial t}_{|t =0} (\sigma_t (\gamma(s))) =
\frac{\nabla^q}{d s}_{|s=0} \varphi(\gamma(s)).
\end{equation*}
Therefore,
\begin{eqnarray*}
\langle {\sf
v} \, \sigma_{*} X , \frac{\nabla^q}{dt}_{\mid t=0} \sigma_{t*}
X \rangle_{\mathcal{SO}(M)/G} & = & \langle {\sf v} \, \sigma_{*}
X , \frac{\nabla^q}{ds}_{\mid s=0} \varphi(\gamma(s))
\rangle_{\mathcal{SO}(M)/G} \\
& = & \langle \phi \sigma_{*} X , \phi \frac{\nabla^q}{ds}_{\mid
s=0} \varphi(\gamma(s)) \rangle.
\end{eqnarray*}
Hence, using \eqref{nn}, we get
\begin{eqnarray*}
\langle {\sf
v} \, \sigma_{*} X , \frac{\nabla^q}{dt}_{\mid t=0} \sigma_{t*}
X \rangle_{\mathcal{SO}(M)/G} & = & \langle \phi \sigma_{*} X ,
\frac{\nabla}{ds}_{\mid s=0} \phi \varphi(\gamma(s)) - \frac12 [
\phi \sigma_{*} X , \phi \varphi]_{\lie{m}} \rangle
\\
& = & \langle \phi \sigma_{*} X ,
\frac{\nabla}{ds}_{\mid s=0} \phi \varphi(\gamma(s)) \rangle,
\end{eqnarray*}
where we have used that $SO(n)/G$ is a normal homogeneous
Riemannian manifold
and $\frac{\nabla}{ds}_{\mid s=0} \phi
\varphi(\gamma(s))$ means the covariant derivative along the curve
$ s \to \sigma (\gamma(s))$. Finally, since by Equation
\eqref{inducedLC} we have
$$
\frac{\nabla}{ds}_{\mid s=0} \phi \varphi(\gamma(s)) = \left(
\sigma (\gamma(0)) , \frac{\nabla}{ds}_{\mid s=0} \mbox{pr}_2^\pi
\phi \varphi(\gamma(s)) \right),
$$
then we obtain
\begin{eqnarray*}
\langle {\sf v} \, \sigma_{*} X , \frac{\nabla^q}{dt}_{\mid t=0} \sigma_{t*}
X \rangle_{\mathcal{SO}(M)/G} & = & \langle \mbox{pr}_2^\pi \phi
\sigma_{*} X , \frac{\nabla}{ds}_{\mid s=0} \mbox{pr}_2^\pi \phi
\varphi(\gamma(s)) \rangle
\\
& = &- \langle \xi^G_{X} , \nabla_X \mbox{pr}_2^\pi \phi \varphi \rangle.
\end{eqnarray*}
From this, and taking into account that $\mbox{pr}_2^\pi \phi
\varphi = \varphi$, we will get the required identity
\begin{equation} \label{difener}
d{\mathcal E}_{\sigma}(\varphi) = - \int_{M} \langle \xi^G ,
\nabla \varphi \rangle dv.
\end{equation}
On the other hand, we have the equality
$$
\langle \xi^G , \nabla \varphi \rangle = \mbox{div}
(\xi^G)^{\mbox{t}} \varphi + \langle d^* \xi^G , \varphi \rangle,
$$
where $\mbox{t}$ means the {\it transpose} operator which is
applied to any section $\Psi \in \Gamma^{\infty} (\mbox{T}^{*} M
\otimes \lie{so}(M))$ and defined by $\Psi^{\mbox{t}} :
\lie{so}(M) \to \lie{X}(M)$, $\langle \Psi^{\mbox{t}} \varphi, X
\rangle = \langle \Psi_X , \varphi \rangle$. Using this last identity
in Equation \eqref{difener}, we finally obtain the other
expression for $d{\mathcal E}_{\sigma}(\varphi)$ required in the
Theorem.
\end{proof}
\begin{theorem} \label{carharm}
Under the same assumptions as in Theorem \ref{firstvar}, the
following conditions are equivalent:
\begin{enumerate}
\item[{\rm (i)}]$\sigma$ is a critical point for the energy functional
on $\Gamma^{\infty}(\mathcal{SO}(M)/G)$.
\item[{\rm (ii)}]$d^* \xi^G=0$.
\item[{\rm (iii)}]$(\nabla^G_{e_i} \xi^G)_{e_i} = -
\xi^G_{\xi^G_{e_i} e_i}$.
\item[{\rm (iv)}] If $T^G$ is the torsion of the minimal connection
$\nabla^G$, then
\begin{enumerate}
\item[{\rm (a)}] $\langle (\nabla_{e_i} T^G)(X,Y), e_i\rangle=0$, for all $X,Y \in
\mathfrak{X}(M)$, and
\item[{\rm (b)}]$d^* T^G$ is a skew-symmetric endomorphism,
i.e., $d^* T^G \in \lie{so}(M)$.
\end{enumerate}
\end{enumerate}
\end{theorem}
\begin{proof} An immediate consequence of Theorem \ref{firstvar}
and Lemma \ref{coderxilem} is that conditions (i) and (ii) are
equivalent. The equivalence of (ii) and (iii) follows from Equation
\eqref{coderxi}. Finally, the equivalence of the conditions in
(iv) is a direct consequence of the identity
$$
2 \langle (\nabla_X \xi^G)_Y Z , W \rangle = \langle Y, (\nabla_X
T^G)( Z , W) \rangle - \langle Z, (\nabla_X T^G)( W , Y) \rangle
+ \langle W, (\nabla_X T^G)( Y , Z) \rangle.
$$
\end{proof}
For general Riemannian manifolds $(M,\langle \cdot ,\cdot
\rangle)$, not necessarily closed and oriented, we will say that
a $G$-structure $\sigma$ is {\em harmonic}, if it satisfies $d^* \xi^G=0$.
\begin{theorem}[The second variation formula] \label{secondvar}
With the same assumptions as in Theorem \ref{firstvar}, if
$\sigma$ is a harmonic $G$-structure, then the Hessian form
$({\rm Hess}\;{\mathcal E})_{\sigma}$ on $
\Gamma^{\infty}(\lie{m}_{\sigma} ) \cong \mbox{T}_{\sigma}
\Gamma^{\infty} (\mathcal{SO}(M)/G)$ is given by
\[
({\rm Hess}\; {\mathcal E})_{\sigma}\varphi = \int_{M} \left(
\|\nabla \varphi \|^{2} - \frac12 \| [ \xi^G ,
\varphi]_{\lie{m}_{\sigma}}
\|^2 + \langle \nabla
\varphi , 2[ \xi^G , \varphi] -[ \xi^G ,
\varphi]_{\lie{m}_{\sigma}}\rangle\right ) dv.
\]
In particular, if $[\lie{m},\lie{m}]\subset \lie{g}$ or
equivalently $SO(n)/G$ is locally symmetric, then
\[
({\rm Hess}\; {\mathcal E})_{\sigma}\varphi = \int_{M} \left(
\|\nabla \varphi \|^{2} - 2 \| [ \xi^G , \varphi]\|^2 \right )
dv.
\]
\end{theorem}
\begin{proof} From results contained in the proof of
Theorem \ref{firstvar}, relative to the first variation formula,
we have
\begin{eqnarray*}
\frac{\textstyle d}{\textstyle dt}_{\mid t=0} d{\mathcal
E}_{\sigma_{t}}(\varphi) & = & \int_{M} \frac{\textstyle
d}{\textstyle dt}_{\mid t=0} \langle {\sf v} \, \sigma_{t*} ,
\frac{\nabla^q}{dt}_{\mid t=t} \, \sigma_{t*}
\rangle_{\mathcal{SO}(M)/G} dv.
\end{eqnarray*}
But using the same arguments as in the referred proof, we will get
$$
\frac{\textstyle d}{\textstyle dt}_{\mid t=0} \langle {\sf v} \,
\sigma_{t*} X , \frac{\nabla^q}{dt}_{\mid t=t} \, \sigma_{t*} X
\rangle_{\mathcal{SO}(M)/G} = \frac{\textstyle d}{\textstyle
dt}_{\mid t=0} \langle {\sf v} \, \sigma_{t*} X
,\frac{\nabla^q}{ds}_{\mid s=0} \varphi_t(\gamma(s))
\rangle_{\mathcal{SO}(M)/G},
$$
where $s \to \gamma (s)$ is a curve in $M$ such that $\gamma(0)=m$
and $\gamma'(0)=X$, $(\sigma_t)'_{t=t_0} (m) = \varphi_{t_0}(m)$,
and $\frac{\nabla^q}{ds}_{\mid s=0}$ is the covariant derivative
along the curve $s \to \sigma_t(\gamma(s))$. From last identity,
using that the fibers are totally geodesic, it follows
\begin{eqnarray*}
\frac{\textstyle d}{\textstyle dt}_{\mid t=0} \langle {\sf v} \,
\sigma_{t*} X , \frac{\nabla^q}{dt}_{\mid t=t} \, \sigma_{t*} X
\rangle_{\mathcal{SO}(M)/G} & = & \| {\sf v}
\frac{\nabla^q}{ds}_{\mid s=0} \varphi (\gamma(s)) \|^2_{\mathcal{SO}(M)/G} \\
&&
+ \langle {\sf v} \, \sigma_{*} X
, \frac{\nabla^q}{dt}_{\mid t=0} \frac{\nabla^q}{ds}_{\mid s=0}
\varphi_t(\gamma(s)) \rangle_{\mathcal{SO}(M)/G}.
\end{eqnarray*}
Now, from \eqref{nn}, the first summand is expressed as
\begin{equation} \label{secondvar:1}
\| {\sf v}
\frac{\nabla^q}{ds}_{\mid s=0} \varphi (\gamma(s))
\|^2_{\mathcal{SO}(M)/G} = \| \phi \frac{\nabla^q}{ds}_{\mid s=0}
\varphi (\gamma(s)) \|^2 = \| \nabla_X \varphi + \frac12[\xi^G_X
, \varphi]_{\lie{m}_{\sigma}} + [\xi^G_X ,
\varphi]_{\lie{g}_{\sigma}} \|^2
\end{equation}
and the second summand can be given by
\begin{eqnarray*}
\langle {\sf v} \, \sigma_{*} X , \frac{\nabla^q}{dt}_{\mid t=0}
\frac{\nabla^q}{ds}_{\mid s=0} \varphi_t(\gamma(s))
\rangle_{\mathcal{SO}(M)/G} & = & \langle {\sf v} \, \sigma_{*} X
,
\frac{\nabla^q}{ds}_{\mid s=0} \frac{\nabla^q}{dt}_{\mid t=0}
\varphi_t(\gamma(s)) \rangle_{\mathcal{SO}(M)/G}\\
& & + \langle R^q{( \varphi (m), {\sf v} \sigma_{*} X)}
\varphi(m), {\sf v} \, \sigma_{*} X
\rangle_{\mathcal{SO}(M)/G},
\end{eqnarray*}
where $R^q{(A,B)} = \nabla^q_{[A,B]} - [\nabla^q_A , \nabla^q_B]$
is the Riemannian curvature tensor of $\langle \cdot , \cdot
\rangle_{\mathcal{SO}(M)/G}$ and we have used that $\pi$ has
totally geodesic fibres. On one hand, by using similar arguments
as in the proof of Theorem \ref{firstvar}, we get
\begin{eqnarray} \label{secondvar:2}
\langle {\sf v} \, \sigma_{*} X ,
\frac{\nabla^q}{ds}_{\mid s=0} \frac{\nabla^q}{dt}_{\mid t=0}
\varphi_t(\gamma(s)) \rangle_{\mathcal{SO}(M)/G} & = & \langle
\phi \, \sigma_{*} X , \phi \frac{\nabla^q}{ds}_{\mid s=0}
(\sigma_t)''_{t=0}(\gamma(s)) \rangle \\
& = & - \langle \xi_X^G , \nabla_X \mbox{pr}^{\pi}_2
(\sigma_t)''_{t=0} (m) \rangle. \nonumber
\end{eqnarray}
Additionally, since $\sigma$ is harmonic, $d^* \xi^G=0$, we have
the identity
\begin{equation} \label{secondvar:3}
\langle \xi^G , \nabla \mbox{pr}^{\pi}_2 (\sigma_t)''_{t=0} (m)
\rangle = \mbox{div} (\xi^G)^{\mbox{t}} \mbox{pr}^{\pi}_2
(\sigma_t)''_{t=0} (m).
\end{equation}
On the other hand, in order to compute $\langle R^q{( \varphi
(m), {\sf v} \sigma_{*} X)} \varphi(m), {\sf v} \, \sigma_{*} X
\rangle_{\mathcal{SO}(M)/G},$ note that ${\sf v} \nabla^q_{\varphi} \psi$ is a well-defined
connection on the fibres of $\pi$. In our case, ${\sf v}
\nabla^q_{\varphi} \psi= \nabla^q_{\varphi} \psi$ and the
corresponding Riemannian curvature tensor $R^{\sf v}$ is such
that $R^{\sf v}{(\varphi ,\psi_1)} \psi_2 = R^q{(\varphi ,\psi_1)}
\psi_2$. Therefore,
\begin{eqnarray*} \langle R^q{( \varphi (m), {\sf v} \sigma_{*} X)}
\varphi(m), {\sf v} \, \sigma_{*} X
\rangle_{\mathcal{SO}(M)/G} & = & \langle R^{\sf v}{( \varphi (m), {\sf v} \sigma_{*} X)} \varphi(m) ,
{\sf v} \, \sigma_{*} X
\rangle_{\mathcal{SO}(M)/G} .
\end{eqnarray*}
Now, using \eqref{nn}, we get
\begin{eqnarray*}
\phi {\sf v} \nabla^q_{\varphi(m)} {\sf v} \nabla^q_{{\sf v} \sigma_{*} X} \varphi & = &
\frac14 [\phi \varphi , [\phi \sigma_{*} X ,
\phi \varphi]_{\lie{m}} ]_{\lie{m}}
+ \frac12 [\phi \varphi , [\phi \sigma_{*} X ,
\phi \varphi]_{\lie{g}} ]_{\lie{m}} \\
&&
+ \frac12 [\phi \varphi ,[\phi \sigma_{*} X,
\phi \varphi]_{\lie{m}}]_{\lie{g}}
+ [\phi \varphi ,[\phi \sigma_{*} X,
\phi \varphi]_{\lie{g}}]_{\lie{g}}, \\
\phi {\sf v} \nabla^q_{{\sf v} \sigma_{*} X} {\sf v}
\nabla^q_{\varphi(m)} \varphi & = & 0, \\
\phi {\sf v} \nabla^q_{[\varphi, \sigma_{*} X]} \varphi & = &
- \frac12 [\phi \varphi , [\phi \sigma_{*} X ,
\phi \varphi]_{\lie{m}} ]_{\lie{m}}
- \frac12 [\phi \varphi , [\phi \sigma_{*} X ,
\phi \varphi]_{\lie{g}} ]_{\lie{m}},\\
& &- [\phi \varphi , [\phi \sigma_{*} X ,
\phi \varphi]_{\lie{m}} ]_{\lie{g}}
- [\phi \varphi , [\phi \sigma_{*} X ,
\phi \varphi]_{\lie{g}} ]_{\lie{g}}.
\end{eqnarray*}
From these identities, and because the metric induced by the
inner product $\langle \cdot,\cdot \rangle$ on ${\mathfrak s
\mathfrak o}(n)$ is bi-invariant, it is not hard to deduce
\begin{eqnarray} \label{secondvar:4}
\qquad \quad\langle R^{\sf v}{( \varphi (m), {\sf v}
\sigma_{*} X)} \varphi(m) ,
{\sf v} \, \sigma_{*} X
\rangle_{\mathcal{SO}(M)/G} & = & - \frac34 \| [\xi_X^G ,
\varphi]_{\lie{m}_{\sigma}} \|^2
- \langle [ \varphi , [ \xi_X^G ,
\varphi]_{\lie{g}_{\sigma}} ]_{\lie{m}_{\sigma}} , \xi^G_X
\rangle\\
\nonumber & = &- \frac34 \| [\xi_X^G ,
\varphi]_{\lie{m}_{\sigma}} \|^2
- \|[\xi_X^G ,
\varphi]_{\lie{g}_{\sigma}} \|^2.
\end{eqnarray}
The required formula for the second variation follows from
\eqref{secondvar:1}, \eqref{secondvar:2}, \eqref{secondvar:3} and
\eqref{secondvar:4}. For the last part of the theorem, we use that
$\nabla \varphi = \nabla^{G}\varphi - [\xi^G,\varphi]$ and
$\nabla^{G}\varphi\in \Gamma^{\infty}(\lie{m}_{\sigma}).$
\end{proof}
For studying harmonicity as a map of $G$-structures, we need to
consider $\nabla \sigma_* $, where $\left( \nabla_X \sigma_*
\right)(Y) = \nabla^q_{X} \sigma_* Y - \sigma_* (\nabla_X Y),$ for
all $X,Y\in \mathfrak{X}(M).$ Here as before, $\nabla^q$ also
denotes the induced connection on $\sigma^*T\mathcal{SO}(M)/G.$
\begin{lemma} \label{curvphi1} If $R{(X,Y)} = \nabla_{[X,Y]} - [ \nabla_X , \nabla_Y]$ is the
Riemannian curvature tensor of $(M , \langle \cdot , \cdot
\rangle)$ and $\sigma$ is a $G$-structure on $M$, then
$$
\sigma^* \Phi (X,Y) = - (\nabla_{X} \xi^G)_Y + (\nabla_{Y}
\xi^G)_X - 2[\xi^G_X , \xi^G_Y] + [ \xi_X^G , \xi^G_Y
]_{\lie{m}_{\sigma}} = - R(X,Y)_{\lie{m}_{\sigma}}.
$$
\end{lemma}
\begin{proof} If $\wp : U \to \mathcal G(M)$ is a local section of
the reduced bundle $\mathcal G(M) \subseteq \mathcal{SO}(M)$, then
$$
\Phi_{\sigma(m)} (\sigma_* X, \sigma_* Y) = \phi \pi_{G*\wp(m)}
\Omega ( \wp_*{X}, \wp_*Y)^*_{\lie{m}} = (\sigma(m) , -
R(X,Y)_{\lie{m}_{\sigma}} ) = - R(X,Y)_{\lie{m}_{\sigma}}
$$
(see \cite[Proposition 4.5]{LM}). Now, if we use $\nabla^G =
\nabla + \xi^G$ in the expression for $R$, it is not hard to see
that
$$
R{(X,Y)} = R^G{(X,Y)} + (\nabla^G_{X} \xi^G)_Y - (\nabla^G_{Y}
\xi^G)_X + \xi^G_{ \xi^G_X Y} - \xi^G_{\xi^G_YX} - [ \xi^G_X ,
\xi^G_Y ],
$$
where $ R^G{(X,Y)} = \nabla^G_{[X,Y]} - [\nabla_X^G , \nabla_Y^G
]$. Finally, since $R^G \in \Lambda^2 T^* M \otimes
\lie{g}_{\sigma}$, $\xi^G \in T^* M \otimes \lie{m}_{\sigma}$ and
$\nabla^G$ is a $G$-connection, we get
\begin{eqnarray*}
R(X,Y)_{\lie{m}_{\sigma}} & = & (\nabla^G_{X} \xi^G)_Y -
(\nabla^G_{Y} \xi^G)_X + \xi^G_{ \xi^G_X Y} - \xi^G_{\xi^G_YX} -
[ \xi^G_X , \xi^G_Y ]_{\lie{m}_{\sigma}} \\
& = & (\nabla_{X} \xi^G)_Y -
(\nabla_{Y} \xi^G)_X + 2 [\xi^G_X , \xi^G_Y] - [ \xi^G_X ,
\xi^G_Y ]_{\lie{m}_{\sigma}}.
\end{eqnarray*}
From all of this, the Lemma follows. Finally, note also that
$R(X,Y)_{\lie{g}_{\sigma}} = R^G{(X,Y)} - [ \xi^G_X , \xi^G_Y
]_{\lie{g}_{\sigma}}$.
\end{proof}
If $\sigma^{*}\Phi=0$, the $G$-structure $\sigma$ is referred to as a
{\it flat $G$-structure}. By the final remark in the proof of the last
Lemma, this notion is characterised by $R{(X,Y)} = R^G{(X,Y)} - [
\xi^G_X , \xi^G_Y ]_{\lie{g}_{\sigma}} \in S^2 \lie{g}_{\sigma}$.
Therefore, the intrinsic torsion of a flat $G$-structure makes no
contribution to the $G$-components of $R$ orthogonal to $S^2
\lie{g}_{\sigma}$. Thus, $R$ is in the space of algebraic
curvature tensors for manifolds with parallel $G$-structure.
Now we have the tools to show some results (Theorem
\ref{verthor}, Theorem \ref{harmmap1} and Theorem \ref{superflat})
which are versions of Wood's
results given in \cite{Wood2}, expressed in terms of the
intrinsic torsion $\xi^G$ and the Riemannian curvature tensor $R$.
But we firstly recall that a $G$-structure $\sigma$ is said to be
{\it totally geodesic}, if $\nabla \sigma_* =0$. In such a
situation, $\sigma(M)$ is a totally geodesic submanifold of
$\mathcal{SO}(M)/G$. Weaker conditions can be considered by saying
that a $G$-structure $\sigma$ is {\it vertically geodesic} (resp.,
{\it horizontally geodesic}), if the vertical component (resp.,
{\it horizontal component}) of $\nabla \sigma_*$ vanishes. In
these situations, $\sigma$ sends geodesics to paths with horizontal
(resp., vertical) acceleration.
\begin{theorem} \label{verthor} If $\sigma$ is a $G$-structure on $(M,\langle
\cdot , \cdot \rangle)$, then:
\begin{enumerate}
\item[{\rm (a)}] $\phi (\nabla_X \sigma_*)Y = - \frac12 \left( (\nabla_{X} \xi^G)_Y +
(\nabla_{Y} \xi^G)_X \right)$. Therefore, $\sigma$ is vertically geodesic if and only if
$(\nabla_{X} \xi^G)_X = 0.$ In particular, if $\sigma$ is
vertically geodesic, then $\sigma$ is a harmonic $G$-structure.
\item[{\rm (b)}] $ 2 \langle \pi_* (\nabla_X \sigma_*)Y , Z
\rangle = \langle \xi^G_X , R{(Y,Z)} \rangle + \langle \xi^G_Y , R{(X,Z)}
\rangle$. Therefore, $\sigma$ is horizontally geodesic if and only
if $\langle \xi^G_X , R{(Y,Z)} \rangle$ is a skew-symmetric
three-form. In particular, if $\sigma$ is a flat $G$-structure,
then $\sigma$ is horizontally geodesic.
\end{enumerate}
\end{theorem}
\begin{proof}
For (a). Using Lemma \ref{wood:lemma}, we have
\begin{eqnarray*}
\phi \left(\nabla_{X} \sigma_*\right) Y & = & \nabla^c_{\sigma_*
X} \phi \sigma_* Y + \frac12\left\{[\phi\sigma_{*}X,
\phi\sigma_{*}Y]_{\lie{m}} - \Phi( \sigma_* X , \sigma_*
Y)\right\} - \phi \sigma_* (\nabla_X Y)
\\
& = & \nabla_{\sigma_*X} \phi \, \sigma_* Y - [ \phi \sigma_*X
, \phi \sigma_* Y ]+ \frac12 [ \phi \sigma_*X , \phi \sigma_* Y
]_{\lie{m}} \\
&& - \phi \sigma_* (\nabla_X Y) - \frac12 \Phi( \sigma_* X ,
\sigma_* Y).
\end{eqnarray*}
Now, taking $\phi \sigma_* = - \xi^G$ into account and using
Lemma \ref{curvphi1}, the required identity in (a) follows.
For (b). In \cite[Theorem 3.4 (ii)]{Wood2}, it is proved that
\begin{eqnarray*}
2 \langle \pi_* \left(\nabla_{X} \sigma_*\right) Y , Z \rangle &
= & \langle \phi \sigma_* X , \Phi( \sigma_* Y , \sigma_* Z)
\rangle + \langle \phi \sigma_* Y , \Phi( \sigma_* X , \sigma_*
Z) \rangle.
\end{eqnarray*}
Since $\phi \sigma_* = - \xi^G$ and $\Phi( \sigma_* Y , \sigma_* Z
) = -R(Y,Z)_{\lie{m}_{\sigma}}$, (b) follows.
\end{proof}
Next, we compute the respective vertical and horizontal components
of the {\it tension field} $\tau(\sigma)= \left( \nabla^q_{e_i}
\sigma_*\right)(e_i)$ used in variational problems \cite{Ur}.
Given a $G$-structure $\sigma$ on a closed Riemannian manifold
$(M,\langle\cdot ,\cdot \rangle),$ the map $(M, \langle \cdot,
\cdot \rangle) \mapsto (\mathcal{SO}(M)/G,
\langle\cdot,\cdot\rangle_{\mathcal{SO}(M)/G})$ is harmonic, i.e.,
$\sigma$ is a critical point for the energy functional on
$\mathcal{C}^{\infty}(M,\mathcal{SO}(M)/G),$ if and only if
$\tau(\sigma)$ vanishes. Because the variation vector fields of
smooth variations of $\sigma$ through sections belong to
$\Gamma^{\infty}(\sigma^{*}\mathcal{V}),$ it follows that harmonic
sections are characterised by the vanishing of the vertical
component of $\tau(\sigma)$. By Theorem \ref{verthor}(a), it
follows that $ \phi \tau(\sigma) = - (\nabla_{e_i} \xi^G)_{e_i} = d^*
\xi^G$, which agrees with the discussion of harmonic
$G$-structures given above. Since, by Theorem \ref{verthor}(b), the horizontal
component of $\tau(\sigma)$ is determined by the horizontal lift
of the vector field metrically equivalent to the one-form $\langle
\xi^G_{e_i} , R{(e_i,X)}\rangle$, then next result follows.
\begin{theorem} \label{harmmap1} A $G$-structure $\sigma$ on a closed and oriented Riemannian manifold
$(M,\langle \cdot , \cdot \rangle)$ is a harmonic map if and only
if $\sigma$ is a harmonic $G$-structure such that $\langle
\xi^G_{e_i} , R(e_i,X)\rangle=0$. Therefore, if $\sigma$ is flat,
then $\sigma$ is a harmonic map if and only if $\sigma$ is a
harmonic $G$-structure.
\end{theorem}
Such a $G$-structure $\sigma$ is said to determine a
\emph{harmonic map}, even when $M$ is possibly non-compact or
non-orientable. If ${\sf v} \left( \nabla_\cdot {\sf v}
\sigma_*\right) \cdot =0$, the $G$-structure $\sigma$ is called
{\it super-flat}.
\begin{theorem} \label{superflat} We have
\[
\phi (\nabla_X {\sf v} \sigma_*)(Y) = - \frac12 \left( (\nabla_{X}
\xi^G)_Y + (\nabla_{Y} \xi^G)_X + R(X,Y)_{\lie{m}_{\sigma}}
\right).
\]
Therefore, $\sigma$ is super-flat if and only if $\sigma$ is flat
and totally geodesic. In particular, a parallel $G$-structure is
super-flat.
\end{theorem}
\begin{proof}
Using Lemma \ref{wood:lemma}, we have
\[
\phi \left(\nabla_{X} {\sf v} \sigma_*\right) Y =
\phi(\nabla_{X}\sigma_*) Y - \phi \nabla^{q}_{X}{\sf
h}\sigma_{*}Y = \phi(\nabla_{X}\sigma_*) Y + \frac12 \Phi(
\sigma_* X , \sigma_* Y).
\]
Then, the identity follows using Lemma \ref{curvphi1} and Theorem
\ref{verthor}. Finally, note that if $\sigma$ is super-flat, then
the vanishing of the symmetric part for $X$ and $Y$ of $\phi
(\nabla_X {\sf v} \sigma_*)Y$ implies that $\sigma$ is vertically
geodesic. Meanwhile, the vanishing of the skew-symmetric part for
$X$ and $Y$ implies that $\sigma$ is flat.
\end{proof}
Relevant types of diverse $G$-structures are characterised by
saying that its intrinsic torsion $\xi^G$ is metrically equivalent
to a skew-symmetric three-form, that is, $\xi^G_X Y = - \xi^G_Y
X$. Now we will show some facts satisfied by such $G$-structures.
\begin{proposition} \label{pro:skew} For a $G$-structure $\sigma$ such
that $\xi^G_X Y = - \xi^G_{Y} X$, we have:
\begin{enumerate}
\item[{\rm (i)}]
If $[\xi^G_X , \xi^G_Y] \in \lie{g}_{\sigma}$, for all $X,Y \in
\mathfrak X(M)$,
then
$\langle R(X,Y)_{\lie{m}_{\sigma}} X , Y \rangle = 2\langle
\xi_{X} Y , \xi_X Y \rangle$. Therefore, $\sigma$ is parallel if
and only if $\sigma$ is flat if and only if $\sigma$ is
super-flat.
\item[{\rm (ii)}] If $\sigma$ is a harmonic $G$-structure, then
$\sigma$ is also a harmonic map.
\end{enumerate}
\end{proposition}
\begin{proof}
For (i). Because the condition $\xi^{G}_X Y= - \xi^{G}_Y X$
implies that $(\nabla_X \xi^{G})_Y Z = - (\nabla_X \xi^{G})_Z Y$
and $(\nabla_X^{G} \xi^{G})_Y Z = - (\nabla^G_X \xi^{G})_Z Y,$ we get
the required identity in (i) by using the expression for
$R(X,Y)_{\lie{m}_{\sigma}}$ contained in Lemma \ref{curvphi1}.
For (ii). Applying the first Bianchi identity, we have
$$
\langle \xi^G_{e_i} ,
R{(e_i,X)} \rangle = \frac13 \langle \xi^G_{e_i} e_j , e_k
\rangle \left( \langle R{(e_j,e_k)} e_i , X \rangle + \langle
R{(e_k,e_i)} e_j , X \rangle + \langle R{(e_i,e_j)} e_k , X
\rangle \right)=0.
$$
\end{proof}
In the next Section, we will study harmonicity of almost Hermitian
metric structures. Such structures are examples of $G$-structures
defined by means of one or several $(r,s)$-tensor fields $\Psi$
which are stabilised under the action of $G$, i.e., $g \cdot
\Psi= \Psi$, for all $g \in G$. Moreover, it will be possible to
characterise the harmonicity of such $G$-structures by conditions
given in terms of those tensors $\Psi$.
The \emph{connection Laplacian} (or {\it rough Laplacian})
\cite{LM} $\nabla^* \nabla \Psi$ will play a relevant r\^{o}le in
such conditions. We recall that
$$
\nabla^* \nabla \Psi = - \left( \nabla^2 \Psi \right)_{e_i,e_i},
$$
where $\{ e_1, \dots , e_n \}$ is an orthonormal frame field and
$(\nabla^2\Psi)_{X,Y} = \nabla_X (\nabla_Y \Psi) -
\nabla_{\nabla_XY}\Psi$. The next Lemma provides an expression for
$\nabla^* \nabla \Psi$ in terms of $\nabla^G$ and $\xi^G$ which
will be useful in the sequel.
\begin{lemma} \label{lapstaten}
Let $(M,\langle \cdot , \cdot \rangle)$ be an oriented Riemannian
$n$-manifold equipped with a $G$-structure, where the Lie group
$G$ is closed, connected and $G \subseteq \Lie{SO}(n)$. If
$\Psi$ is a $(r,s)$-tensor field on $M$ which is stabilised under the action of $G$, then
$$
\nabla^* \nabla \Psi = \left( \nabla^{\Lie{G}}_{e_i}
\xi^{\Lie{G}}\right)_{e_i} \Psi +
\xi^{\Lie{G}}_{\xi^{\Lie{G}}_{e_i}e_i} \Psi - \xi^{\Lie{G}}_{e_i}
(\xi^{\Lie{G}}_{e_i} \Psi).
$$
Moreover, if the $G$-structure is harmonic, then $ \nabla^* \nabla
\Psi = -\xi^{\Lie{G}}_{e_i} (\xi^{\Lie{G}}_{e_i} \Psi). $
\end{lemma}
\begin{proof}
Since $\nabla^G$ is a $G$-connection and $\Psi$ is stabilised
under the action of $G$, then $\nabla^G \Psi = 0$. Taking
$\nabla^G = \nabla + \xi^G$ into account, this implies that
$\nabla \Psi = - \xi^G \Psi$. Therefore,
$$
(\nabla^2\Psi)_{X,Y} = - \nabla_X (\xi^G_Y \Psi) +
\xi^G_{\nabla_XY}\Psi = - \nabla^G_X (\xi^G_Y \Psi) + \xi^G_X
(\xi^G_Y \Psi)+ \xi^G_{\nabla_XY}\Psi.
$$
Because of the presence of the metric $\langle \cdot , \cdot
\rangle$, any $(r,s)$-tensor field on $M$ is metrically equivalent
to a $(0,r+s)$-tensor field. Therefore, we only have to give the
proof for covariant tensor fields. Thus, we can assume that
$\Psi$ is a $(0,s)$-tensor field on $M$. By a straightforward
computation we get {\footnotesize
\begin{gather*}
\nabla^G_X (\xi^G_Y \Psi)(Z_1, \dots, Z_s) = - \sum_{i=1}^s X
\left(\Psi(Z_1,\dots, \xi^G_Y Z_i, \dots, Z_s)\right) +
\sum_{i=1}^s \Psi(Z_1,\dots, \xi^G_Y \nabla^G_X Z_i, \dots,
Z_s)\\[-0mm]
+ \sum^s_{i,j=1\atop i\neq j}\Psi(Z_1,\dots, \xi^G_Y
Z_i, \dots , \nabla^G_X Z_j, \dots, Z_s).
\end{gather*}}
Now using $\nabla^G \Psi = 0$, we have
\begin{eqnarray*}
\sum_{i=1}^s X \left(\Psi(Z_1,\dots,
\xi^G_Y Z_i, \dots, Z_s)\right) & = & \sum_{i=1}^s
\Psi(Z_1,\dots,
\nabla^G_X \xi^G_Y Z_i, \dots, Z_s) \\
&& +\sum^s_{i,j=1\atop i\neq j}\Psi(Z_1,\dots, \xi^G_Y Z_i, \dots
, \nabla^G_X Z_j, \dots, Z_s).
\end{eqnarray*}
Taking this identity into account in the expression for
$\nabla^G_X (\xi^G_Y \Psi)$, we will obtain $\nabla^G_X (\xi^G_Y
\Psi) = (\nabla^G_X \xi^G)_Y \Psi + \xi^{G}_{\nabla^G_X Y} \Psi$.
Therefore, for the second covariant derivative we get
$$
(\nabla^2\Psi)_{X,Y} = - (\nabla^G_X \xi^G)_Y \Psi -
\xi^{G}_{\nabla^G_X Y} \Psi + \xi^G_{\nabla_XY}\Psi + \xi^G_X
(\xi^G_Y \Psi),
$$
which proves the required expression for $\nabla^* \nabla \Psi.$
\end{proof}
\section{Harmonic almost Hermitian structures}{\indent}
\label{sect:almherm}
An almost Hermitian manifold is a
$2n$-dimensional Riemannian manifold $(M,\langle \cdot, \cdot
\rangle)$ equipped with an almost complex structure $J$
compatible with the metric, that is, $ J^{2} = -{\rm Id}$ and
$\langle JX,JY \rangle = \langle X,Y \rangle$, for all vector
fields $X$, $Y$. Associated to the almost Hermitian structure, the
two-form $\omega= \langle \cdot, J \cdot\rangle$, called the {\em
K\"ahler form}, is usually considered. Using $\omega$, $M$ can be
oriented by fixing a constant multiple of $\omega^n = \omega
\wedge \dots^{(n}\wedge \omega$ as volume form. Likewise, the
presence of an almost Hermitian structure is equivalent to say
that $M$ is equipped with a $\Lie{U}(n)$-structure. It is well
known that $\Lie{U}(n)$ is a closed and connected subgroup of
$\Lie{SO}(2n)$ and $\Lie{SO}(2n)/ \Lie{U}(n)$ is reductive; in fact, it is a
Riemannian symmetric space. Moreover, we have the decomposition
into $\Lie{U}(n)$-modules $\lie{so}(M) = \lie{u}(n)(M) \oplus
\lie{m}(M)$. We will omit the subindex $\sigma$ used in previous
Sections. Also, as in references, we shall simply denote
$\lie{u}(n)(M)$ and $\lie{m}(M)$ by $\lie{u}(n)$ and
$\lie{u}(n)^{\perp}.$ The bundle $\lie{u}(n)$ (resp.,
$\lie{u}(n)^{\perp}$) consists of those skew-symmetric
endomorphisms $A$ on tangent vectors such that $AJ=JA$ (resp.,
$AJ=-JA$). The identification $b_A( \cdot , \cdot ) = \langle A
\cdot , \cdot \rangle$ implies $\Lambda^2 \mbox{T}^* M \cong
\lie{so}(M)$. Therefore, $\Lambda^2 \mbox{T}^* M = \lie{u}(n)
\oplus \lie{u}(n)^{\perp}$, where in this case $\lie{u}(n)$
(resp., $\lie{u}(n)^{\perp}$) consists of those two-forms on $M$
which are Hermitian (resp., anti-Hermitian), i.e., $b(J\cdot , J
\cdot) = b(\cdot , \cdot)$ (resp., $b(J\cdot , J \cdot) = -
b(\cdot , \cdot)$).
The minimal $\Lie{U}(n)$-connection is given by $\nabla^{\Lie{U}(n)} =
\nabla + \xi^{\Lie{U}(n)}$, with
\begin{equation} \label{torsion:xi}
\xi^{\Lie{U}(n)}_X Y = - \tfrac12 J\left( \nabla_X J \right) Y,
\end{equation}
(see~\cite{Falcitelli-FS:aH}). Moreover, $\xi^{\Lie{U}(n)} \in
\mbox{T}^* M \otimes \lie{u}(n)^\perp$ is equivalent to the condition
$$
\xi^{\Lie{U}(n)} J + J \xi^{\Lie{U}(n)} = 0.
$$
Since $\Lie{U}(n)$~stabilises the K\"ahler form~$\omega$, it follows
that $\nabla^{\Lie{U}(n)} \omega = 0$. Taking this into account,
$\xi^{\Lie{U}(n)} \in \mbox{T}^* M \otimes \lie{u}(n)^\perp$ implies
$\nabla \omega = - \xi^{\Lie{U}(n)} \omega \in \mbox{T}^* M
\otimes \lie{u}(n)^\perp$. Thus,
one can
identify the $\Lie{U}(n)$-components of $\xi^{\Lie{U}(n)}$ with the
$\Lie{U}(n)$-components of $\nabla\omega$:
\begin{enumerate}
\item if $n=1$, $ \xi^{\Lie{U}(1)} \in \mbox{T}^* M \otimes
\lie{u}(1)^\perp = \{ 0 \}$; \item if $n=2$, $ \xi^{\Lie{U}(2)} \in
\mbox{T}^* M \otimes \lie{u}(2)^\perp = \Wc2 \oplus
\Wc4$;
\item if $n \geqslant 3$, $ \xi^{\Lie{U}(n)} \in \mbox{T}^* M
\otimes \lie{u}(n)^\perp =
\Wc1 \oplus \Wc2 \oplus \Wc3 \oplus \Wc4$.
\end{enumerate}
Here the summands~$\Wc{i}$ are the irreducible $\Lie{U}(n)$-modules
given by Gray and Hervella in~\cite{Gray-H:16}. In the following,
we will merely write $\xi = \xi^{\Lie{U}(n)}$ and $\xi_{(i)}$~will
denote the component in~$\Wc{i}$ of the intrinsic torsion~$\xi$.
For one-forms $\theta$, we will write $J \theta (X) = -
\theta(JX)$, for all $X \in \mathfrak X(M)$. The one-form $J
\mbox{\it d}^* \omega$ is a constant multiple of the Lee one-form
which determines the $\mathcal W_4$-part of the intrinsic torsion
$\xi$ \cite{Gray-H:16}. Moreover, from \eqref{torsion:xi}, we will
have $ 2 \langle \xi_X Y , Z \rangle = - (\nabla_X
\omega)(Y,JZ)$. Now, using this last identity, one obtains that
the vector field $\xi_{e_i} e_i$ which takes part in the
harmonicity criteria (see Theorem \ref{carharm}) is given by $ 2
\xi_{e_i} e_i = - J (d^* \omega)^{\sharp}$.
\begin{theorem} \label{characharmherm1}
For an almost Hermitian $2n$-manifold $(M,\langle \cdot, \cdot
\rangle ,J)$ with K\"ahler form $\omega$, we have that the
following conditions are equivalent:
\begin{enumerate}
\item[{\rm (i)}] The almost Hermitian structure is harmonic.
\item[{\rm (ii)}] $[J,\nabla^* \nabla J] = 0$, where $[\cdot,\cdot]$ denotes the commutator bracket for endomorphisms.
\item[{\rm (iii)}] $\nabla^* \nabla \omega$ is a Hermitian two-form.
\item[{\rm (iv)}] $ \nabla^* \nabla \omega (X,Y) = - 4 \omega (\xi_{e_i} X,
\xi_{e_i}Y)$, for all $X,Y \in \mathfrak{X}(M)$.
\end{enumerate}
\end{theorem}
\begin{remark}
{\rm Condition (ii) represents the Euler-Lagrange equations given
in \cite{Wood1} for the harmonic almost Hermitian structure
determined by $J$}.
\end{remark}
\begin{proof}
Using Theorem \ref{carharm}, Lemma \ref{lapstaten} and $\xi J = -J
\xi$, it follows that (i) implies (iv) and (iv) implies
$$
\left( ( \nabla^{\Lie{U}(n)}_{e_i} \xi)_{e_i} + \xi_{\xi_{e_i}e_i}
\right) \omega =0.
$$
But note that the map $A \to -\omega(A \cdot, \cdot) - \omega(
\cdot, A
\cdot)$ from $\lie{u}(n)^{\perp} \subseteq \lie{so}(2n)$ to
$\lie{u}(n)^{\perp} \subseteq \Lambda^2 \mbox{T}^* M$ is a
$U(n)$-isomorphism. Therefore, $( \nabla^{\Lie{U}(n)}_{e_i} \xi)_{e_i} +
\xi_{\xi_{e_i}e_i}=0$.
Taking into account that $(\nabla^{\Lie{U}(n)}_{e_i} \xi)_{e_i}
\omega, \; \xi^{\Lie{U}(n)}_{\xi_{e_i}e_i} \omega $ belong to $
\lie{u}(n)^{\perp}$, the equivalence between (iii) and (iv) is an
immediate consequence of Lemma \ref{lapstaten} and $\xi J = -J
\xi$.
Because we have $(\nabla_{X}\omega)(Y,Z) = \langle Y,
(\nabla_{X}J)Z\rangle$, it follows that
$$
(\nabla^{*}\nabla \omega)(X,Y) = \langle X , (\nabla^* \nabla
J)Y\rangle.
$$
This implies the equivalence between (ii) and (iii).
\end{proof}
Tricerri and Vanhecke \cite{Tricerri-Vanhecke:aH} gave a complete
decomposition of the Riemannian curvature tensor~$R$ of an almost
Hermitian manifold $M$ into irreducible $\Lie{U}(n)$-components. These
divide naturally into two groups, one forming the
space~$\mathcal K=\Cur(\lie{u}(n))$ of algebraic curvature tensors for a
K\"ahler manifold (characterised by $\xi=0$), and the other,
$\mathcal K^\perp$, being its orthogonal complement. Additionally,
Falcitelli et al.~\cite{Falcitelli-FS:aH} showed that the
components of~$R$ in~$\mathcal K^\perp$ are linearly determined by the
covariant derivative $\nabla \xi$. By using the minimal
$\Lie{U}(n)$-connection $\nabla^{\Lie{U}(n)}$ of~$M$, Falcitelli et
al.\ display some tables which show whether or not the tensors
$\nabla^{\Lie{U}(n)} \xi_{(i)}$ and $\xi_{(i)}\odot\xi_{(j)}$
contribute to the components of~$R$ in~$\mathcal K^\perp$. Some
variations of such tables have been given in \cite{FMCAS}.
Explanations for these variations are based in Equation
\eqref{d2omega:part20} given below. All of this has provided a
unified approach to many of the curvature results obtained by
Gray~\cite{Gray:curvature}.
For studying some components of $R$, it is necessary to consider
the usual Ricci curvature tensor $\Ric$, associated to the metric
structure, and another tensor $\Ric^*$, called the {\it
$\ast$-Ricci curvature tensor}, associated to the almost Hermitian
structure and defined by $\Ric^* (X,Y) = \langle R_{X , e_i} JY ,
Je_i\rangle$.
In general, $\Ric^*$ is not symmetric. However, because
$
\Ric^*(JX,JY) = \Ric^*(Y,X)$,
it
can be claimed that its Hermitian part coincides with its
symmetric part $\Ric^*_{\mbox{\footnotesize s}}$, and its
anti-Hermitian part is equal to its skew-symmetric part
$\Ric^*_{\mbox{\footnotesize alt}}$. Under the action of
$\Lie{U}(n)$, $\Ric^*$ is decomposed into $\Ric^* =
\Ric^*_{\mbox{\footnotesize s}} + \Ric^*_{\mbox{\footnotesize
alt}} $, where $\Ric^*_{\mbox{\footnotesize s}} \in \mathbb R
\langle \cdot , \cdot \rangle \oplus \lie{su}(n)_s \subseteq S^2
\mbox{T}^* M$ and $ \Ric^*_{\mbox{\footnotesize alt}} \in
\lie{u}(n)^{\perp} \subseteq \Lambda^2 \mbox{T}^* M$
\cite{Tricerri-Vanhecke:aH}. Because in the present work the
tensor $\Ric^*_{\mbox{\footnotesize alt}}$ will play a special
r{\^o}le, we recall the following result.
\begin{lemma}[\cite{FMCAS}] \label{astricciah}
If $M$ is an almost Hermitian $2n$-manifold with
minimal $\Lie{U}(n)$-connection $\nabla^{\Lie{U}(n)} = \nabla + \xi$, then
the skew-symmetric part $\Ric^*_{\mbox{\footnotesize alt}}$ of the
$*$-Ricci tensor is given by
\begin{equation}
\label{otraricsh}
\begin{split}
\Ric^*_{\mbox{\rm \footnotesize alt}} (X,Y)
& = - \inp{\xi_{J\xi_{e_i} e_i}JX}Y
+ \inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi)_{Je_i}JX}Y.
\end{split}
\end{equation}
\end{lemma}
From the fact $d^2 \omega=0$, writing $d^2 \omega$ by means of
$\nabla^{\Lie{U}(n)}$ and $\xi$, the identity
\begin{equation}
\label{d2omega:part20}
{\rm
\begin{array}{rl}
0 =
&3\inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(1)})_{e_i}X}Y
- \inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(3)})_{e_i}X}Y
+ (n-2) \inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(4)})_{e_i}X}Y
\\[2mm]
&
+ \inp{{\xi_{(3)}}_Xe_i}{{\xi_{{(1)}e_i}}Y}
- \inp{{\xi_{(3)}}_Ye_i}{{\xi_{(1)}}_{e_i}X}
+ \inp{{\xi_{(3)}}_Xe_i}{{\xi_{{(2)}e_i}}Y}
- \inp{{\xi_{(3)}}_Ye_i}{{\xi_{{(2)}e_i}}X}
\\[1mm]
&
- \displaystyle \frac{n-5}{n-1}\inp{{\xi_{{(1)} \xi_{{(4)} e_i} e_i}}X}Y
- \displaystyle \frac{n-2}{n-1}\inp{{\xi_{{(2)}\xi_{{(4)} e_i} e_i}}X}Y
+ \inp{{\xi_{{(3)}\xi_{{(4)} e_i} e_i}}X}Y
\end{array} }
\end{equation}
was deduced in \cite{FMCAS}. Here, we will make use of
\eqref{d2omega:part20} below. Likewise, we need to point out that
${\xi_{{(4)} \, \xi_{e_i} e_i}} = 0$. In fact, this directly
follows from the expression for $\xi_{(4)}$ \cite{Gray-H:16} given
by
\begin{equation} \label{torsionw4}
\langle \xi_{{(4)}X} Y, JZ \rangle = -
\frac{1}{4(n-1)}
\left\{ X^{\flat} \wedge d^* \omega (Y,Z) -JX^{\flat} \wedge Jd^* \omega (Y,Z)
\right\}.
\end{equation}
Some results proved in \cite{Wood1} are recovered in Theorem
\ref{classhermharm} below which is completed with other additional
results. In proving those results, the next Lemma will be useful.
\begin{lemma} \label{previo} For an almost Hermitian $2n$-manifold $(M,\langle \cdot
,\cdot \rangle, J)$, we have
\begin{eqnarray*}
2(n-1) \langle (\nabla^{U(n)}_{e_i} \xi_{(4)})_{e_i} X , Y
\rangle & = & d (\xi^{\flat}_{e_i} e_i ) (X,Y) - d
(\xi^{\flat}_{e_i} e_i) (JX,JY) \\
& & - 4 \langle \xi_{(1)\xi_{e_i} e_i } X , Y \rangle + 2 \langle \xi_{(2)\xi_{e_i} e_i } X , Y
\rangle .
\end{eqnarray*}
\end{lemma}
\begin{proof} From the expression \eqref{torsionw4} we have
\begin{eqnarray} \label{w4expre}
2(n-1) \xi_{{(4)}X} & = & X^\flat \otimes \xi_{e_i} e_i
- \xi^{\flat}_{e_i} e_i \otimes X - JX^\flat \otimes J
\xi_{e_i} e_i + J \xi^{\flat}_{e_i} e_i \otimes JX.
\end{eqnarray}
Now, fixing a local orthonormal frame field $\{ e_1 , \ldots ,
e_{2n} \}$ such that $(\nabla_{e_i} e_j )_m =0$, for a given $m
\in M$, we will compute $((\nabla_{e_i} \xi_{(4)})_{e_i} X)_m$. In
fact, by a straightforward computation we will obtain
\begin{eqnarray*}
2(n-1) \langle (\nabla_{e_i} \xi_{(4)})_{e_i} X , Y \rangle & =
& d (\xi^{\flat}_{e_i} e_i ) (X,Y) - d (\xi^{\flat}_{e_i} e_i)
(JX,JY) \\
&& + 2 \langle \xi_{JX} JY - \xi_{JY} J X , \xi_{e_i} e_i
\rangle.
\end{eqnarray*}
Then, taking the properties of $\xi_{(i)}$ given in
\cite{Gray-H:16} into account, we will get
\begin{eqnarray*}
\langle \xi_{{(4)}X} Y - \xi_{{(4)}Y} X , \xi_{e_i} e_i
\rangle & = & 0.
\end{eqnarray*}
Thus, we will obtain the identity
\begin{eqnarray*}
2(n-1) \langle (\nabla_{e_i} \xi_{(4)})_{e_i} X , Y \rangle & =
& d (\xi^{\flat}_{e_i} e_i ) (X,Y) - d (\xi^{\flat}_{e_i} e_i)
(JX,JY) - 4 \langle \xi_{(1)\xi_{e_i} e_i } X , Y \rangle
\\
&& + 2 \langle \xi_{(2)\xi_{e_i} e_i } X , Y \rangle
+ 2 \langle \xi_{(3)X } Y - \xi_{(3)Y } X, \xi_{e_i} e_i
\rangle.
\end{eqnarray*}
Finally, it is not hard to show
$$
2(n-1) \langle (\xi_{e_i} \xi_{(4)})_{e_i} X ,
Y \rangle = - 2 \langle \xi_{(3)X } Y - \xi_{(3)Y } X ,
\xi_{e_i} e_i \rangle .
$$
From the last two identities, the required identity in the Lemma
follows.
\end{proof}
\begin{theorem} \label{classhermharm}
For an almost Hermitian $2n$-manifold $(M,\langle \cdot
,\cdot \rangle, J)$, we have:
\begin{enumerate}
\item[{\rm (i)}] If $M$ is of type $\mathcal{W}_1 \oplus
\mathcal{W}_2 \oplus \mathcal{W}_4$, then the almost Hermitian
structure is harmonic if and only if
\begin{eqnarray*}
\qquad \quad (n-1) \Ric_{alt}^* (X,Y) & = & d (\xi^{\flat}_{e_i} e_i ) (X,Y) - d (\xi^{\flat}_{e_i}
e_i) (JX,JY) + 2 (n-3) \langle \xi_{(1)\xi_{e_i} e_i } X , Y
\rangle \\
& & + 2n \langle \xi_{(2)\xi_{e_i} e_i } X , Y \rangle .
\end{eqnarray*}
\item[{\rm (ii)}] If $M$ is quasi-K{\"a}hler $(\mathcal{W}_1 \oplus
\mathcal{W}_2)$, then the almost Hermitian structure is harmonic
if and only if $\Ric^*_{\mbox{\rm \footnotesize alt}} = 0$.
\item[{\rm (iii)}]
If $M$ is locally conformal almost K{\"a}hler $(\mathcal{W}_2
\oplus \mathcal{W}_4)$, then the almost Hermitian structure is
harmonic
if and only if
\begin{equation*}
(n-1) \Ric^*_{\mbox{\rm \footnotesize alt}}(X,Y) = 2n \langle
\xi_{\xi_{e_i} e_i} X, Y \rangle,
\end{equation*}
for all $X,Y \in \mathfrak X (M)$.
\item[{\rm (iv)}] If $M$ is
of type $\mathcal{W}_1 \oplus \mathcal{W}_4$ and $n \neq 2$, then
the almost Hermitian structure is harmonic if and only if
\begin{equation*}
(n-1)(n-5) \Ric^*_{\mbox{\rm \footnotesize alt}}(X,Y) = 2(n+1)(
n-3)\langle \xi_{ \xi_{e_i} e_i} X, Y \rangle,
\end{equation*}
for all $X,Y \in \mathfrak X (M)$.
\item[{\rm (v)}] If $M$ is
Hermitian $(\mathcal{W}_3 \oplus \mathcal{W}_4)$, then the almost
Hermitian structure is harmonic
if and only if
\begin{equation*}
\Ric^*_{\mbox{\rm \footnotesize alt}} (X,Y) = - 2 \langle
\xi_{\xi_{e_i} e_i} X , Y \rangle.
\end{equation*}
\end{enumerate}
In particular:
\begin{enumerate}
\item[\rm (i)$^*$] A nearly K{\"a}hler structure
$(\mathcal{W}_1)$ is a harmonic map.
\item[\rm (ii)$^*$] If the exterior derivative of the Lee form is Hermitian $($in particular, if it is closed$)$, a Hermitian
structure is harmonic if and only if $\Ric^*_{\mbox{\rm \footnotesize
alt}}=0$.
\item[\rm (iii)$^*$] A balanced Hermitian structure $(\mathcal{W}_3)$ is a harmonic almost Hermitian structure.
\item[\rm (iv)$^*$] A locally conformal K{\"a}hler structure $(\mathcal{W}_4)$ is
a harmonic almost Hermitian structure. In such a case, the Lee
form is closed and, therefore, $ \Ric^*_{\mbox{\rm \footnotesize
alt}} =0$.
\end{enumerate}
\end{theorem}
\begin{proof}
For (i). By Lemma \ref{astricciah}, using the properties of
$\xi_{(i)}$ given in \cite{Gray-H:16}, we have
\begin{equation}
\begin{split}
\Ric^*_{\mbox{\rm \footnotesize alt}} (X,Y)
& = \inp{\xi_{(1)\xi_{e_i} e_i}X}Y
+ \inp{\xi_{(2)\xi_{e_i} e_i}X}Y
- \inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(1)})_{e_i}X}Y\\
& \quad
- \inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(2)})_{e_i}X}Y
+ \inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(4)})_{e_i}X}Y.
\end{split} \nonumber
\end{equation}
Now, by Theorem \ref{carharm} and Lemma \ref{previo}, (i)
follows. In particular, if the structure is nearly K\"{a}hler, by
Equation \eqref{d2omega:part20}, we have
$(\nabla^{\Lie{U}(n)}_{e_i}\xi)_{e_i} =0$. Thus,
we get $\Ric^*_{\mbox{\rm \footnotesize alt}}=0$. Finally, by
Proposition \ref{pro:skew} (ii), (i)$^*$ follows.
Parts (ii) and (iii) are immediate consequences of (i). We recall that,
in case of locally conformal almost K\"{a}hler manifolds, the Lee one-form is
closed. This fact is well known. In particular, if the structure
is locally conformal K\"{a}hler, then
$\inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi)_{e_i}X}Y =0$ by Lemma
\ref{previo}. Moreover, we will also have $\inp{\xi_{(4)\xi_{e_i}
e_i}X}Y =0$. Then (iv)$^*$ follows.
For (iv). Because the structure is of type $\mathcal{W}_1 \oplus
\mathcal{W}_4$, Equation \eqref{d2omega:part20} and Equation
\eqref{otraricsh} are respectively given by
\begin{equation}
\label{d2omega:part20w1w4}
\begin{array}{rl}
0
=& \;
3\inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(1)})_{e_i}X}Y +
(n-2)
\inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(4)})_{e_i}X}Y
- \frac{n-5}{n-1}\inp{{\xi_{{(1)} \xi_{{(4)}e_i} e_i}}X}Y
,
\end{array}
\end{equation}
\begin{equation}
\label{otraricshw1w4}
\begin{array}{rl}
\Ric^*_{\mbox{\rm\footnotesize alt}} (X,Y)
= & \inp{\xi_{{(1)}\xi_{{(4)}e_i} e_i}X}Y
-\inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(1)})_{e_i}X}Y
+\inp{(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(4)})_{e_i}X}Y .
\end{array}
\end{equation}
Likewise, the characterising condition for harmonic almost
Hermitian structures given in Theorem \ref{carharm} is expressed
by
\begin{equation} \label{characharmhermw1w4}
- \langle \xi_{{(1)} \xi_{{(4)}e_i} e_i} X , Y \rangle = \langle (
\nabla^{\Lie{U}(n)}_{e_i}
\xi_{(1)})_{e_i} X , Y \rangle + \langle (
\nabla^{\Lie{U}(n)}_{e_i}
\xi_{(4)})_{e_i} X , Y \rangle .
\end{equation}
Now, for $n \geq 3$, it is straightforward to check that Equation
\eqref{d2omega:part20w1w4}, Equation \eqref{otraricshw1w4} and
Equation \eqref{characharmhermw1w4} imply the expression for
$\Ric^*_{\mbox{\rm \footnotesize alt}}$ required in (iv).
Reciprocally, it is also direct to see that such an expression for
$\Ric^*_{\mbox{\rm \footnotesize alt}}$, Equation
\eqref{d2omega:part20w1w4} and Equation \eqref{otraricshw1w4}
imply Equation \eqref{characharmhermw1w4}. Therefore, the almost
Hermitian structure is harmonic.
For (v). The intrinsic torsion $\xi$ for Hermitian structures is
such that $\xi_{JX} JY = \xi_{X} Y$ \cite{Gray-H:16}. Therefore,
the required identity in (v) is an immediate consequence of
Theorem \ref{carharm} and Lemma \ref{astricciah}.
For (ii)$^*$. By Lemma \ref{previo}, if the exterior derivative of
the Lee form is Hermitian, then
$(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(4)})_{e_i} = 0$ in
this case. But we also have
$(\nabla^{\Lie{U}(n)}_{e_i}\xi_{(3)})_{e_i}= \xi_{{(3)} \xi_{e_i} e_i}$ by
\eqref{d2omega:part20}. Therefore, the assertion is a
consequence of (v).
For (iii)$^*$. Now, we have $\xi_{e_i} e_i=0$. Moreover, Equation
\eqref{d2omega:part20} implies $(
\nabla^{\Lie{U}(n)}_{e_i}\xi_{(3)})_{e_i} = 0$.
\end{proof}
\begin{example}{\rm It is well-known that a $3$-symmetric space
$(M,\langle\cdot,\cdot\rangle)$ admits a canonical almost complex
structure $J$ compatible with $\langle\cdot,\cdot\rangle$ and
$(M,\langle\cdot,\cdot\rangle, J)$ becomes a quasi-K{\"a}hler
manifold. Further, the intrinsic torsion $\xi=
-\frac{1}{2}J(\nabla J)$ of the corresponding $U(n)$-structure is
a homogeneous structure (see for example \cite{Sato}). Hence,
$\xi$ is $\nabla^{U(n)}$-parallel and then we get
$\Ric^*_{\mbox{\rm \footnotesize alt}}= 0.$ Then, from Theorem
\ref{classhermharm} (ii), we can conclude that} the canonical
almost Hermitian structure of a $3$-symmetric space is harmonic.
\end{example}
If we write $\langle \xi_{(1)X} Y , Z \rangle = \Psi_{\xi}
(X,Y,Z)$, then $\Psi_{\xi}$ is a skew-symmetric three-form such
that $\Psi_{\xi} (JX,JY,Z)= -\Psi_{\xi} (X,Y,Z)$ \cite{Gray-H:16}.
For $n \geq 3$, if we have a harmonic almost Hermitian structure
of type $\mathcal W_1 \oplus \mathcal W_4$, then it follows, using
Theorem \ref{characharmherm1} (iv) and Equation \eqref{torsionw4},
that the connection Laplacian of $\omega$ is given by
$$
\nabla^* \nabla \omega (X,Y) = 4 \langle X \lrcorner \Psi_{\xi} ,
JY \lrcorner \Psi_{\xi} \rangle + \frac{1}{4(n-1)^2} d^* \omega
\wedge J d^* \omega (X,Y).
$$
Note that, in general, the right side of this equality is not
collinear with $\omega$. In particular, if $n=3$, we obtain
$$
\nabla^* \nabla \omega = \frac{ \| \Psi_{\xi} \|^2}{36} \omega +
\frac{1}{16} d^* \omega \wedge J d^* \omega.
$$
A harmonic section $\sigma$ into a sphere bundle of a
Riemannian vector bundle is characterised by the condition
$\nabla^*\nabla \sigma = \frac{\| \nabla \sigma\|^2}{\| \sigma
\|^2} \sigma$ or, equivalently, $\nabla^*\nabla \sigma$ is
collinear with $\sigma$ (see \cite{GMS}, \cite{Salvai}). From the
previous paragraphs, the first part of next result is immediate.
\begin{proposition} For six-dimensions,
the nearly K\"{a}hler structures are the only harmonic almost
Hermitian structures of type $\Wc1 + \Wc4$, such that $\omega$ is
also a harmonic section into a sphere bundle in $\Lambda^2 T^*
M$. For four-dimensions, locally conformal K\"{a}hler structures
imply that $\omega$ is a harmonic section into a sphere
bundle in $\Lambda^2 T^* M$.
\end{proposition}
\begin{proof} Let $M$ be a locally conformal K\"{a}hler four-manifold.
In order to compute $(\nabla^* \nabla \omega)_m$, for $m \in M$, we
will consider a local orthonormal frame field $\{ e_1 , \ldots ,
e_4 \}$ such that $(\nabla_{e_i} e_j )_m =0$. Thus, because in
this case, $\nabla_X \omega = X^\flat \wedge (\theta^\sharp
\lrcorner \omega) - \theta \wedge (X \lrcorner \omega)$, where
$\theta = \frac12 J d^* \omega = - \xi_{e_i} e_i$
\cite{Gray-H:16}, we have
$$
(\nabla^* \nabla \omega)_m = - e_i^\flat \wedge (\theta^\sharp
\lrcorner (\nabla_{e_i} \omega)) - e_i^\flat \wedge (
(\nabla_{e_i} \theta)^\sharp \lrcorner \omega) + \nabla_{e_i}
\theta \wedge (e_i \lrcorner \omega) - \theta \wedge d^* \omega.
$$
Now, using the expression for $\nabla \omega$ and the identities
$e_i \wedge (e_i \lrcorner \omega) = 2 \omega$ and $e_i^\flat
\wedge \theta \wedge (\theta^\sharp \lrcorner ( e_i \lrcorner
\omega))= \theta \wedge (\theta^\sharp \lrcorner \omega)$, we
obtain
$$
- e_i^\flat \wedge (\theta^\sharp
\lrcorner (\nabla_{e_i} \omega)) = - 2 \theta \wedge (
\theta^\sharp \lrcorner \omega) + 2 \| \theta \|^2 \omega.
$$
Moreover, because $\theta$ is closed, we have $(\nabla_X
\theta)(Y) = (\nabla_Y \theta)(X)$ and it is not hard to see
$$
e_i^\flat \wedge ( (\nabla_{e_i}
\theta)^\sharp \lrcorner \omega) = \nabla_{e_i} \theta \wedge (e_i
\lrcorner \omega).
$$
Finally, from all of this and $d^* \omega =- 2 \theta^\sharp
\lrcorner\omega$, we get $\nabla^* \nabla \omega = 2 \| \theta
\|^2 \omega $.
\end{proof}
\begin{remark}{\rm
For nearly K\"{a}hler connected six-manifolds which are not K\"{a}hler,
if $5 \alpha$ denotes the Einstein constant and using
\cite[Equation (3.10)]{FMCAS}, we have $$ \nabla^* \nabla \omega
(X,Y) =
4 \langle \xi_{e_i} X,
\xi_{e_i}JY \rangle = 4 \alpha \, \omega(X,Y).
$$
Therefore, $\| \Psi_{\xi} \|^2= 144 \alpha$.
On the other hand, for locally conformal K\"{a}hler four-manifolds, we
have $\nabla^* \nabla \omega = 2 \| \theta \|^2 \omega $.
Therefore, $ \frac1{16} \| \nabla \omega \|^2 = \frac12 \|
\theta \|^2 = \frac12 \| \xi_{e_i} e_i \|^2 = \frac18 \| J d^*
\omega\|^2 $ that, in general, it is not constant. }
\end{remark}
In \cite{BHLS}, Bor et al. have shown diverse results relative to the energy of almost
Hermitian structures defined on certain compact Riemannian
manifolds. Concretely, they prove the following
\begin{theorem}[\cite{BHLS}] \label{bor-Hlam-Salva} Let
$(M^{2n},\langle \cdot , \cdot \rangle )$ be a compact Riemannian
manifold such that
\begin{enumerate}
\item[$\bullet$] $n \geq 3$ and $(M,\langle \cdot , \cdot \rangle
)$ is conformally flat, or
\item[$\bullet$] $n =2$ and $(M,\langle \cdot , \cdot \rangle
)$ is anti-self-dual.
\end{enumerate}
Then an orthogonal almost complex structure $J$ on $M$ is an
energy minimiser in each one of the following three cases:
\begin{enumerate}
\item[\rm (i)] $n=3$ and $J$ is of type $\mathcal W_1 \oplus
\mathcal W_4$.
\item[\rm (ii)] $n=2$ or $n\geq 4$ and $J$ is of
type $\mathcal W_4$.
\item[\rm (iii)] $n$ arbitrary and $J$ is of type $\mathcal W_2$.
\end{enumerate}
\end{theorem}
Because $\Ric_{\mbox{\footnotesize alt}}^*$ determines certain
$\Lie{U}(n)$-component, $n \geq 2$, of the Weyl curvature tensor
$W$ on almost Hermitian manifolds (see \cite{Falcitelli-FS:aH,FMCAS}), then
we have $\Ric_{\mbox{\footnotesize alt}}^*=0$ for almost
Hermitian $2n$-manifolds which are locally conformal flat. In
particular, for $n=2$, if we consider the action $\Lie{SO}(4)$
determined by the volume form given by $Vol = \frac12 \omega
\wedge \omega$, the Weyl curvature tensor is decomposed into two
components, that is, $W= W^+ + W^-$. If $W^+=0$ ($W^-=0$), the
manifold is called {\it anti-self-dual} ({\it self-dual}). More
details can be found in \cite{Salamon,Falcitelli-FS:aH}.
Since $\Ric_{\mbox{\footnotesize alt}}^*$
determines certain $\Lie{U}(2)$-component of $W^+$, if the
manifold is anti-self-dual, then we will also have
$\Ric_{\mbox{\footnotesize alt}}^*=0$. Therefore, it follows that
the results here presented are in agreement with Theorem
\ref{bor-Hlam-Salva}.
Now, we focus attention on harmonicity as a map of almost
Hermitian structures. Results in that direction were already
obtained in \cite{Wood2}; we will complete such results by using
tools here presented. In next Lemma, $s^*$ will denote the {\it
$*$scalar curvature} defined by $s^* = \Ric^* (e_i, e_i)$. If
$\Ric^* (X,Y) = \frac{1}{2n} s^* \langle X,Y \rangle$, then the
almost Hermitian manifold is said to be {\it weakly $*$Einstein}.
If $s^*$ is constant, a weakly-$*$Einstein manifold is called {\it
$*$Einstein}.
In Riemannian geometry, it is satisfied $2 d^* \Ric + d s =0$,
where $s$ is the scalar curvature. The $*$analogue in almost
Hermitian geometry is false. In fact, this is clarified by the
following two results.
\begin{lemma} \label{id:genera}
For almost Hermitian manifolds, we have {\rm
$$
2 d^* \Ric^{*t}(X) + ds^* (X) = 2 \langle R{(e_i, X)} , \xi_{Je_i} J \rangle
- 4 \Ric^* ( X, \xi_{e_i} e_i ) + 4 \langle \Ric^* , \xi^{\flat}_X \rangle,
$$}
where $\Ric^{*t}(X,Y) = \Ric^{*}(Y,X)$ and $\xi_X^{\flat} (Y,Z) =
\langle \xi_X Y, Z \rangle$. In particular, if the manifold is
weakly $\ast$Einstein, then
$$
\frac{n-1}{n} d s^*(X) = 2 \langle R{( e_i, X)} , \xi_{Je_i} J
\rangle - 2 s^* \langle \xi_{e_i} e_i , X \rangle.
$$
\end{lemma}
\begin{proof}
Note that $ \Ric^{*t}(X,Y) = \frac12 \langle R{(e_i,Je_i)} Y,
JX\rangle$. Then, we get
\begin{eqnarray*}
d^* \Ric^{*t}(X) & = & - (\nabla_{e_j} \Ric^{*t}) ( e_j ,X)
\\
&=& -\frac12 e_j \langle R{(e_i,Je_i)} X ,Je_j \rangle
+ \frac12 \langle R{(e_i,Je_i)} \nabla_{e_j} X , Je_j \rangle
+ \frac12 \langle R{(e_i,Je_i)} X, J\nabla_{e_j} e_j \rangle
\\
&=& - \frac12 \langle ( \nabla_{e_j} R){(e_i,Je_i)} X ,Je_j \rangle
- \langle R{(\nabla_{e_j} e_i,Je_i)} X , Je_j \rangle
- \frac12 \langle R{(e_i,Je_i)} X, (\nabla_{e_j}J) e_j \rangle
\end{eqnarray*}
Now, by symmetric properties of $R$ and $\xi = - \frac12 J(\nabla
J)$, it follows that
\begin{eqnarray*}
d^* \Ric^{*t}(X) & = & - \frac12 \langle ( \nabla_{e_j} R){(X
,Je_j)} e_i,Je_i\rangle
+ \langle R{(X , e_j)} e_i , \nabla_{Je_j} Je_i \rangle
- \langle R{(e_i,Je_i)} X, J \xi_{e_j} e_j \rangle.
\end{eqnarray*}
Using second Bianchi's identity and taking
$$
\langle R{(X , e_j)} e_i , \nabla_{Je_j} Je_i \rangle =
\langle R{(X , e_j)} e_i , \nabla^{\Lie{U}(n)}_{Je_j} Je_i \rangle
- \langle R{(X , e_j)} e_i , \xi_{Je_j} Je_i \rangle
$$
into account, we get
\begin{eqnarray*}
d^* \Ric^{*t}(X) & = & - \frac14 \langle ( \nabla_{X} R){(e_j,
Je_j )} e_i,Je_i\rangle
- \langle R{(X , e_j)} , \xi_{Je_j} J \rangle
- 2 \Ric^* ( X, \xi_{e_j} e_j ) .
\end{eqnarray*}
Note that
$$
\langle R{(X , e_j)} e_i , \nabla^{\Lie{U}(n)}_{Je_j} Je_i \rangle = \langle R{(X , e_j)} e_i , e_k \rangle
\langle \nabla^{\Lie{U}(n)}_{Je_j} Je_i , e_k \rangle =0,
$$
because it is a scalar product of a skew-symmetric matrix by a
Hermitian symmetric matrix.
Finally, it is obtained
\begin{eqnarray} \label{uno}
\qquad 2d^* \Ric^{*t}(X) & = & - \frac12 \langle ( \nabla_{X}
R){(e_j, Je_j )} e_i,Je_i\rangle
- 2 \langle R{(X , e_j)} , \xi_{Je_j} J \rangle
- 4 \Ric^* ( X, \xi_{e_j} e_j ) .
\end{eqnarray}
In a second instance, $ ds^* (X) = \frac12 X \langle
R{(e_i,Je_i)} e_j, Je_j \rangle$. Hence, we get
\begin{eqnarray*}
ds^* (X) & = & \frac12 \langle (\nabla_X R){(e_i,Je_i)} e_j, Je_j \rangle
+ 2 \langle R{(e_i,Je_i)} e_j, \nabla_X Je_j \rangle.
\end{eqnarray*}
But we have also that
\begin{eqnarray*}
\langle R{(e_i,Je_i)} e_j, \nabla_X Je_j \rangle & = & \langle R{(e_i,Je_i)} e_j, e_k \rangle \langle
\nabla^{\Lie{U}(n)}_X Je_j , e_k \rangle - \langle R{(e_i,Je_i)} e_j, e_k \rangle \langle
\xi_X Je_j , e_k \rangle \\
& = & \langle R{(e_i,Je_i)} e_j, J\xi_X e_j \rangle = 2
\Ric^* (e_i , \xi_X e_i) = 2 \langle \Ric^* , \xi_X \rangle.
\end{eqnarray*}
Thus, it follows that
\begin{equation} \label{dos}
ds^* (X) = \frac12 \langle (\nabla_X
R){(e_i,Je_i)} e_j, Je_j \rangle + 4 \langle \Ric^* , \xi_X
\rangle.
\end{equation}
From \eqref{uno} and \eqref{dos}, the required identity is
obtained.
\end{proof}
\begin{theorem}
For an almost Hermitian $2n$-manifold $(M,\langle \cdot
,\cdot \rangle, J)$, we have:
\begin{enumerate}
\item[{\rm (i)}] If $M$ is of type $\mathcal{W}_1 \oplus
\mathcal{W}_2 \oplus \mathcal{W}_4$, then the almost Hermitian
structure is a harmonic map if and only if the almost Hermitian
structure is harmonic and
\begin{eqnarray*}
\qquad (n-1) d^* \Ric^{*t}(X) + \frac{n-1}{2} ds^* (X) & = & \Ric
( X, \xi_{e_i} e_i )
- (2n-1) \Ric^* ( X, \xi_{e_i} e_i )\\
&& + 2 (n-1) \langle \Ric^* , \xi^{\flat}_X \rangle,
\end{eqnarray*}
for all $X \in \mathfrak{X}(M)$.
\item[{\rm (ii)}] If $M$ is quasi-K{\"a}hler $(\mathcal{W}_1 \oplus
\mathcal{W}_2)$, then the almost Hermitian structure is a
harmonic map if and only if $\Ric^*$ is symmetric and $2 d^*
\Ric^{*}+ ds^*=0$. In particular,
if the quasi-K{\"a}hler
manifold is weakly-$\ast$Einstein, then the almost Hermitian
structure is a harmonic map if and only if $s^*$ is constant.
\item[{\rm (iii)}] If $M$ is
Hermitian $(\mathcal{W}_3 \oplus \mathcal{W}_4)$, then the almost
Hermitian structure is a harmonic map
if and only if $\Ric^*_{\mbox{\rm \footnotesize alt}} = - 2 \xi^{\flat}_{\xi_{e_i}
e_i}$
and
$$
2 d^* \Ric^{*t}(X) + ds^* (X) + 4 \Ric^* ( X, \xi_{e_j} e_j ) -
4 \langle \Ric^* , \xi^{\flat}_X \rangle =0,
$$
for all $X \in \mathfrak X (M)$. In particular:
\begin{enumerate}
\item[{\rm (a)$^*$}] If the exterior derivative of the Lee form is Hermitian $($in particular, if it is closed$)$,
then the Hermitian
structure is a harmonic map if and only if $\Ric_{\mbox{\footnotesize \rm alt}}^* =0$ and
$2 d^* \Ric^{*} + ds^* + 4 \xi_{e_i} e_i \lrcorner
\Ric^* = 0$.
\item[{\rm (b)$^*$}] If $\Ric^*$ is symmetric, then the Hermitian
structure is a harmonic map if and only if $\xi_{\xi_{e_i} e_i}=0$ and
$2 d^* \Ric^{*} + ds^* + 4 \xi_{e_i} e_i \lrcorner
\Ric^* = 0$. In particular, if the manifold is weakly-$\ast$Einstein, then the Hermitian
structure is a harmonic map if and only if $\xi_{\xi_{e_i} e_i}=0$ and
$(n-1) ds^* + 2 s^* \xi^{\flat}_{e_i} e_i = 0$.
\item[{\rm (c)$^*$}] If the manifold is balanced Hermitian
$(\mathcal{W}_3)$, then the almost Hermitian structure is a
harmonic map if and only if $2 d^* \Ric^{*} + ds^*=0$.
Furthermore, if the balanced Hermitian manifold is
weakly-$*$Einstein, the almost Hermitian
structure is a harmonic map if and only if $s^*$ is constant.
\item[{\rm (d)$^*$}] If the manifold is locally conformal K\"{a}hler
$(\mathcal{W}_4)$, then the almost Hermitian structure is a
harmonic map if and only if $2 d^* \Ric^{*} + ds^* + 4 \xi_{e_i} e_i \lrcorner
\Ric^*=0$ if and only if, for all $X \in \mathfrak{X}(M)$, $(\Ric -\Ric^{*}) (X , \xi_{e_i} e_i) =0$.
\end{enumerate}
\end{enumerate}
\end{theorem}
\begin{proof} All results contained in the Theorem are immediate
consequences of Theorem \ref{classhermharm}, Lemma
\ref{id:genera}, and the following consequence of the expression
for $\xi_{(4)}$ given by \eqref{w4expre}
$$
(n-1) \langle \xi_{(4) e_i} , R_{(e_i,X)} \rangle = (\Ric
-\Ric^{*}) (X , \xi_{e_i} e_i).
$$
\end{proof}
\begin{example}{\rm
Hopf manifolds are diffeomorphic to $S^1 \times S^{2n-1}$ and
admit a locally conformal K\"{a}hler structure with parallel Lee form
$\frac{1}{2(n-1)} \xi_{e_i}^\flat e_i$ \cite{Va}. Furthermore,
$\xi_{e_i} e_i$ is nowhere zero and tangent to $S^1$. The metric
on $S^1 \times S^{2n-1}$ is the product metric of constant
multiples of the metrics on $S^1$ and $S^{2n-1}$ induced by the
respective Euclidean metrics on $\mathbb R^2$ and $\mathbb
R^{2n}$. The set $\mathcal L ( S^{2n-1})$ will consist of those
vector fields on $S^1 \times S^{2n-1}$ which are lifts of vector
fields on $S^{2n-1}$. The Riemannian curvature tensor $R$ is such
that
\begin{gather*} \langle R{(X,Y)} Z_1
, Z_2 \rangle = k ( \langle X,Z_1 \rangle \langle Y,Z_2 \rangle -
\langle X,Z_2 \rangle \langle Y,Z_1 \rangle), \qquad R{(X,
\xi_{e_i}e_i)} =0,
\end{gather*}
for all $X,Y,Z_1,Z_2 \in \mathcal L ( S^{2n-1})$, where $k$ is a
constant. Therefore, $\langle R{(e_i , \xi_{e_i} e_i)} ,
\xi_{e_i} \rangle =0$. Moreover, using the expression given by
\eqref{w4expre}, for all $X \in \mathcal L ( S^{2n-1})$, we have
$$
\langle R{(e_i , X)} , \xi_{e_i}
\rangle = 2k \langle \xi_{e_i} e_i , X\rangle =0.
$$
Additionally, it can be checked that
$$
\frac{n-1}{k} \langle \xi_X , R{(Y,Z)} \rangle = - J X^\flat \wedge J \xi_{e_i}^\flat e_i
(Y,Z),
$$
for all $X,Y,Z$ orthogonal to $ \xi_{e_i} e_i$. Therefore, the almost
Hermitian structure is not horizontally geodesic. As a consequence, it is also not a flat structure.
Finally, using again the expression \eqref{w4expre} and the fact
that $\xi_{e_i} e_i$ is parallel, it is obtained
\begin{eqnarray*}
2(n-1)^2 (\nabla_X \xi)_X
& = &
J \xi^\flat_{e_i} e_i (X) \; (X^\flat \otimes J \xi_{e_i} e_i
- J \xi^\flat_{e_i} e_i \otimes X + JX^\flat \otimes \xi_{e_i} e_i
- \xi^\flat_{e_i} e_i \otimes JX).
\end{eqnarray*}
Note that this expression does not vanish for all $X$.
In conclusion}, the locally conformal K\"ahler structure on $S^1
\times
S^{2n-1}$ is a harmonic map which is neither horizontally geodesic, nor vertically
geodesic.
\end{example}
\begin{example}
{\rm In general, locally conformal K\"{a}hler structures are not
harmonic maps. In fact, one can consider the K\"ahler structure on
$\mathbb R^{2n}$ determined by the Euclidean metric $\langle \cdot
, \cdot \rangle$ and the standard almost complex structure $J$. If
we do a conformal change of metric using a function $f$ on
$\mathbb R^{2n}$, the new metric $\langle \cdot , \cdot \rangle_o
= e^f \langle \cdot , \cdot \rangle$ and $J$ determine a new
almost Hermitian structure which is locally conformal K\"ahler.
The Lee form for the new structure is $df$ and the Riemannian
curvature tensor is given by
\begin{eqnarray*}
-2 e^{-f} \langle R_o (X,Y)Z,W \rangle_o & = & L(X,Z) \langle Y, W \rangle +
L(Y,W) \langle X , Z \rangle \\
&&
- L(X,W) \langle Y, Z \rangle - L(Y,Z) \langle X, W \rangle
\\
&& + \frac{\|d f \|^2}{2} \{ \langle X, Z \rangle \langle Y, W
\rangle - \langle Y, Z \rangle \langle X, W \rangle \},
\end{eqnarray*}
where $L(X,Y)= (\nabla_X df) (Y) - \frac12 df(X) df(Y)$ and
$\nabla$ is the Levi-Civita connection associated to $\langle
\cdot , \cdot \rangle$ (see \cite{Tricerri-Vanhecke:aH}). If
$\xi_o$ denotes the intrinsic torsion of the structure $(J,
\langle \cdot , \cdot \rangle_o)$, a straightforward computation
shows that
\begin{equation*}
16 e^f \langle R_o (e_{o \,i} ,X) , \xi_{o e_{o \,i}} \rangle_o =
- \frac{2n-3}{2} d (\| df \|^2)(X) + d^* (df) df(X)
+ ( \nabla_{JX} df )(J \mbox{grad}\,f ),
\end{equation*}
where $\{ e_{o \,1} , \dots , e_{o \,2n} \}$ is an orthonormal
basis for vectors with respect to $\langle \cdot , \cdot
\rangle_o$ and the terms in the right side, the norm $\| \cdot
\|$, $\mbox{grad}$, etc., are considered with respect to the
Euclidean metric $\langle \cdot , \cdot \rangle$. Therefore, it is
not hard to find functions $f$ such that $\langle R_o (e_{o \,i}
,X) , \xi_{o e_{o \,i}} \rangle_o \neq 0$. For instance, if $f=
\sin x_1$, then $\langle R_o (e_{o \,i} ,X) , \xi_{o e_{o \,i}}
\rangle_o = \frac{n-1}{8}\, e^{-\sin x_1} \sin x_1 \, \cos x_1 \,
d x_1$.
If we take the function $f$ such that $(x^{i}\comp f)(x) =
(x^{i}\comp f)(x + 2\pi),$ $i=1,\dots ,2n,$ then
$\langle\cdot,\cdot\rangle_{o}$ determines a Riemannian metric on
the torus $T^{2n} = S^{1}\times \dots \times S^{1}$ and the
natural projection of $\mathbb R^{2n}$ on $T^{2n}$ becomes a
local isometry. Hence, we also get} locally conformal K\"{a}hler
structures which are not harmonic maps on the torus
$(T^{2n},\langle\cdot,\cdot\rangle_{o}).$
\end{example}
\subsection{Nearly K\"{a}hler manifolds} For completeness, here we will
give a detailed and self-contained explanation of the situation
for nearly K\"{a}hler manifolds. Thus, we will recover results
already known originally proved, some of them, by Gray and,
others, by Wood. However, we will display alternative proofs in
terms of the intrinsic torsion $\xi$. Additionally, it is also
shown that, for nearly K\"{a}hler manifolds, $\xi$ is parallel
with respect to the minimal connection $\nabla^{\Lie{U}(n)}$,
i.e., $\nabla^{\Lie{U}(n)} \xi =0$. This last result is
originally due to Kirichenko \cite{Kir}.
The intrinsic torsion $\xi$ of a nearly K\"{a}hler manifold is
characterised by the condition $\xi_X Y= - \xi_Y X$. Because this
property is preserved by the action of $\Lie{O}(2n)$, then we have
also $(\nabla_X \xi)_Y Z = - (\nabla_X \xi)_Z Y$ and $(\nabla^{\Lie{U}(n)}_X
\xi)_Y Z = - (\nabla^{\Lie{U}(n)}_X \xi)_Z Y$.
Moreover, with respect to the almost complex structure $J$, it is
also satisfied $\xi_{JX} JY= - \xi_X Y$. Therefore, $(\nabla^{\Lie{U}(n)}_X
\xi)_{JY} JZ = - (\nabla^{\Lie{U}(n)}_X \xi)_{Y} Z$.
For nearly K\"{a}hler manifolds, Gray \cite{Gray:spheres} showed that
the following identities are satisfied
\begin{eqnarray} \label{ecxy}
\langle R{(X,Y)} X, Y \rangle - \langle R{(X,Y)} JX, JY \rangle & = & 4 \| \xi_X Y \|^2 \\
\langle R{(JX,JY)} JZ, JW \rangle & = & \langle R{(X,Y)} Z, W
\rangle. \label{ecjxjy}
\end{eqnarray}
In fact, since $\langle (\nabla_{X} \xi)_Y X , Y \rangle = 0 =
\langle (\nabla_{Y} \xi)_X X , Y \rangle$, it is immediate that
$$
\langle R{(X,Y)\lie{u}(n)^{\perp}} X , Y \rangle = \frac12 \left(
\langle R{(X,Y)} X , Y \rangle - \langle R{(X,Y)} JX , JY \rangle
\right) = 2 \langle [\xi_X , \xi_Y] X , Y \rangle.
$$
From this, \eqref{ecxy} follows. Also \eqref{ecxy} follows from
Proposition \ref{pro:skew}, because $[ \lie{u}(n)^{\perp},
\lie{u}(n)^{\perp}] \subseteq \lie{u}(n)$.
For \eqref{ecjxjy}. Using
\eqref{ecxy}, it is not hard to prove $\langle R{(JX,JY)} JX, JY
\rangle = \langle R{(X,Y)} X, Y \rangle$. Then, by linearizing,
we will have \eqref{ecjxjy}.
\begin{theorem} Nearly K\"{a}hler structures are vertically geodesic
harmonic maps. Moreover, for nearly K\"{a}hler manifolds, we have
\begin{eqnarray} \label{ecxyzw}
\langle R{(X,Y)} Z, W \rangle - \langle R{(X,Y)} JZ, JW \rangle & = & 4 \langle \xi_X Y , \xi_Z W \rangle, \\
\nabla^{\Lie{U}(n)}_X \xi & = &0. \label{nparallel}
\end{eqnarray}
In particular, if the nearly K\"{a}hler structure is
flat, then it is K\"{a}hler.
\end{theorem}
\begin{remark}{\rm Equation \eqref{ecxyzw} is due to Gray
\cite{Gray:nearly}. On the other hand, Wood proved in
\cite{Wood2} that nearly K\"{a}hler structures are vertically geodesic
harmonic maps.}
\end{remark}
\begin{proof} Since $\xi_X Y = - \xi_Y X$ and $\nabla \xi =
\nabla^{\Lie{U}(n)}\xi - \xi \xi$, it is direct to show that
\begin{eqnarray} \label{rjjxixi}
\langle R{(X,Y)\lie{u}(n)^{\perp}} Z , W \rangle
& = & \frac12
\left( \langle R{(X,Y)} Z , W \rangle - \langle R{(X,Y)} JZ , JW
\rangle \right) \\
& = & \langle (\nabla^{\Lie{U}(n)}_X \xi)_Y Z , W \rangle -
\langle (\nabla^{\Lie{U}(n)}_Y \xi)_X Z , W \rangle
+ 2 \langle \xi_X Y , \xi_{Z} W \rangle. \nonumber
\end{eqnarray}
Now, we consider the map ${\sf s} : \Lambda^2 T^* M \otimes
\Lambda^2 T^* M \to S^2 (\Lambda^2 T^* M)$ defined by ${\sf
s}(a\otimes b) = a \otimes b + b \otimes a$ and the map ${\sf b} :
S^2 (\Lambda^2 T^* M) \to S^2 (\Lambda^2T^* M)$ defined by
$$
{\sf b}(\Upsilon)(X,Y,Z,W) = 2 \Upsilon(X,Y,Z,W) -
\Upsilon(Z,X,Y,W) - \Upsilon(Y,Z,X,W).
$$
Applying the composition ${\sf b} \circ {\sf s}$ to both sides of
Equation \eqref{rjjxixi} and, then, making use of \eqref{ecjxjy}
and first Bianchi's identity, we will obtain
\begin{eqnarray} \label{sbianchi}
&& \\
3 \langle R{(X,Y)} Z , W \rangle - 2 \langle R{(X,Y)} JZ , JW
\rangle && \nonumber \\
+ \langle R{(Z, X)} JY , JW \rangle
+ \langle R{(Y, Z)} JX , JW \rangle
& = &
8 \langle \xi_{Z} W , \xi_X Y \rangle
+4 \langle \xi_{Y} W , \xi_X Z \rangle
-4 \langle \xi_{X} W , \xi_Y Z \rangle. \nonumber
\end{eqnarray}
Note that we have also taken $(\nabla_X^{\Lie{U}(n)} \xi)_Y Z = -
(\nabla_X^{\Lie{U}(n)} \xi)_Z Y $ into account. Now, if we
replace $Z$ and $W$ by $JZ$ and $JW$, subtract the result from
\eqref{sbianchi} and use $\xi_{JX} JY= - \xi_X Y$, then we get
\begin{eqnarray} \label{ultimopaso}
5 \langle R{(X,Y)} Z , W \rangle - 5\langle R{(X,Y)} JZ , JW
\rangle & &
\\
- \langle R{(X,JY)} Z , JW \rangle - \langle R{(X, JY)} JZ , W \rangle
& = &
16 \langle \xi_{X} Y , \xi_Z W \rangle. \nonumber
\end{eqnarray}
Finally, replacing in \eqref{ultimopaso} $Y$ and $Z$ by $JY$ and
$JZ$, multiplying by $1/5$ the resulting equation and adding the
final result to \eqref{ultimopaso}, the required identity
\eqref{ecxyzw} is obtained.
Now, from the following identity
$$
\frac12 \left( \langle R{(X,Y)} X, Z \rangle - \langle R{(X,Y)}
JX, JZ \rangle \right) = \langle (\nabla_{X} \xi)_Y X , Z \rangle
- \langle (\nabla_{Y} \xi)_X X , Z \rangle + 2 \langle [\xi_X ,
\xi_Y] X , Z \rangle,
$$
using \eqref{ecxyzw}, we get $(\nabla_{X} \xi)_X = 0$. Hence the
nearly K\"{a}hler structure is vertically geodesic.
We now prove \eqref{nparallel}. Since
\begin{eqnarray*}
\langle R{(X,Y)} Z , W \rangle - \langle R{(X,Y)} JZ , JW
\rangle & = & 2 \langle (\nabla^{\Lie{U}(n)}_X \xi)_Y Z , W
\rangle - 2 \langle (\nabla^{\Lie{U}(n)}_Y \xi)_X Z , W \rangle
\\
&& + 4 \langle \xi_X Y , \xi_{Z} W \rangle
\\ & = & 4 \langle \xi_X
Y , \xi_{Z} W \rangle,
\end{eqnarray*}
we have $(\nabla^{\Lie{U}(n)}_X \xi)_Y = (\nabla^{\Lie{U}(n)}_Y
\xi)_X$. Moreover, from the identity
\begin{eqnarray*}
\langle (\nabla_X \xi)_Y Z , W \rangle
& = & \langle (\nabla^{\Lie{U}(n)}_X \xi)_Y Z , W \rangle + \langle \xi_{Z} W , \xi_X Y \rangle
- \langle
\xi_X \xi_Y Z , W \rangle + \langle \xi_Y \xi_X Z , W \rangle,
\end{eqnarray*}
taking $(\nabla_{X} \xi)_X = 0$ into account, it follows
$(\nabla^{\Lie{U}(n)}_{X} \xi)_X = 0$. Therefore,
$(\nabla^{\Lie{U}(n)}_X \xi)_Y = (\nabla^{\Lie{U}(n)}_Y \xi)_X = -
(\nabla^{\Lie{U}(n)}_X \xi)_Y =0$.
The final remark contained in the Theorem follows from Proposition
\ref{pro:skew}~(i).
\end{proof}
\end{document}
|
\begin{document}
\title{General condition of quantum teleportation by one-dimensional quantum walks}
\noindent{\bf Abstract}\quad
We extend the scheme of quantum teleportation by quantum walks introduced by Wang et al. (2017). First, we introduce the mathematical definition of the accomplishment of quantum teleportation by this extended scheme. Secondly, we show a useful necessary and sufficient condition that the quantum teleportation is accomplished rigorously. Our result classifies the parameters of the setting for {the accomplishment of quantum teleportation}.
\section{Introduction}\label{intro}
Quantum walk is considered as a quantum analogue of random walk. This model was first introduced in the context of quantum information theory, for example, by Aharonov et al. \cite{ADZ93} and Ambainis et al. \cite{ABNV01}. Since then, quantum walk has been treated as an interesting model in the fields of mathematics and information theory \cite{CFG02, K03, Kn02, VA08, VA12}, and its applications are anticipated \cite{P13, KI19}. Quantum walk is capable of universal quantum computation and able to be implemented by physical systems in various ways \cite{C09, LCETK10, CGW13, KRBD10}, which is why the model is considered to be a promising one.\par
On the other hand, quantum teleportation is a communication protocol that transmits a quantum state from one place to another. It was first introduced by Bennett et al. \cite{BBCJPW93} and is regarded as not only a system for communication but also the basis of quantum computation \cite{TMFLF13}.\par
Recently, works on applications of quantum walks to quantum teleportation \cite{WSX17, SWLR19, LCWHL19, ZYLZ20} have appeared. In previous quantum teleportation systems, one had to prepare entangled states in advance and carry out the transmission with them. However, by using quantum walks, the walk itself plays the role of the entanglement, which makes teleportation simpler. In the previous study \cite{WSX17}, concrete models of teleportation by quantum walks are shown, but the general condition under which the scheme of teleportation succeeds is not given. In this paper, we extend the scheme of quantum teleportation by quantum walks introduced by Wang et al.~\cite{WSX17}. We introduce the mathematical definition of the accomplishment of quantum teleportation by this extended scheme. Then, we show a useful necessary and sufficient condition for it. Our result classifies the parameters of the setting for the accomplishment of the quantum teleportation, including Wang et al.'s settings.\par
The rest of the paper is organized as follows. Section 2 gives the definition of our quantum walk model, and in Sect. 3 we give the scheme of teleportation by the quantum walk model. In Sect. 4, we present our main theorem of this paper and demonstrate some examples of the theorem. Furthermore, Sect. 5 is devoted to the proof of the result. Finally, we give {a} summary and discussion in Sect. 6.
\section{Quantum Walks}\label{sec:1}
Here, we introduce the quantum walks (QWs). First, we review a basic model of discrete QW and then introduce the QW applied to the scheme of quantum teleportation.
\subsection{The One-Coin Quantum Walks on One-Dimensional Lattice}\label{sec:2}
The one-dimensional quantum walk with one coin is defined in a compound Hilbert space of the position Hilbert space $\mathcal{H}_{\rm P} = {\rm span}\{ \ket{x} | x \in \mathbb{Z} \}$ and the coin Hilbert space $\mathcal{H}_{\rm C} = {\rm span}\{ \ket{R},\,\ket{L} \}$ with
\begin{eqnarray}
\ket{R} = \onebytwo{1}{0},\quad \ket{L} = \onebytwo{0}{1}. \nonumber
\end{eqnarray}
Note that $\Hcn$ is equivalent to $\mathbb{C}^2$. Then, the whole system is described by $\mathcal{H} = \mathcal{H}_{\rm P} \otimes \mathcal{H}_{\rm C}$. \par
Now, we define one-step time evolution of the quantum walk as $W = \hat{S} \cdot \hat{C}$, where $\hat{S}$ is a shift operator described by
\begin{eqnarray}
\hat{S} = S \otimes \ket{R}\bra{R} + S^{-1} \otimes \ket{L}\bra{L}\nonumber
\end{eqnarray}
with
\begin{eqnarray}
S = \sum_{x \in \mathbb{Z}} \ket{x+1}\bra{x},\nonumber
\end{eqnarray}
and $\hat{C}$ is a coin operator defined by
\begin{eqnarray}
\hat{C} = I_2 \otimes C,\nonumber
\end{eqnarray}
with
\begin{eqnarray}
I_2 = \twobytwo{\,1\,}{\,0\,}{\,0\,}{\,1\,},\quad C \in {\rm U}(2).\nonumber
\end{eqnarray}
Here, U($n$) is the set of $n\times n$ unitary matrices.
\subsection{$m$-Coin Quantum Walks on One-Dimensional Lattice}\label{sec:MCQW}
To implement schemes of quantum teleportation based on quantum walks, we need to define quantum walks with many coins, which are determined on the whole system $\mathcal{H} = \Hp \otimes \Hcn^{\otimes m}$ with $m\geq n$ (the previous case was one coin QW).\par
Now, we define one-step time evolution of the $m$-coin quantum walk at time $n$ as $W_n = \hat{S}_n \cdot \hat{C}_n$, where $\hat{S}_n$ is a shift operator described by
\begin{eqnarray}\nonumber
\hat{S}_n &=& S \otimes \left(I_{2} \otimes \cdots \otimes I_{2} \otimes \overbrace{\ket{R}\bra{R}}^{n} \otimes I_{2} \otimes \cdots \otimes I_{2}\right) \nonumber \\
&& + S^{-1} \otimes \left(I_{2} \otimes \cdots \otimes I_{2} \otimes \overbrace{\ket{L}\bra{L}}^{n} \otimes I_{2} \otimes \cdots \otimes I_{2}\right),\nonumber
\end{eqnarray}
and $\hat{C}_n$ is the coin operator described by
\begin{eqnarray}
\hat{C}_n = I_{\infty} \otimes \left(I_{2} \otimes \cdots \otimes I_{2} \otimes \overbrace{C_n}^n \otimes I_{2} \otimes \cdots \otimes I_{2}\right).\nonumber
\end{eqnarray}
Here, ``$\overbrace{}^n$" means that the matrix corresponds to $n$th $\Hcn$ and $C_n \in {\rm U}(2)$.\par
Moreover, we put
\begin{eqnarray}
P_n = \ket{L}\bra{L}C_n,\quad Q_n = \ket{R}\bra{R}C_n.\nonumber
\end{eqnarray}
We should note that $C_n = P_n + Q_n$. Then, a quantum walker at time $n$ moves one unit to the left with the weight
\begin{eqnarray}
I_{2} \otimes \cdots \otimes I_{2} \otimes \overbrace{P_n}^n \otimes I_{2} \otimes \cdots \otimes I_{2},\nonumber
\end{eqnarray}
or to the right with weight
\begin{eqnarray}
I_{2} \otimes \cdots \otimes I_{2} \otimes \overbrace{Q_n}^n \otimes I_{2} \otimes \cdots \otimes I_{2}.\nonumber
\end{eqnarray}
In other words, for $n\in \mathbb{Z}_{\geq 0}$ and $\ket{\varPsi_n}$, the state of the system at time $n$, the relationship between the states $\ket{\varPsi_n}$ and $\ket{\varPsi_{n+1}}$ is described as
\begin{eqnarray}
\ket{\varPsi_{n+1}} = W_{n+1}\ket{\varPsi_n}.\nonumber
\end{eqnarray}
\section{Schemes of Teleportation}
Let us set $\Hp\otimes \Hc{A}$ and $\Hc{B}$ as Alice's and Bob's spaces, respectively, after the fashion of the idea proposed in \cite{WSX17}. Here, $\Hc{A}$, $\Hc{B}\cong \mathbb{C}^2$. In this section, we consider the quantum teleportation described in Figure~\ref{fig:1}. Now, the sender Alice wants to send $\ket{\phi} \in \Hc{A} (\cong \mathbb{C}^2)$ with $\|\phi\| = 1$ to the receiver Bob. We call $\ket{\phi}$ the target state. \par
The space of this quantum teleportation is denoted by $\mathcal{H} = \Hp \otimes \Hc{A}\otimes \Hc{B}$. We set the initial state as
\begin{eqnarray}
\ket{\varPsi_0} = \ket{0} \otimes \ket{\phi} \otimes \ket{\psi} \in \mathcal{H}.\nonumber
\end{eqnarray}
Here, $\ket{\psi}$ satisfies $\|\psi\| =1$. In the framework of quantum walk, the total state space of quantum teleportation is isomorphic to a two-coin quantum walk whose position Hilbert space is $\Hp$ and whose coin Hilbert space is $\Hc{A}\otimes \Hc{B}$. On the other hand, from the point of view of quantum teleportation, Alice has two initial states $\ket{0} \otimes \ket{\phi} \in \Hp \otimes \Hc{A}$ and Bob has an initial state $\ket{\psi} \in \Hc{B}$, and the goal of the teleportation is that Bob obtains the state $\ket{\phi}$ as the element of $\Hc{B}$.\par
\begin{figure}
\caption{Circuit diagram of quantum teleportation by 2-coin quantum walks}
\label{fig:1}
\end{figure}
Then, we provide three stages: (1) time evolution, (2) measurement and (3) transformation.
\subsection{Time Evolution by QW}
In the first stage, we take 2 steps of QWs with two coins; we describe the time evolution operator at the first and second steps $W_1$,\,$W_2$ as
\begin{eqnarray}
W_1 = \hat{S_1} \cdot \hat{C_1} = (S \otimes \ket{R}\bra{R} \otimes I_{2} + S^{-1} \otimes \ket{L}\bra{L} \otimes I_{2})(I_{\infty} \otimes C_1 \otimes I_{2}), \nonumber \\
W_2 = \hat{S_2} \cdot \hat{C_2} = (S \otimes I_{2} \otimes \ket{R}\bra{R} + S^{-1} \otimes I_{2} \otimes \ket{L}\bra{L})(I_{\infty} \otimes I_{2} \otimes C_2), \nonumber
\end{eqnarray}
respectively. Suppose $\ket{\varPsi_n}\in \mathcal{H}$ $(n=0,1,2)$ is the state after the $n$-th time evolution of the QW, and we regard the initial state of $\ket{\varPsi_0}$ of the quantum teleportation as the initial state of the QW. We run this QW for two steps, that is,
\begin{eqnarray}
\ket{\varPsi_0}\stackrel{W_1}{\longmapsto} \ket{\varPsi_1}\stackrel{W_2}{\longmapsto}\ket{\varPsi_2}.\nonumber
\end{eqnarray}
\subsection{Measurement}
In the second stage, to carry out the measurement on the Alice's state, we introduce the observables denoted by self-adjoint operators $M_1$ and $M_2$ on $\Hc{A}$ and $\Hp$, respectively, as follows:
\begin{eqnarray}
M_1 &=& (+1)\ket{\eta_R}\bra{\eta_R} + (-1) \ket{\eta_L}\bra{\eta_L},\nonumber \\
M_2 &=& \sum_{j \in \mathbb{Z}} \frac{\,j\,}{2} \ket{\xi_j}\bra{\xi_j},\nonumber
\end{eqnarray}
where $\ket{\eta_\varepsilon} = H_1\ket{\varepsilon}\,(\varepsilon \in \{R,\,L\})$, and $\ket{\xi_j} = H_2\ket{j}\,(j \in \mathbb{Z})$. Here, $H_1$ and $H_2$ are unitary operators on $\Hc{A}(\cong\mathbb{C}^2)$ and $\Hp(\cong\ell^2({\mathbb{Z}}))$, respectively. In particular, $H_2$ is described as follows:
\begin{eqnarray}\nonumber
&H_2 \simeq
\left[ \begin{array}{ccc|ccc}
\alpha_{22} & \alpha_{20} & \alpha_{2{(-2)}} &&&\\
\alpha_{02} & \alpha_{00} & \alpha_{0{(-2)}} &\multicolumn{2}{c}{\raisebox{-3pt}[0pt][0pt]{\LARGE $\,\,\,O$}}&\\
\alpha_{{(-2)}2} & \alpha_{{(-2)}0} & \alpha_{{(-2)}{(-2)}} &&&\\ \hline
&&&&&\\
\multicolumn{2}{c}{\raisebox{3pt}[0pt][0pt]{\LARGE $\,\,\,\,\,\,O$}}&&\multicolumn{2}{c}{\raisebox{3pt}[0pt][0pt]{\LARGE $\,\,\,I$}}&
\end{array} \right] =
\left[ \begin{array}{ccc|ccc}
&&&&&\\
\multicolumn{2}{c}{\raisebox{3pt}[0pt][0pt]{\LARGE $\,\,\tilde{H}_2$}}&&\multicolumn{2}{c}{\raisebox{3pt}[0pt][0pt]{\LARGE $\,\,O$}}&\\ \hline
&&&&&\\
\multicolumn{2}{c}{\raisebox{3pt}[0pt][0pt]{\LARGE $\,\,O$}}&&\multicolumn{2}{c}{\raisebox{3pt}[0pt][0pt]{\LARGE $\,\,I$}}&
\end{array} \right],&
\end{eqnarray}
where
\begin{eqnarray}\nonumber
\tilde{H}_2 =
\left[ \begin{array}{ccc}
\alpha_{22} & \alpha_{20} & \alpha_{2{(-2)}} \\
\alpha_{02} & \alpha_{00} & \alpha_{0{(-2)}} \\
\alpha_{{(-2)}2} & \alpha_{{(-2)}0} & \alpha_{{(-2)}{(-2)}}
\end{array} \right].
\end{eqnarray}
The computational basis of $H_2$ in the RHS is $\{\ket{2},\,\ket{0},\,\ket{-2},\,\hdots\}$, in this order.
The observed values of the observable $M_1$ are $\varepsilon\in \{\pm 1\}$ following the description of \cite{WSX17}, but in this paper, we denote the observed values of $M_1$ by $R, L$ via the bijection
\[ R\leftrightarrow +1 \text{ and } L\leftrightarrow -1.\]
In the same way, we describe the observed values of $M_2$ as $\{-2,0,2\}$ by the bijection map
\[ 2k \leftrightarrow k \;(k=-1,0,1). \]
Furthermore, we extend the domains of operators $M_1$ and $M_2$ to the whole system $\mathcal{H}$ by putting $M_1^{\rm (s)}$ and $M_2^{\rm (s)}$ as follows:
\begin{align*}
M_1^{\rm (s)} &:= I_{\infty} \otimes
M_1 \otimes I_{\Hc{B}}, \nonumber \\
M_2^{\rm (s)} &:=
M_2 \otimes I_{\Hc{A}} \otimes I_{\Hc{B}}. \nonumber
\end{align*}
This means that Alice carries out projection measurements on $\Hc{A}$ and $\Hp$ with the eigenvectors $\mathcal{B}_1 = \{ \ket{\eta_\varepsilon} | \varepsilon \in \{R,\,L\} \}$ of $M_1$ and $\mathcal{B}_2 = \{ \ket{\xi_j} | j \in \mathbb{Z} \}$ of $M_2$, respectively.
If Alice gets the observed values $\varepsilon$ by $M_1$ and $j$ by $M_2$, respectively, then the states collapse to $\ket{\eta_\varepsilon}\in\mathcal{H}_C^{(A)}$ and $\ket{\xi_j}\in \mathcal{H}_P$, respectively.
Through the measurements, if the state of $\Hc{A}$ collapses to $\ket{\eta_\varepsilon} \in \mathcal{B}_1$ by $M_1$ and the state of $\Hp$ collapses to $\ket{\xi_j} \in \mathcal{B}_2$ by $M_2$, the collapsed state of the whole system is denoted by $\ket{\varPsi_*^{(j,\,\varepsilon)}}\in \mathcal{H}$. Then, the state $\ket{\varPsi_*^{(j,\,\varepsilon)}}$ can be described explicitly as follows. The proof is given in Sect.~5.
\begin{proposition}\label{prop:finalstate}
{\rm The state $\ket{\varPsi_*^{(j,\,\varepsilon)}}$ can be described as}
\begin{eqnarray}
\ket{\varPsi_*^{(j,\,\varepsilon)}} = \ket{\xi_j} \otimes \ket{\eta_\varepsilon} \otimes \ket{\varPhi_*^{(j,\,\varepsilon)}}, \label{state}
\end{eqnarray}
{\rm where $\ket{\varPhi_*^{(j,\,\varepsilon)}} = V^{(j,\,\varepsilon)}\ket{\phi}$ and $V^{(j,\,\varepsilon)}$ is a linear map on $\Hc{B}$ (See (\ref{vje}) for the detailed expression for $V^{(j,\,\varepsilon)}$).}
\end{proposition}\par
Then, our problem is converted to finding a practical necessary and sufficient condition for the unitarity of $V^{(j,\,\varepsilon)}$.
\subsection{Transformation}
In the final stage, Bob should convert his state $\ket{\varPhi_*^{(j,\,\varepsilon)}}\in \Hc{B}$ to the state $\ket{\phi}$. After the measurements, Alice sends the outcomes $\varepsilon\in\{L,R\}$ and $j\in\{-2,0,2\}$ to Bob. Then, Bob applies a unitary operator $U^{(j,\,\varepsilon)}$ on $\Hc{B}$ to $\ket{\varPhi_*^{(j,\,\varepsilon)}}$, depending on the pair of observed results $(j,\,\varepsilon)$. Finally, Bob obtains a state $\ket{\varPhi}:=U^{(j,\,\varepsilon)}\ket{\varPhi_*^{(j,\,\varepsilon)}} \in \Hc{B}$. If $\ket{\varPhi}=\ket{\phi}$, we regard the teleportation as ``accomplished'' (we define this precisely below).
\subsection{A mathematical formulation of schemes of teleportation}
In the above subsections, we introduced the notion of quantum teleportation driven by quantum walk. As we have seen, the factors to determine the scheme of this teleportation are Bob's initial state $\ket{\psi}$, the coin operators $C_1$ and $C_2$, and the measurement operator $H_1$ and $H_2$. Then, for convenience, we define the set of them as the parameter of the teleportation as follows:
\begin{definition}
{\rm We call
\begin{align}\nonumber
\bfit{T}= (\ket{\psi};\,C_1,\,C_2;\,H_1,\,H_2) \in {\rm \mathbb{C}^2 \times U(2) \times U(2) \times U(2) \times U(\infty)}
\end{align} a {\bf quantum walk measurement procedure}.}
\end{definition}
\begin{definition}
{\rm Let $\ket{\varPhi}\in \Hc{B}$ be Bob's final state of a quantum walk measurement procedure ${\bfit T}$ and $\ket{\phi}\in \Hc{A}$ be the target state.
If this quantum walk measurement procedure ${\bfit T}$ satisfies $\ket{\varPhi} = \ket{\phi}$ for any observed value $(j,\,\varepsilon)\in \{ -2,0,2 \}\times \{L,R\}$ by Alice, we say that {\bf the quantum teleportation is accomplished by {\bfit T}}. }
\end{definition}
\begin{definition}
{\rm We define $\mathcal{T} \subset {\rm \mathbb{C}^2 \times U(2) \times U(2) \times U(2) \times U(\infty)}$ by
\begin{eqnarray}\nonumber
\mathcal{T} := \left\{ \bfit{T} = (\ket{\psi};\,C_1,\,C_2;\,H_1,\,H_2)\,|\,\scalebox{0.8}[1]{\rm {\bfit T}\, accomplishes\, the\, quantum\, teleportation.} \right\}
\end{eqnarray}
and call $\mathcal{T}$ {\bf the class of quantum teleportation driven by 2-coin quantum walks}.}
\end{definition}\par
The main purpose of this paper is to determine explicitly the class $\mathcal{T}$.
\section{Our result}
In this section, we present our main result on the quantum teleportation by quantum walks.
\subsection{Main Theorem}
\begin{theorem}\label{thm:main}
{\rm Quantum walk measurement procedure $\bfit{T} = (\ket{\psi};\,C_1,\,C_2;\,H_1,\,H_2)$ accomplishes the quantum teleportation, i.e., $\bfit{T} \in \mathcal{T}$ iff $\bfit{T}$ satisfies the following three conditions simultaneously}:
\begin{enumerate}[{\bf (I)}]
\item {\bf [Condition for $H_1$]} {\rm $|\braket{R|H_1|R}|=|{\braket{R|H_1|L}}|$.}
\item {\bf [Condition for $C_2$ and $\psi$\,]} {\rm $\left|\left\langle R|C_{2}| \psi\right\rangle\right|=\left|\left\langle L|C_{2}| \psi\right\rangle\right| = \displaystyle\frac{1}{\sqrt{2}}$.}
\item {\bf [Condition for $H_2$]} {\rm $\bfit{T}$ satisfies at least one of the following two conditions:}
\begin{enumerate}
\renewcommand{\labelenumii}{\bf (\roman{enumii})}
\item
{\rm Let $\bfit{H}$ be the set of three-dimensional unitary matrices defined by
\begin{equation}\nonumber \bfit{H}=\left\{
\begin{bmatrix} p & r & 0 \\ 0 & 0 & t \\q & s & 0 \end{bmatrix},\;
\begin{bmatrix} p & 0 & r \\ 0 & t & 0 \\ q & 0 & s \end{bmatrix},\;
\begin{bmatrix} 0 & p & r \\ t & 0 & 0 \\ 0 & q & s \end{bmatrix}
\in {\rm U}(3) \;:\;
|p|=|q| \right\}
\end{equation}
Then, $H_2=\tilde{H}_2\oplus I_\infty$ with $\tilde{H}_2\in \bfit{H}$. }
\item {\rm for all $k\in \{0,\,\pm 2\}$,}
\begin{align}\nonumber
|(H_2)_{2k}| = |(H_2)_{{(-2)}k}|
\end{align}
{\rm and}
\begin{align}\nonumber
\agmnt(H_2)_{2k} + \agmnt(H_2)_{{(-2)}k} - 2\agmnt(H_2)_{0k} \in (2\mathbb{Z}+1)\pi.
\end{align}
\end{enumerate}
{\rm Here, $(H_2)_{jk} =\braket{j|H_2|k}$.}
\end{enumerate}
{\rm Moreover, in any case, the transformation $U^{(j,\,\varepsilon)}$ by Bob depending on observed results $(j,\,\varepsilon)$ is unitary described as}
\begin{align}\nonumber
U^{(j,\,\varepsilon)} = \frac{1}{\|V^{(j,\,\varepsilon)}\ket{\phi}\|} \left(V^{(j,\,\varepsilon)}\right)^{-1},
\end{align}
{\rm where}
\begin{align}\nonumber
V^{(j,\,\varepsilon)}=\twovec {\bra{\eta_\varepsilon}(\overline{\alpha_{2j}}Q_1+\overline{\alpha_{0j}}P_1)\beta_{R}}{\bra{\eta_\varepsilon}(\overline{\alpha_{0j}}Q_1 + \overline{\alpha_{(-2)j}}P_1)\beta_{L}},
\end{align}
{\rm regardless of $\ket{\phi}$. Here $\alpha_{jk}=(H_2)_{jk}$ and $\beta_L=\bra{L}C_2\ket{\psi}$, $\beta_R=\bra{R}C_2\ket{\psi}$.}
\end{theorem}
\begin{remark}
{\rm This theorem implies that the accomplishment of the quantum teleportation is independent of $C_1$. Moreover, the theorem does not depend on $C_2$ and $\ket{\psi}$ individually, but only on the product
``$C_2\ket{\psi}$.'' After all, the accomplishment of quantum teleportation is determined only by three factors, that is, $H_1$, $H_2$, and $\ket{\psi'}=C_2\ket{\psi}$; this is a generalization of the statement of \cite{WSX17}. }
\end{remark}
\begin{remark}
{\rm The condition {\bf (II)} means that the coin operator $C_2$ must be unbiased. This claim agrees with Li et al.~\cite{LCWHL19}, which corresponds to the case of the number of qubits $N=1$.}
\end{remark}
\subsection{Examples and Demonstrations}
In the following, we put $H=\displaystyle\frac{1}{\sqrt{2}} \twobytwo{1}{1}{1}{-1}$.
\begin{enumerate}[(1)]
\item\quad We choose
\begin {eqnarray}
\ket{\psi} = \ket{R},\,\,C_1 = I_2,\,\,C_2 = H_1=H,\,\,\tilde{H}_2 \simeq H\oplus {I_{\infty}}. \nonumber
\end{eqnarray}
This case satisfies {\bf (III)-(i)}, and Wang et al.~\cite{WSX17} have shown that in this case the quantum teleportation is accomplished. Bob's state before measurement $\ket{\varPhi^{(j,\,\varepsilon)}}$ and the operator $U^{(j,\,\varepsilon)}$ are as follows:
\begin{align*}
\begin{array}{|c|c|c|}
\hline
(j,\,\varepsilon) &\quad\quad\ket{\varPhi^{(j,\,\varepsilon)}}\quad\quad &\quad\quad U^{(j,\,\varepsilon)}\quad\quad \rrule\\ \hline \hline
(2,\,R) & \ket{\phi} & I_2 \rrule\\ \hline
(0,\,R) & X\ket{\phi} & X \rrule\\ \hline
(-2,\,R) & Z\ket{\phi} & Z \rrule\\ \hline
(2,\,L) & Z\ket{\phi} & Z \rrule\\ \hline
(0,\,L) & XZ\ket{\phi} & ZX \rrule\\ \hline
(-2,\,L) & \ket{\phi} & I_2 \rrule\\ \hline
\end{array}
\end{align*}
\item We choose
\begin {eqnarray}
\ket{\psi} = \frac{\ket{R}+\ket{L}}{\sqrt{2}},\,\,C_1 = C_2 = I_2,\,H_1=H,\,\,
\tilde{H}_2 = \displaystyle\frac{1}{\sqrt{3}}\threebythree{-e^{ \frac{4}{3}\pi i }}{-1}{-e^{ \frac{2}{3}\pi i }}{1}{1}{1}{e^{ \frac{2}{3}\pi i }}{1}{e^{ \frac{4}{3}\pi i }}. \nonumber
\end{eqnarray}
This case satisfies {\bf (III)-(ii)}. Bob's state before measurement $\ket{\varPhi^{(j,\,\varepsilon)}}$ and the operator $U^{(j,\,\varepsilon)}$ are as follows:
\begin{align*}
\begin{array}{|c|c|c|}
\hline
(j,\,\varepsilon) & \ket{\varPhi^{(j,\,\varepsilon)}} & U^{(j,\,\varepsilon)} \rrule \\ \hline \hline
(2,\,R) &\quad \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{2}{3}\pi i}}{1}{1}{-e^{\frac{4}{3}\pi i}}\ket{\phi} \quad&\quad \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{4}{3}\pi i}}{1}{1}{-e^{\frac{2}{3}\pi i}} \quad \rrrule \\ \hline
(0,\,R) & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{1}{1}{1}{-1}\ket{\phi} & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{1}{1}{1}{-1} \rrrule \\ \hline
(-2,\,R) & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{4}{3}\pi i}}{1}{1}{-e^{\frac{2}{3}\pi i}}\ket{\phi} & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{2}{3}\pi i}}{1}{1}{-e^{\frac{4}{3}\pi i}} \rrrule \\ \hline
(2,\,L) & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{2}{3}\pi i}}{-1}{1}{e^{\frac{4}{3}\pi i}}\ket{\phi} & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{4}{3}\pi i}}{1}{-1}{e^{\frac{2}{3}\pi i}} \rrrule \\ \hline
(0,\,L) & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{1}{-1}{1}{1}\ket{\phi} & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{1}{1}{-1}{1} \rrrule \\ \hline
(-2,\,L) & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{4}{3}\pi i}}{-1}{1}{e^{\frac{2}{3}\pi i}}\ket{\phi} & \displaystyle\frac{1}{\sqrt{2}}\twobytwo{e^{\frac{2}{3}\pi i}}{1}{-1}{e^{\frac{4}{3}\pi i}} \rrrule \\ \hline
\end{array}
\end{align*}
\item\quad We choose
\begin {eqnarray}
\ket{\psi} = \frac{\ket{R}+i\ket{L}}{\sqrt{2}},\,\,C_1 = C_2 =I_2,\,\,H_1=H,\,\,
\tilde{H}_2 = \threebythree{i/2}{1/\sqrt{2}}{-i/2}{1/\sqrt{2}}{0}{1/\sqrt{2}}{i/2}{-1/\sqrt{2}}{-i/2}. \nonumber
\end{eqnarray}
This case is another example of {\bf (III)-(ii)}. Bob's state before measurement $\ket{\varPhi^{(j,\,\varepsilon)}}$ and the operator $U^{(j,\,\varepsilon)}$ are as follows:
\begin{align*}
\begin{array}{|c|c|c|}
\hline
(j,\,\varepsilon) & \ket{\varPhi^{(j,\,\varepsilon)}} & U^{(j,\,\varepsilon)} \rrule\\ \hline \hline
(2,\,R) & \quad\displaystyle\frac{1}{\sqrt{3}}\twobytwo{i}{\sqrt{2}}{\sqrt{2}i}{-1}\ket{\phi} \quad &\quad \displaystyle\frac{1}{\sqrt{3}}\twobytwo{-i}{-\sqrt{2}i}{\sqrt{2}}{-1} \quad\rrrule \\ \hline
(0,\,R) & \twobytwo{-1}{0}{0}{i}\ket{\phi} & \twobytwo{-1}{0}{0}{-i} \rrrule \\ \hline
(-2,\,R) & \displaystyle\frac{1}{\sqrt{3}}\twobytwo{-i}{\sqrt{2}}{\sqrt{2}i}{1}\ket{\phi} & \displaystyle\frac{1}{\sqrt{3}}\twobytwo{i}{-\sqrt{2}i}{\sqrt{2}}{1} \rrrule \\ \hline
(2,\,L) & \displaystyle\frac{1}{\sqrt{3}}\twobytwo{i}{-\sqrt{2}}{\sqrt{2}i}{1}\ket{\phi} & \displaystyle\frac{1}{\sqrt{3}}\twobytwo{-i}{-\sqrt{2}i}{-\sqrt{2}}{1} \rrrule \\ \hline
(0,\,L) & \twobytwo{-1}{0}{0}{-i}\ket{\phi} & \twobytwo{-1}{0}{0}{i} \rrrule \\ \hline
(-2,\,L) & \displaystyle\frac{1}{\sqrt{3}}\twobytwo{-i}{-\sqrt{2}}{\sqrt{2}i}{-1}\ket{\phi} & \displaystyle\frac{1}{\sqrt{3}}\twobytwo{i}{-\sqrt{2}i}{-\sqrt{2}}{-1} \rrrule \\ \hline
\end{array}
\end{align*}
\end{enumerate}
\section{Proof of Main Theorem}
\subsection{Proof of Proposition~\ref{prop:finalstate}}
\begin{proof}
At $n=1$, $\ket{\varPsi_0}$ evolves to
\begin{eqnarray}
\ket{\varPsi_1} = W_1\ket{\varPsi_0} = \ket{1} \otimes \ket{Q_1 \phi} \otimes \ket{\psi} + \ket{-1} \otimes \ket{P_1 \phi} \otimes \ket{\psi}, \nonumber
\end{eqnarray}
and at $n=2$, $\ket{\varPsi_1}$ evolves to
\begin{align}
\ket{\varPsi_2} = W_2\ket{\varPsi_1} =& \ket{2} \otimes \ket{Q_1 \phi} \otimes \ket{Q_2 \psi} \nonumber \\
& + \ket{0} \otimes \left(\ket{Q_1 \phi} \otimes \ket{P_2 \psi} + \ket{P_1 \phi} \otimes \ket{Q_2 \psi}\right) \nonumber \\
& + \ket{-2} \otimes \ket{P_1 \phi} \otimes \ket{P_2 \psi}. \nonumber
\end{align}
If the coin state of Alice collapses to $\ket{\eta_\varepsilon} \in \mathcal{B}_1$ after the observable $M_1$, the total state $\ket{\varPsi_2}$ is changed to
\begin{align}
\ket{\varPsi_*^{(\varepsilon)}} =\frac{1}{\kappa^{(\varepsilon)}}\{& \ket{2} \otimes \ket{\eta_\varepsilon} \otimes \braket{\eta_\varepsilon|Q_1 \phi}\ket{Q_2 \psi} \nonumber \\
&+ \ket{0} \otimes \ket{\eta_\varepsilon} \otimes \left( \braket{\eta_\varepsilon|Q_1 \phi}\ket{P_2 \psi}+ \braket{\eta_\varepsilon|P_1 \phi}\ket{Q_2 \psi}\right) \nonumber \\
&+ \ket{-2} \otimes \ket{\eta_\varepsilon} \otimes \braket{\eta_\varepsilon|P_1 \phi}\ket{P_2 \psi}\}.\nonumber
\end{align}
Here, $\kappa^{(\varepsilon)}$ is a normalizing constant. Moreover, if the position state of Alice collapses to $\ket{\xi_j} \in \mathcal{B}_2$ after the observable $M_2$, the total state $\ket{\varPsi_*^{(\varepsilon)}}$ is changed to the normalized state of
\begin{align}
\ket{\varPsi_*^{(j,\,\varepsilon)}}
&= \displaystyle\frac{1}{\kappa^{(j,\,\varepsilon)}}[\ket{\xi_j} \otimes \ket{\eta_\varepsilon} \otimes \{\braket{\eta_\varepsilon|(\braket{\xi_j|2}Q_1+\braket{\xi_j|0}P_1)|{\phi}}\braket{R|{C_2}|{\psi}}\ket{R} \nonumber\\
&\hspace{40mm}+ \braket{\eta_\varepsilon|(\braket{\xi_j|0}Q_1 + \braket{\xi_j|{-2}}P_1)|\phi}\braket{L|{C_2}|{\psi}}\ket{L}\}] \nonumber \\
&= \ket{\xi_j} \otimes \ket{\eta_\varepsilon} \otimes \displaystyle\frac{\tilde{V}^{(j,\,\varepsilon)}}{\kappa^{(j,\,\varepsilon)}}\ket{\phi},\nonumber
\end{align}
where
\begin{align}\label{vje}
\tilde{V}^{(j,\,\varepsilon)} := \onebytwo {\bra{\eta_\varepsilon}(\braket{\xi_j|2}Q_1+\braket{\xi_j|0}P_1)\braket{R|{C_2}|{\psi}}}{\bra{\eta_\varepsilon}(\braket{\xi_j|0}Q_1 + \braket{\xi_j|{-2}}P_1)\braket{L|{C_2}|{\psi}}},
\end{align}
and $\kappa^{(j,\,\varepsilon)}$ is a normalizing constant. Note that the amplitudes are inserted into the third slots in the above expression. Now, because $\|\ket{\xi_j} \otimes \ket{\eta_\varepsilon}\| = 1$,
\begin{eqnarray}
\kappa^{(j,\,\varepsilon)} = \| \ket{\xi_j}\otimes\ket{\eta_\varepsilon}\otimes\tilde{V}^{(j,\,\varepsilon)}\ket{\phi} \| = \|\tilde{V}^{(j,\,\varepsilon)}\ket{\phi} \|.\nonumber
\end{eqnarray}
Here, putting
\begin{align}
V^{(j,\,\varepsilon)} = \frac{\tilde{V}^{(j,\,\varepsilon)}}{\kappa^{(j,\,\varepsilon)}}\text{\quad and\quad} \ket{\varPhi^{(j,\,\varepsilon)}} = \frac{\tilde{V}^{(j,\,\varepsilon)}}{\kappa^{(j,\,\varepsilon)}}\ket{\phi} = V^{(j,\,\varepsilon)}\ket{\phi},\nonumber
\end{align}
we obtain the desired conclusion.
\end{proof}
Let us put $\alpha_{jk} = \braket{j|H_2|k}\,(j,\,k\in \{0,\,\pm 2\})$ and $\beta_{\varepsilon}=\braket{\varepsilon |C_2|\psi}\,(\varepsilon \in \{L,\,R\})$. Then $\tilde{V}^{(j,\,\varepsilon)}$ is re-expressed as follows:
\begin{align}
\tilde{V}^{(j,\,\varepsilon)} =\onebytwo {\bra{\vr^{(j,\,\varepsilon)}}}{\bra{\vl^{(j,\,\varepsilon)}}} = \displaystyle\onebytwo {\bra{\eta_\varepsilon}
\twobytwo{\overline{\alpha_{2j}}\beta_R}{0}{0}{\overline{\alpha_{0j}}\beta_R}
}{\bra{\eta_\varepsilon}
\twobytwo{\overline{\alpha_{0j}}\beta_L}{0}{0}{\overline{\alpha_{(-2)j}}\beta_L}
}C_1, \label{v}
\end{align}
where
\begin{align}
\bra{\vr^{(j,\,\varepsilon)}}=&\bra{\eta_\varepsilon}(\braket{\xi_j|2}Q_1+\braket{\xi_j|0}P_1)\braket{R|{C_2}|{\psi}}, \label{eq:vR}\\
{\bra{\vl^{(j,\,\varepsilon)}}} =& {\bra{\eta_\varepsilon}(\braket{\xi_j|0}Q_1 + \braket{\xi_j|{-2}}P_1)\braket{L|{C_2}|{\psi}}},\label{eq:vL}
\end{align}
$P_1=\ket{L}\bra{L}C_1${\rm , and }$Q_1=\ket{R}\bra{R}C_1$. We will use this expression later.
\subsection{Reformulation of the accomplishment of teleportation}
The following lemma seems to be simple, but plays an important role later.
\begin{lemma}\label{l1}
{\rm The following two statements are equivalent for $V\in M_n(\mathbb{C})$}:
\begin{enumerate}[{\rm (i)}]
\item {\rm There exists $U\in \mathrm{U}(n)$ such that for any $\phi\in \mathbb{C}^n\backslash\{0\}$, there exists a complex value $\kappa=\kappa(\phi)$ such that}
\begin{align}\nonumber
UV\phi =\kappa(\phi)\phi.
\end{align}
\item {\rm There exists a complex number $\kappa$ such that }
\begin{align}\nonumber
V\in \kappa\mathrm{U}(n).
\end{align}
\end{enumerate}
\end{lemma}
\begin{proof}
\quad Assume (i) holds. For any $\phi\in \mathbb{C}^n\setminus\{0\}$, $UV\phi = \kappa(\phi)\phi \,\,\Longleftrightarrow\,\, (UV-\kappa(\phi)I)\phi = 0$; that is, every $\phi \in \mathbb{C}^n\setminus\{0\}$ is an eigenvector of $UV$. This is equivalent to $UV = \kappa(\phi)I$. Since $U$ and $V$ are independent of $\phi$, the eigenvalue $\kappa(\phi)$ must be independent of $\phi$, say $\kappa$. Hence $V = \kappa U^{-1} \in \kappa\mathrm{U}(n)$, so (ii) holds. The converse is obvious.
\end{proof}
By using Lemma~\ref{l1}, the following lemma is completed:
\begin{lemma}\label{thm:unitarity}
\begin{multline*}
{\bfit T} \in \mathcal{T}\,\, \Longleftrightarrow\,\,
\text{\rm for any } (j,\,\varepsilon)\in \{-2,\,0,\,2\}\times \{R,\,L\} \\ \text{\rm there exists } \kappa=\kappa^{(j,\,\varepsilon)} \text{\rm \:such that } \displaystyle\tilde{V}^{(j,\,\varepsilon)} \in \kappa {\rm U}(2).
\end{multline*}
\end{lemma}
\begin{proof}
Let $\ket{\Phi_*^{(j,\,\varepsilon)}}\in \mathcal{H}$ be the final state after obtaining the observed values $(j,\,\varepsilon)$; that is, there exists $\ket{\Psi_*^{(j,\,\varepsilon)}}\in\Hc{B}$ such that $\ket{\Phi_*^{(j,\,\varepsilon)}}=\ket{\xi_j}\otimes\ket{\eta_\varepsilon}\otimes\ket{\Psi_*^{(j,\,\varepsilon)}}$.
\quad By the definition of $\mathcal{T}$ and Proposition~\ref{prop:finalstate}, ${\bfit T} \in \mathcal{T}$ if and only if there must exist a unitary matrix $U^{(j,\,\varepsilon)}$ on $\Hc{B}$ such that
\begin{align}
U^{(j,\,\varepsilon)}\ket{\varPhi^{(j,\,\varepsilon)}} =U^{(j,\,\varepsilon)}\displaystyle\frac{\tilde{V}^{(j,\,\varepsilon)}}{\kappa^{(j,\,\varepsilon)}}\ket{\phi}= \ket{\phi}
\,\,\Longleftrightarrow\,\,
U^{(j,\,\varepsilon)}\tilde{V}^{(j,\,\varepsilon)}\ket{\phi} = \kappa^{(j,\,\varepsilon)}\ket{\phi}.\nonumber
\end{align}
Here, because $\kappa^{(j,\,\varepsilon)} =\|\tilde{V}^{(j,\,\varepsilon)}\ket{\phi}\|$, this is equivalent to the following by Lemma~\ref{l1}: $\kappa^{(j,\,\varepsilon)}$ is independent of $\ket{\phi}$ and
\begin{align}
\tilde{V}^{(j,\,\varepsilon)}\in \kappa^{(j,\,\varepsilon)}\mathrm{U}(2).\nonumber
\end{align}
\end{proof}
\par
In the next section, we will apply the statement of Lemma~\ref{thm:unitarity} and the expression of $\tilde{V}^{(j,\,\varepsilon)}$ in (\ref{v}).
\subsection{A necessary condition of measurement}
In this section, we will show that, to accomplish the quantum teleportation, the eigenbases $\mathcal{B}_1$ and $\mathcal{B}_2$ of the observables must be different from the respective computational standard bases. More precisely, we obtain the following lemma:
\begin{lemma}
{\rm If ${\bfit T} \in \mathcal{T}$, then $H_1 \neq I_2$ and $H_2 \neq I_\infty$.}
\end{lemma}
\begin{proof}
\quad We show the contrapositive of the lemma: if $H_1 = I_{2}$ or $H_2 = I_{\infty}$, then ${\bfit T} \notin \mathcal{T}$; that is, by Lemma~\ref{thm:unitarity} and (\ref{v}),
\begin{eqnarray}
\onebytwo {\bra{\vr^{(j,\,\varepsilon)}}}{\bra{\vl^{(j,\,\varepsilon)}}} = \displaystyle\onebytwo {\bra{\eta_\varepsilon}
\twobytwo{\overline{\alpha_{2j}}\beta_R}{0}{0}{\overline{\alpha_{0j}}\beta_R}
}{\bra{\eta_\varepsilon}
\twobytwo{\overline{\alpha_{0j}}\beta_L}{0}{0}{\overline{\alpha_{{(-2)}j}}\beta_L}
}C_1 \notin {}^\forall\kappa{\rm U}(2).\,\,
\label{imp}
\end{eqnarray} \par
In case of $H_1 = I_{2}$, $\ket{\eta_\varepsilon}$ is equal to $\ket{\varepsilon}$, so
\begin{eqnarray}
\onebytwo {\bra{\vr^{(j,\,\varepsilon)}}}{\bra{\vl^{(j,\,\varepsilon)}}} = \displaystyle\onebytwo {\bra{\varepsilon}
\twobytwo{\overline{\alpha_{2j}}\beta_R}{0}{0}{\overline{\alpha_{0j}}\beta_R}
}{\bra{\varepsilon}
\twobytwo{\overline{\alpha_{0j}}\beta_L}{0}{0}{\overline{\alpha_{{(-2)}j}}\beta_L}
}C_1. \nonumber
\end{eqnarray}\par
Now, when $(j,\,\varepsilon) = (j,\,R)$, we obtain
\begin{eqnarray}
\onebytwo { [1\,\,\,\,0]
\twobytwo{\overline{\alpha_{2j}}\beta_R}{0}{0}{\overline{\alpha_{0j}}\beta_R}
}{ \left[ 1\,\,\,\,0 \right]
\twobytwo{\overline{\alpha_{0j}}\beta_L}{0}{0}{\overline{\alpha_{{(-2)}j}}\beta_L}
}=
\twobytwo{\overline{\alpha_{2j}}\beta_R}{0}{\overline{\alpha_{0j}}\beta_L}{0}.\nonumber
\end{eqnarray}
It follows that $\det\onebytwo {\bra{\vr^{(j,\,R)}}}{\bra{\vl^{(j,\,R)}}}= 0$, which implies (\ref{imp}).\par
In case of $H_2 = I_{\infty}$, $\ket{\xi_j}$ is equal to $\ket{j}$, so
\begin{eqnarray}\nonumber
\onebytwo {\bra{\vr^{(j,\,\varepsilon)}}}{\bra{\vl^{(j,\,\varepsilon)}}}= \displaystyle\onebytwo {
\bra{\eta_\varepsilon}
\twobytwo{\overline{\alpha_{2j}}\beta_R}{0}{0}{\overline{\alpha_{0j}}\beta_R}
}{\bra{\eta_\varepsilon}
\twobytwo{\overline{\alpha_{0j}}\beta_L}{0}{0}{\overline{\alpha_{{(-2)}j}}\beta_L}
}C_1
= \displaystyle\onebytwo {
\bra{\eta_\varepsilon}
\twobytwo{\delta_{2j}\beta_R}{0}{0}{\delta_{0j}\beta_R}
}{\bra{\eta_\varepsilon}
\twobytwo{\delta_{0j}\beta_L}{0}{0}{\delta_{(-2)j}\beta_L}
}C_1,
\end{eqnarray}
where
\begin{align}\nonumber
\braket{\xi_j|k}=\braket{j|k}=\delta_{jk}=
\left\{ \begin{array}{ll}
1 & (j=k)\\
0 & (j\neq k)
\end{array} \right. .
\end{align}
\par
Now, we put $H_1 = \twobytwo{a}{b}{c}{d}$. Because $\ket{\eta_\varepsilon} = H_1 \ket{\varepsilon}$, we can rewrite $\onebytwo {\bra{\vr^{(j,\,\varepsilon)}}}{\bra{\vl^{(j,\,\varepsilon)}}}$ as follows:
\begin{eqnarray}\nonumber
\onebytwo {\bra{\vr^{(j,\,\varepsilon)}}}{\bra{\vl^{(j,\,\varepsilon)}}} =
\onebytwo {
\bra{\varepsilon}H_1^\dagger
\twobytwo{\delta_{2j}\beta_R}{0}{0}{\delta_{0j}\beta_R}
}{\bra{\varepsilon}H_1^\dagger
\twobytwo{\delta_{0j}\beta_L}{0}{0}{\delta_{(-2)j}\beta_L}
}C_1 =
\onebytwo {
\bra{\varepsilon}\twobytwo{\overline{a}\delta_{2j}\beta_R}{\overline{c}\delta_{0j}\beta_R}{\overline{b}\delta_{2j}\beta_R}{\overline{d}\delta_{0j}\beta_R}
}{\bra{\varepsilon}\twobytwo{\overline{a}\delta_{0j}\beta_L}{\overline{c}\delta_{(-2)j}\beta_L}{\overline{b}\delta_{0j}\beta_L}{\overline{d}\delta_{(-2)j}\beta_L}}C_1. \nonumber
\end{eqnarray}\par
Then, if $(j,\,\varepsilon) = (2,\,R)$,
\begin{eqnarray}
\onebytwo {
[1\,\,\,\,0]\twobytwo{\overline{a}\beta_R}{0}{\overline{b}\beta_R}{0}
}{\left[1\,\,\,\,0\right]\twobytwo{0}{0}{0}{0}} \nonumber
=\twobytwo{\overline{a}\beta_R}{0}{0}{0}.
\end{eqnarray}
It follows that $\det\onebytwo {\bra{\vr^{(2,\,R)}}}{\bra{\vl^{(2,\,R)}}}= 0$, which implies (\ref{imp}).
\end{proof}
\subsection{Two conditions for $\bra{v_{L}^{(j,\,\varepsilon)}}$, $\bra{v_{R}^{(j,\,\varepsilon)}}$}
By Lemma~\ref{thm:unitarity}, the problem is reduced to find a condition for the unitarity of $\tilde{V}^{(j,\,\varepsilon)}$ except a constant multiplicity. Since
\[ \tilde{V}^{(j,\,\varepsilon)}=\begin{bmatrix} \bra{v_R^{(j,\,\varepsilon)}} \\ \bra{v_L^{(j,\,\varepsilon)}} \end{bmatrix}, \]
the two vectors in $\Hc{B}$ must satisfy the following two conditions as the corollary of Lemma~\ref{thm:unitarity}.
\begin{corollary}
{\rm $\bfit T\in \mathcal{T}$ if and only if the two row vectors of $\tilde{V}^{(j,\,\varepsilon)};$ $\,\,\bra{\vr^{(j,\,\varepsilon)}}$ and $\bra{\vl^{(j,\,\varepsilon)}}$, satisfy}
\begin{eqnarray}
[{\bf Condition\,I}]: &\|\vr^{(j,\,\varepsilon)}\|^2=\|\vl^{(j,\,\varepsilon)}\|^2\nonumber \\
\left[{\bf Condition\,II}\right]: & \braket{\vr^{(j,\,\varepsilon)}|\vl^{(j,\,\varepsilon)}}=0 \nonumber
\end{eqnarray}
{\rm for any observed values $(j,\,\varepsilon)$.}
\end{corollary}
\begin{proof}
By the expression of $\tilde{V}^{(j,\,\varepsilon)}$ in (\ref{v}) and Lemma~\ref{thm:unitarity}, we obtain the desired condition.
\end{proof}
From now on, we find more useful equivalent expressions of Conditions I and II.
\subsection{Equivalent expression of $[{\bf Condition\,I}]$}
From the definition of Condition I and the expressions of $\bra{v_R^{(j,\,\varepsilon)}}$ and $\bra{v_L^{(j,\,\varepsilon)}}$ in (\ref{v}), we have
\begin{align}
{\bf [Condition\, I]} & \Leftrightarrow ||\vr ^{(j,\,\varepsilon)}||^2 = ||\vl ^{(j,\,\varepsilon)}||^2 \nonumber \\
&\Leftrightarrow \scalebox{0.8}[1]{$\bra{\eta_\varepsilon}
\left[ \begin{array}{cc}
|\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2 & 0 \\
0 & |\alpha_{0j}|^2|\beta_R|^2 - |\alpha_{(-2)j}|^2|\beta_L|^2
\end{array} \right]
\ket{\eta_\varepsilon} = 0. $}\label{3}
\end{align}
Here, we put $A:=|\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2$ and $B:=|\alpha_{0j}|^2|\beta_R|^2 - |\alpha_{(-2)j}|^2|\beta_L|^2$.
\begin{align}
(\ref{3}) &\Longleftrightarrow \bra{\eta_\varepsilon}
\left[ \begin{array}{cc}
A & 0 \\
0 & B
\end{array} \right]
\ket{\eta_\varepsilon} = 0 \nonumber \\
& \Longleftrightarrow
\left(\begin{array}{l}
X_1 : ``\left[ \begin{array}{cc}
A & 0 \\
0 & B
\end{array} \right] = O\," \\ {\rm or} \\
Y_1 : ``\twobytwo{A}{0}{0}{B}\neq O\text{ and } \left[ \begin{array}{cc}
A & 0 \\
0 & B
\end{array} \right]
\ket{\eta_\varepsilon} ={}^\exists \lambda_{j,\,\varepsilon} \ket{\eta_{\lnot\varepsilon}},"
\end{array}\right.\nonumber
\end{align}
where $\lambda_{j,\,\varepsilon}\in\mathbb{C}$. Then, we have Condition~I $= ``X_1 \lor Y_1$ for any $(j,\,\varepsilon)$" and in the following, we will transform $X_1$ and $Y_1$, respectively.
\subsubsection{Equivalent transformation of $X_1$}
{ The condition $X_1$ can be characterized by the following more practical condition using the parameters $|\alpha_{jk}|$, $|\beta_{R}|$ and $|\beta_{L}|$, which decide $H_2$ and $C_2$:}
\begin{lemma}
\begin{eqnarray}
&X_1\,\,\Longleftrightarrow\,\,&\displaystyle|\alpha_{jk}|=\frac{1}{\sqrt{3}} \text{\rm\; for all $j, k \in \{ 0, \pm 2\}$ }
\text{\rm\; and }|\beta_R|=|\beta_L|=\frac{1}{\sqrt{2}}.\nonumber
\end{eqnarray}
\end{lemma}
\begin{proof}
If $|\alpha_{jk}|=1/\sqrt{3}$ for all $j,\,k$ and $|\beta_R|=|\beta_L|=1/\sqrt{2}$, then it is easy to check that $X_1$ holds.
Let us consider the converse.
Assume $X_1$ holds. In this case, we obtain
\begin{align}
A &=|\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2=0, \nonumber \\
B &=|\alpha_{0j}|^2|\beta_R|^2 - |\alpha_{(-2)j}|^2|\beta_L|^2=0, \nonumber
\end{align}
that is,
\begin{align}
\left[ \begin{array}{cc}
|\alpha_{2j}|^2 & -|\alpha_{0j}|^2 \\
|\alpha_{0j}|^2 & -|\alpha_{(-2)j}|^2 \\
\end{array} \right]
\left[ \begin{array}{c}
|\beta_R|^2 \\
|\beta_L|^2
\end{array} \right]
=
{\bf 0}. \nonumber
\end{align}
Because of $\T[|\beta_R|^2\,\,|\beta_L|^2]\neq {\bf 0},$ we have
\begin{eqnarray}
{\rm det} \begin{bmatrix}
|\alpha_{2j}|^2 & -|\alpha_{0j}|^2 \\
|\alpha_{0j}|^2 & -|\alpha_{(-2)j}|^2 \\
\end{bmatrix}
=0\nonumber
\end{eqnarray}
This is equivalent to
\begin{equation}|\alpha_{2j}|^2 |\alpha_{(-2)j}|^2=\left(|\alpha_{0j}|^2\right)^2. \label{prod}
\end{equation}
On the other hand, by the unitarity of $\tilde{H}_2$, we have
\begin{equation}
|\alpha_{2j}|^2 + |\alpha_{(-2)j}|^2 =1 - |\alpha_{0j}|^2. \label{sum}
\end{equation}
for any $j=-2,0,2$.
By (\ref{prod}) and (\ref{sum}), $|\alpha_{2j}|^2$, $|\alpha_{(-2)j}|^2$ are the solutions of the following quadratic equation:
\begin{eqnarray}
t^2 - (1 - |\alpha_{0j}|^2)t + \left(|\alpha_{0j}|^2\right)^2 = 0. \nonumber
\end{eqnarray}
Its solution is
\begin{eqnarray}
t = \frac{1 - |\alpha_{0j}|^2 \pm \sqrt{D}}{2},\quad D = -(3|\alpha_{0j}|^2-1)(|\alpha_{0j}|^2+1). \nonumber
\end{eqnarray}
Here, because the solution $t$ is a real number, the discriminant $D\geq 0$, i.e., $3|\alpha_{0j}|^2-1\leq 0$. Therefore, because $|\alpha_{0j}|\geq 0$,
\begin{eqnarray}
0 \leq |\alpha_{0j}|^2 \leq \frac{1}{3}. \nonumber
\end{eqnarray}\par
Here, the necessary condition for the unitarity of $\tilde{H}_2$ that $|\alpha_{02}|^2 + |\alpha_{00}|^2 + |\alpha_{0{(-2)}}|^2=1$ is satisfied only in the case
\begin{eqnarray}
|\alpha_{02}|^2=|\alpha_{00}|^2=|\alpha_{0{(-2)}}|^2=\frac{1}{3}.\nonumber
\end{eqnarray}
Hence, for $j\in \{0,\,\pm 2\}$, we obtain $D=0$, and then $t=1/3$ holds. Therefore, for $j,\,k\in \{0,\,\pm 2\}$,
\begin{eqnarray}
|\alpha_{jk}|=\frac{1}{\sqrt{3}},\nonumber
\end{eqnarray}
which implies,
\begin{align}\nonumber
A=B=\frac{1}{3}(|\beta_R|^2-|\beta_L|^2)=0\,\,\Longleftrightarrow\,\, |\beta_R|=|\beta_L| =\frac{1}{\sqrt{2}}.
\end{align}
\quad
\\
\begin{flushright}\end{flushright}
\end{proof}
\subsubsection{Equivalent transformation of $Y_1$}
{ The condition $Y_1$ is equivalently deformed by the following lemma. This shows that the parameters of $H_1$ are independent of the others.}
\begin{lemma}{\rm Let measurement operator of $\Hc{A}$ be
\begin{eqnarray}
H_1 =\left[ \begin{array}{cc}
a & b \\
c & d
\end{array} \right],\nonumber
\end{eqnarray}
which is unitary. Then, we have}
\begin{align}
Y_1\,\,\Longleftrightarrow\,\,&
|\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2 =-|\alpha_{0j}|^2|\beta_R|^2 + |\alpha_{(-2)j}|^2|\beta_L|^2\nonumber\\
&\text{\rm\; for all $ j, k \in \{ 0, \pm 2\}$}\text{\rm\; and}\,\, |a|=|b|. \nonumber
\end{align}
\end{lemma}
\begin{proof}
First let us consider the proof of the ``$\Leftarrow$" direction.
It holds
\begin{align}
\begin{bmatrix}1 & 0 \\ 0 & -1 \end{bmatrix} \ket{\eta_R} &= \begin{bmatrix} a \\ -c \end{bmatrix}
=\begin{bmatrix} (a/b)\cdot b \\ (\bar{b}/\bar{a})\cdot d \end{bmatrix} \notag \\
&= \frac{\,a\,}{b} \begin{bmatrix} b \\ d \end{bmatrix} = \frac{\,a\,}{b} \ket{\eta_L} \label{eq:etaR},
\end{align}
{where $\ket{\eta_\varepsilon} = H_1 \ket{\varepsilon}$.} Here, the second equality derives from $c=-\varDelta\bar{b}$ and $d=\varDelta\bar{a}$, where $\varDelta=\det(H_1)$ by the unitarity of $H_1$, and the third equality follows from the last assumption $|a|=|b|$.
In the same way, we obtain
\begin{align}\label{eq:etaL}
\begin{bmatrix}1 & 0 \\ 0 & -1 \end{bmatrix} \ket{\eta_L} &= \frac{\,b\,}{a} \ket{\eta_R}.
\end{align}
The first assumption implies $A=-B$. Then, (\ref{eq:etaR}) and (\ref{eq:etaL}) imply
\[ \begin{bmatrix} A & 0 \\ 0 & B \end{bmatrix} \ket{\eta_R}= A\cdot\frac{\,a\,}{b}\ket{\eta_L}
\text{ and }
\begin{bmatrix} A & 0 \\ 0 & B \end{bmatrix} \ket{\eta_L}= A\cdot\frac{\,b\,}{a}\ket{\eta_R}.
\]
Thus, the condition $Y_1$ holds.
Secondly, assume $Y_1$ holds.
In this case, there exist $\lambda$ and $\lambda'$ such that
\begin{eqnarray}
\left[ \begin{array}{cc}
A & 0 \\
0 & B
\end{array} \right]
\ket{\eta_R} = \lambda \ket{\eta_{L}}\,\,{\rm and}\,\,
\left[ \begin{array}{cc}
A & 0 \\
0 & B
\end{array} \right]
\ket{\eta_L} = \lambda' \ket{\eta_{R}}.
\label{rl}
\end{eqnarray}
Therefore,
\begin{subequations}
\begin{empheq}[left = {(\ref{rl})\,\,\Longleftrightarrow\,\,\empheqlbrace \,}, right = {}]{align}
& \left[ \begin{array}{cc}
A & 0 \\
0 & B
\end{array} \right]
H_1 \ket{R} = \lambda H_1 \ket{L}\, \label{a} \\
& {\rm and} \nonumber\\
& \left[ \begin{array}{cc}
A & 0 \\
0 & B
\end{array} \right]
H_1 \ket{L} =\lambda' H_1 \ket{R}. \label{b}
\end{empheq}
\end{subequations}\par
Let us give further transformation of (\ref{a}). Because $H_1$ is unitary,
\begin{align}
& H_1^\dagger \twobytwo{A}{0}{0}{B} H_1 \ket{R} = \lambda \ket{L} \nonumber \\
\Longleftrightarrow & \onebytwo{|a|^2A+|c|^2B}{a\overline{b}A+c\overline{d}B} = \onebytwo{0}{\lambda}. \label{c}
\end{align}\par
Similarly, (\ref{b}) is equivalently deformed as follows:
\begin{align}
& H_1^\dagger \twobytwo{A}{0}{0}{B} H_1 \ket{L} = \lambda' \ket{R} \nonumber \\
\Longleftrightarrow& \onebytwo{\overline{a}bA+\overline{c}dB}{|b|^2A+|d|^2B} = \onebytwo{\lambda'}{0}. \label{d}
\end{align}
Therefore, (\ref{rl}) is equivalent to (\ref{c}) and (\ref{d}), and these are also equivalent to
\begin{eqnarray}
\onebytwo{|a|^2A+|c|^2B}{|b|^2A+|d|^2B}=\twobytwo{|a|^2}{1-|a|^2}{1-|a|^2}{|a|^2}\onebytwo{A}{B} = {\bf 0} \label{e}
\end{eqnarray}
\begin{center}
and
\end{center}
\begin{eqnarray}
\onebytwo{a\overline{b}A+c\overline{d}B}{\overline{a}bA+\overline{c}dB}=\twobytwo{a\overline{b}}{c\overline{d}}{\overline{a}b}{\overline{c}d}\onebytwo{A}{B} = \twovec{\lambda}{\lambda'}
\label{f}
\end{eqnarray}
Here, we used in (\ref{e}), the unitarity of $H_1$, $|a|^2=|d|^2=1-|b|^2=1-|c|^2$.
Moreover, because of the assumption $\T[A,\,B] \neq {\bf 0}$,
\begin{eqnarray}
\det\twobytwo{|a|^2}{1-|a|^2}{1-|a|^2}{|a|^2}=0 \,\,\Longleftrightarrow\,\, |a| = \frac{1}{\sqrt{2}}. \nonumber
\end{eqnarray}
Then, we have $|a|=|b|$.
By substituting this result to (\ref{e}), we obtain
\begin{eqnarray}\label{eq:A+B=0}
A+B=0,
\end{eqnarray}
which is equivalent to
\[ |\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2 =-|\alpha_{0j}|^2|\beta_R|^2 + |\alpha_{(-2)j}|^2|\beta_L|^2 \]
for all $j$.
\end{proof}
Note that, by substituting (\ref{eq:A+B=0}) into (\ref{f}), we obtain
\begin{align}
a\bar{b}-c\bar{d}=\frac{\lambda}{A},\; \bar{a}b-\bar{c}d=\frac{\lambda'}{A}. \nonumber
\end{align}
The unitarity of $H_1$ implies $d=\varDelta \bar{a}$, $c=-\varDelta \bar{b}$, where $\varDelta=\det(H_1)$.
Therefore, the constants of the condition $Y_1$ are
\[\lambda=2a\bar{b}\cdot A=\frac{\,a\,}{b}\cdot A \text{ and } \lambda'=2\bar{a}b\cdot A=\frac{\,b\,}{a}\cdot A\]
since $|a|=|b|=1/\sqrt{2}$.
\subsection{Calculation of $\left[{\bf Condition\,II}\right]$}
From the definition of [Condition I] and the expressions of $\bra{v_R^{(j,\,\varepsilon)}}$ and $\bra{v_L^{(j,\,\varepsilon)}}$ in (\ref{v}), we have
\begin{align}
{\bf [Condition\,II]} \Longleftrightarrow & \braket{\vr ^{(j,\,\varepsilon)}|\vl ^{(j,\,\varepsilon)}} = 0 \nonumber \\
\Longleftrightarrow & \bra{\eta_\varepsilon}
\left[ \begin{array}{cc}
\beta_R \alpha_{2j} \overline{\alpha_{0j}\beta_L} & 0 \\
0 & \beta_R \alpha_{0j} \overline{\alpha_{({-2})j}\beta_L}
\end{array} \right]
\ket{\eta_\varepsilon} = 0 \label{3'}
\end{align}
Putting $A':=\beta_R \alpha_{2j} \overline{\alpha_{0j}\beta_L}$ and $B':=\beta_R \alpha_{0j} \overline{\alpha_{({-2})j}\beta_L}$, we decompose (\ref{3'}) into the conditions $X_2$ and $Y_2$, as follows.
\begin{align}
(\ref{3'}) \,\,\Longleftrightarrow\,\, & \bra{\eta_\varepsilon}
\left[ \begin{array}{cc}
A' & 0 \\
0 & B'
\end{array} \right]
\ket{\eta_\varepsilon} = 0 \nonumber \\
\Longleftrightarrow\,\,&
\left( \begin{array}{l}
X_2 : ``\left[ \begin{array}{cc}
A' & 0 \\
0 & B'
\end{array} \right] = O\," \\ {\rm or} \\
Y_2 : ``\twobytwo{A'}{0}{0}{B'}\neq O\text{ and }\left[ \begin{array}{cc}
A' & 0 \\
0 & B'
\end{array} \right]
\ket{\eta_\varepsilon} ={}^\exists\mu_{j,\,\varepsilon} \ket{\eta_{\lnot\varepsilon}},"
\end{array}\right. \nonumber
\end{align}
where $\mu_{j,\,\varepsilon}\in\mathbb{C}$.
Then we obtain [Condition II]$=X_2 \lor Y_2$.
We will transform $X_2$ and $Y_2$ to more useful forms.
\subsubsection{Equivalent transformation of $X_2$}
{The condition $X_2$ is characterized only by the parameters of $H_2$ as follows:}
\begin{lemma}
{\rm {Let $\bfit{H}$ be the set of three dimensional unitary matrices defined by
\begin{equation}\label{eq:H} \bfit{H}=\left\{
\begin{bmatrix} p & r & 0 \\ 0 & 0 & t \\q & s & 0 \end{bmatrix},\;
\begin{bmatrix} p & 0 & r \\ 0 & t & 0 \\ q & 0 & s \end{bmatrix},\;
\begin{bmatrix} 0 & p & r \\ t & 0 & 0 \\ 0 & q & s \end{bmatrix}
\in {\rm U}(3) \;:\;
|p|=|q| \right\}
\end{equation}
}The condition $X_2$ is equivalent to the following condition;
\[{ H_2=\tilde{H}_2 \oplus I_{\infty}\text{\,\, with\,\,\,} \tilde{H}_2=
\left[ \begin{array}{ccc}
\alpha_{22} & \alpha_{20} & \alpha_{2{(-2)}} \\
\alpha_{02} & \alpha_{00} & \alpha_{0{(-2)}} \\
\alpha_{{(-2)}2} & \alpha_{{(-2)}0} & \alpha_{{(-2)}{(-2)}}
\end{array} \right]\in \bfit{H}. }\]}
\end{lemma}
\begin{proof}
Assume $\tilde{H}_2\in \bfit{H}$. Then, each row vector of $\tilde{H}_2$ is of the form $[*,\;0\;,*]$ or $[0,\;*,\;0]$, where ``$*$'' takes a nonzero value. Since the computational basis of $\tilde{H}_2$ is $\ket{-2},\ket{0},\ket{2}$ in this order, it holds that $\alpha_{2j}\alpha_{0j}=\alpha_{(-2)j}\alpha_{0j}=0$ for any $j\in\{-2,0,2\}$. Then, we have $A'=B'=0$, which implies the condition $X_2$.
On the other hand, assume the condition $X_2$. In this case, for $A'$ and $B'$, the following hold:
\begin{align}
A' &=\beta_R \alpha_{2j} \overline{\alpha_{0j}\beta_L}=0, \nonumber \\
B' &=\beta_R \alpha_{0j} \overline{\alpha_{({-2})j}\beta_L}=0. \nonumber
\end{align}
Therefore,
\begin{align}
& |\beta_R \alpha_{2j}\alpha_{0j}\beta_L| = |\beta_R \alpha_{0j}\alpha_{({-2})j}\beta_L| = 0 \nonumber \\
\Longleftrightarrow\,\,& |\beta_R\alpha_{0j}\beta_L| = 0\,\,\,{\rm or}\,\,\,|\alpha_{2j}| = |\alpha_{({-2})j}| = 0 \nonumber \\
\Longleftrightarrow\,\,&
``(\;|\beta_R|,\;|\beta_L|\;)\in \{(0,1),(1,0)\}"\notag \\ & \qquad\qquad\text{ or }``(\;|\alpha_{0j}|^2,\;|\alpha_{2j}|^2+|\alpha_{(-2)j}|^2\;)\in \{(0,1),(1,0)\}"\nonumber
\end{align}
Here, we used $|\alpha_{0j}|^2+|\alpha_{2j}|^2+|\alpha_{(-2)j}|^2=1$ due to the unitarity of $\tilde{H}_2$ in the last equivalence.
When $(\;|\beta_R|,\,|\beta_L|\;) = (0,\,1)$ or $(1,\,0)$, the determinant of $\tilde{V}^{(j,\,\varepsilon)}$ is $\det(\tilde{V}^{(j,\,\varepsilon)})=0$ by (\ref{v}), and because of it, the matrix $\tilde{V}^{(j,\,\varepsilon)}$
does not satisfy the condition of Theorem~2. Hence, the conditions we should only impose are
\begin{align*}
({\rm a})\,\,
(\;|\alpha_{0j}|^2,|\alpha_{2j}|^2+|\alpha_{(-2)j}|^2\;) &= (0,1)\\
{\rm or}\quad\quad\quad\quad\quad\\
({\rm b})\,\,
(\;|\alpha_{0j}|^2,|\alpha_{2j}|^2+|\alpha_{(-2)j}|^2\;)&=(1,0)
\end{align*}
to each column vector of $\tilde{H}_2$ ($j=-2,0,2$). Each column vector satisfies the condition (a) or (b); however, by the unitarity of $\tilde{H}_2$, we notice that exactly one of the column vectors of $\tilde{H}_2$ satisfies the condition (b) and the remaining two column vectors satisfy (a), because every \emph{row} vector of $\tilde{H}_2$ must be a unit vector. This implies that $H_2=\tilde{H}_2\oplus I_\infty$ with $\tilde{H}_2\in \bfit{H}$.
Then, we obtained the desired conclusion.
\end{proof}
\subsubsection{Equivalent transformation of $Y_2$}
By {a similar} discussion {to} that of the condition $Y_1$, we obtain the following lemma. {It is important that the lemma is free from constraints of Alice's coin operator $C_2$. In spite of a similar fashion of the proof, this gives us a different observation from the observation of $Y_1$.}
\begin{lemma}{\rm For all $ j, k \in \{ 0, \pm 2\}$,}
\begin{eqnarray}\nonumber
Y_2 \,\,\Longleftrightarrow\,\, \alpha_{2j} \overline{\alpha_{0j}} = -\alpha_{0j} \overline{\alpha_{({-2})j}} \,\,{\rm and}\,\, |a|=|b|.
\end{eqnarray}
\end{lemma}
\subsection{Fusion of the conditions}
We have shown that a necessary and sufficient condition for $\bfit{T}\in \mathcal{T}$ is $(X_1 \lor Y_1) \land (X_2 \lor Y_2)$ and we have converted $X_j$ and $Y_j$ $(j=1,2)$ to useful expressions in the above discussions.
Expanding
\begin{equation}
(X_1 \lor Y_1) \land (X_2 \lor Y_2) = (X_1 \land X_2) \lor (X_1 \land Y_2) \lor (Y_1 \land X_2) \lor (Y_1 \land Y_2),
\nonumber
\end{equation}
we consider each case as follows to finish the proof of Theorem~\ref{thm:main}.
\begin{center}
\begin{tabular}{|c||c|c|}\hline
& $\begin{array}{c} X_2 \\ \tilde{H_2} \in \bfit{H} \end{array}$
& $\begin{array}{c} Y_2 \\ \scalebox{0.8}[1]{$\alpha_{2j} \overline{\alpha_{0j}} = -\alpha_{0j} \overline{\alpha_{({-2})j}}$}
\\|a|=|b| \end{array}$ \\ \hline\hline
$\begin{array}{c} X_1 \\ |\beta_R|=|\beta_L|=1/\sqrt{2} \\ |\alpha_{jk}| = 1/\sqrt{3} \end{array}$
& {\LARGE (A)} & {\LARGE (B)} \\ \hline
$\begin{array}{c} Y_1 \\ \scalebox{0.7}[1]{$|\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2
=-|\alpha_{0j}|^2|\beta_R|^2 + |\alpha_{(-2)j}|^2|\beta_L|^2$} \\ |a|=|b| \end{array} $
& {\LARGE (C)} & {\LARGE (D)} \\ \hline
\end{tabular}
\begin{comment}
\begin{tabular}{|c|c|c|} \hline
& \,\,\,$X_1$\,\,\, & \,\,\,$Y_1$\,\,\, \\
& $|\beta_R|=|\beta_L|$ & $|\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2 $ \\
&${}^\forall j,\,k \in \{0,\, \pm 2\},\,\,|\alpha_{jk}| = 1/\sqrt{3}$&$=-|\alpha_{0j}|^2|\beta_R|^2 + |\alpha_{(-2)j}|^2|\beta_L|^2$\\
&&{\rm and} \quad $|a|=|b|$ \\
\hline
\,\,\,$X_2$\,\,\, &\raisebox{-10pt}[0pt][0pt]{\LARGE (A)}&\raisebox{-10pt}[0pt][0pt]{\LARGE (B)}\\
$\tilde{H_2} \in \bfit{H}$ & \\ \hline
\,\,\,$Y_2$\,\,\, &\raisebox{-15pt}[0pt][0pt]{\LARGE (C)}&\raisebox{-15pt}[0pt][0pt]{\LARGE (D)} \\
$\alpha_{2j} \overline{\alpha_{0j}} = -\alpha_{0j} \overline{\alpha_{({-2})j}}$&& \\
{\rm and}\,\, $|a|=|b|$ && \\ \hline
\end{tabular}  
\end{comment}
\end{center}
\begin{enumerate}[(A)]
\item $X_1 \land X_2$
\begin{lemma}\label{lem:A}
$X_1 \land X_2=\emptyset$
\end{lemma}
\begin{proof}
It is easy to see that $X_1$ and $X_2$ contradict each other.
\end{proof}
\item $X_1 \land Y_2$
\begin{lemma}\label{lem:C}
{\rm The condition $X_1 \land Y_2$ coincides with {\bf (I)}, {\bf (II)} and {\bf (III)-(ii)} in the condition of Theorem~\ref{thm:main} for the case of $|(H_2)_{jk}|=1/\sqrt{3}$ for any $j,k\in\{-2,0,2\}$. }
\end{lemma}
\begin{proof}
Let us assume $X_1 \land Y_2$.
By $X_1$, for $j,\,k \in \{ 0,\,\pm 2\}$,
\begin{eqnarray}
\alpha_{jk} = \frac{e^{i\arg\alpha_{jk}}}{\sqrt{3}}.\nonumber
\end{eqnarray}
We can rewrite $Y_2$ by using it as follows:
\begin{align}
&\frac{1}{\sqrt{3}}\cdot e^{i(\agmnt\alpha_{2j} - \agmnt\alpha_{0j})} = -\frac{1}{\sqrt{3}}\cdot e^{i(\agmnt\alpha_{0j} - \agmnt\alpha_{(-2)j})} \nonumber \\
\Longleftrightarrow\,\, & \agmnt\alpha_{2j} + \agmnt\alpha_{(-2)j} - 2\agmnt\alpha_{0j} \in (2\mathbb{Z}+1)\pi = \{ (2m + 1)\pi | m \in \mathbb{Z} \}. \nonumber
\end{align}
Therefore, the condition $X_1 \land Y_2$ includes
\begin{align*}
& |a|=|b|;\\
& |\beta_R| = |\beta_L| = \displaystyle\frac{1}{\sqrt{2}};\\
& |\alpha_{jk}| = \displaystyle\frac{1}{\sqrt{3}} \text{ for any } j,k\in\{0,\,\pm2\};\\
& \agmnt\alpha_{2j} + \agmnt\alpha_{(-2)j} - 2\agmnt\alpha_{0j} \in (2\mathbb{Z}+1)\pi \text{ for any } j\in\{0,\,\pm2\};
\end{align*}
The converse also holds.
\end{proof}
\item $Y_1 \land X_2$
\begin{lemma}\label{lem:B}
{\rm The condition $Y_1 \land X_2$ coincides with {\bf (I)},{\bf (II)} and {\bf (III)-(i)} in the condition of Theorem~\ref{thm:main}.}
\end{lemma}
\begin{proof}
Let us assume $Y_1\land X_2$. By $Y_1$, the condition
$|\alpha_{2j}|^2|\beta_R|^2 - |\alpha_{0j}|^2|\beta_L|^2=-|\alpha_{0j}|^2|\beta_R|^2 + |\alpha_{(-2)j}|^2|\beta_L|^2$ holds for any $j\in\{-2,0,2\}$, and by $X_2$, the condition $\tilde{H}_2\in\bfit{H}$ holds. Therefore, by the definition of $\bfit{H}$ in (\ref{eq:H}),
we obtain
\begin{eqnarray}
|p\beta_R| = |q\beta_L|\quad {\rm and}\quad |r\beta_R| = |s\beta_L|\quad {\rm and} \quad |\beta_R| = |\beta_L|. \nonumber
\end{eqnarray}
Therefore, we obtain $|\beta_R| = |\beta_L| = 1/\sqrt{2}$ and $|p|=|q|=|r|=|s|$ from all of these conditions. Hence, the condition $Y_1 \land X_2$ includes
\begin{eqnarray}
\tilde{H}_2 \in \bfit{H}\quad {\rm and} \quad |\beta_R| = |\beta_L| = \frac{1}{\sqrt{2}} \quad {\rm and} \quad |a|=|b|.\nonumber
\end{eqnarray}
The converse also holds.
\end{proof}
\par
By this result, there exist permutation matrices $\mathcal{U}$ and $\mathcal{V}$ such that $H_2$ can be expressed by
\begin{eqnarray}
H_2 =
\mathcal{U}\left[
\begin{array}{@{\,}ccc|c@{\,}}
\frac{1}{\sqrt{2}}e^{i\agmnt\alpha_{j_1 k_1}}&\frac{1}{\sqrt{2}}e^{i\agmnt\alpha_{j_1 k_2}} & 0 & \\
\frac{1}{\sqrt{2}}e^{i\agmnt\alpha_{j_2 k_1}}&\frac{1}{\sqrt{2}}e^{i\agmnt\alpha_{j_2 k_2}} & 0 & O\\
0 &0 & 1 & \\
\hline
&O & &I
\end{array}
\right]\mathcal{V}.\nonumber
\end{eqnarray}
In particular, when $j_1=k_1= 2$, $j_2 = k_2 =-2$, $\agmnt\alpha_{22}=\agmnt\alpha_{({-2})2}=\agmnt\alpha_{2({-2})}=0$ and $\agmnt\alpha_{{(-2)}{(-2)}} = \pi$, the result meets the example in paper \cite{WSX17}.
\item $Y_1 \land Y_2$
\begin{lemma}\label{lem:D}
{\rm $Y_1 \land Y_2$ coincides with {\bf (I)}, {\bf (II)} and {\bf (III)-(ii)} in the condition of Theorem~\ref{thm:main}.}
\end{lemma}
\begin{proof}
Let us assume $Y_1\land Y_2$. Taking the absolute values to both sides of the condition $Y_2$, we obtain $|\alpha_{2j}|=|\alpha_{(-2)j}|$ for any $j\in \{-2,0,2\}$. Inserting this into the condition $Y_1$, we have
\[ (|\alpha_{2j}|^2+|\alpha_{0j}|^2)(|\beta_R|^2-|\beta_L|^2)=0. \]
Since $|\alpha_{2j}|,|\alpha_{0j}|>0$, we get $|\beta_R|^2=|\beta_L|^2$.
In the next, let us consider $Y_2$ with respect to the phase;
the condition $Y_2$ implies
\[ \agmnt \alpha_{2j}-\agmnt \alpha_{0j}=(2m+1)\pi+\agmnt \alpha_{0j}
-\agmnt \alpha_{(-2)j} \]
for some $m\in\mathbb{Z}$.
This implies
\[ \agmnt \alpha_{2j}- 2\agmnt \alpha_{0j}
+\agmnt \alpha_{(-2)j}\in (2\mathbb{Z}+1)\pi. \]
\begin{comment}
When for $j \in \{ \pm 2,\,0\}$, $|\alpha_{0j}|=0$, we obtain $|\alpha_{2j}\beta_{R}|^2 = |\alpha_{(-2)j}\beta_{L}|^2$ by $X_2$. The right side is transformed as follow:
\begin{eqnarray}
|\alpha_{(-2)j}\beta_{L}|^2 & = & (1-|\alpha_{2j}|^2)(1-|\beta_{R}|^2) \nonumber \\
\end{eqnarray}
$\therefore \,\, |\alpha_{2j}|^2 + |\beta_{R}|^2 = 1$. Therefore, we obtain
\begin{eqnarray}
|\alpha_{2j}| = |\beta_{L}|,\,\,|\alpha_{(-2)j}| = |\beta_{R}|
\end{eqnarray}\par
When for $j \in \{ \pm 2,\,0\}$, $|\alpha_{0j}| \neq 0$, $|\alpha_{2j}| = |\alpha_{(-2)j}| =: |\alpha| $. Hence, $|\alpha\beta_{R}|^2 - |\alpha_{0j}\beta_{L}|^2 = -|\alpha_{0j}\beta_{R}|^2 + |\alpha\beta_{L}|^2$ by $X_2$. The formula is transformed as follow:
\begin{align}
& (|\alpha|^2 + |\alpha_{0j}|^2)( |\beta_{R}|^2 - |\beta_{L}|^2) = 0\nonumber \\
\Longleftrightarrow\,\, & |\beta_{R}| = |\beta_{L}| = \frac{1}{\sqrt{2}}.
\end{align}
Moreover, by $Y_2$,
\begin{align}
&|\alpha|e^{i \agmnt\alpha_{2j}} \cdot |\alpha_{0j}|e^{i \agmnt\alpha_{0j}} =
|\alpha_{0j}|e^{i \agmnt\alpha_{0j} + \pi} \cdot |\alpha|e^{i \agmnt(\alpha_{(-2)j})} \nonumber \\
\Longleftrightarrow\,\, & \agmnt\alpha_{2j} + \agmnt\alpha_{(-2)j} - 2\agmnt\alpha_{0j} \in (2\mathbb{Z}+1)\pi. \nonumber
\end{align}
Here, when for all $j \in \{ \pm 2,\,0\}$, $\alpha_{0j} = 0$,
\begin{eqnarray}
\tilde{H}_2 =
\left[ \begin{array}{ccc}
\alpha_{22} & 0 & \alpha_{2{(-2)}} \\
\alpha_{02} & 0 & \alpha_{0{(-2)}} \\
\alpha_{{(-2)}2} & 0 & \alpha_{{(-2)}{(-2)}}
\end{array} \right]
\notin {\rm U}(3).
\end{eqnarray}
This means that the teleportation cannot be accompleshed. Therefore, There exists $j$ such that $\alpha_{0j} \neq 0$, and $|\beta_R| = |\beta_L| = 1/\sqrt{2}$. This applies to $j$ that satisfies $\alpha_{0j} = 0$, and we obtain
\begin{eqnarray}
\alpha_{0j} = 0\,\,\Longrightarrow |\alpha_{2j}| = |\alpha_{(-2)j}| \left( = \frac{1}{\sqrt{2}} \right).
\end{eqnarray}
\end{comment}
Therefore, $Y_1 \land Y_2$ includes
\begin{align*}
& |a|=|b|;\\
& |\beta_R| = |\beta_L| = \displaystyle\frac{1}{\sqrt{2}};\\
& |\alpha_{2j}| = |\alpha_{(-2)j}| \text{ for any } j\in\{0,\,\pm2\};\\
& \agmnt\alpha_{2j} + \agmnt\alpha_{(-2)j} - 2\agmnt\alpha_{0j} \in (2\mathbb{Z}+1)\pi \text{ for any } j\in\{0,\,\pm2\};
\end{align*}
The converse also holds.
\end{proof}
\end{enumerate}
Combining all together with Lemmas~\ref{lem:A}--\ref{lem:D}, we complete the proof of Theorem~\ref{thm:main}.
\section{Summary and Discussion}
In this paper, we extended the scheme of quantum teleportation by quantum walks introduced by Wang et al. \cite{WSX17}. First, we introduced the mathematical definition of the accomplishment of quantum teleportation by this extended scheme. Secondly, we showed a useful necessary and sufficient condition that the quantum teleportation is accomplished rigorously. Our result classified the parameters of the setting for {the accomplishment of quantum teleportation}. Moreover, we demonstrated some examples of the scheme of the teleportation that is accomplished. Here, we identified the model proposed in the previous study as one of the examples and gave the new models of the teleportation. Moreover, we implied that we can simplify the teleportation in terms of theory and experiment. \par
{In terms of experiment, the example (1) in 4.2 has been realized \cite{CDBP20}. Using Theorem 1, we covered all the patterns of teleportation scheme via quantum walks on $\mathbb{Z}$ and mathematically suggested that this model is the easiest one to implement. This expectation implies that the model is also the most reliable model from the perspective of accuracy of algorithm. \par
Also, this mathematical structure itself can be discussed or extended. For example, the relationship between the number of possible measurement outcomes $t_1 = \#\{(j,\,\varepsilon)\}$ and that of possible revise operators $t_2 = \#\{ U^{(j,\,\varepsilon)}\}$ is interesting. In this paper, $t_1$ is restricted to 6 ($t_1 = \#(\{\pm 2,\,0\}\times\{R,\,L\})$). Moreover, $t_2=\#\{I_2,\,X,\,Z,\,ZX\}=4<t_1$ for example (1) in 4.2, and $t_2=6=t_1$ for examples (2) and (3), both of which are from the case satisfying {\bf (III)-(ii)}. Here one question arises: can we construct examples that satisfy both {\bf (III)-(ii)} and $t_2<t_1$? If possible, constructing such models would lead to simpler implementations of teleportation schemes. One can also consider how the model would behave if we extend it so that $t_1>6$. By adding $\ket{\xi_j}$ for $j\notin\{\pm 2,\,0\}$ to $\mathcal{B}_2$, we can extend the number of possible measurement outcomes to $t_1=2d+2$ with $d\geq 3$. This extension is meaningful when we run quantum walks for more steps before measurement, and then the scheme of teleportation will differ from the one explained in this paper. It is interesting to ask whether we can carry out teleportation such that Alice does not simply send information to Bob via the scheme. We would like to treat these questions as future work from the perspectives of both mathematics and application.}
\end{document}
|
\begin{document}
\title{{Quantum Coherence and Intrinsic Randomness}}
\author{Xiao Yuan, Qi Zhao, Davide Girolami, Xiongfeng Ma*
\thanks{~X.~Y., Q.~Z., X.~M., Center for Quantum Information, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing 100084, China}
\thanks{D.~G., Clarendon Laboratory, Department of Physics, University of Oxford, Parks Road, Oxford OX1 3PU, United Kingdom}
\thanks{*[email protected]}
}
\maketitle
\begin{abstract}
The peculiar uncertainty or randomness of quantum measurements stems from coherence, whose information-theoretic characterization is currently under investigation. Under the resource theory of coherence, it is interesting to investigate interpretations of coherence measures and the interplay with other quantum properties, such as quantum correlations and intrinsic randomness. Coherence can be viewed as the resource for the intrinsic randomness in the measurement outcomes of a state in the computational basis. We observed in our previous work that the coherence of formation, which measures the asymptotic coherence dilution rate, indeed quantifies the uncertainty of a (classical) correlated party about the system measurement outcome. In this work, we re-derive the result from a quantum point of view and then connect the intrinsic randomness to the relative entropy of coherence, another important coherence measure that quantifies the asymptotic distillable coherence. Even though there do not exist bound coherent states, the two kinds of intrinsic randomness quantified by the coherence of formation and the relative entropy of coherence are different. Interestingly, we show that this gap is equal to the quantum discord, a general form of quantum correlations, in the state of the system of interest and the correlated party, after a local measurement on the former system.
\end{abstract}
\section{Introduction}
According to the Born rule\upcite{born}, the outcome of a quantum measurement is intrinsically random. Given a quantum state $\ket{\alpha}=\sum_i c_i \ket i$, where $\{c_i\}$ are complex coefficients, the result of a projection measurement $\{\dyad{i}\}$ is not deterministic, having the output $i$ with a probability $p_i=|c_i|^2$. Such randomness differs from the classical uncertainty due to uncharacterized measurements.
This intrinsic randomness promises to be a potential resource for information processing tasks. There are many proposals for quantum random number generation; we refer to Refs.\upcite{Ma2016QRNG,RevModPhys.89.015004} for reviews. As it is immediately clear from the example above, intrinsic randomness is a consequence of breaking coherent superpositions of quantum states, a phenomenon nowadays routinely observed in the laboratory. Recently, several works have studied the properties of coherent states as an information-theoretic resource\upcite{spekkens,aberg}. It turns out that the key notion to identify a resource, the definition of free operations, is not unique in the case of coherence. As a result, several measures have been proposed to quantify coherent superposition.
The most intuitive way to quantify coherence is via the distance to the set of incoherent states ${\cal I}$ for a reference basis $\{\ket{i}\}$, given by an appropriate yet arbitrary (pseudo-)metric function,
\begin{equation} \label{eq:geomeasure}
\begin{aligned}
C_d(\rho)=\min\limits_{\sigma\in {\cal I}} d(\rho,\sigma),
\end{aligned}
\end{equation}
where $d(\rho,\sigma)$ is a function to measure the distance of two states and $\cal I$ is the incoherent state set which contains all statistical mixtures of the basis states $\{\dyad{i}\}$. We label this notion of coherence as the BCP coherence\upcite{Baumgratz14,herbut}. A widely employed solution is to adopt the relative entropy of coherence as a measure,
\begin{equation} \label{eq:RelEntropy}
\begin{aligned}
C_R(\rho)=\min\limits_{\sigma\in {\cal I}} S(\rho||\sigma),
\end{aligned}
\end{equation}
where the relative entropy of two states is given by $S(\rho||\sigma)=\tr(\rho\log (\rho)-\rho\log(\sigma))$, mainly because of its computability and importance in information theory\upcite{vedral}.
Another option is to quantify coherence via a convex-roof construction, called the coherence of formation, via
\begin{equation}
C_{f}(\rho) \equiv \min _{\left\{p_{j}, | \psi_{j}\right\rangle \}} \sum_{j} p_{j} C_R\left(\dyad{\psi_{j}} \right),
\end{equation}
where the minimisation is over all possible decompositions of $\rho=\sum_j p_j\dyad{\psi_{j}}$.
In the resource theory of coherence, the relative entropy of coherence and the coherence of formation measure the asymptotic coherence distillation and dilution rates, respectively\upcite{winter2016}.
The coherence distillation and dilution problems are then extended into the non-asymptotic scenario using other coherence measures\upcite{Zhao2018OneShot,PhysRevLett.121.010401,zhao2019one,liu2018quantum}. We refer to Refs.\upcite{RevModPhys.89.041003,hu2018quantum} for reviews of the resource theory of coherence.
While the parent notion of asymmetry has a clear-cut interpretation in a number of physical settings\upcite{spekkens} and other significant advances have been reported\upcite{yadin}, the operational power offered by the BCP coherence still needs to be fully understood.
Given an input quantum state and a measurement owned by Alice, the intrinsic randomness of the outcome, against a potential adversary Eve, is a topic of interest in quantum information theory. From Alice's point of view, in the asymptotic limit, the Shannon entropy $H(p_i)_{\rho}=-\sum_i p_i \log p_i$ is the quantifier of the total uncertainty of a measurement with probability distribution $\{p_i\}$ in the measurement outcome of a state $\rho$, named \emph{nominal randomness}. The nominal randomness consists of two parts, \emph{intrinsic randomness} and \emph{extrinsic randomness}. The intrinsic randomness is quantum randomness which should be unpredictable, while the extrinsic randomness can be predicted by a quantum correlated party Eve in principle. For example, for pure states, since Eve's system is decoupled from Alice's one, the only kind of uncertainty is the truly quantum one (intrinsic randomness), there is no extrinsic randomness in the measurement outputs. Thus, the Shannon entropy (nominal randomness) is itself a measure of intrinsic randomness. For the case of incoherent states, the measurement uncertainty is purely classical (extrinsic randomness) and there is no intrinsic randomness as it entirely depends on Alice's incomplete knowledge of her system. Our goal is to quantify the quantum intrinsic randomness concerning the existence of a potential adversary by coherence measures and explore the quantum contribution to the total uncertainty. We consider a scenario which is consistent with the aforementioned two situations and allows to give an operational interpretation to the quantum coherence in the more complex case of mixed coherent states. From an operational perspective, the extractable randomness is measured by the conditional min-entropy\upcite{konig2009operational}. We therefore also show how to obtain our results by considering the asymptotic limit with the conditional min-entropy.
In this work, we focus on the interplay between quantum coherence and intrinsic randomness. In particular, we study operational interpretations of the relative entropy of coherence and the coherence of formation in characterizing intrinsic randomness. The result for the coherence of formation has been previously considered\upcite{Yuan2015}. This work re-derives this result by focusing on a more rigorous scenario with the conditional min-entropy. We further consider a more general scenario and relate the relative entropy of coherence with intrinsic randomness. We also found that while there are no bound coherent states, which would have nonvanishing coherence of formation but zero coherence of distillation, the two quantities are different.
The strategy we adopt is presented as follows. In Section~\ref{qrand}, we consider a projection measurement of the quantum state in the reference basis. We pick the smooth conditional min-entropy as the quantifier of the total uncertainty of the measurement outcomes conditioned on all possible environment systems. To do so, we consider a bipartite extension of a system manipulated by Alice, say accessible to a pair Alice-Eve in the state $\rho_{AE}$, and address the question of how much information Eve can access about Alice's measurement outcome with the probability distribution $\{p_i\}$ and outputs being the elements of a reference basis $\{\ket{i}_A\}$. We show that in the asymptotic scenario, Eve's ignorance is quantified by the relative entropy of coherence of Alice's state with respect to the reference basis, which is a good quantifier of the quantum uncertainty on Alice's measurement: $\min\limits_{\rho_E}H(\{p_i\}|E)_{\rho_{AE}}=C_R(\rho_A)$.
We then compare the results with the scenario where Eve gains information about Alice's measurement ``classically'' by performing a measurement on her part (Section \ref{crand}). A previous work proved that, as in the former setting, Eve's uncertainty on Alice's outcome is a full-fledged measure of the BCP coherence, namely, the coherence of formation\upcite{Yuan2015, liu2018superadditivity}. Such a measure is obtained by a convex roof construction, which is different from the relative entropy of coherence.
In this work, we re-derive the same result with the smooth conditional entropy.
Furthermore, we show that the gap between the two quantities, which characterizes the irreversibility of coherence resource theory\upcite{winter2016}, corresponds to the quantum discord of the Alice-Eve's system {after Alice's measurement} (Section \ref{disc}). This is an interesting result as the state is separable (precisely, it is a classical-quantum state) so no entanglement appears and the quantum advantage of Eve is indisputably due to quantum discord. In Section \ref{concl}, we draw our conclusions.
\section{Coherence and intrinsic randomness}
In this section, we introduce the intrinsic randomness or uncertainty that one has conditioned on a correlated party. We show that the intrinsic randomness is quantified by the coherence of the state in the measurement basis.
\subsection{Relative entropy of coherence as uncertainty of correlated party}\label{qrand}
Let us consider a $d$-dimensional Hilbert space and a reference basis $I:=\{\ket{i}\} = \left\{\ket{1},\ket{2},\dots,\ket{d}\right\}$. Suppose a projective measurement $\{\dyad{i}\}$ is performed on a given quantum state $\rho_A$ accessed by Alice. The measurement outcome has a probability distribution $\{p_i\}$, with $\sum_{i=1}^d p_i=1$ and $p_i= \textrm{Tr}[\rho \dyad{i}]\geq 0 $. We aim to assess the intrinsic or unpredictable randomness of the measurement outcome. To do so, we consider another adversarial party Eve where the joint state shared by Alice and Eve is $\rho_{AE}$, satisfying $\tr_E[\rho_{AE}]=\rho_A$ with partial trace over system $E$. Note that the state $\rho_{AE}$ is not assumed to be pure in our analysis, though we show shortly that considering pure states $\rho_{AE}$ is sufficient for characterising intrinsic randomness. The measurement can be represented as a dephasing channel
\begin{equation}
\Delta_A(\rho) = \sum_i\bra{i}\rho\ket{i}\dyad{i},
\end{equation}
and the joint state after Alice's measurement becomes $\rho'_{AE}=\Delta_A(\rho_{AE})$. The state $\rho'_{AE}$ is a classical-quantum state and the randomness of the measurement outcome conditioned on Eve's system is characterized by the smooth conditional min-entropy\upcite{konig2009operational},
\begin{equation} \label{eq:smoothmin}
H_{\min}^\varepsilon(A|E)_{\rho'_{AE}} = \sup_{\|\sigma_{AE}-\rho'_{AE}\|\le\varepsilon}H_{\min}(A|E)_{\sigma_{AE}},
\end{equation}
where $\varepsilon$ is the smooth parameter, and the supremum is taken over all states $\sigma_{AE}$ that are $\varepsilon$-close to $\rho'_{AE}$ with $\|\sigma_{AE}-\rho'_{AE}\|=1-F(\sigma_{AE},\rho'_{AE})$ and fidelity $F(\rho,\sigma)=(\tr[\sqrt{\sqrt{\rho}\sigma\sqrt{\rho}}])^2$. Here the conditional min-entropy $H_{\min}(A|E)_{\rho_{AE}}$ is
\begin{equation}
H_{\min}(A|E)_{\rho_{AE}} = -\inf_{\sigma_E}D_{\max}(\rho_{AE}\|\textrm{id}_A\otimes \sigma_E),
\end{equation}
where the infimum is over all normalized density operators on system $E$, $\textrm{id}_A$ is the identity matrix on system $A$, and the max-relative entropy $D_{\max}(\rho\|\sigma)$ is defined by
\begin{equation}
D_{\max}(\rho\|\sigma) = \inf\{\lambda\in \mathbb{R} : \rho\le 2^{\lambda}\sigma\}.
\end{equation}
We choose the smooth conditional min-entropy which measures the maximum amount of private and uniformly random bits that can be extracted\upcite{konig2009operational}. Specifically, given a general classical-quantum state $\rho_{AE}$, one can apply an extractor on system $A$ so that it is $\varepsilon'$-close to the perfectly uniform bits that are independent of any side information of system $E$. Therefore, the length $\ell_{\mathrm{extr}}^{\varepsilon'}(A | E)$ of the extracted bits is given by,
\begin{equation}
\ell_{\mathrm{extr}}^{\varepsilon'}(A | E)=H_{\min }^{\varepsilon}(A | E)_{\rho_{AE}}+O(\log 1 / \varepsilon'),
\end{equation}
with $\varepsilon \in\left[\frac{1}{2} \varepsilon^{\prime}, 2 \varepsilon^{\prime}\right]$. One can also consider other entropic quantifiers which may characterise other operational tasks. We leave the discussion of generalising our results to other entropic quantifiers to a future work.
\begin{figure}
\caption{Intrinsic randomness of measurements conditioned on quantum and classical information.
(a)
In a bipartite Alice--Eve system described by a pure state $\psi_{AE}$, Eve keeps her quantum system as side information about Alice's measurement outcome. (b) Eve instead performs a local measurement on her own system to classically predict Alice's measurement outcome.}
\label{qfigure}
\end{figure}
After Alice's measurement on $\rho_{AE}$, the randomness of the measurement can be characterized by
\begin{equation}
R_I^{Q,\varepsilon}(\rho) = \min_{\rho_{AE}:\tr_E[\rho_{AE}]=\rho_A} H_{\min}^\varepsilon(A|E)_{\Delta_A(\rho_{AE})},
\end{equation}
where the minimization is over all states $\rho_{AE}$ satisfying $\tr_E[\rho_{AE}]=\rho_A$.
For each $\rho_{AE}$, we can further consider its purification by introducing an additional system $E'$ so that the whole system is $\ket{\psi}_{AEE'}$, satisfying $\tr_{E'}[\dyad{\psi}_{AEE'}]=\rho_{AE}$. Then the intrinsic randomness becomes
\begin{equation}
\begin{aligned}
R_I^{Q,\varepsilon}(\rho)&= \min_{\ket{\psi}_{AEE'}:\tr_{EE'}[\dyad{\psi}_{AEE'}]=\rho_{A}}H_{\min}^\varepsilon(A|E)_{\Delta_A\circ\tr_{E'}(\ket{\psi}_{AEE'})},\\
&\ge \min_{\ket{\psi}_{AE}:\tr_{E}[\dyad{\psi}_{AE}]=\rho_{A}}H_{\min}^\varepsilon(A|E)_{\Delta_A(\ket{\psi}_{AE})},
\end{aligned}
\end{equation}
where the inequality in the second line follows from the data-processing inequality of the smooth conditional min-entropy defined in Eq.~\eqref{eq:smoothmin}. As the minimisation of the second line is only a special case of the minimisation in the definition of $R_I^Q$ in Eq.~\eqref{Eq:defiRIQ}, we have
\begin{equation}
R_I^{Q,\varepsilon}(\rho) = \min_{\ket{\psi}_{AE}:\tr_{E}[\dyad{\psi}_{AE}]=\rho_{A}}H_{\min}^\varepsilon(A|E)_{\Delta_A(\ket{\psi}_{AE})}.
\end{equation}
As all purification states $\ket{\psi}_{AE}$ are equivalent under isometry on system $E$, which nevertheless does not affect the smooth conditional min-entropy, we therefore have
\begin{equation}
R_I^{Q,\varepsilon}(\rho) = H_{\min}^\varepsilon(A|E)_{\Delta_A(\ket{\psi}_{AE})},
\end{equation}
where $\ket{\psi}_{AE}$ is any purification of $\rho_A$ as shown in Fig.~\ref{qfigure}(a).
Suppose Alice prepares $n\gg1$ copies of $\rho_A$ and performs the projective measurement on all the copies. The average randomness of each measurement outcome in the limit $n\rightarrow\infty$ and $\varepsilon\rightarrow0^+$ is then characterized by
\begin{equation}
\begin{aligned}
R_I^Q &= \lim_{\varepsilon\rightarrow0^+}\lim_{n\rightarrow\infty}\frac{1}{n}R_I^{Q,\varepsilon}(\rho^{\otimes n}),\\
&=\lim_{\varepsilon\rightarrow0^+}\lim_{n\rightarrow\infty}\frac{1}{n}H_{\min}^\varepsilon(A^{n}|E^n)_{\Delta^{\otimes n}_A(\ket{\psi}^{\otimes n}_{AE})}.
\end{aligned}
\end{equation}
\begin{theorem}
The intrinsic randomness of Alice's measurement outcome conditioned on any (quantum) adversary Eve, $R^Q_I $ is quantified by the relative entropy of coherence,
\begin{equation} \label{Eq:rq}
\begin{aligned}
R^Q_I(\rho_A) &=C_R(\rho_A)\\
&=S(\rho_A^{\mathrm{diag}}) - S(\rho_A) \\
&=S(\rho_A||\rho_A^{\mathrm{diag}}). \\
\end{aligned}
\end{equation}
\end{theorem}
\begin{proof}
According to the asymptotic equipartition property of the smooth entropies\upcite{tomamichel2015quantum}, we have
\begin{equation}\label{Eq:AEP}
\lim_{\varepsilon\rightarrow0^+}\lim _{n \rightarrow \infty}\left\{\frac{1}{n} H_{\min }^{\varepsilon}\left(A^{n} | B^{n}\right)_{\rho^{\otimes n}}\right\} = H(A | B)_{\rho}.
\end{equation}
Therefore, we have
\begin{equation}\label{Eq:defiRIQ}
R_I^Q = H(A|E)_{\Delta_A(\ket{\psi}_{AE})},
\end{equation}
where $H(A|B)_{\rho_{AB}}=S(\rho_{AB})-S(\rho_{B})$ is the von Neumann conditional entropy, $S(\rho)=-\tr[\rho\log\rho]$ is the von Neumann entropy and $\rho_B=\tr_A[\rho_{AB}]$. The right hand side of Eq.~\eqref{Eq:defiRIQ} can be explicitly evaluated for example with the analysis in Ref.\upcite{Coles12}. We also briefly summarize the proof here for self-consistence. After the measurement, the state is changed to
\begin{equation}
\rho_{AE}' = \Delta_A(\ket{\psi}_{AE}) = \sum_i p_i\ket{i}_A\bra{i}\otimes\rho_i^E,
\end{equation}
where $\rho_E = \sum_i p_i\rho_i^E$ and each $\rho_i^E=\bra{i}_A(\dyad{\psi}_{AE})\ket{i}_A/p_i$ is a pure state. Using the equality $S\left(\sum_ip_i\dyad{i}\otimes\rho_i\right) = H(p_i) + \sum_i p_i S(\rho_i)$, the conditional entropy of the post-measurement state is then
\begin{equation}
S(A|E)_{\rho_{AE}'} = H(p_i) + \sum_ip_iS(\rho_i^E) - S(\rho_E).
\end{equation}
Since $H(p_i) = S(\rho_A^{\mathrm{diag}})$ with $\rho_A^{\mathrm{diag}}:=\sum_i p_i \dyad{i}$, $S(\rho_E) = S(\rho_A)$, and $S(\rho_i^E) = 0, \forall i,$ we obtain our result in the theorem.
\end{proof}
Therefore, as a measure of BCP coherence\upcite{Baumgratz14}, the relative entropy of coherence $C_R(\rho_A)$ satisfies all the requirements for a consistent measure of intrinsic randomness.
\subsection{Coherence of formation as uncertainty of correlated party}\label{crand}
We observed that the quantum uncertainty of a local measurement corresponds to the best-case uncertainty of a correlated party Eve, as quantified by means of the quantum conditional entropy. We compare the result with an alternative measure of quantum coherence --- coherence of formation\upcite{Yuan2015}. The setting is, for the sake of clarity, depicted in Fig.~\ref{qfigure}(b). The difference is that Eve performs a measurement whose outcomes follow a probability distribution $\{q^E_i\}, q^E_i=\textrm{Tr}[\rho_E\ket{e_i'}_E\bra{e_i'}]$, on her own system to classically predict Alice's measurement outcome.
Suppose Eve's measurement is represented by a quantum channel as
\begin{equation}
\mathcal{M}(\rho) = \sum_i \tr[\rho O_i]\dyad{i},
\end{equation}
where $O_i\ge 0$, $\sum_i O_i = \textrm{id}_E$, and $\textrm{id}_E$ is the identity matrix of system $E$. For one copy of $\rho_{AE}$, the randomness of Alice's measurement outcome conditioned on Eve's measurement outcome is then
\begin{equation}
R_I^{C,\varepsilon}(\rho) = \min_{\mathcal{M}}\min_{\rho_{AE}:\tr_E[\rho_{AE}]=\rho_A} H_{\min}^\varepsilon(A|E)_{\Delta_A\otimes \mathcal{M}(\rho_{AE})},
\end{equation}
where the minimisation is also over all Eve's possible measurements and all possible $\rho_{AE}$ satisfying $\tr_E[\rho_{AE}]=\rho_A$. By an argument similar to the proof in the last section, we only need to focus on one specific purification $\ket{\psi}_{AE}$ of $\rho_A$. Therefore, the intrinsic randomness is
\begin{equation}
R_I^{C,\varepsilon} (\rho)= \min_{\mathcal{M}} H_{\min}^\varepsilon(A|E)_{\Delta_A\otimes \mathcal{M}(\ket{\psi}_{AE})}.
\end{equation}
When Alice prepares $n\gg1$ copies of $\rho_A$ and performs the projective measurement for all the copies, the average randomness of each measurement outcome is
\begin{equation}
R_I^C = \lim_{\varepsilon\rightarrow0^+}\lim_{n\rightarrow\infty}\frac{1}{n}R_I^{C,\varepsilon}(\rho^{\otimes n}).
\end{equation}
In general, Eve's measurement can be a joint measurement on all her local systems. In this case, we have $R_I^C=R_I^Q$. Here instead, we restrict to the scenario that Eve also performs identical measurement for each copy of her local system\footnote{Note that here we only need to assume that Eve performs independent measurements on each copy of her local system. It reduces to the identical measurement case by considering a random permutation of all the states.}. Therefore the joint state after the measurements is ${\Delta_A^{\otimes n}\otimes \mathcal{M}^{\otimes n}(\ket{\psi}_{AE}^{\otimes n})}$.
According to the asymptotic equipartition property of the smooth entropies\upcite{tomamichel2015quantum} shown in Eq.~\eqref{Eq:AEP}, we have
\begin{equation}\label{Eq:defiRIC}
R_I^C = \min_{\mathcal{M}}H(A|E)_{\Delta_A\otimes \mathcal{M}(\ket{\psi}_{AE})}.
\end{equation}
According to our recent work\upcite{liu2018quantum}, we can conclude as follows.
\begin{theorem}
The intrinsic randomness of Alice's measurement outcome conditioned on any (classical) adversary Eve with independent measurements is quantified by the coherence of formation,
\begin{equation}
R_I^C = C_{f}(\rho) \equiv \min _{\left\{p_{j}, | \psi_{j}\right\rangle \}} \sum_{j} p_{j} S\left(\Delta\left( \dyad{\psi_{j}} \right)\right),
\end{equation}
where $C_{f}(\rho)$ is the coherence of formation and the minimisation is over all decomposition of $\rho=\sum_{j} p_{j} \dyad{\psi_{j}}$.
\end{theorem}
\subsection{Qubit calculation for $R_I^Q$ and $R_I^C$}
The quantum uncertainty measure obtained by convex roof extension is a measure of BCP coherence as well\upcite{Yuan2015}. Let us compare the two quantities $R^C_I(\rho_A)$ and $R^Q_I(\rho_A)$ in a simple example about a qubit system. In the Bloch sphere representation, $\rho_A = (I+\vec{n}\cdot\vec{\sigma})/2$, where $\vec{n} = (n_x,n_y,n_z)$ is the Bloch vector and $\vec{\sigma} = (\sigma_x,\sigma_y, \sigma_z)$ is the vector of Pauli matrices. Supposing that the measurement basis is the $\sigma_z$ eigenbasis, which is denoted by $\{\ket{0},\ket{1}\}$, then we obtain
\begin{eqnarray}\label{Eq:ef}
R_{z}^C(\rho_A)&=& H\left(\frac{1+\sqrt{1-n_x^2 - n_y^2}}{2}\right)\\
R_{z}^Q(\rho_A)&=& H\left(\frac{n_z + 1}{2}\right) - H\left(\frac{|n| + 1}{2}\right),\nonumber
\end{eqnarray}
where $|n| = \sqrt{n_x^2+n_y^2+n_z^2}$ and $H$ is the binary entropy.
Specifically, for the state $\rho_A(v) = v\dyad{+} + \frac{1 - v}{2}I$, where $\ket{+} = (\ket{0}+\ket{1})/\sqrt{2}, v\in[0,1], \vec{n}(v) = (v, 0, 0)$, we have
\begin{eqnarray}\label{Eq:ef2}
R_{z}^C(\rho_A)&=& H\left(\frac{1+\sqrt{1-v^2}}{2}\right),\\
R_{z}^Q(\rho_A)&=& 1 - H\left(\frac{v + 1}{2}\right).
\end{eqnarray}
In Fig.~\ref{fig:RQC}, we plot the two measures versus the mixing parameter $v$. By definition, the randomness quantifier $R_z^C$ is against a classical adversary, who can only perform independent measurements on her local systems. On the contrary, the randomness quantifier $R_z^Q$ assumes a powerful quantum adversary, who can perform general measurements. As a classical adversary is a special case of a general quantum adversary, the quantum coherence measure $R_z^Q$ is generally smaller than $R_z^C$. As they both measure randomness, it is not hard to see that they both vanish when the state is incoherent and they converge to the Shannon entropy in the pure state case. All those intuitions are verified in the numerical example shown in Fig.~\ref{fig:RQC}.
\begin{figure}
\caption{Comparison of the measures of quantum uncertainty $R_z^Q$ (red dotted line) and $R_z^C$ (blue dot-dashed line) in the qubit state $\rho_A(v)=v\dyad{+}+\frac{1-v}{2}I$, as a function of the mixing parameter $v$.}
\label{fig:RQC}
\end{figure}
\section{Quantum Coherence gap and Quantum Discord}
\subsection{Quantum Discord as difference between coherence measures}\label{disc}
Note that the quantum coherence measure $R_I^Q(\rho_A)$ is obtained when considering an adversary that utilizes quantum information to predict Alice's measurement outcome. In comparison, the measure $R_I^C(\rho_A)$ is obtained when the adversary performs independent and identical measurements on her local systems to have a classical prediction. Obviously, the latter adversary strategy is a special case of the former one, and hence $R_I^C(\rho_A) \geq R_I^Q(\rho_A)$.
In general, there is a non-zero gap between the two quantum coherence measures, while they both vanish for incoherent states. As the difference between the two frameworks in Fig.~\ref{qfigure} is brought about by making a measurement on Eve's party, it is intuitive to think that the gap is related to how much the local measurement changes the state. Indeed, we show that such a gap is associated with the quantum discord of the bipartite state $\rho_{AE}'=\sum_i p_i \dyad{i}\otimes \rho_E^i$ of the system after Alice has carried out her measurement. Discord (we omit the quantum label from now on) is a kind of quantum correlation which equals entanglement for pure states but also shows up in all but a null-measure set of separable states. It can be interpreted as the minimum disturbance induced on a bipartite system by a local measurement\upcite{Modi12}, but here it quantifies the advantage of a quantum correlated system Eve in accessing information about Alice's measurement. Its peculiarity is its asymmetry, as a measurement on one party has in general a different effect than one performed on the other subsystem.
For a state $\rho_{AE}$, the discord defined as \[D_E(\rho_{AE})=\min\limits_{\{q_i^E\}}S(A|\{q_i^E\})_{\rho_{AE}}-S(A,E)_{\rho_{AE}}+S(E)_{\rho_{AE}}\] measures the least possible disturbance of a measurement with probability distribution $\{q_i^E\}$ on the $E$ party. Simple algebra steps show that $\min\limits_{\{q^E_i\}}S(A|\{q^E_i\})_{\rho_{AE}'}=\min\limits_{\{q^E_i\}}H(\{p_i\}|\{q^E_i\})_{\psi_{AE}}$. Hence, we obtain the following result for the meaning of the gap of the two coherence measures.
\begin{theorem}
The gap between the relative entropy of coherence and the coherence of formation is given by the discord of the joint state after Alice's measurement, i.e. the least possible state change induced by an Eve's measurement,
\begin{equation}\label{Eq:discord}
R_{I}^C(\rho_A) - R_{I}^Q(\rho_A) = D_E(\rho_{AE}').
\end{equation}
\end{theorem}
\begin{table}[t]
\centering
\caption{Comparison between coherence and entanglement measures. COF: Coherence of formation; REC: Relative entropy of coherence}
\begin{tabular}{ccc}
\hline
Properties&Coherence/Uncertainty&Entanglement\\
\hline
Cost& COF $R^C_I$, Eq.~\eqref{Eq:defiRIC}&Entanglement of formation\\
Distillation&REC $R^Q_I$, Eq.~\eqref{Eq:defiRIQ}& Distillable Entanglement \\
Gap&Discord, Eq.~\eqref{Eq:discord} & Bound Entanglement\upcite{jaeger}\\
\hline
\end{tabular}\label{table}
\end{table}
We observe that, in the resource theory of quantum coherence, the coherence of formation and the relative entropy of coherence measure the coherence cost and the distillable coherence in the asymptotic limit, respectively\upcite{winter2016}. Thus, the coherence cost and the distillable coherence equal the quantum uncertainty conditioned on Eve's classical\upcite{Yuan2015} and quantum strategies here discussed. The scenario is similar to what happens in the entanglement resource theory\upcite{Horodecki09}, where there is a nonzero gap between the entanglement cost and the distillable entanglement (Table \ref{table}). In particular, some entangled states have zero distillable entanglement, a phenomenon called bound entanglement. However, a key difference is that there are no coherent states with zero coherence of distillation\upcite{winter2016}. Hence, it emerges that zero relative entropy of coherence on Alice's local measurement implies zero coherence cost, $R_I^Q(\rho_A)=0\Rightarrow R_I^C(\rho_A)=0$ and then zero quantum discord, i.e. there exists at least one measurement on Eve's side which does not change the state. We also observe that the state $\rho_{AE}'$ is always separable. Thus, the quantum advantage in accessing non-local information about a correlated party measurement is here genuinely due to quantum discord, rather than entanglement.
\subsection{An example}
To clarify the result, we consider the following example inspired by the cryptographic scenario of the BB84 protocol\upcite{bb84}. Alice processes two bits of information representing the eigenbasis and polarization of a quantum state $\rho_A$. If the basis bit is $0$ ($1$), she prepares the state in the $X$ ($Z$) basis, while if the polarization bit is $0$ ($1$), the state has polarization up (down) in the chosen eigenbasis. To set the notation, if the two bits are $00, 01, 10, 11$, Alice prepares $\ket{0},\ket{1},\ket{+},\ket{-}$, respectively. Let us suppose the probability of choosing each state is equal, and that Alice sends the quantum state to Eve, who tries to guess the state. Then, the state shared by Alice and Eve is given by
\begin{eqnarray}\label{state}
\rho_{AE}'&=&\frac{1}{4}(\dyad{00}\otimes\dyad{0}+\dyad{01}\otimes\dyad{1}\\
&+&\dyad{10}\otimes\dyad{+}+\dyad{11}\otimes\dyad{-}).\nonumber
\end{eqnarray}
Equivalently, we can consider that Alice and Eve initially share a pure state
\begin{equation}\label{Eq:phiAE}
\ket{\psi}_{AE}=\frac{1}{2}(\ket{00}\ket{0}+\ket{01}\ket{1}+\ket{10}\ket{+}+\ket{11}\ket{-}),\nonumber
\end{equation}
and the prepared state $\rho_{AE}'$ can be obtained by measuring Alice's subsystem of $\ket{\psi}_{AE}$ in the computational basis, $I =\{\ket{00},\ket{01},\ket{10},\ket{11}\}$.
Note that, as we consider the case where Alice has two qubits in her system, the randomness against Eve should be $0\le R_I \le 2$. Therefore, characterising Eve's information about Alice's measurement outcome is equivalent to considering the coherence of
\begin{equation}
\begin{aligned}
&\rho_{A}\\
&=\frac{1}{4}((\ket{00}+\frac{1}{\sqrt{2}}(\ket{10}+\ket{11}))(\bra{00}+\frac{1}{\sqrt{2}}(\bra{10}+\bra{11}))\\
&+(\ket{01}+\frac{1}{\sqrt{2}}(\ket{10}+\ket{11}))(\bra{01}+\frac{1}{\sqrt{2}}(\bra{10}+\bra{11}))).
\end{aligned}
\end{equation}
The two quantum coherence measures are $R_I^Q(\rho_A)=1$ and $R_I^C(\rho_A)=3/2$, the latter being obtained via numerical optimization. Hence, the quantum discord of the post-measurement state
is $D_E(\ket{\psi}_{AE})=1/2$, measuring how much extra information Eve can obtain by performing coherent measurements than independent measurements of multiple copies of $\rho_{AE}'$.
\section{Conclusion}\label{concl}
Given the twofold uncertainty of a quantum measurement, we provided an operational interpretation to the genuinely intrinsic randomness about a measurement performed by an observer Alice, which we quantify with the relative entropy of coherence, as the minimum uncertainty about the outcome by a quantum correlated party Eve. We then compared the result to an alternative strategy to quantify quantum coherence by a convex roof extension of the Shannon entropy. The gap between the two strategies is equal to the discord of the bipartite state shared by Alice and Eve. The result provides a new link between single system quantumness and quantum correlations even in separable states, which was inspired by previous studies on the trade-off between local and global quantum properties\upcite{Girolami13,entco,Yao15,ma2015converting,Streltsov16,Chitambar16,Hu17relative,PhysRevX.7.011024}.
Following this line of thinking, another interesting scenario where the interplay between coherence and correlations should be investigated is the context of physical limits to privacy and to communication, e.g., data hiding protocols\upcite{DiVincenzo04,enigma,datta,molmer,boixo2011quantum}. Another potential avenue of further research is the extension of the result to the multipartite setting, i.e., to determine a link between local coherence and genuine multipartite quantum correlations. Note that in this paper, we only consider the uncertainty of a projective measurement (computational basis measurement); it is also interesting to explore the uncertainty of general quantum measurements, instead of projective measurements\upcite{cao2015loss}. Quantum coherence is also connected to the generated randomness with the extraction process\upcite{zhao2019one,PhysRevA.97.012302}.
\end{document}
|
\begin{document}
\author[Robert Laterveer]
{Robert Laterveer}
\address{Institut de Recherche Math\'ematique Avanc\'ee,
CNRS -- Universit\'e
de Strasbourg,\
7 Rue Ren\'e Des\-car\-tes, 67084 Strasbourg CEDEX,
FRANCE.}
\email{[email protected]}
\title{Algebraic cycles on a very special EPW sextic}
\begin{abstract} Motivated by the Beauville--Voisin conjecture about Chow rings of powers of $K3$ surfaces, we consider a similar conjecture for Chow rings of powers of EPW sextics. We prove part of this conjecture for the very special EPW sextic studied by Donten--Bury et alii. We also prove some other results concerning the Chow groups of this very special EPW sextic, and of certain related hyperk\"ahler fourfolds.
\end{abstract}
\keywords{Algebraic cycles, Chow groups, motives, finite--dimensional motives, weak splitting property, weak Lefschetz conjecture for Chow groups, multiplicative Chow--K\"unneth decomposition, Bloch--Beilinson filtration, EPW sextics, hyperk\"ahler varieties, K3 surfaces, abelian varieties, Calabi--Yau varieties}
\subjclass[2010]{Primary 14C15, 14C25, 14C30. Secondary 14J32, 14J35, 14J70, 14K99}
\maketitle
\section{Introduction}
For a smooth projective variety $X$ over $\mathbb{C}$, let $A^i(X)=CH^i(X)_{\mathbb{Q}}$ denote the Chow group of codimension $i$ algebraic cycles modulo rational equivalence with $\mathbb{Q}$--coefficients. Intersection product defines a ring structure on $A^\ast(X)=\oplus_i A^i(X)$. In the case of $K3$ surfaces, this ring structure has an interesting property:
\begin{theorem}[Beauville--Voisin \cite{BV}]\label{K3} Let $S$ be a $K3$ surface. Let $D_i, D_i^\prime\in A^1(S)$ be a finite number of divisors. Then
\[ \sum_i D_i\cdot D_i^\prime=0\ \ \ \hbox{in}\ A^2(S)_{}\ \Leftrightarrow\ \sum_i D_i\cdot D_i^\prime=0\ \ \ \hbox{in}\ H^4(S,\mathbb{Q})\ .\]
\end{theorem}
Conjecturally, a similar property holds for self--products of $K3$ surfaces:
\begin{conjecture}[Beauville--Voisin]\label{K3conj} Let $S$ be a $K3$ surface. For $r\ge 1$, let $D^\ast(S^r)\subset A^\ast(S^r)_{}$ be the $\mathbb{Q}$--subalgebra generated by (the pullbacks of)
divisors and the diagonal of $S$. The restriction of the cycle class map induces an injection
\[ D^i(S^r)\ \to\ H^{2i}(S^r,\mathbb{Q})\ \]
for all $i$ and all $r$.
\end{conjecture}
(cf. \cite{V12}, \cite{V13}, \cite{Vo}, \cite{Y} for extensions and partial results concerning conjecture \ref{K3conj}.)
Beauville has asked which varieties have behaviour similar to theorem \ref{K3} and conjecture \ref{K3conj}. This is the problem of determining which varieties verify the ``weak splitting property'' of \cite{Beau3}. We briefly state this problem here as follows:
\begin{problem}[Beauville \cite{Beau3}]\label{prob} Find a nice class $\mathcal{C}$ of varieties (containing $K3$ surfaces and abelian varieties), such that for any $X\in\mathcal{C}$, the Chow ring of $X$ admits a multiplicative bigrading $A^\ast_{(\ast)}(X)$, with
\[ A^i(X)=\bigoplus_{j\ge 0} A^i_{(j)}(X)\ \ \ \hbox{for\ all\ }i\ .\]
This bigrading should split the conjectural Bloch--Beilinson filtration, in particular
\[ A^i_{hom}(X)= \bigoplus_{j\ge 1} A^i_{(j)}(X)\ .\]
\end{problem}
It has been conjectured that hyperk\"ahler varieties are in $\mathcal{C}$ \cite[Introduction]{Beau3}. Also, not all Calabi--Yau varieties can be in $\mathcal{C}$ \cite[Example 1.7(b)]{Beau3}. An interesting novel approach to problem \ref{prob} (as well as a reinterpretation of theorem \ref{K3}) is provided by the concept of {\em multiplicative Chow--K\"unneth decomposition\/} (cf. \cite{SV}, \cite{V6}, \cite{SV2} and subsection \ref{ssmck} below).
In this note, we ask whether EPW sextics might be in $\mathcal{C}$. An EPW sextic is a special sextic $X\subset\mathbb{P}^5(\mathbb{C})$ constructed in \cite{EPW}. EPW sextics are not smooth; however, a generic EPW sextic is a quotient $X=X_0/(\sigma_0)$, where $X_0$ is a smooth hyperk\"ahler variety (called a double EPW sextic) and $\sigma_0$ is an anti--symplectic involution \cite[Theorem 1.1]{OG}, \cite{OG3}. Quotient varieties behave like smooth varieties with respect to intersection theory with rational coefficients, so the following conjecture makes sense:
\begin{conjecture}\label{optim} Let $X$ be an EPW sextic, and assume $X$ is a quotient variety $X=X_0/G$ with $X_0$ smooth and $G\subset\hbox{Aut}(X_0)$ a finite group. Then
$X\in\mathcal{C}$.
\end{conjecture}
There are two reasons why conjecture \ref{optim} is likely to be true: first, because an EPW sextic is a Calabi--Yau hypersurface (and these are probably in $\mathcal{C}$); secondly, because the
hyperk\"ahler variety $X_0$ should be in $\mathcal{C}$, and the involution $\sigma_0$ should behave nicely with respect to the bigrading on $A^\ast_{(\ast)}(X_0)$.
Let us optimistically suppose conjecture \ref{optim} is true, and see what consequences this entails for the Chow ring of EPW sextics. We recall that Chow groups are expected to satisfy a weak Lefschetz property, according to a long--standing conjecture:
\begin{conjecture}[Hartshorne \cite{Ha}]\label{weak} Let $X\subset\mathbb{P}^{n+1}(\mathbb{C})$ be a smooth hypersurface of dimension $n\ge 4$. Then the cycle class map
\[ A^2_{}(X)_{}\ \to\ H^4(X,\mathbb{Q}) \]
is injective.
\end{conjecture}
Conjecture \ref{weak} is notoriously open for all hypersurfaces of degree $d\ge n+2$.
Since quotient varieties behave in many ways like smooth varieties, it seems reasonable to expect that conjecture \ref{weak} extends to hypersurfaces that are quotient varieties. This would imply that an EPW sextic $X$ as in conjecture \ref{optim} has $A^2_{hom}(X)=0$. That is, conjecturally we have that
\[ A^i(X)= A^i_{(0)}(X)\ \ \ \hbox{for\ all\ }i\le 2\ .\]
For any $r\ge 1$, let us now define
\[ E^\ast(X^r)\ \subset \ A^\ast(X^r) \]
as the $\mathbb{Q}$--subalgebra generated by (pullbacks of) elements of $A^1(X)$ and $A^2(X)$ and the class of the diagonal of $X$. The above remarks imply a conjectural inclusion
\[ E^\ast(X^r)\ \subset\ A^\ast_{(0)}(X^r)\ =\ A^\ast(X^r)/ A^\ast_{hom}(X^r) \ .\]
We thus arrive at the following concrete, falsifiable conjecture:
\begin{conjecture}\label{subring} Let $X$ be an EPW sextic as in conjecture \ref{optim}.
Then restriction of the cycle class map
\[ E^i(X^r)\ \to\ H^{2i}(X^r,\mathbb{Q}) \]
is injective for all $i$ and all $r$.
\end{conjecture}
Conjecture \ref{subring} is the analogue of conjecture \ref{K3conj} for EPW sextics; the role of divisors on the $K3$ surface is played by (the hyperplane section and) codimension $2$ cycles on the sextic.
The main result in this note provides some evidence for conjecture \ref{subring}: we can prove it is true for $0$--cycles and $1$--cycles on one very special EPW sextic:
\begin{nonumbering}[=theorem \ref{main2}] Let $X$ be the very special EPW sextic of \cite{DBG}. Let $r\in\mathbb{N}$.
The restriction of the cycle class map
\[ E^i(X^r)\ \to\ H^{2i}(X^r,\mathbb{Q}) \]
is injective for $i\ge 4r-1$.
\end{nonumbering}
The very special EPW sextic of \cite{DBG} (cf. section \ref{secepw} below for a definition) is not smooth, but it is a ``Calabi--Yau variety with quotient singularities''. The very special EPW sextic $X$ is very symmetric; it is also
remarkable for providing the only example known so far of a complete family of $20$ pairwise incident planes in $\mathbb{P}^5(\mathbb{C})$ \cite{DBG}.
As resumed in theorem \ref{epw} below, the very special EPW sextic $X$ is related to hyperk\"ahler varieties in two different ways: (a)
$X$ is rationally dominated via a degree $2$ map by the Hilbert scheme $S^{[2]}$ where $S$ is a $K3$ surface of Picard number $20$;
(b) $X$ admits a double cover that is the quotient of an abelian variety by a finite group of group automorphisms, and this quotient admits a hyperk\"ahler resolution $X_0$.
To prove theorem \ref{main2}, we first prove (proposition \ref{mck}) that the very special EPW sextic $X$ has a multiplicative Chow--K\"unneth decomposition, in the sense of Shen--Vial \cite{SV}, and so the Chow ring of $X$ has a bigrading. Next,
we establish (proposition \ref{2}) that
\begin{equation}\label{0} A^2(X)=A^2_{(0)}(X)\ .\end{equation}
Both these facts are proven using description (b), via the theory of {\em symmetrically distinguished cycles\/} \cite{OS}.
Note that equality (\ref{0}) might be considered as evidence for conjecture \ref{weak} for $X$. In order to prove conjecture \ref{weak}
for the very special EPW sextic $X$, it remains to prove that
\[ A^2_{(0)}(X)\cap A^2_{hom}(X)\stackrel{??}{=}0\ .\]
Likewise, in order to prove the full conjecture \ref{subring} for the very special EPW sextic $X$, it remains to prove that
\[ A^i_{(0)}(X^r)\cap A^i_{hom}(X^r)\stackrel{??}{=}0\ \ \ \hbox{for\ all\ }i, r\ .\]
We are not able to prove these equalities outside of the range $i\ge 4r-1$; this is related to some of the open cases of Beauville's conjecture on Chow rings of abelian varieties (remarks \ref{BB} and \ref{BBk}).
On the positive side, we establish a precise relation between the Chow ring of the very special EPW sextic $X$ and the Chow ring of the
hyperk\"ahler fourfold $X_0$ mentioned in description (b) (theorem \ref{main3}). This relation provides an alternative description of the splitting of the Chow ring of $X_0$ coming from a multiplicative Chow--K\"unneth decomposition (corollary \ref{XX0}). In proving this relation, we exploit description (a); a key ingredient in the proof is a strong version of the generalized Hodge conjecture for $X$ and $X_0$ (proposition \ref{ghc}), which crucially relies on the fact that the $K3$ surface $S$ has maximal Picard number.
We also obtain some results concerning Bloch's conjecture (subsection \ref{ssb}), as well as a conjecture of Voisin (subsection \ref{ssv}), for the very special EPW sextic. The application to Bloch's conjecture relies on description (b) (via the theory of symmetrically distinguished cycles), but also on description (a) (via the surjectivity result proposition \ref{surj}).
We end this introduction with a challenge: can one prove theorem \ref{main2} for other (not very special) EPW sextics?
\vskip0.6cm
\begin{convention} In this note, the word {\sl variety\/} will refer to a reduced irreducible scheme of finite type over $\mathbb{C}$. A {\sl subvariety\/} is a (possibly reducible) reduced subscheme which is equidimensional.
{\bf All Chow groups will be with rational coefficients}: we denote by $A_jX$ the Chow group of $j$--dimensional cycles on $X$ with $\mathbb{Q}$--coefficients; for $X$ smooth of dimension $n$ the notations $A_jX$ and $A^{n-j}X$ will be used interchangeably.
The notations $A^j_{hom}(X)$, $A^j_{num}(X)$, $A^j_{AJ}(X)$ will be used to indicate the subgroups of homologically trivial, resp. numerically trivial, resp. Abel--Jacobi trivial cycles.
The contravariant category of Chow motives (i.e., pure motives with respect to rational equivalence as in \cite{Sc}, \cite{MNP}) will be denoted $\mathcal M_{\rm rat}$.
We will write $H^j(X)$
and $H_j(X)$
to indicate singular cohomology $H^j(X,\mathbb{Q})$,
resp. Borel--Moore homology $H_j(X,\mathbb{Q})$.
\end{convention}
\section{Preliminary material}
\subsection{Quotient varieties}
\begin{definition} A {\em projective quotient variety\/} is a variety
\[ X=Y/G\ ,\]
where $Y$ is a smooth projective variety and $G\subset\hbox{Aut}(Y)$ is a finite group.
\end{definition}
\begin{proposition}[Fulton \cite{F}]\label{quot} Let $X$ be a projective quotient variety of dimension $n$. Let $A^\ast(X)$ denote the operational Chow cohomology ring. The natural map
\[ A^i(X)\ \to\ A_{n-i}(X) \]
is an isomorphism for all $i$.
\end{proposition}
\begin{proof} This is \cite[Example 17.4.10]{F}.
\end{proof}
\begin{remark} It follows from proposition \ref{quot} that the formalism of correspondences goes through unchanged for projective quotient varieties (this is also noted in \cite[Example 16.1.13]{F}). We can thus consider motives $(X,p,0)\in\mathcal M_{\rm rat}$, where $X$ is a projective quotient variety and $p\in A^n(X\times X)$ is a projector. For a projective quotient variety $X=Y/G$, one readily proves (using Manin's identity principle) that there is an isomorphism
\[ h(X)\cong h(Y)^G:=(Y,\Delta^G_Y,0)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\ ,\]
where $\Delta^G_Y$ denotes the idempotent ${1\over \vert G\vert}{\sum_{g\in G}}\Gamma_g$.
\end{remark}
\subsection{Finite--dimensionality}
We refer to \cite{Kim}, \cite{An}, \cite{MNP}, \cite{Iv}, \cite{J4} for basics on the notion of finite--dimensional motive.
An essential property of varieties with finite--dimensional motive is embodied by the nilpotence theorem:
\begin{theorem}[Kimura \cite{Kim}]\label{nilp} Let $X$ be a smooth projective variety of dimension $n$ with finite--dimensional motive. Let $\Gamma\in A^n(X\times X)_{}$ be a correspondence which is numerically trivial. Then there is $N\in\mathbb{N}$ such that
\[ \Gamma^{\circ N}=0\ \ \ \ \in A^n(X\times X)_{}\ .\]
\end{theorem}
Actually, the nilpotence property (for all powers of $X$) could serve as an alternative definition of finite--dimensional motive, as shown by a result of Jannsen \cite[Corollary 3.9]{J4}.
Conjecturally, all smooth projective varieties have finite--dimensional motive \cite{Kim}. We are still far from knowing this, but at least there are quite a few non--trivial examples:
\begin{remark}
The following varieties have finite--dimensional motive: abelian varieties, varieties dominated by products of curves \cite{Kim}, $K3$ surfaces with Picard number $19$ or $20$ \cite{P}, surfaces not of general type with $p_g=0$ \cite[Theorem 2.11]{GP}, certain surfaces of general type with $p_g=0$ \cite{GP}, \cite{PW}, \cite{V8}, Hilbert schemes of surfaces known to have finite--dimensional motive \cite{CM}, generalized Kummer varieties \cite[Remark 2.9(\romannumeral2)]{Xu}, \cite{FTV},
threefolds with nef tangent bundle \cite{Iy}, \cite[Example 3.16]{V3}, fourfolds with nef tangent bundle \cite{Iy2}, log--homogeneous varieties in the sense of \cite{Br} (this follows from \cite[Theorem 4.4]{Iy2}), certain threefolds of general type \cite[Section 8]{V5}, varieties of dimension $\le 3$ rationally dominated by products of curves \cite[Example 3.15]{V3}, varieties $X$ with $A^i_{AJ}(X)_{}=0$ for all $i$ \cite[Theorem 4]{V2}, products of varieties with finite--dimensional motive \cite{Kim}.
\end{remark}
\begin{remark}
It is an embarrassing fact that up till now, all examples of finite--dimensional motives happen to lie in the tensor subcategory generated by Chow motives of curves, i.e. they are ``motives of abelian type'' in the sense of \cite{V3}. On the other hand, there exist many motives that lie outside this subcategory, e.g. the motive of a very general quintic hypersurface in $\mathbb{P}^3$ \cite[7.6]{D}.
\end{remark}
The notion of finite--dimensionality is easily extended to quotient varieties:
\begin{definition} Let $X=Y/G$ be a projective quotient variety. We say that $X$ has finite--dimensional motive if the motive
\[ h(Y)^G:= (Y, \Delta^G_Y,0)\ \ \ \in \mathcal M_{\rm rat}\]
is finite--dimensional. (Here, $\Delta^G_Y$ denotes the idempotent ${1\over \vert G\vert}{\sum_{g\in G}}\Gamma_g \in A^n(Y\times Y)$.)
\end{definition}
Clearly, if $Y$ has finite--dimensional motive then also $X=Y/G$ has finite--dimensional motive. The nilpotence theorem extends to this set--up:
\begin{proposition}\label{quotientnilp} Let $X=Y/G$ be a projective quotient variety of dimension $n$, and assume $X$ has finite--dimensional motive. Let $\Gamma\in A^n_{num}(X\times X)$. Then there is
$N\in\mathbb{N}$ such that
\[ \Gamma^{\circ N}=0\ \ \ \ \in A^n(X\times X)_{}\ .\]
\end{proposition}
\begin{proof} Let $p\colon Y\to X$ denote the quotient morphism.
We associate to $\Gamma$ a correspondence $\Gamma_Y\in A^n(Y\times Y)$ defined as
\[ \Gamma_Y:= {}^t \Gamma_p\circ \Gamma\circ \Gamma_p\ \ \ \in A^n(Y\times Y)\ .\]
By Lieberman's lemma \cite[Lemma 3.3]{V3}, there is equality
\[ \Gamma_Y =(p\times p)^\ast \Gamma\ \ \ \hbox{in}\ A^n(Y\times Y)\ ,\]
and so $\Gamma_Y$ is $G\times G$--invariant:
\[ \Delta_Y^G\circ \Gamma_Y\circ \Delta_Y^G =\Gamma_Y\ \ \ \hbox{in}\ A^n(Y\times Y)\ .\]
This implies that
\[\Gamma_Y\in \Delta_Y^G\circ A^n(Y\times Y)\circ\Delta_Y^G\ ,\]
and so
\[ \Gamma_Y\in\hbox{End}_{\mathcal M_{\rm rat}}\bigl(h(Y)^G\bigr)\ .\]
Since clearly $\Gamma_Y$ is numerically trivial, and $h(Y)^G$ is finite--dimensional (by assumption), there exists $N\in\mathbb{N}$ such that
\[ (\Gamma_Y)^{\circ N} = {}^t \Gamma_p\circ \Gamma\circ\Gamma_p\circ{}^t \Gamma_p\circ \cdots \circ \Gamma_p=0\ \ \ \hbox{in}\ A^n(Y\times Y)\ .\]
Using the relation $\Gamma_p\circ{}^t \Gamma_p=d\Delta_X$, this boils down to
\[ d^{N-1}\ \ {}^t \Gamma_p\circ \Gamma^{\circ N}\circ \Gamma_p=0\ \ \ \hbox{in}\ A^n(Y\times Y)\ .\]
From this, we deduce that also
\[ \Gamma^{\circ N}= {1\over d^{N+1}} \Gamma_p\circ \Bigl( d^{N-1} \ \ {}^t \Gamma_p\circ \Gamma^{\circ N}\circ \Gamma_p\Bigr) \circ {}^t \Gamma_p=0\ \ \ \hbox{in}\ A^n(X\times X)\ .\]
\end{proof}
\subsection{MCK decomposition}
\label{ssmck}
\begin{definition}[Murre \cite{Mur}] Let $X$ be a projective quotient variety of dimension $n$. We say that $X$ has a {\em CK decomposition\/} if there exists a decomposition of the diagonal
\[ \Delta_X= \Pi_0+ \Pi_1+\cdots +\Pi_{2n}\ \ \ \hbox{in}\ A^n(X\times X)\ ,\]
such that the $\Pi_i$ are mutually orthogonal idempotents and $(\Pi_i)_\ast H^\ast(X)= H^i(X)$.
\end{definition}
\begin{remark} The existence of a CK decomposition for any smooth projective variety is part of Murre's conjectures \cite{Mur}, \cite{J2}. If a quotient variety $X$
has finite--dimensional motive, and the K\"unneth components are algebraic, then $X$ has a CK decomposition (this can be proven just as \cite{J2}, where this is stated for smooth $X$).
\end{remark}
\begin{definition}[Shen--Vial \cite{SV}] Let $X$ be a projective quotient variety of dimension $n$. Let $\Delta^X_{sm}\in A^{2n}(X\times X\times X)$ be the class of the small diagonal
\[ \Delta^X_{sm}:=\bigl\{ (x,x,x)\ \vert\ x\in X\bigr\}\ \subset\ X\times X\times X\ .\]
An MCK decomposition of $X$ is a CK decomposition $\{\Pi_i\}$ of $X$ that is {\em multiplicative\/}, i.e. it satisfies
\[ \Pi_k\circ \Delta^X_{sm}\circ (\Pi_i\times \Pi_j)=0\ \ \ \hbox{in}\ A^{2n}(X\times X\times X)\ \ \ \hbox{for\ all\ }i+j\not=k\ .\]
(NB: the acronym ``MCK'' is shorthand for ``multiplicative Chow--K\"unneth''.)
\end{definition}
\begin{remark} The small diagonal (seen as a correspondence from $X\times X$ to $X$) induces the {\em multiplication morphism\/}
\[ \Delta^X_{sm}\colon\ \ h(X)\otimes h(X)\ \to\ h(X)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\ .\]
Suppose $X$ has a CK decomposition
\[ h(X)=\bigoplus_{i=0}^{2n} h^i(X)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\ .\]
By definition, this decomposition is multiplicative if for any $i,j$ the composition
\[ h^i(X)\otimes h^j(X)\ \to\ h(X)\otimes h(X)\ \xrightarrow{\Delta^X_{sm}}\ h(X)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\]
factors through $h^{i+j}(X)$.
The property of having an MCK decomposition is severely restrictive, and is closely related to Beauville's ``weak splitting property'' \cite{Beau3}. For more ample discussion, and examples of varieties with an MCK decomposition, we refer to \cite[Section 8]{SV} and also \cite{V6},
\cite{SV2}, \cite{FTV}.
\end{remark}
\begin{lemma}\label{hk} Let $X, X^\prime$ be birational hyperk\"ahler varieties. Then $X$ has an MCK decomposition if and only if $X^\prime$ has one.
\end{lemma}
\begin{proof} This is noted in \cite[Introduction]{V6}; the idea is that Rie\ss's result \cite{Rie} implies that $X$ and $X^\prime$ have isomorphic Chow motives and the isomorphism is compatible with the multiplicative structure.
More precisely: let $\gamma\colon X\dashrightarrow X^\prime$ be a birational map between hyperk\"ahler varieties of dimension $n$, and suppose $\{\Pi^X_i\}$ is an MCK decomposition for $X$. Let $\Delta^X_{sm}, \Delta^{X^\prime}_{sm}$ denote the small diagonal of $X$ resp. $X^\prime$. As explained in \cite[Section 6]{SV}, the argument of \cite{Rie} gives the equality
\[ \Gamma_\gamma\circ \Delta^X_{sm}\circ {}^t \Gamma_{\gamma\times \gamma} = \Delta^{X^\prime}_{sm}\ \ \ \hbox{in}\ A^{2n}(X^\prime\times X^\prime\times X^\prime)\ .\]
The prescription
\[ \Pi^{X^\prime}_i:= \Gamma_\gamma\circ \Pi^X_i\circ {}^t \Gamma_\gamma\ \ \ \in A^n(X^\prime\times X^\prime) \]
defines a CK decomposition for $X^\prime$.
(The $\Pi^{X^\prime}_i$ are orthogonal idempotents thanks to Rie\ss's result that $\Gamma_\gamma\circ {}^t \Gamma_\gamma=\Delta_{X^\prime}$ and ${}^t \Gamma_\gamma \circ \Gamma_\gamma=\Delta_X$ \cite{Rie}.)
To see this CK decomposition $\{\Pi^{X^\prime}_i\}$ is multiplicative, let us consider integers $i,j,k$ such that $i+j\not=k$. It follows from the above equalities that
\[ \begin{split} \Pi^{X^\prime}_k\circ \Delta^{X^\prime}_{sm}\circ (\Pi^{X^\prime}_i\times \Pi^{X^\prime}_j) &=\Gamma_\gamma\circ \Pi^X_k\circ {}^t \Gamma_\gamma\circ \Gamma_\gamma\circ \Delta^X_{sm}\circ
{}^t \Gamma_{\gamma\times\gamma}\circ \Gamma_{\gamma\times\gamma}\circ (\Pi^X_i\times \Pi^X_j)\circ {}^t \Gamma_{\gamma\times\gamma}\\
&=\Gamma_\gamma\circ \Pi^X_k\circ \Delta^X_{sm}\circ (\Pi^X_i\times \Pi^X_j)\circ {}^t \Gamma_{\gamma\times\gamma}\\
&=0\ \ \ \hbox{in}\ A^{2n} (X^\prime\times X^\prime\times X^\prime)\ .\\
\end{split} \]
(Here we have again used Rie\ss's result that $\Gamma_\gamma\circ {}^t \Gamma_\gamma=\Delta_{X^\prime}$ and ${}^t \Gamma_\gamma \circ \Gamma_\gamma=\Delta_X$.)
\end{proof}
\subsection{Niveau filtration}
\begin{definition}[Coniveau filtration \cite{BO}]\label{con} Let $X$ be a quasi--projective variety. The {\em coniveau filtration\/} on cohomology and on homology is defined as
\[\begin{split} N^c H^i(X,\mathbb{Q})&= \sum \operatorname{im}\bigl( H^i_Y(X,\mathbb{Q})\to H^i(X,\mathbb{Q})\bigr)\ ;\\
N^c H_i(X,\mathbb{Q})&=\sum \operatorname{im} \bigl( H_i(Z,\mathbb{Q})\to H_i(X,\mathbb{Q})\bigr)\ ,\\
\end{split}\]
where $Y$ runs over codimension $\ge c$ subvarieties of $X$, and $Z$ over dimension $\le i-c$ subvarieties.
\end{definition}
Vial introduced the following variant of the coniveau filtration:
\begin{definition}[Niveau filtration \cite{V4}]\label{niv} Let $X$ be a smooth projective variety. The {\em niveau filtration} on homology is defined as
\[ \widetilde{N}^j H_i(X)=\sum_{\Gamma\in A_{i-j}(Z\times X)_{}} \operatorname{im}\bigl( H_{i-2j}(Z)\to H_i(X)\bigr)\ ,\]
where the sum runs over all smooth projective varieties $Z$ of dimension $i-2j$, and all correspondences $\Gamma\in A_{i-j}(Z\times X)_{}$.
The niveau filtration on cohomology is defined as
\[ \widetilde{N}^c H^iX:= \widetilde{N}^{c-i+n} H_{2n-i}X\ .\]
\end{definition}
\begin{remark}\label{is}
The niveau filtration is included in the coniveau filtration:
\[ \widetilde{N}^j H^i(X)\subset N^j H^i(X)\ .\]
These two filtrations are expected to coincide; indeed, Vial shows this is true if and only if the Lefschetz standard conjecture is true for all varieties \cite[Proposition 1.1]{V4}.
Using the truth of the Lefschetz standard conjecture in degree $\le 1$, it can be checked \cite[page 415 ``Properties'']{V4} that the two filtrations coincide in a certain range:
\[ \widetilde{N}^j H^i(X)= N^j H^iX\ \ \ \hbox{for\ all\ }j\ge {i-1\over 2} \ .\]
\end{remark}
\subsection{Refined CK decomposition}
\begin{theorem}[Vial \cite{V4}]\label{pi_2} Let $X$ be a smooth projective variety of dimension $n\le 5$. Assume the Lefschetz standard conjecture $B(X)$ holds (in particular, the K\"unneth components $\pi_i\in H^{2n}(X\times X)$ are algebraic). Then there is a splitting into mutually orthogonal idempotents
\[ \pi_i=\sum_j \pi_{i,j}\ \ \ \in H^{2n}(X\times X)\ ,\]
such that
\[ (\pi_{i,j})_\ast H^\ast(X) =gr^j_{\widetilde{N}} H^i(X)\ .\]
(Here, the graded $ gr^j_{\widetilde{N}} H^i(X)$ can be identified with a Hodge substructure of $H^i(X)$ using the polarization.)
In particular,
\[ \begin{split} &(\pi_{2,1})_\ast H^\ast(X) = H^{2}(X)\cap F^1\ ,\\
&(\pi_{2,0})_\ast H^\ast(X)= H^2_{tr}(X)\ .\\
\end{split} \]
(Here $F^\ast$ denotes the Hodge filtration, and $H^2_{tr}(X)$ is the orthogonal complement to $H^2(X)\cap F^1$ under the pairing
\[ \begin{split} H^2(X)\otimes H^2(X)\ &\to\ \mathbb{Q}\ ,\\
a\otimes b\ &\mapsto\ a\cup h^{n-2}\cup b\ .)\\
\end{split} \]
\end{theorem}
\begin{proof} This is \cite[Theorem 1]{V4}.
\end{proof}
\begin{theorem}[Vial \cite{V4}]\label{Pi_2} Let $X$ be as in theorem \ref{pi_2}. Assume in addition $X$ has finite--dimensional motive. Then there exists a CK decomposition $\Pi_i\in A^n(X\times X)$, and a splitting into mutually orthogonal idempotents
\[ \Pi_i=\sum_j \Pi_{i,j}\ \ \ \in A^n(X\times X)\ ,\]
such that
\[ \Pi_{i,j}=\pi_{i,j}\ \ \ \hbox{in}\ H^{2n}(X\times X)\ ,\]
and
\[ (\Pi_{2i,i})_\ast A^k(X)=0\ \ \ \hbox{for\ all\ }k\not= i\ . \]
The motive $h_{i,0}(X)=(X,\Pi_{i,0},0)\in \mathcal M_{\rm rat}$ is well--defined up to isomorphism.
\end{theorem}
\begin{proof} This is \cite[Theorem 2]{V4}. The last statement follows from \cite[Proposition 1.8]{V4} combined with \cite[Theorem 7.7.3]{KMP}.
\end{proof}
\begin{remark} In case $X$ is a surface with finite--dimensional motive, there is equality
\[h_{2,0}(X)=t_2(X)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\ ,\]
where $t_2(X)$ is the ``transcendental part of the motive'' constructed for any surface (not necessarily with finite--dimensional motive) in \cite{KMP}.
\end{remark}
\begin{lemma}\label{indecomp} Let $X$ be a smooth projective variety as in theorem \ref{Pi_2}, and assume
\[ \dim H^2(X,\mathcal O_X)=1\ .\]
Then the motive
\[ h_{2,0}(X)\in \mathcal M_{\rm rat} \]
is {\em indecomposable\/}, i.e. any non--zero submotive $M\subset h_{2,0}(X)$ is equal to $h_{2,0}(X)$.
\end{lemma}
\begin{proof} (This kind of argument is well--known, cf. for instance \cite[Corollary 3.11]{V8} or \cite[Corollary 2.10]{Ped} where this is proven for $K3$ surfaces with finite--dimensional motive.)
The idea is that there are no non--zero Hodge substructures strictly contained in $H^2_{tr}(X)$. Since the motive $M\subset h_{2,0}(X)$ defines a Hodge substructure
\[ H^\ast(M)\ \subset\ H^2_{tr}(X)\ ,\]
we must have $H^\ast(M)=H^2_{tr}(X)$ and thus an equality of homological motives
\[ M=h_{2,0}(X)\ \ \ \hbox{in}\ \mathcal M_{\rm hom}\ .\]
Using finite--dimensionality of $X$, it follows there is an equality of Chow motives
\[ M=h_{2,0}(X)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\ .\]
\end{proof}
\begin{lemma}\label{equiv} Let $X_1, X_2$ be two projective quotient varieties of dimension $4$. Assume $X_1, X_2$ have finite--dimensional motive, verify the Lefschetz standard conjecture and
\[ N^1_H H^4(X_j)= \widetilde{N}^1 H^4(X_j)\ \ \ \hbox{for\ }j=1,2\ ,\]
where $N^\ast_H$ is the Hodge coniveau filtration.
Let $\Gamma\in A^4(X_1\times X_2)$ and $\Psi\in A^4(X_2\times X_1)$. The following are equivalent:
\noindent
(\romannumeral1)
\[ \Gamma_\ast\colon\ \ H^{0,4}(X_1)\ \to\ H^{0,4}(X_2)\]
is an isomorphism, with inverse $\Psi_\ast$;
\noindent
(\romannumeral2)
\[ \Gamma_\ast\colon\ \ H^{4}_{tr}(X_1)\ \to\ H^{4}_{tr}(X_2)\]
is an isomorphism, with inverse $\Psi_\ast$;
\noindent
(\romannumeral3)
\[ \Gamma\colon\ \ h_{4,0}(X_1)\ \to\ h_{4,0}(X_2)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\]
is an isomorphism, with inverse $\Psi$.
\end{lemma}
\begin{proof} Assume (\romannumeral1), i.e.
\[ \Psi_\ast \Gamma_\ast=\hbox{id}\colon\ \ H^{0,4}(X_1)\ \to\ H^{0,4}(X_1)\ .\]
Using the hypothesis $N_H^1=\widetilde{N}^1$, this implies
\[ \Psi_\ast \Gamma_\ast=\hbox{id}\colon\ \ H^4(X_1)/\widetilde{N}^1\ \to\ H^4(X_1)/\widetilde{N}^1\ ,\]
and so
\begin{equation}\label{hom} \bigl(\Psi\circ \Gamma \circ\Pi^{X_1}_{4,0}\bigr){}_\ast =(\Pi^{X_1}_{4,0})_\ast\colon\ \ H^\ast(X_1)\ \to\ H^\ast(X_1)\ .\end{equation}
Considering the action on $H^4_{tr}(X_1)$, this implies
\[ \Psi_\ast \Gamma_\ast=\hbox{id}\colon\ \ H^4_{tr}(X_1)\ \to\ H^4_{tr}(X_1)\ .\]
Switching the roles of $X_1$ and $X_2$, one finds that likewise $\Gamma_\ast \Psi_\ast=\hbox{id}$ on $H^4_{tr}(X_2)$, and so the isomorphism of (\romannumeral2) is proven.
Next, we note that it formally follows from equality (\ref{hom}) that $\Psi$ is left--inverse to
\[ \Gamma\colon\ \ h_{4,0}(X_1)\ \to\ h_{4,0}(X_2)\ \ \ \hbox{in}\ \mathcal M_{\rm hom}\ .\]
Switching roles of $X_1$ and $X_2$, one finds $\Psi$ is also right--inverse to $\Gamma$ and so
\[ \Gamma\colon\ \ h_{4,0}(X_1)\ \to\ h_{4,0}(X_2)\ \ \ \hbox{in}\ \mathcal M_{\rm hom}\ \]
is an isomorphism, with inverse $\Psi$. By finite--dimensionality, the same holds in $\mathcal M_{\rm rat}$, establishing (\romannumeral3).
\end{proof}
\begin{remark} The equality
\[N^1_H H^4(X_j)= \widetilde{N}^1 H^4(X_j)\]
in the hypothesis of lemma \ref{equiv} is the conjunction of the generalized Hodge conjecture $N^1_H=N^1$ and Vial's conjecture $N^1=\widetilde{N}^1$.
\end{remark}
\subsection{Symmetrically distinguished cycles on abelian varieties}
\begin{definition}[O'Sullivan \cite{OS}] Let $A$ be an abelian variety. Let $a\in A^\ast(A)$ be a cycle. For $m\ge 0$, let
\[ V_m(a)\ \subset\ A^\ast(A^m) \]
denote the $\mathbb{Q}$--vector space generated by elements
\[ p_\ast \Bigl( (p_1)^\ast(a^{r_1})\cdot (p_2)^\ast(a^{r_2})\cdot\ldots\cdot (p_n)^\ast(a^{r_n})\Bigr)\ \ \ \in A^\ast(A^m) \ . \]
Here $n\le m$, and $r_j\in\mathbb{N}$, and $p_i\colon A^n\to A$ denotes projection on the $i$--th factor, and $p\colon A^n\to A^m$ is a closed immersion with each component $A^n\to A$ being either a projection
or the composite of a projection with $[-1]\colon A\to A$.
The cycle $a\in A^\ast(A)$ is said to be {\em symmetrically distinguished\/} if for every $m\in\mathbb{N}$ the composition
\[ V_m(a)\ \subset\ A^\ast(A^m)\ \to\ A^\ast(A^m)/A^\ast_{hom}(A^m) \]
is injective.
\end{definition}
\begin{theorem}[O'Sullivan \cite{OS}]\label{os} The symmetrically distinguished cycles form a $\mathbb{Q}$--subalgebra $A^\ast_{sym}(A)\subset A^\ast(A)$, and the composition
\[ A^\ast_{sym}(A)\ \subset\ A^\ast(A)\ \to\ A^\ast(A)/A^\ast_{hom}(A) \]
is an isomorphism. Symmetrically distinguished cycles are stable under pushforward and pullback of homomorphisms of abelian varieties.
\end{theorem}
\begin{remark} For discussion and applications of the notion of symmetrically distinguished cycles, in addition to \cite{OS} we refer to \cite[Section 7]{SV}, \cite{V6}, \cite{Anc}, \cite{LFu2}.
\end{remark}
\begin{lemma}\label{sym} Let $A$ be an abelian variety of dimension $g$.
\noindent
(\romannumeral1)
There exists an MCK decomposition $\{ \Pi_i^A\}$ that is self--dual and consists of symmetrically distinguished cycles.
\noindent(\romannumeral2) Assume $g\le 5$, and let $\{ \Pi_i^A\}$ be as in (\romannumeral1). There exists a further splitting
\[ \Pi_2^A= \Pi_{2,0}^A +\Pi_{2,1}^A\ \ \ \hbox{in}\ A^g(A\times A)\ ,\]
where the $\Pi_{2,i}^A$ are symmetrically distinguished and $\Pi^A_{2,i}=\pi^A_{2,i}$ in $H^{2g}(A\times A)$.
\end{lemma}
\begin{proof}
\noindent
(\romannumeral1) An explicit formula for $\{ \Pi_i^A\}$ is given in \cite[Section 7 Formula (45)]{SV}.
\noindent
(\romannumeral2) The point is that $\Pi_{2,1}^A$ is (by construction) a cycle of type
\[ \sum_j C_j\times D_j\ \ \ \hbox{in}\ A^g(A\times A)\ ,\]
where $D_j\subset A$ is a symmetric divisor and $C_j\subset A$ is a curve obtained by intersecting a symmetric divisor with hyperplanes. This implies $\Pi^A_{2,1}$ is symmetrically distinguished.
By assumption, $\Pi^A_2$ is symmetrically distinguished and hence so is $\Pi^A_{2,0}$.
\end{proof}
\subsection{The very special EPW sextic}
\label{secepw}
This subsection introduces the main actor of this tale: the very symmetric EPW sextic discovered in \cite{DBG}.
\begin{definition}[\cite{Bea}] A {\em hyperk\"ahler variety\/} is a simply--connected smooth projective variety $X$ such that $H^0(X,\Omega^2_X)$ is spanned by a nowhere degenerate holomorphic $2$--form.
\end{definition}
\begin{theorem}[Donten--Bury et alii \cite{DBG}]\label{epw} Let $X\subset\mathbb{P}^5(\mathbb{C})$ be defined by the equation
\[ \begin{split} x_0^6+&x_1^6+x_2^6+x_3^6+x_4^6+x_5^6 + \bigl( x_0^4x_1^2 +x_0^4x_2^2+\cdots+ x_4^2x_5^4\bigr)\\
&+ (x_0^2x_1^2x_2^2+x_0^2x_1^2x_3^2+\cdots+x_3^2x_4^2x_5^2) + x_0x_1x_2x_3x_4x_5=0\ .
\end{split}\]
(Note that the parentheses are symmetric functions in the variables $x_0,\ldots,x_5$.)
\noindent
(\romannumeral1)
The hypersurface $X$ is an EPW sextic (in the sense of \cite{EPW}, \cite{OG}).
\noindent
(\romannumeral2)
Let $S$ be the $K3$ surface obtained from a certain Del Pezzo surface in \cite{Vin}, and let $S^{[2]}$ denote the Hilbert scheme of $2$ points on $S$. Then there is a rational map (of degree $2$)
\[ \phi\colon\ \ S^{[2]}\ \dashrightarrow\ X\ .\]
There exists a commutative diagram
\[ \begin{array}[c]{ccccccc}
S^{[2]} &\xdashrightarrow{\rm flops} & \overline{S^{[2]}} & \xrightarrow{} & X^\prime:=E^4/(G^\prime) &\xleftarrow{}& X_0\\
& {\scriptstyle \phi}\searrow\ \ \ \ \ \ \ & &\ \ \ \ \ \ \swarrow{\scriptstyle g}&&&\\
&& X&&&&\\
\end{array}\]
Here all horizontal arrows are birational maps. $E$ is an elliptic curve and $X^\prime:=E^4/(G^\prime)$ is a quotient variety, and $X_0$ is a hyperk\"ahler variety with $b_2(X_0)=23$ which is a symplectic resolution of $X^\prime$. The morphism $g$ is a double cover; $X$ is a projective quotient variety $X=E^4/G$ where $G=(G^\prime,i)$ with $i^2\in G^\prime$. The groups $G^\prime$ and
$G$ consist of automorphisms that are group homomorphisms.
\noindent
(\romannumeral3) $S^{[2]}$ and $X_0$
have finite--dimensional motive and a multiplicative CK decomposition.
\end{theorem}
\begin{proof}
\noindent
(\romannumeral1) \cite[Proposition 2.6]{DBG}.
\noindent
(\romannumeral2) This is a combination of \cite[Proposition 1.1]{DBG} and \cite[Sections 5 and 6]{DBG}. (Caveat: the group that we denote $G^\prime$ is written $G$ in \cite{DBG}.)
\noindent
(\romannumeral3) Vinberg's $K3$ surface has Picard number $20$; as such, it is a Kummer surface and has finite--dimensional motive. This implies (using \cite{CM}) that $S^{[2]}$ has finite--dimensional motive. As birational hyperk\"ahler varieties have isomorphic Chow motives \cite{Rie}, $X_0$ has finite--dimensional motive. The Hilbert scheme $S^{[2]}$ of any $K3$ surface $S$ has an MCK decomposition \cite[Theorem 13.4]{SV}. As the isomorphism of \cite{Rie} is an isomorphism of algebras in the category of Chow motives, $X_0$ also has an MCK decomposition (lemma \ref{hk}).
\end{proof}
\begin{remark} The singular locus of the very special EPW sextic $X$ consists of $60$ planes. Among these 60 planes, there is a subset of 20 planes which form a complete family of pairwise incident planes in $\mathbb{P}^5(\mathbb{C})$ \cite{DBG}. This is the maximal number of elements in a complete family of pairwise incident planes, and this seems to be the only known example of a complete family of 20 pairwise incident planes.
\end{remark}
\begin{remark} The variety $X_0$ is not unique. In \cite[Section 6]{DBW}, it is shown that there exist $81^{16}$ symplectic resolutions of $E^4/(G^\prime)$ (some of them non--projective).
One noteworthy consequence of theorem \ref{epw} is that the varieties $X_0$ are of $K3^{[2]}$ type (this was not a priori clear from \cite{DBW}).
\end{remark}
\begin{remark} For a {\em generic\/} EPW sextic $X$, there exists a hyperk\"ahler fourfold $X_0$ (called a ``double EPW sextic'') equipped with an anti--symplectic involution $\sigma_0$ such that $X=X_0/(\sigma_0)$ \cite[Theorem 1.1 (2)]{OG}. For the very special EPW sextic $X$, I don't know whether such $X_0$ exists. (For this, one would need to show that the Lagrangian subspace $A$ defining the very special EPW sextic is in the Zariski open $\mathbb{L}\mathbb{G}(\wedge^3 V)^0\subset\mathbb{L}\mathbb{G}(\wedge^3 V)$ defined in \cite[page 3]{OG}.)
\end{remark}
\section{Some intermediate steps}
\subsection{A strong version of the generalized Hodge conjecture}
For later use, we record here a proposition, stating that the very special EPW sextic, as well as some related varieties, satisfy the hypothesis of lemma \ref{equiv}:
\begin{proposition}\label{ghc} Let $X_0$ be any hyperk\"ahler variety as in theorem \ref{epw} (i.e., $X_0$ is a symplectic resolution of $E^4/(G^\prime)$). Then
\[ N^1_H H^4(X_0)= \widetilde{N}^1 H^4(X_0)\ .\]
(Here $N_H^\ast$ denotes the Hodge coniveau filtration and $\widetilde{N}^\ast$ denotes the niveau filtration (definition \ref{niv}).)
The same holds for $X^\prime:=E^4/(G^\prime)$ and for the very special EPW sextic $X$:
\[ \begin{split} N^1_H H^4(X^\prime)&= \widetilde{N}^1 H^4(X^\prime)\ ,\\
N^1_H H^4(X)&= \widetilde{N}^1 H^4(X)\ .\\
\end{split} \]
\end{proposition}
\begin{proof} The point is that Vinberg's $K3$ surface $S$ has Picard number $20$, and so the corresponding statement is easily proven for $S^{[2]}$:
\begin{lemma}\label{rho} Let $S$ be a smooth projective surface with $q=0$ and $p_g(S)=1$. Assume $S$ is $\rho$--maximal (i.e. $\dim H^2_{tr}(S)=2$). Then
\[ N^1_H H^4(S^{[2]}) = \widetilde{N}^1 H^4(S^{[2]})\ .\]
\end{lemma}
\begin{proof} Let $\widetilde{S\times S}\to S\times S$ denote the blow--up of the diagonal. As is well--known,
there are isomorphisms of homological motives
\[ \begin{split}
h(S^{[2]}) &\cong h(\widetilde{S\times S})^{\mathfrak S_2}\ ,\\
h(\widetilde{S\times S}) &\cong h(S\times S)\oplus h(S)(1)\ \ \ \hbox{in}\ \mathcal M_{\rm hom}\ ,\\
\end{split}\]
where $\mathfrak S_2$ denotes the symmetric group on $2$ elements acting by permutation. It follows there is a correspondence--induced injection
\[ H^4(S^{[2]})\ \hookrightarrow\ H^4(S\times S)\oplus H^2(S)\ .\]
It thus suffices to prove the statement for $S\times S$. Let us write
\[ H^2(S)=N\oplus T:=NS(S)\oplus H^2_{tr}(S)\ .\]
We have
\[ \begin{split} N^1_H H^4(S\times S)&= H^4(S\times S)\cap F^1\\
&= H^0(S)\otimes H^4(S)\oplus H^4(S)\otimes H^0(S)\oplus N\otimes N\oplus N\otimes T\oplus T\otimes N\\
&\ \ \ \ \ \ \ \ \ \ \ \ \oplus (T\otimes T)\cap F^1\ .\\
\end{split}\]
All but the last summand are obviously in $\widetilde{N}^1$.
As to the last summand, we have that
\[ (T\otimes T)\cap F^1=(T\otimes T)\cap F^2 \ . \]
Since the Hodge conjecture is true for $S\times S$ (indeed, $S$ is a Kummer surface and the Hodge conjecture is known for powers of abelian surfaces \cite[7.2.2]{Ab}, \cite[8.1(2)]{Ab2}),
there is an inclusion
\[ (T\otimes T)\cap F^2 \ \subset\ N^2 H^4(S\times S)= \widetilde{N}^2 H^4(S\times S)\ ,\]
and so the lemma is proven.
\end{proof}
Since birational hyperk\"ahler varieties have isomorphic cohomology rings \cite[Corollary 2.7]{Huy}, and the isomorphism (being given by a correspondence) respects Hodge structures,
this proves the result for $X_0$. Since $X_0$ dominates $X^\prime$ and $X$, the result for $X^\prime$ and $X$ follows. Proposition \ref{ghc} is now proven.
\end{proof}
\subsection{MCK for quotients of abelian varieties}
\begin{proposition}\label{mck} Let $A$ be an abelian variety of dimension $n$, and let $G\subset\hbox{Aut}_{\mathbb{Z}}(A)$ be a finite group of automorphisms of $A$ that are group homomorphisms. The quotient
\[ X=A/G \]
has a self--dual MCK decomposition.
\end{proposition}
\begin{proof} A first step is to show there exists a self--dual CK decomposition for $X$ induced by a CK decomposition on $A$:
\begin{claim}\label{sym1} Let $A$ and $X$ be as in proposition \ref{mck}, and let $p\colon A\to X$ denote the quotient morphism. Let $\{\Pi_i^A\}$ be a CK decomposition as in lemma \ref{sym}(\romannumeral1). Then
\[ \Pi_i^X:= {1\over d} \Gamma_p\circ \Pi_i^A\circ {}^t \Gamma_p\ \ \ \in A^n(X\times X)\ ,\ \ \ i=0,\ldots,2n \]
defines a self--dual CK decomposition for $X$.
\end{claim}
To prove the claim, we remark that clearly the given $\Pi_i^X$ lift the K\"unneth components of $X$, and their sum is the diagonal of $X$.
We will make use of the following property:
\begin{lemma}\label{comm} Let $A$ be an abelian variety of dimension $n$, and let $\{\Pi_i^A\}$ be an MCK decomposition as in lemma \ref{sym}(\romannumeral1). For any $g\in\hbox{Aut}_{\mathbb{Z}}(A)$, we have
\[ \Pi_i^A\circ \Gamma_g = \Gamma_g\circ \Pi_i^A\ \ \ \hbox{in}\ A^n(A\times A)\ .\]
\end{lemma}
\begin{proof} Because $g_\ast H^i(A)\subset H^i(A)$, we have a homological equivalence
\[ \Pi_i^A\circ \Gamma_g - \Gamma_g\circ \Pi_i^A=0\ \ \ \hbox{in}\ H^{2n}(A\times A)\ .\]
But the left--hand side is a symmetrically distinguished cycle, and so it is rationally trivial.
\end{proof}
To see that $\Pi_i^X$ is idempotent, we note that
\[ \begin{split} \Pi_i^X\circ \Pi_i^X &= {1\over d^2} \Gamma_p\circ \Pi_i^A\circ {}^t \Gamma_p \circ \Gamma_p\circ \Pi_i^A\circ {}^t \Gamma_p\\
&= {1\over d} \Gamma_p\circ \Pi_i^A\circ \bigl(\sum_{g\in G} \Gamma_g\bigr)\circ \Pi_i^A\circ {}^t \Gamma_p\\
&= {1\over d} \Gamma_p\circ \Pi_i^A\circ \Pi_i^A\circ \bigl(\sum_{g\in G} \Gamma_g\bigr)\circ {}^t \Gamma_p\\
&= {1\over d} \Gamma_p\circ \Pi_i^A \circ \bigl(\sum_{g\in G} \Gamma_g\bigr)\circ {}^t \Gamma_p\\
&={1\over d} \Gamma_p\circ \Pi_i^A \circ {}^t \Gamma_p \circ \Gamma_p \circ {}^t \Gamma_p\\
&={1\over d} \Gamma_p\circ \Pi_i^A \circ {}^t \Gamma_p \circ d\Delta_X\\
&= \Gamma_p\circ \Pi_i^A \circ {}^t \Gamma_p = \Pi_i^X\ \ \ \hbox{in}\ A^n(X\times X)\ .\\
\end{split}\]
(Here, the third equality is an application of lemma \ref{comm}, and the fourth equality is because $\Pi_i^A$ is idempotent.)
The fact that the $\Pi_i^X$ are mutually orthogonal is proven similarly; one needs to replace $\Pi_i^X\circ \Pi_i^X$ by $\Pi_i^X\circ \Pi_j^X$ in the above argument. This proves claim \ref{sym1}.
Now, it only remains to see that the CK decomposition $\{\Pi_i^X\}$ of claim \ref{sym1} is multiplicative.
\begin{claim}\label{mult} The CK decomposition $\{\Pi_i^X\}$ given by claim \ref{sym1} is an MCK decomposition.
\end{claim}
To prove claim \ref{mult}, let us consider the composition
\[ \Pi^X_k\circ \Delta^X_{sm}\circ (\Pi^X_i\times \Pi^X_j)\ \ \ \in\ A^{2n}(X\times X\times X)\ ,\]
where we suppose $i+j\not= k$.
There are equalities
\[ \begin{split}
\Pi^X_k\circ \Delta^X_{sm}\circ (\Pi^X_i\times \Pi^X_j) &= {1\over d^3}\ \Gamma_p\circ \Pi^A_k\circ {}^t \Gamma_p\circ \Delta^X_{sm}\circ \Gamma_{p\times p}\circ (\Pi^A_i\times \Pi^A_j)\circ {}^t \Gamma_{p\times p}\\
&={1\over d}\ \Gamma_p\circ \Pi^A_k\circ \Delta^G_A\circ \Delta^A_{sm}\circ (\Delta^G_A\times \Delta^G_A)\circ (\Pi^A_i\times \Pi^A_j)\circ {}^t \Gamma_{p\times p}\\
& = {1\over d}\ \Gamma_p\circ \Delta^G_A\circ \Pi^A_k\circ \Delta^A_{sm}\circ (\Pi^A_i\times \Pi^A_j)\circ (\Delta^G_A\times \Delta^G_A)\circ {}^t \Gamma_{p\times p}\\
& =0\ \ \ \hbox{in}\ A^{2n}(X\times X\times X)\ .\\
\end{split}\]
Here, the first equality is by definition of the $\Pi^X_i$, the second equality is lemma \ref{sm} below, the third equality follows from lemma \ref{comm}, and the fourth equality is the fact that $\{\Pi^A_i\}$ is an MCK decomposition for $A$ (lemma \ref{sym}).
\begin{lemma}\label{sm} There is equality
\[ \begin{split} {}^t \Gamma_p\circ \Delta^X_{sm}\circ \Gamma_{p\times p}&={1\over d} (\sum_{g\in G} \Gamma_g)\circ \Delta^A_{sm}\circ \bigl((\sum_{g\in G} \Gamma_g)\times
(\sum_{g\in G} \Gamma_g)\bigr) \\
&= d^2 \Delta^G_A\circ \Delta^A_{sm}\circ (\Delta^G_A\times \Delta^G_A)\ \ \ \hbox{in}\ A^{2n}(A\times A\times A)\ .\\
\end{split}\]
\end{lemma}
\begin{proof} The second equality is just the definition of $\Delta^G_A$. As to the first equality, we first note that
\[ \Delta^X_{sm} ={1\over d}(p\times p\times p)_\ast (\Delta^A_{sm}) = {1\over d}\Gamma_p\circ \Delta^A_{sm}\circ {}^t \Gamma_{p\times p}\ \ \ \hbox{in}\ A^{2n}(X\times X\times X)\ .\]
This implies that
\[ {}^t \Gamma_p\circ \Delta^X_{sm}\circ \Gamma_{p\times p}= {1\over d}\ {}^t \Gamma_p\circ \Gamma_p\circ \Delta^A_{sm}\circ {}^t \Gamma_{p\times p}\circ \Gamma_{p\times p}\ .\]
But ${}^t \Gamma_p\circ \Gamma_p=\sum_{g\in G} \Gamma_g$, and thus
\[ {}^t \Gamma_p\circ \Delta^X_{sm}\circ \Gamma_{p\times p}={1\over d} (\sum_{g\in G} \Gamma_g)\circ \Delta^A_{sm}\circ \bigl((\sum_{g\in G} \Gamma_g)\times
(\sum_{g\in G} \Gamma_g)\bigr) \ \ \ \hbox{in}\ A^{2n}(A\times A\times A)\ ,\]
as claimed.
\end{proof}
This ends the proof of proposition \ref{mck}.
\end{proof}
In the set--up of proposition \ref{mck}, one can actually say more about certain pieces $A^i_{(j)}(X)$:
\begin{proposition}\label{2} Let $X=A/G$ be as in proposition \ref{mck}. Assume $n=\dim X\le 5$ and $H^{2}(X,\mathcal O_X)=0$. Assume also
there exists $X^\prime=A/(G^\prime)$ where
$G=(G^\prime,i)$ with $i^2\in G^\prime$, and the action of $i$ on $H^2(X^\prime,\mathcal O_{X^\prime})$ is minus the identity.
Then any CK decomposition $\{\Pi_i\}$ of $X$
verifies
\[ \begin{split} &(\Pi_2)_\ast A^j(X)=0\ \ \ \hbox{for\ all\ }j\not=1\ ,\\
&(\Pi_6)_\ast A^j(X)=0\ \ \ \hbox{for\ all\ }j\not=3\ .
\end{split} \]
\end{proposition}
\begin{proof} It suffices to prove this for one particular CK decomposition, in view of the following lemma:
\begin{lemma} Let $X=A/G$ be as in proposition \ref{mck}. Let $\Pi, \Pi^\prime\in A^n(X\times X)$ be idempotents, and assume $\Pi-\Pi^\prime=0$ in $H^{2n}(X\times X)$. Then
\[ (\Pi)_\ast A^i(X)=0\ \Leftrightarrow\ (\Pi^\prime)_\ast A^i(X)=0\ .\]
\end{lemma}
\begin{proof} This follows from \cite[Lemma 1.14]{V4}. Alternatively, here is a direct proof.
Let $p\colon A\to X$ denote the quotient morphism, and let $d:=\vert G\vert$. One defines
\[ \begin{split} \Pi_A&:= {1\over d}\ \ {}^t \Gamma_p\circ \Pi\circ \Gamma_p\ \ \ \in A^n(A\times A)\ ,\\
\Pi^\prime_A&:= {1\over d} {}^t \Gamma_p\circ \Pi^\prime\circ \Gamma_p\ \ \ \in A^n(A\times A)\ .\\
\end{split}\]
It is readily checked $\Pi_A, \Pi_A^\prime$ are idempotents, and they are homologically equivalent.
Let us assume $(\Pi)_\ast A^i(X)=0$ for a certain $i$. Then also
\[ (\Pi_A)_\ast p^\ast A^i(X)= \bigl( {1\over d} {}^t \Gamma_p\circ \Pi\circ\Gamma_p\circ {}^t \Gamma_p\bigr){}_\ast A^i(X)= \bigl( {} {}^t \Gamma_p\circ \Pi\bigr){}_\ast A^i(X) =0\ .\]
By finite--dimensionality of $A$, the difference $\Pi_A-\Pi^\prime_A\in A^n_{hom}(A\times A)$ is nilpotent, i.e. there exists $N\in\mathbb{N}$ such that
\[ \bigl( \Pi_A-\Pi^\prime_A\bigr)^{\circ N}=0\ \ \ \hbox{in}\ A^n(A\times A)\ .\]
Upon developing, this implies
\[ \Pi^\prime_A=(\Pi^\prime_A)^{\circ N}= Q_1+\cdots +Q_N\ \ \ \hbox{in}\ A^n(A\times A)\ ,\]
where each $Q_j$ is a composition
\[ Q_j= Q_j^1\circ Q_j^2\circ\cdots\circ Q_j^N\ ,\]
with $Q_j^k\in \{ \Pi_A,\Pi^\prime_A\}$, and at least one $Q_j^k$ is $\Pi_A$. Since by assumption $ (\Pi_A)_\ast p^\ast A^i(X)=0$, it follows that
\[ (Q_j)_\ast= (\hbox{something})_\ast (\Pi_A)_\ast \bigl((\Pi^\prime_A)^{\circ r}\bigr){}_\ast =0\colon\ \ p^\ast A^i(X)\ \to\ p^\ast A^i(X)\ \ \ \hbox{for\ all\ }j\ .\]
But then also
\[ (\Pi^\prime_A)_\ast p^\ast A^i(X) =\bigl( Q_1+\cdots +Q_N\bigr){}_\ast p^\ast A^i(X)=0\ .\]
\end{proof}
Now, let us take a projector for $A$ of the form
\[\Pi^{A}_2=\Pi^{A}_{2,0}+\Pi^{A}_{2,1}\in A^n(A\times A)\ ,\]
where $\Pi^{A}_{2,0}, \Pi^{A}_{2,1}$ are as in lemma \ref{sym}.
\begin{lemma}\label{sym2} Let $A$ be an abelian variety of dimension $n\le 5$, and let $G\subset\hbox{Aut}_{\mathbb{Z}}(A)$ be a finite subgroup. Let $\Pi^A_{2,0}$ be as in lemma \ref{sym}. Then
\[ \Pi^A_{2,0}\circ \Delta^G_A = \Delta^G_A\circ \Pi^A_{2,0}\ \ \ \in A^n(A\times A) \]
is idempotent. (Here, as before, we write $\Delta^G_A:={1\over \vert G\vert}{\sum_{g\in G}} \Gamma_g\in A^n(A\times A)$.)
\end{lemma}
\begin{proof} For any $g\in G$, we have the commutativity
\[ \Pi^A_{2,0}\circ \Gamma_g=\Gamma_g\circ \Pi^A_{2,0}\ \ \ \hbox{in}\ A^n(A\times A)\ ,\ \ \ \hbox{for\ all\ }g\in G\ , \]
established in lemma \ref{sym}(\romannumeral2). (Indeed, these cycles are symmetrically distinguished by lemma \ref{sym}(\romannumeral2), and their difference is homologically trivial because an automorphism $g\in G$ respects the niveau filtration.)
This commutativity clearly implies the equality
\[ \Pi^A_{2,0}\circ \Delta^G_A = \Delta^G_A\circ \Pi^A_{2,0}\ \ \ \in A^n(A\times A)\ . \]
To check that $\Pi^A_{2,0}\circ \Delta^G_A $ is idempotent, we note that
\[ \Pi^A_{2,0}\circ \Delta^G_A \circ \Pi^A_{2,0}\circ \Delta^G_A = \Pi^A_{2,0}\circ \Pi^A_{2,0}\circ \Delta^G_A\circ \Delta^G_A = \Pi^A_{2,0}\circ \Delta^G_A \ \ \ \hbox{in}\ A^n(A\times A)\ .\]
\end{proof}
Let us write $G=G^\prime\times\{1,i\}$. Since by assumption, $i_\ast=-\hbox{id}$ on $H^{2,0}(X^\prime)$, we have equality
\[ {1\over 2} \Bigl( \Pi_{2,0}^A\circ \Delta_A^{G^\prime} + \Pi_{2,0}^A\circ \Delta_A^{G^\prime}\circ \Gamma_i\Bigr)=0\ \ \ \hbox{in}\ H^{2n}(A\times A)\ .\]
On the other hand, the left--hand side is equal to the idempotent $\Pi^A_{2,0}\circ \mathbb{D}elta^G_A$. By finite--dimensionality, it follows that
\[ \Pi^A_{2,0}\circ \Delta^G_A=0\ \ \ \hbox{in}\ A^n(A\times A)\ .\]
Using Poincar\'e duality, we also have $i_\ast=-\hbox{id}$ on $H^{2,4}(X^\prime)$, and so (defining $\Pi^A_{6,2}$ as the transpose of $\Pi^A_{2,0}$) there is also an equality
\[ \Pi^A_{6,2}\circ \Delta^G_A= {1\over 2} \Bigl( \Pi_{6,2}^A\circ \Delta_A^{G^\prime} + \Pi_{6,2}^A\circ \Delta_A^{G^\prime}\circ \Gamma_i\Bigr)=0\ \ \ \hbox{in}\ H^{2n}(A\times A)\ ,\]
and hence, by finite--dimensionality
\[ \Pi^A_{6,2}\circ \Delta^G_A =0\ \ \ \hbox{in}\ A^n(A\times A)\ .\]
Since $\Pi^A_{2,1}$ does not act on $A^j(A)$ for $j\not=1$ (theorem \ref{Pi_2}), we find in particular that
\[ (\Pi^A_2)_\ast =0\colon\ \ \ A^j(A)^G\ \to\ A^j(A)^G\ \ \ \hbox{for\ all\ }j\not=1\ .\]
Likewise, since $\Pi^A_{6,3}={}^t \Pi^A_{2,1}$ does not act on $A^j(A)$ for $j\not=3$ (theorem \ref{Pi_2}), we also find that
\[ (\Pi^A_6)_\ast =0\colon\ \ \ A^j(A)^G\ \to\ A^j(A)^G\ \ \ \hbox{for\ all\ }j\not=3\ .\]
We now consider the CK decomposition for $X$ defined as in claim \ref{sym1}:
\[ \Pi_i^X:= {1\over d} \Gamma_p\circ \Pi_i^A\circ {}^t \Gamma_p\ \ \ \in A^n(X\times X)\ .\]
This CK decomposition has the required behaviour:
\[ \begin{split} (\Pi_2^X)_\ast A^j(X) &= \Bigl( {1\over d} \Gamma_p\circ \Pi_2^A\circ {}^t \Gamma_p \Bigr){}_\ast A^j(X)\\
&= ({1\over d}\Gamma_p)_\ast (\Pi_2^A)_\ast p^\ast A^j(X)\\
&= ({1\over d}\Gamma_p)_\ast (\Pi_2^A)_\ast A^j(A)^G=0\ \ \ \hbox{for\ all\ }j\not=1\ ,\\
\end{split}\]
and likewise
\[ (\Pi^X_6)_\ast A^j(X)=0\ \ \ \hbox{for\ all\ }j\not=3\ .\]
This proves proposition \ref{2}.
\end{proof}
For later use, we record here a corollary of the proof of proposition \ref{2}:
\begin{corollary}\label{symref} Let $A$ be an abelian variety of dimension $n\le 5$, and let $\Pi^A_{2,0}, \Pi^A_{2,1}$ be as in lemma \ref{sym}(\romannumeral2). Let $p\colon A\to X=A/G$ be a quotient variety with $G\subset\hbox{Aut}_{\mathbb{Z}}(A)$. The prescription
\[ \Pi^X_{2,i}:= {1\over d}\ \Gamma_p\circ \Pi^A_{2,i}\circ {}^t \Gamma_p\ \ \ \hbox{in}\ A^n(X\times X)\]
defines a decomposition in orthogonal idempotents
\[ \Pi^X_2= \Pi^X_{2,0}+\Pi^X_{2,1}\ \ \ \hbox{in}\ A^n(X\times X)\ .\]
The $\Pi^X_{2,i}$ verify the properties of the refined CK decomposition of theorem \ref{Pi_2}.
\end{corollary}
\begin{proof} One needs to check the $\Pi^X_{2,i}$ are idempotent and orthogonal. This easily follows from the fact that the $\Pi^A_{2,i}$ commute with $\Gamma_g$ for $g\in G$ (lemma \ref{sym2}).
\end{proof}
\subsection{A surjectivity statement}
\begin{proposition}\label{surj} Let $X_0$ be a hyperk\"ahler fourfold as in theorem \ref{epw}. Let $A^\ast_{(\ast)}(X_0)$ be the bigrading defined by the MCK decomposition. Then the intersection product map
\[ A^2_{(2)}(X_0)\otimes A^2_{(2)}(X_0)\ \to\ A^4_{(4)}(X_0) \]
is surjective.
The same holds for $X^\prime:=E^4/(G^\prime)$ as in theorem \ref{epw}: $X^\prime$ has an MCK decomposition, and the intersection product map
\[ A^2_{(2)}(X^\prime)\otimes A^2_{(2)}(X^\prime)\ \to\ A^4_{(4)}(X^\prime) \]
is surjective.
\end{proposition}
\begin{proof} The result of Rie\ss\, \cite{Rie} implies there is an isomorphism of bigraded rings
\[ A^\ast_{(\ast)}(S^{[2]})\ \xrightarrow{\cong}\ A^\ast_{(\ast)}(X_0)\ .\]
For the Hilbert scheme of any $K3$ surface $S$, the intersection product map
\[ A^2_{(2)}(S^{[2]})\otimes A^2_{(2)}(S^{[2]})\ \to\ A^4_{(4)}(S^{[2]}) \]
is known to be surjective \cite[Theorem 3]{SV}. This proves the first statement.
For the second statement, the existence of an MCK decomposition for $X^\prime$ is a special case of proposition \ref{mck}. To prove the surjectivity statement for $X^\prime$,
we note that $\phi\colon X_0\to X^\prime$ is a symplectic resolution and so there are isomorphisms
\[ \phi^\ast\colon\ \ \ H^{p,0}(X^\prime)\ \xrightarrow{\cong}\ H^{p,0}(X_0)\ \ \ (p=2,4)\ .\]
Using lemma \ref{equiv} (which is possible thanks to proposition \ref{ghc}), this implies there are isomorphisms
\[ \phi^\ast\colon\ \ \ H^{p}_{tr}(X^\prime)\ \xrightarrow{\cong}\ H^{p}_{tr}(X_0)\ \ \ (p=2,4)\ .\]
This means there is an isomorphism of homological motives
\[ {}^t \Gamma_\phi\colon\ \ \ h_{p,0}(X^\prime)\ \xrightarrow{\cong}\ h_{p,0}(X_0) \ \ \ \hbox{in}\ \mathcal M_{\rm hom} \ \ \ (p=2,4)\ .\]
By finite--dimensionality, there are isomorphisms of Chow motives
\[ {}^t \Gamma_\phi\colon\ \ \ h_{p,0}(X^\prime)\ \xrightarrow{\cong}\ h_{p,0}(X_0) \ \ \ \hbox{in}\ \mathcal M_{\rm rat} \ \ \ (p=2,4)\ .\]
Taking Chow groups, this implies there are isomorphisms
\begin{equation}\label{isiso} (\Pi^{X_0}_p \circ {}^t \Gamma_\phi\circ \Pi^{X^\prime}_p)_\ast \colon\ \ (\Pi^{X^\prime}_p)_\ast A^i(X^\prime)\ \to\ (\Pi^{X_0}_p)_\ast
A^i(X_0)\ \ \ (p=2,4)\ .\end{equation}
Let us now consider the diagram
\[ \begin{array}[c]{ccc}
A^2_{(2)}(X_0)\otimes A^2_{(2)}(X_0) & \to& A^4_{(4)}(X_0)\\
\uparrow && \uparrow\\
A^2_{}(X_0)\otimes A^2_{}(X_0) & \to& A^4_{}(X_0)\\
\uparrow && \uparrow\\
A^2_{(2)}(X^\prime)\otimes A^2_{(2)}(X^\prime) & \to& A^4_{(4)}(X^\prime)\\
\end{array}\]
Here, the vertical arrows in the upper square are given by projecting to direct summand; the vertical arrows in the lower square are given by $\phi^\ast$. Since pullback and intersection product commute, the lower square commutes. Since $A^\ast_{(\ast)}(X_0)$ is a bigraded ring, the upper square commutes.
The composition of vertical arrows is an isomorphism by (\ref{isiso}). The statement for $X^\prime$ now follows from the statement for $X_0$.
\end{proof}
\section{Main results}
\subsection{Splitting of $A^\ast(X)$}
\begin{theorem}\label{main} Let $X$ be the very special EPW sextic of theorem \ref{epw}. The Chow ring of $X$ is a bigraded ring
\[ A^\ast(X)= A^\ast_{(\ast)}(X)\ ,\]
where
\[ \begin{split} A^1(X)&=A^1_{(0)}(X)=\mathbb{Q}\ ,\\
A^2(X)&=A^2_{(0)}(X)\ ,\\
A^3(X)&=A^3_{(0)}(X)\oplus A^3_{(2)}(X)= \mathbb{Q}\oplus A^3_{hom}(X)\ ,\\
A^4(X)&=A^4_{(0)}(X)\oplus A^4_{(4)}(X)=\mathbb{Q}\oplus A^4_{hom}(X)\ .\\
\end{split}\]
\end{theorem}
\begin{proof} It follows from theorem \ref{epw} that $X$ is a quotient variety $X=E^4/G$ with $G\subset\hbox{Aut}_{\mathbb{Z}}(E^4)$. Moreover, there is another quotient variety $X^\prime=E^4/(G^\prime)$ where $G=(G^\prime,i)$ and $i^2\in G^\prime$ and such that $i$ acts on $H^2(X^\prime,\mathcal O_{X^\prime})$ as $-\hbox{id}$. Applying proposition \ref{mck}, it follows that $X$ has an MCK decomposition $\{\Pi_i^X\}$. Applying proposition \ref{2}, it follows that
\[ \begin{split} &(\Pi^X_2)_\ast A^j(X)=0\ \ \ \hbox{for\ all\ } j\not=1\ ,\\
&(\Pi^X_6)_\ast A^j(X)=0\ \ \ \hbox{for\ all\ }j\not=3\ .\\
\end{split}\]
The projectors $\Pi^X_i$ are $0$ for $i$ odd. (Indeed, $X$ has no odd cohomology so the $\Pi^X_i$ are homologically trivial. Using finite--dimensionality, they are rationally trivial.)
The projectors $\{\Pi_i^X\}$ define a multiplicative bigrading
\[ A^\ast(X)= A^\ast_{(\ast)}(X)\ ,\]
where $A^j_{(i)}(X):=(\Pi^X_{2j-i})_\ast A^j(X)$. The fact that $A^j_{(i)}(X)=0$ for $i<0$ follows from the corresponding property for abelian fourfolds \cite{Beau}.
Likewise, the fact that
\[ A^j_{(0)}(X)\cap A^j_{hom}(X)=0\ \ \ \hbox{for\ all\ }j\ge 3\]
follows from the corresponding property for abelian fourfolds \cite{Beau}.
\end{proof}
\begin{corollary}\label{multip} Let $X$ be the very special EPW sextic. The intersection product maps
\[ \begin{split} A^2(X)\otimes A^2(X)\ &\to\ A^4(X)\ ,\\
A^2(X)\otimes A^1(X)\ &\to\ A^3(X)\ \\
\end{split}\]
have image of dimension $1$.
\end{corollary}
\begin{remark} It is instructive to note that for smooth Calabi--Yau hypersurfaces $X\subset\mathbb{P}^{n+1}(\mathbb{C})$, Voisin has proven that
the intersection product map
\[ A^{j}(X)\otimes A^{n-j}(X)\ \to\ A^n(X) \]
has image of dimension $1$, for any $0<j<n$ \cite[Theorem 3.4]{V13}, \cite[Theorem 5.25]{Vo} (cf. also \cite{LFu} for a generalization to generic complete intersections).
In particular, the first statement of corollary \ref{multip} holds for any smooth sextic in $\mathbb{P}^5(\mathbb{C})$.
The second statement of
corollary \ref{multip}, however, is not known (and maybe not true) for a general sextic in $\mathbb{P}^5(\mathbb{C})$. It might be that the second statement is specific to EPW sextics, and related to the presence of a hyperk\"ahler fourfold $X_0$ which is generically a double cover.
\end{remark}
\begin{remark}\label{BB} Let $F^\ast$ be the filtration on $A^\ast(X)$ defined as
\[ F^i A^j(X)=\bigoplus_{\ell\ge i} A^j_{(\ell)}(X)\ .\]
For this filtration to be of Bloch--Beilinson type, it remains to prove that
\[ F^1 A^2(X) \stackrel{??}{=} A^2_{hom}(X) \ .\]
This would imply the vanishing $A^2_{hom}(X)=0$ (i.e. the truth of conjecture \ref{weak} for $X$).
Unfortunately, we cannot prove this. At least, it follows from the above description that the conjectural vanishing $A^2_{hom}(X)=0$ would follow from the truth of Beauville's conjecture
\[ A^2_{hom}(E^4) \stackrel{??}{=} A^2_{(1)}(E^4)\oplus A^2_{(2)}(E^4)\ ,\]
where $E$ is an elliptic curve.
\end{remark}
\subsection{Splitting of $A^\ast(X^r)$}
\begin{definition} Let $X$ be a projective quotient variety. For any $r\in\mathbb{N}$, and any $1\le i<j<k\le r$, let
\[ \begin{split} &p_j\colon\ \ X^r\ \to\ X\ ,\\
&p_{ij}\colon\ \ X^r\ \to\ X\times X\ ,\\
&p_{ijk}\colon\ \ X^r\ \to\ X\times X\times X\\
\end{split}\]
denote projection on the $j$-th factor, resp. projection on the $i$-th and $j$-th factor, resp. projection on the $i$-th and $j$-th and $k$-th factor.
We define
\[ E^\ast(X^r)\ \subset\ A^\ast(X^r) \]
as the $\mathbb{Q}$--subalgebra generated by
$(p_j)^\ast A^1(X)$ and $(p_j)^\ast A^2(X)$ and $(p_{ij})^\ast(\Delta_X)\in A^4(X^r)$ and $(p_{ijk})^\ast(\Delta^X_{sm})\in A^8(X^r)$.
\end{definition}
As explained in the introduction, the hypothesis that EPW sextics that are quotient varieties are in the class $\mathcal{C}$ leads to the following concrete conjecture:
\begin{conjecture}\label{conjXk} Let $X\subset\mathbb{P}^5(\mathbb{C})$ be an EPW sextic which is a projective quotient variety. Let $r\in\mathbb{N}$. The restriction of the cycle class map
\[ E^i(X^r)\ \to\ H^{2i}(X^r) \]
is injective for all $i$.
\end{conjecture}
For the very special EPW sextic, we can prove conjecture \ref{conjXk} for $0$--cycles and $1$--cycles:
\begin{theorem}\label{main2} Let $X$ be the very special EPW sextic of theorem \ref{epw}. Let $r\in\mathbb{N}$.
The restriction of the cycle class map
\[ E^i(X^r)\ \to\ H^{2i}(X^r) \]
is injective for $i\ge 4r-1$.
\end{theorem}
\begin{proof} The product $X^r$ has an MCK decomposition (since $X$ has one, and the property of having an MCK decomposition is stable under taking products \cite[Theorem 8.6]{SV}). Therefore, there is a bigrading on the Chow ring of $X^r$. As we have seen (theorem \ref{main}),
$A^1(X)=A^1_{(0)}(X)$ and $A^2(X)=A^2_{(0)}(X)$. Also, it is readily checked that
\[\Delta_X\in A^4_{(0)}(X\times X)\ .\]
(Indeed, this follows from the fact that
\[ \Delta_X= \sum_{i=0}^8 \Pi_i^X = \sum_{i=0}^8 \Pi_i^X\circ \Delta_X\circ \Pi_i^X=\sum_{i=0}^8 (\Pi_i^X\times \Pi_{8-i}^X)_\ast \Delta_X\ \ \ \hbox{in}\ A^4(X\times X)\ ,\]
where we have used the fact that the CK decomposition is self--dual.)
The fact that $X$ has an MCK decomposition implies that
\[ \Delta^X_{sm}\ \ \in A^8_{(0)}(X\times X\times X)\]
\cite[Proposition 8.4]{SV}.
Clearly, the pullbacks under the projections $p_i, p_{ij}, p_{ijk}$ respect the bigrading. (Indeed, suppose $a\in A^\ell_{(0)}(X)$, which means $a=(\Pi^X_{2\ell})_\ast (a)$. Then the pullback $(p_i)^\ast(a)$ can be written as
\[ X\times \cdots\times X\times (\Pi^X_{2\ell})_\ast (a)\times X\times\cdots\times X\ \ \ \in A^\ell(X^r)\ ,\]
which is the same as
\[ (\Pi^X_0\times\cdots \times\Pi^X_0\times\Pi^X_{2\ell}\times \Pi^X_0\times\cdots\times\Pi^X_0)_\ast (X\times \cdots\times X\times a\times X\times\cdots\times X)\ .\]
This implies that
\[ (p_i)^\ast (a)\ \ \ \in (\Pi^{X^r}_{2\ell})_\ast A^\ell(X^r)=A^\ell_{(0)}(X^r)\ ,\]
where $\Pi^{X^r}_\ast$ is the product CK decomposition. Another way to prove the fact that the projections $p_i, p_{ij}, p_{ijk}$ respect the bigrading is by invoking \cite[Corollary 1.6]{SV2}.)
It follows there is an inclusion
\[ E^\ast(X^r)\ \subset\ A^\ast_{(0)}(X^r)\ .\]
The finite morphism $p^{\times r}\colon A^r\to X^r$ induces a split injection
\[ (p^{\times r})^\ast\colon\ \ A^i_{(0)}(X^r)\cap A^i_{hom}(X^r)\ \to\ A^i_{(0)}(A^r)\cap A^i_{hom}(A^r)\ \ \ \hbox{for\ all\ }i.\]
But the right--hand side is known to be $0$ for $i\ge 4r-1$ \cite{Beau}, and so
\[ E^i(X^r)\cap A^i_{hom}(X^r)\ \subset\ A^i_{(0)}(X^r)\cap A^i_{hom}(X^r)=0\ \ \ \hbox{for\ all\ }i\ge 4r-1\ .\]
\end{proof}
\begin{remark}\label{BBk} As is clear from the proof of theorem \ref{main2}, there is a link with Beauville's conjectures for abelian varieties: let $E$ be an elliptic curve, and suppose one knows that
\[ A^i_{(0)}(E^{4r})\cap A^i_{hom}(E^{4r})=0\ \ \ \hbox{for\ all\ }i\ \hbox{and\ all\ }r\ .\]
Then conjecture \ref{conjXk} is true for the very special EPW sextic.
\end{remark}
\subsection{Relation with some hyperk\"ahler fourfolds}
\begin{theorem}\label{main3} Let $X$ be the very special EPW sextic of theorem \ref{epw}. Let $X_0$ be one of the hyperk\"ahler fourfolds of \cite[Corollary 6.4]{DBW}, and let $f\colon X_0\to X$ be the generically $2:1$ morphism constructed in \cite{DBG}. Then $X_0$ has an MCK decomposition, and there is an isomorphism
\[ \begin{split} f^\ast\colon\ \ A^4_{hom}(X)\ &\xrightarrow{\cong}\ A^4_{(4)}(X_0)\ .\\
\end{split} \]
\end{theorem}
\begin{proof} The MCK decomposition for $X_0$ was established in theorem \ref{epw}.
The morphism $f\colon X_0\to X$ of \cite{DBG} is constructed as a composition
\[ f\colon\ \ X_0\ \xrightarrow{\phi}\ X^\prime:=E^4/(G^\prime)\ \xrightarrow{ g}\ X\ ,\]
where $\phi$ is a symplectic resolution and $g$ is the double cover associated to an anti--symplectic involution.
This implies $f$ induces an isomorphism
\[ f^\ast\colon\ \ H^{4,0}(X)\ \xrightarrow{\cong}\ H^{4,0}(X^\prime)\ \xrightarrow{\cong}\ H^{4,0}(X_0)\ .\]
In view of the strong form of the generalized Hodge conjecture (proposition \ref{ghc}), $X_0$ and $X^\prime$ and $X$ verify the hypotheses of lemma \ref{equiv}. Applying lemma \ref{equiv}, we find isomorphisms of Chow motives
\[ {}^t \Gamma_f\colon\ \ h_{4,0}(X)\ \xrightarrow{\cong}\ h_{4,0}(X^\prime) \ \xrightarrow{\cong}\ h_{4,0}(X_0)\ \ \ \hbox{in}\ \mathcal M_{\rm rat}\ .\]
Since $(\Pi^X_{4,i})_\ast A^4(X)=0$ for $i\ge 1$ for dimension reasons, we have
\[ (\Pi^X_4)_\ast A^4(X)= (\Pi^X_{4,0})_\ast A^4(X) \ ,\]
and the same goes for $X^\prime$ and $X_0$. It follows that
\[ f^\ast\colon A^4_{hom}(X)= A^4( h_{4,0}(X))\ \xrightarrow{\cong}\ A^4(h_{4,0}(X_0))=:A^4_{(4)}(X_0)\ .\]
\end{proof}
As a corollary, we obtain an alternative description of the splitting $A^\ast_{(\ast)}(X_0)$ for the hyperk\"ahler fourfolds $X_0$:
\begin{corollary}\label{XX0} Let $f\colon X_0\to X$ be as in theorem \ref{main3}. The splitting $A^\ast_{(\ast)}(X_0)$ (given by the
MCK decomposition of $X_0$) verifies
\[ \begin{split} A^4(X_0)&= A^4_{(4)}(X_0)\oplus A^4_{(2)}(X_0)\oplus A^4_{(0)}(X_0)\\
&= f^\ast A^4_{hom}(X)\oplus \ker\bigl( A^4(X_0)\xrightarrow{f_\ast} A^4(X)\bigr)\oplus \mathbb{Q}\ ;\\
A^3(X_0)&=A^3_{(2)}(X_0)\oplus A^3_{(0)}(X_0)\\
&= A^3_{hom}(X_0)\oplus H^{3,3}(X_0)\ ;\\
A^2(X_0)&= A^2_{(2)}(X_0)\oplus A^2_{(0)}(X_0)\\
&=\ker\bigl( A^2_{hom}(X_0)\xrightarrow{f_\ast} A^2(X)\bigr) \oplus A^2_{(0)}(X_0)\ .\\
\end{split}\]
\end{corollary}
\begin{remark} Just as we noted for the EPW sextic $X$ (remark \ref{BB}), for this filtration to be of Bloch--Beilinson type one would need to prove that
\[ A^2_{(0)}(X_0) \cap A^2_{hom}(X_0)\stackrel{??}{=}0\ ,\]
which I cannot prove. This situation is similar to that of the Fano varieties $F$ of lines on a very general cubic fourfold: thanks to work of Shen--Vial \cite{SV} there is a multiplicative bigrading
$A^\ast_{(\ast)}(F)$ which has many good properties and interesting alternative descriptions. The main open problem is to prove that
\[ A^2_{(0)}(F)\cap A^2_{hom}(F)\stackrel{??}{=}0\ ,\]
which doesn't seem to be known for any single $F$.
\end{remark}
\begin{remark} Conjecturally, the relations of corollary \ref{XX0} should hold for any double EPW sextic $X_0$ (with $X$ being the quotient of $X_0$ under the anti--symplectic involution). However, short of knowing $X_0$ has finite--dimensional motive (as is the case here, thanks to the presence of the abelian variety $E^4$), this seems difficult to prove.
Note that at least, for a general double EPW sextic $X_0$, the relations of corollary \ref{XX0} give a concrete description of a filtration on $A^\ast(X_0)$ that should be the Bloch--Beilinson filtration.
\end{remark}
\section{Further results}
\label{secf}
\subsection{Bloch conjecture}
\label{ssb}
\begin{conjecture}[Bloch \cite{B}]\label{CB} Let $X$ be a smooth projective variety of dimension $n$. Let $\Gamma\in A^n(X\times X)$ be a correspondence such that
\[ \Gamma_\ast=0\colon\ \ \ H^{p,0}(X)\ \to\ H^{p,0}(X)\ \ \ \hbox{for\ all\ }p>0\ .\]
Then
\[ \Gamma_\ast=0\colon\ \ \ A^n_{hom}(X)\ \to\ A^n_{hom}(X)\ .\]
\end{conjecture}
A weak version of conjecture \ref{CB} is true for the very special EPW sextic:
\begin{proposition} Let $X$ be the very special EPW sextic. Let $\Gamma\in A^4(X\times X)$ be a correspondence such that
\[ \Gamma_\ast=0\colon\ \ \ H^{4,0}(X)\ \to\ H^{4,0}(X)\ .\]
Then there exists $N\in\mathbb{N}$ such that
\[ (\Gamma^{\circ N})_\ast=0\colon\ \ \ A^4_{hom}(X)\ \to\ A^4_{hom}(X)\ .\]
\end{proposition}
\begin{proof} As is well--known, this follows from the fact that $X$ has finite--dimensional motive; we include a proof for completeness' sake.
By assumption, we have
\[ \Gamma_\ast=0\colon\ \ \ H^{4}(X,\mathbb{C})/F^1\ \to\ H^{4}(X,\mathbb{C})/F^1\ \]
(where $F^\ast$ is the Hodge filtration). Thanks to the ``strong form of the generalized Hodge conjecture'' (proposition \ref{ghc}), this implies that also
\[ \Gamma_\ast=0\colon\ \ \ H^{4}(X,\mathbb{Q})/\widetilde{N}^1\ \to\ H^{4}(X,\mathbb{Q})/\widetilde{N}^1\ .\]
Using Vial's refined CK projectors (theorem \ref{Pi_2}), this means
\[ \Gamma\circ \Pi^X_{4,0}=0\ \ \ \hbox{in}\ H^8(X\times X)\ ,\]
or, equivalently,
\[ \Gamma - \sum_{(k,\ell)\not=(4,0)} \Gamma\circ \Pi^X_{k,\ell}=0 \ \ \ \hbox{in}\ H^8(X\times X)\ .\]
By finite--dimensionality, this implies there exists $N\in\mathbb{N}$ such that
\[ \Bigl(\Gamma - \sum_{(k,\ell)\not=(4,0)} \Gamma\circ \Pi^X_{k,\ell}\Bigr)^{\circ N}=0 \ \ \ \hbox{in}\ A^4(X\times X)\ .\]
Upon developing, this gives an equality
\begin{equation}\label{devel} \Gamma^{\circ N}=Q_1+\cdots +Q_N\ \ \ \hbox{in}\ A^4(X\times X)\ ,\end{equation}
where each $Q_j$ is a composition of correspondences
\[ Q_j=Q_j^1\circ Q_j^2\circ\cdots\circ Q_j^r\ \ \ \in A^4(X\times X)\ ,\]
and for each $j$, at least one $Q_j^i$ is equal to $\Pi^X_{k,\ell}$ with $(k,\ell)\not=(4,0)$.
Since (for dimension reasons)
\[ (\Pi^X_{k,\ell})_\ast A^4_{hom}(X)=0\ \ \ \hbox{for\ all\ }(k,\ell)\not=(4,0)\ ,\]
it follows that
\[ (Q_j)_\ast A^4_{hom}(X)=0\ \ \ \hbox{for\ all\ }j\ .\]
In view of equality (\ref{devel}), we thus have
\[ (\Gamma^{\circ N})_\ast=0\colon\ \ \ A^4_{hom}(X)\ \to\ A^4_{hom}(X)\ .\]
\end{proof}
For special correspondences, one can do better:
\begin{proposition} Let $X$ be the very special EPW sextic. Let $\Gamma\in A^4(X\times X)$ be a correspondence such that
\[ \Gamma^\ast =0\colon\ \ H^{4,0}(X)\ \to\ H^{4,0}(X)\ .\]
Assume moreover that $\Gamma$ can be written as
\[ \Gamma={\displaystyle\sum_{i=1}^r}c_i \Gamma_{\sigma_i}\ \ \ \hbox{in}\ A^4(X\times X)\ ,\]
with $c_i\in\mathbb{Q}$ and $\sigma_i\in\hbox{Aut}(X)$
induced by a $G$--equivariant automorphism $\sigma_i^E\colon E^4\to E^4$, where $X=E^4/(G)$ and $\sigma_i^E$ is a group homomorphism.
Then
\[ \Gamma^\ast=0\colon\ \ A^4_{hom}(X)\ \to\ A^4_{hom}(X)\ .\]
\end{proposition}
\begin{proof} Let us write $A=E^4$, and $X^\prime:=A/(G^\prime)$ for the double cover of $X$ with $\dim H^{2,0}(X^\prime)=1$. The projection $g\colon X^\prime\to X$ induces an isomorphism
\[ g^\ast\colon\ \ H^{4,0}(X)\ \xrightarrow{\cong}\ H^{4,0}(X^\prime)\ ,\]
with inverse given by ${1\over d} g_\ast$.
Let $\sigma^\prime_i\colon X^\prime\to X^\prime$ ($i=1,\ldots,r$) be the automorphism induced by $\sigma_i^E$.
For each $i=1,\ldots,r$, there is a commutative diagram
\[ \begin{array}[c]{ccc}
H^{4,0}(X^\prime) & \xrightarrow{ (\sigma_i^\prime)^\ast}& H^{4,0}(X^\prime)\\
{\scriptstyle g^\ast} \uparrow \ \ \ \ && \ \ \ \ \downarrow {\scriptstyle g_\ast}\\
H^{4,0}(X) & \xrightarrow{ (\sigma_i)^\ast}& H^{4,0}(X)\\
\end{array}\]
Defining a correspondence
\[ \Gamma^\prime= {\displaystyle\sum_{i=1}^r}c_i \Gamma_{\sigma^\prime_i}\ \ \ \hbox{in}\ A^4(X^\prime\times X^\prime)\ ,\]
we thus get a commutative diagram
\[ \begin{array}[c]{ccc}
H^{4,0}(X^\prime) & \xrightarrow{ (\Gamma^\prime)^\ast}& H^{4,0}(X^\prime)\\
{\scriptstyle g^\ast} \uparrow \ \ \ \ && \ \ \ \ \downarrow {\scriptstyle g_\ast}\\
H^{4,0}(X) & \xrightarrow{ \Gamma^\ast}& H^{4,0}(X)\\
\end{array}\]
The assumption on $\Gamma^\ast$ thus implies that
\[ (\Gamma^\prime)^\ast =0\colon\ \ H^{4,0}(X^\prime)\ \to\ H^{4,0}(X^\prime)\ .\]
Since (by construction of $X^\prime$) the cup--product map
\[ H^{2,0}(X^\prime)\otimes H^{2,0}(X^\prime)\ \to\ H^{4,0}(X^\prime) \]
is an isomorphism of $1$--dimensional $\mathbb{C}$--vector spaces, we must have that
\[ (\Gamma^\prime)^\ast =0\colon\ \ H^{2,0}(X^\prime)\ \to\ H^{2,0}(X^\prime)\ .\]
It is readily seen this implies
\begin{equation}\label{zero} {}^t \Gamma^\prime\circ \Pi^{X^\prime}_{2,0}=0\ \ \ \hbox{in}\ H^8(X^\prime\times X^\prime)\ .\end{equation}
Let $\Gamma_A$ denote the correspondence
\[ \Gamma_A:= {\displaystyle\sum_{i=1}^r}c_i \Gamma_{\sigma^E_i}\ \ \ \hbox{in}\ A^4(A\times A)\ .\]
Let $p^\prime\colon A\to X^\prime=A/(G^\prime)$ denote the quotient morphism. There are relations
\begin{equation}\label{rel} \begin{split} {}^t \Gamma_{\sigma^\prime} &= {1\over \vert G^\prime\vert}\ \Gamma_{p^\prime}\circ {}^t \Gamma_{A}\circ {}^t \Gamma_{p^\prime}\ \ \ \hbox{in}\
A^4(X^\prime\times X^\prime)\ ,\\
\Pi^{X^\prime}_{2,0}&={1\over \vert G^\prime\vert}\ \ \Gamma_{p^\prime}\circ \Pi^A_{2,0}\circ {}^t \Gamma_{p^\prime}\ \ \ \hbox{in}\
A^4(X^\prime\times X^\prime)\ \\
\end{split}\end{equation}
(the first relation is by construction of the automorphisms $\sigma_i^\prime$; the second relation can be taken as definition, cf. corollary \ref{symref}). Plugging in these relations in equality (\ref{zero}), one obtains
\[ \Gamma_{p^\prime}\circ {}^t \Gamma_{A}\circ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime}\circ \Pi^A_{2,0}\circ {}^t \Gamma_{p^\prime}=0\ \ \ \hbox{in}\ H^8(X^\prime\times X^\prime)\ .\]
Composing with ${}^t \Gamma_{p^\prime}$ on the left and $\Gamma_{p^\prime}$ on the right, this implies in particular that
\[ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime}\circ {}^t \Gamma_{A}\circ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime}\circ \Pi^A_{2,0}\circ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime}=0\ \ \ \hbox{in}\ H^8(A\times A)\ .\]
Using the standard relation ${}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime}={1\over \vert G^\prime\vert}\ \sum_{g\in G^\prime} \Gamma_g$, this simplifies to
\[ \bigl( \sum_{g\in G^\prime}\Gamma_g\bigr) \circ {}^t \Gamma_{A} \circ \bigl( \sum_{g\in G^\prime}\Gamma_g\bigr)\circ \Pi^A_{2,0} =0\ \ \ \hbox{in}\ H^8(A\times A)\ .\]
The left--hand side is a symmetrically distinguished cycle which is homologically trivial, and so it is rationally trivial (theorem \ref{os}). That is,
\[ \bigl( \sum_{g\in G^\prime}\Gamma_g\bigr) \circ {}^t \Gamma_{A} \circ \bigl( \sum_{g\in G^\prime}\Gamma_g\bigr)\circ \Pi^A_{2,0} =0\ \ \ \hbox{in}\ A^4(A\times A)\ ,\]
in other words
\[ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime} \circ {}^t \Gamma_{A} \circ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime} \circ \Pi^A_{2,0}=0 \ \ \ \hbox{in}\ A^4(A\times A)\ .\]
Now we descend again to $X^\prime$ by composing some more on both sides:
\[ \Gamma_{p^\prime}\circ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime} \circ {}^t \Gamma_{A} \circ {}^t \Gamma_{p^\prime}\circ \Gamma_{p^\prime} \circ \Pi^A_{2,0}\circ {}^t \Gamma_{p^\prime}=0 \ \ \ \hbox{in}\ A^4(X^\prime\times X^\prime)\ .\]
Using the relations (\ref{rel}), this simplifies to
\[ ({}^t \Gamma^\prime)\circ \Pi^{X^\prime}_{2,0}=0\ \ \ \hbox{in}\ A^4(X^\prime\times X^\prime)\ .\]
This implies that
\[ (\Gamma^\prime)^\ast=0\colon\ \ \ A^2_{hom}(X^\prime)\ \to\ A^2_{hom}(X^\prime)\ .\]
Since $ A^4_{(4)}(X^\prime)$ equals the image of the intersection product $A^2_{hom}(X^\prime)\otimes A^2_{hom}(X^\prime)\to A^4(X^\prime)$ (proposition \ref{surj}), we also have that
\[ (\Gamma^\prime)^\ast=0\colon\ \ \ A^4_{(4)}(X^\prime)\ \to\ A^4_{(4)}(X^\prime)\ .\]
The commutative diagram
\[ \begin{array}[c]{ccc}
A^4_{(4)}(X^\prime) &\xrightarrow{(\Gamma^\prime)^\ast}& A^4_{(4)}(X^\prime) \\
{\scriptstyle g^\ast}\uparrow\ \ \ \ && \ \ \ \ \uparrow {\scriptstyle g^\ast}\\
A^4_{hom}(X) &\xrightarrow{\Gamma^\ast}& \ \ A^4_{hom}(X)\ , \\
\end{array}\]
in which vertical arrows are isomorphisms (proof of theorem \ref{main3}), now implies that
\[ \Gamma^\ast=0\colon\ \ \ A^4_{hom}(X)\ \to\ A^4_{hom}(X)\ .\]
\end{proof}
\subsection{Voisin conjecture}
\label{ssv}
Motivated by the Bloch--Beilinson conjectures, Voisin formulated the following conjecture:
\begin{conjecture}[Voisin \cite{V9}]\label{conjVois} Let $X$ be a smooth Calabi--Yau variety of dimension $n$. Let $a,a^\prime\in A^n_{hom}(X)$ be two $0$--cycles of degree $0$. Then
\[ a\times a^\prime =(-1)^n a^\prime\times a\ \ \ \hbox{in}\ A^{2n}(X\times X)\ .\]
\end{conjecture}
It seems reasonable to expect this conjecture to go through for Calabi--Yau's that are quotient varieties.
In particular, conjecture \ref{conjVois} should be true for all EPW sextics that are quotient varieties. We can prove this for the very special EPW sextic:
\begin{proposition}\label{prV} Let $X$ be the very special EPW sextic. Let $a,a^\prime\in A^4_{hom}(X)$. Then
\[ a\times a^\prime = a^\prime\times a\ \ \ \hbox{in}\ A^8(X\times X)\ .\]
\end{proposition}
\begin{proof} As we have seen, there is a finite morphism $p\colon A\to X$, where $A$ is an abelian fourfold and
\[ p^\ast\colon\ \ A^4_{hom}(X)\ \to\ A^4_{(4)}(A)=(\Pi^A_4)_\ast A^4(A) \]
is a split injection. (The inverse to $p^\ast$ is given by a multiple of $p_\ast$.)
Proposition \ref{prV} now follows from the following fact: any $c,c^\prime\in A^4_{(4)}(A)$ verify
\[ c\times c^\prime= c^\prime\times c\ \ \ \hbox{in}\ A^8(A\times A)\ ;\]
this is \cite[Example 4.40]{Vo}.
\end{proof}
\vskip1cm
\begin{nonumberingt} The ideas developed in this note grew into being during the Strasbourg 2014--2015 groupe de travail based on the monograph \cite{Vo}. Thanks to all the participants of this groupe de travail for a stimulating atmosphere. I am grateful to Bert van Geemen and to the referee for helpful comments, and to Charles Vial for making me appreciate \cite{OS}, which is an essential ingredient in this note.
Many thanks to Yasuyo, Kai and Len for hospitably receiving me in the Schiltigheim Math. Research Institute, where this note was written.
\end{nonumberingt}
\vskip1cm
\end{document}
|
\begin{document}
\begin{center}
{\bf \Large
Fuzzy Hom-Lie Subalgebras of Hom-Lie Algebras}\\~~\\
{Shadi Shaqaqha}\\~\\
Yarmouk University, Irbid, Jordan\\
[email protected]
\end{center}
\begin{quote}
{\small \bf Abstract.} {\small In this paper, we introduce the concept of fuzzy Hom-Lie subalgebras (ideals) of Hom-Lie algebras and we investigate some of their properties. We study the relationship between fuzzy Hom-Lie subalgebras (resp. ideals) and Hom-Lie subalgebras (resp. ideals). For a finite number of fuzzy Hom-Lie subalgebras, we construct a new fuzzy Hom-Lie subalgebra on their direct sum. Finally, the properties of fuzzy Hom-Lie subalgebras and fuzzy Hom-Lie ideals under morphisms of Hom-Lie algebras are studied.}
\end{quote}
\noindent {\bf Keywords:} Hom-Lie algebras; morphism of Hom-Lie algebras; direct sum, fuzzy set; fuzzy Hom-Lie subalgebra; fuzzy Hom-Lie ideal.
\noindent
\section{Introduction} \label{111}
The notion of Hom-Lie
algebras was originally introduced by Hartwig, Larsson, and Silvestrov in 2006 \cite{HLA}. It is one of
generalizations of the concept of classical Lie algebras. In recent years, they have become
an interesting subject of mathematics and physics. We refer for more details on Hom-Lie algebras to \cite{subalgebra, Makhlouf2, Makhlouf3, Kdaisat, Shadi3}.\\
The idea of fuzzy sets was first introduced by Zadeh \cite{Zadeh}. A fuzzy set on a nonempty set $X$ is a map, called a membership function, $\mu:X\rightarrow [0, 1]$. Note that in the classical set theory we write $\mu(x)=1$ if $x\in X$, and $\mu(x)=0$ if $x\notin X$. Applications of the fuzzy set theory can be found in artificial intelligence, computer science, decision
theory, logic and management science, etc.\\
The study of fuzzy Lie subalgebras of Lie algebras was initiated by Yehia \cite{Yehia1} in 1996. Later fuzzy sets (and more generally intuitionistic fuzzy sets and complex fuzzy sets) have been applied in various directions in Lie algebras by many authors (see e.g. \cite{Akram,Akram1, Akram2, Davvaz, Shadi1, Shadi2}, and references therein).\\
In this paper we describe fuzzy Hom-Lie algebras.
\section{Preliminaries} \label{222}
Let $F$ be a ground field. A Hom-Lie algebra over $F$ is a triple $(L, ~[~,~],~\alpha)$ where $L$ is a vector space over $F$, $\alpha: L\rightarrow L$ is a linear map, and $[~,~]: L\times L\rightarrow L$ is a bilinear map (called a bracket), satisfying the following properties:
\begin{itemize}
\item[(i)] $[x,~y]= -[y,~x]$ for all $x, y\in L$ (skew-symmetry property).
\item[(ii)] $[\alpha(x), ~[y,~z]]+ [\alpha(y),~[z,~x]]+[\alpha(z),~[x,~y]]=0$ ,
for all $x, y, z \in L$ (Hom-Jacobi identity).
\end{itemize}
It is clear that every Lie algebra is a Hom-Lie algebra by setting $\alpha= id_L$ (The identity map). For a Hom-Lie algebra $L$ over a field $F$ of characteristic $\neq2$, as in the setting of Lie algebras one can show that $[x,~x]=0$ for each $x\in L$. Also for an arbitrary Hom-Lie algebra $L$, we have $[x,~0]=[0,x]=0$ for each $x\in L$.
\begin{ex}
Let $L$ be a vector space over $F$ and $[~,~]: L\times L\rightarrow L$ be any skew-symmetric bilinear map. If $\alpha: L\rightarrow L$ is the zero map, then $(L, ~[~,~],~\alpha)$ is a Hom-Lie algebra.
$\blacksquare$
\end{ex}
Let $(L, ~[~,~],~\alpha)$ be a Hom-Lie algebra. A subspace $H$ of $L$ is a Hom-Lie subalgebra if $\alpha(H)\subseteq H$ and $[x,~y]\in H$ for all $x,y\in H$. A Hom-Lie subalgebra $H$ is said to be a Hom-Lie ideal if $[x,~y]\in H$ for all $x\in H$ and $y\in L$.\\
Let $(L_1, ~[~,~]_1,~\alpha_1)$ and $(L_2, ~[~,~]_2,~\alpha_2)$ be Hom-Lie algebras. A linear map $\varphi: L_1\rightarrow L_2$ is called a morphism of Hom-Lie algebras if the following two identities are satisfied:
\begin{itemize}
\item[(i)] $\varphi([x,~y]_1)= [\varphi(x),~\varphi(y)]_2$ for all $x, ~y\in L_1$.
\item[(ii)]$\varphi\circ\alpha_1=\alpha_2\circ\varphi$.
\end{itemize}
Throughout this paper, $L$ is a Hom-Lie algebra over $F$.
\section{Fuzzy Hom-Lie Subalgebras and Fuzzy Hom-Lie Ideals} \label{333}
Let $a, b\in[0, 1]$. For the sake of simplicity we use the symbols $a\wedge b$ and $a\vee b$ to denote $\mathrm{min}\left\{a, b\right\}$ and $\mathrm{max}\left\{a, b\right\}$, respectively.
\begin{define}\label{CFL2}
A fuzzy set $\mu$ on $L$ is a {\em fuzzy Hom-Lie subalgebra} if the following conditions are satisfied for all $x, y\in L$, and $c\in F$:
\begin{itemize}
\item[(i)] $\mu(x+y)\geq \mu(x)\wedge \mu(y)$,
\item[(ii)] $\mu(c x)\geq \mu(x)$,
\item[(iii)] $\mu([x, y])\geq\mu(x)\wedge \mu(y)$,
\item[(iv)] $\mu(\alpha(x))\geq \mu(x)$.
\end{itemize}
\end{define}
If the condition (iii) is replaced by $\mu([x, y])\geq \mu(x) \vee\mu(y)$, then $\mu$ is called a {\em fuzzy Hom-Lie ideal} of $L$. Note that the condition $(ii)$ implies $\mu(x)\leq \mu(0)$ and $\mu(-x)\leq \mu(x)$ for all $x\in L$.\\
It is clear that if $\mu$ is a fuzzy Hom-Lie ideal of $L$, then it is a fuzzy Hom-Lie subalgebra of $L$.\\
\begin{ex}\label{Homex2}
Let $L$ be a vector space with basis $\{e_1,~e_2,~e_3\}$. We define the linear map $\alpha: L\rightarrow L$ by setting $\alpha(e_1)=e_2$ and $ \alpha(e_2) = \alpha(e_3)=0$. Let $[~,~]: L\times L\rightarrow L$ be the skew-symmetric bilinear map such that
$$[e_1,~e_2]=[e_2,~e_3]=0,~[e_1,~e_3]= e_1$$
and $[e_i,~e_i]=0$ for all $i=1, 2, 3$. Then $(L, ~[~,~],~\alpha)$ is a Hom-Lie algebra. Indeed, for each $x, y\in L$, we have $[x,~y]$ is a scalar multiple of $e_1$. Also $\alpha(x)$ is a scalar multiple of $e_2$ for each $x\in L$. Therefore $[\alpha(x),~[y, ~z]]=0$ for each $x, y, z\in L$. This implies that the Hom-Jacobi identity is satisfied.\\
We define $\mu$ as follows:
$$\mu(x)=\left\{
\begin{array}{lr}
0.8 &: x=0 \\
0.4 &: x\in \mathrm{span}\{e_1, e_2\}-\{0\}\\
0.1 &: \mathrm{otherwise}.
\end{array}
\right.$$
Then $\mu$ is a fuzzy Hom-Lie ideal of $L$.
$\blacksquare$
\end{ex}
\section{Relations Between Fuzzy Hom-Lie Ideals and Hom-Lie Ideals}
Let $V$ be a vector space and $\mu$ be a fuzzy set on it. For $t\in[0, 1]$ the set $U(\mu, t)= \left\{x\in V~|~\mu(x)\geq t\right\}$ is called an upper level of $\mu$. The following theorem will show a relation between fuzzy Hom-Lie subalgebras of $L$ and Hom-Lie subalgebras of $L$.
\begin{thm}\label{CFL3}
Let $\mu$ be a fuzzy subset of $L$. Then the following statements are equivalent:
\begin{itemize}
\item[(i)] $\mu$ is a fuzzy Hom-Lie subalgebra of $L$,
\item[(ii)] the non empty set $U(\mu, t)$ is a Hom-Lie subalgebra of $L$ for every $t\in \mathrm{Im}(\mu)$.
\end{itemize}
\end{thm}
{\it Proof.~}
Let $t\in\mathrm{Im}(\mu)$, and let $x, y\in U(\mu, t)$, and $c\in F$. As $\mu$ is a fuzzy Hom-Lie subalgebra of $L$, we have $\mu(x+y)\geq\mu(x)\wedge\mu(y)\geq t$, $\mu(c.x)\geq\mu(x)\geq t$, $\mu(\alpha(x))\geq \mu(x)\geq t$, and $\mu([x, y])\geq\mu(x)\wedge\mu(y)\geq t$, and so $x+y$, $c.x$, $\alpha(x)$, and $[x, y]$ are elements in $U(\mu, t)$. Conversely, let $U(\mu, t)$ be Hom-Lie subalgebras of $L$ for every $t\in \mathrm{Im}(\mu)$. Let $x, y\in L$ and $c\in F$. We may assume $\mu(y)\geq \mu(x)=t_1$, so $x, y\in U(\mu, t_1)$. As $U(\mu, t_1)$ is a subspace of $L$, we have $c. x$ and $x+y$ are in $U(\mu, t_1)$, and so $\mu(c. x)\geq t_1= \mu(x)$ and $\mu(x+y)\geq t_1 =\mu(x)\wedge\mu(y)$. Since $U(\mu, t_1)$ is a Hom-Lie subalgebra of $L$, we have $[x, y]$ and $\alpha(x)$ are in $U(\mu, t_1)$. Hence, $\mu([x, y])\geq t_1 =\mu(x)\wedge\mu(y)$, and $\mu(\alpha(x))\geq t_1=\mu(x)$.
$\Box$
\begin{thm}\label{CFL4}
Let $\mu$ be a fuzzy subset of $L$. Then the following statements are equivalent:
\begin{itemize}
\item[(i)] $\mu$ is a fuzzy Hom-Lie ideal of $L$,
\item[(ii)] the non empty set $U(\mu, t)$ is a Hom-Lie ideal of $L$ for every $t\in \mathrm{Im}(\mu)$.
\end{itemize}
\end{thm}
{\it Proof.~} Let $\mu$ be a fuzzy Hom-Lie ideal of $L$. Then it is a fuzzy Hom-Lie subalgebra of $L$. According to the theorem above, for every $x, y\in U(\mu, t)$ and $c\in F$ we have $x+y, c.x, \alpha(x)$ are in $U(\mu, t)$. For $x\in L$ and $y\in U(\mu, t)$, we find $\mu([x, y])\geq \mu(x)\vee \mu(y)\geq \mu(y)\geq t$. That is $[x, y]\in U(\mu, t)$. Conversely, assume that every $U(\mu, t)\neq \emptyset$ is a Hom-Lie ideal of $L$, then $U(\mu, t)$ is a Hom-Lie subalgebra. Thus, we can proceed as in the theorem above and the only difference appears in the proof of the following statement:
$$\mu([x, y])\geq \mu(x)\vee \mu(y)~\forall x, y\in L.$$
Let $x, y\in L$. Without loss of generality, we may assume that $\mu(x)\geq \mu(y)$. Set $t_0=\mu(x)$. Hence $x\in U(\mu, t_0)$. As $U(\mu, t_0)$ is a Hom-Lie ideal of $L$, we have $[x, y]\in U(\mu, t_0)$. This implies that $\mu([x, y])\geq \mu(x)\vee \mu(y)$.
$\Box$
Let $V$ be a vector space. For $t\in [0, 1]$ and a fuzzy set $\mu$ on $V$, the set $U(\mu^>, t)= \left\{x\in V~|~\mu(x)> t\right\}$ is called a {\em strong upper level} of $\mu$. We have the following result.
\begin{thm}\label{CFL3B}
Let $\mu$ be a fuzzy subset of $L$. Then the following statements are equivalent:
\begin{itemize}
\item[(i)] $\mu$ is a fuzzy Hom-Lie subalgebra of $L$,
\item[(ii)] the strong upper level $U(\mu^>, t)$ is a Hom-Lie subalgebra of $L$ for every $t\in \mathrm{Im}(\mu)$.
\end{itemize}
\end{thm}
{\it Proof.~}
For $t\in\mathrm{Im}(\mu)$, let $x, y\in U(\mu^>, t)$, and $c\in F$. As $\mu$ is a fuzzy Hom-Lie subalgebra of $L$, we have $\mu(x+y)\geq\mu(x)\wedge\mu(y)>t$, $\mu(c.x)\geq\mu(x)> t$, $\mu(\alpha(x))\geq\mu(x)>t$, and $\mu([x, y])\geq\mu(x)\wedge\mu(y)> t$. Consequently, $x+y$, $c.x$, $\alpha(x)$, and $[x, y]$ are elements in $U(\mu^>, t)$. Conversely, assume that for every $t\in \mathrm{Im}(\mu)$ we have $U(\mu^>, t)$ is a Hom-Lie subalgebra of $L$. Let $x, y\in L$ and $c\in F$. We need to show that the conditions of Definition \ref{CFL2} are satisfied. If $\mu(x)=0$ or $\mu(y)=0$, then $\mu(x+y)\geq 0=\mu(x)\wedge\mu(y)$. Suppose that $\mu(x)\neq 0$ and $\mu(y)\neq 0$. Suppose to the contrary that $\mu(x+y)$ and $\mu([x, y])$ are less than $\mu(x)\wedge \mu(y)$. Let $t_0$ be the greatest lower bound of the set $\{t~|~t<\mu(x)\wedge \mu(y)\}$. Since $x, y\in U(\mu^>, t_0)$, we have $x+y, [x, y]\in U(\mu^>, t_0)$, and hence $\mu(x+y), \mu([x, y])> t_0$. This contradicts that there is no element $a\in L$ with $t_0< \mu(a)< \mu(x)\wedge \mu(y)$. This shows that $\mu(x+y), \mu([x, y])\geq\mu(x)\wedge \mu(y)$. Again let $t_0$ be the largest number of $[0, 1]$ such that $t_0<\mu(x)$ and there is no $a\in L$ with $t_0<\mu(a)<\mu(x)$. As $U(\mu^>, t_0)$ is a Hom-Lie subalgebra, we have $c.x, \alpha(x)$ are in $U(\mu^>, t_0)$, and so $\mu(c.x)> t_0$ and $\mu(\alpha(x))> t_0$. Thus $\mu(c. x)$ and $\mu(\alpha(x))$ are greater than or equal to $\mu(x)$.
$\Box$
Using almost the same argument one can show the following result.
\begin{thm}\label{CFL3C}
Let $\mu$ be a fuzzy subset of $L$. Then the following statements are equivalent:
\begin{itemize}
\item[(i)] $\mu$ is a fuzzy Hom-Lie ideal of $L$,
\item[(ii)] every strong upper level $U(\mu^>, t)$ is a Hom-Lie ideal of $L$ for every $t\in \mathrm{Im}(\mu)$.
\end{itemize}
\end{thm}
\section{Direct Sum of Fuzzy Hom-Lie Subalgebras}
Given $n$ Hom-Lie algebras $(L_i, ~[~,~]_i,~\alpha_i)$, $i=1, \ldots,n$, then $$(L_1\oplus L_2\oplus \ldots\oplus L_n,~[~, ~],~\alpha_1+ \alpha_2+ \ldots+\alpha_n)$$
is a Hom-Lie algebra by setting
$$[~, ~]~:~(L_1\oplus L_2\oplus \ldots\oplus L_n)\times( L_1\oplus L_2\oplus \ldots\oplus L_n)\rightarrow (L_1\oplus L_2\oplus \ldots\oplus L_n)$$
$$((x_1,~x_2, \ldots,~x_n),~(y_1,~y_2, \ldots,~y_n))\mapsto ([x_1,~y_1]_1,~[x_2,~y_2]_2, \ldots,~[x_n,~y_n]_n),$$
and the linear map $$(\alpha_1+ \alpha_2+ \ldots+\alpha_n)~:~(L_1\oplus L_2\oplus \ldots\oplus L_n)\rightarrow(L_1\oplus L_2\oplus \ldots\oplus L_n)$$
$$(x_1,~x_2, \ldots,~x_n) \mapsto (\alpha_1(x_1),~\alpha_2(x_2), \ldots,~\alpha_n(x_n)).$$
In the special case where $n=2$, we obtain \cite[Proposition 2.2]{directsum} (see \cite{Kdaisat}).\\
Let $(L_1, [~,~]_1, \alpha_1), (L_2, [~,~]_2, \alpha_2), \ldots, (L_n, [~,~]_n, \alpha_n)$ be Hom-Lie algebras. Suppose that $\mu_1, \mu_2, \ldots, \mu_n$ are fuzzy subsets of $L_1, L_2, \ldots, L_n$, respectively. Then the generalized Cartesian sum of fuzzy sets induced by $\mu_1, \mu_2, \ldots, \mu_n$ on $L_1\oplus L_2\oplus \cdots \oplus L_n$ is
$$\mu_1\oplus \mu_2\oplus \cdots\oplus \mu_n:L_1\oplus L_2\oplus \cdots \oplus L_n\rightarrow [0,1];~(x_1, x_2, \ldots, x_n)\mapsto \mu_1(x_1)\wedge \mu_2(x_2)\wedge \cdots\wedge \mu_n(x_n).$$
\begin{thm}\label{proifnls1}
Let $(L_1, [~,~]_1, \alpha_1), (L_2, [~,~]_2, \alpha_2), \ldots, (L_n, [~, ~]_n, \alpha_n)$ be Hom-Lie algebras. Let $\mu_1, \mu_2, \ldots, \mu_n$ be fuzzy Hom-Lie subalgebras of $L_1, L_2, \ldots, L_n$, respectively. Then $\mu_1\oplus \mu_2\oplus\cdots \oplus \mu_n$ is a fuzzy Hom-Lie subalgebra of $L_1\oplus L_2\oplus \cdots \oplus L_n$.
\end{thm}
{\it Proof.~} Let $(x_1, x_2, \ldots, x_n), (y_1, y_2, \ldots, y_n)\in L_1\oplus L_2\oplus \cdots \oplus L_n$. Then
\begin{eqnarray}
(\mu_1\oplus \mu_2\oplus\cdots \oplus \mu_n)([(x_1, x_2, \ldots, x_n), (y_1, y_2, \ldots, y_n)])&=& (\mu_1\oplus \mu_2\oplus\cdots \oplus \mu_n)([x_1, y_1]_1, [x_2, y_2]_2, \ldots, [x_n, y_n]_n)\nonumber\\
&=&\mu_1([x_1, y_1]_1)\wedge \mu_2([x_2, y_2]_2)\wedge \cdots\wedge \mu_n([x_n, y_n]_n)\nonumber\\
&\geq& \mu_1(x_1)\wedge\mu_1(y_1)\wedge \mu_2(x_2)\wedge \mu_2(y_2)\wedge\cdots \wedge \mu_n(x_n)\wedge \mu_n(y_n)\nonumber \\
&=&(\mu_1\oplus \cdots\oplus \mu_n)((x_1, \ldots, x_n))\wedge (\mu_1\oplus \cdots\oplus \mu_n)((y_1, \ldots, y_n)).\nonumber
\end{eqnarray}
Also,
\begin{eqnarray}
(\mu_1\oplus \cdots \oplus \mu_n)\bigl((\alpha_1+\alpha_2+\cdots +\alpha_n)(x_1, x_2, \ldots, x_n)\bigr)&=&(\mu_1\oplus \cdots \oplus \mu_n)(\alpha_1(x_1), \alpha_2(x_2), \ldots, \alpha_n(x_n))\nonumber\\
&=&\nonumber\mu_1(\alpha_1(x_1))\wedge \mu_2(\alpha_2(x_2))\wedge \cdots \wedge \mu_n(\alpha_n(x_n))\nonumber\\
&\geq& \mu_1(x_1)\wedge \mu_2(x_2)\wedge \cdots \wedge \mu_n(x_n)\nonumber\\
&=&(\mu_1\oplus \mu_2\oplus \cdots\oplus \mu_n)(x_1, x_2, \ldots, x_n).\nonumber
\end{eqnarray}
The rest of the proof is similar to the proof of \cite[Theorem 5.2]{Huang}, so we omit it.
$\Box$
However, the direct sum of fuzzy Hom-Lie ideals of Hom-Lie algebras $L_1$ and $L_2$ need not be a fuzzy Hom-Lie ideal of the Hom-Lie algebra $L_1\oplus L_2$.
\begin{rem}
In \cite{Shadi7}, we introduced and studied infinite direct products of Hom-Lie algebras. One can consider fuzzy Hom-Lie subalgebras of such Hom-Lie algebras.
\end{rem}
\section{On Fuzzy Hom-Lie algebras and Hom-Lie Algebras Morphisms}
Suppose $f: X\rightarrow Y$ is a function. If $\mu_B$ is a fuzzy set of $Y$, then we can define a fuzzy set on $X$ induced by $f$ and $\mu_B$ by setting $\mu_{f^{-1}(B)}(x)=\mu_B(f(x))$ for any $x\in X$. Also if $\mu_A$ is a fuzzy set on $X$, then
$$\mu_{f(A)}(y)=\left\{
\begin{array}{lr}
\mathrm{sup}_{x\in f^{-1}(y)}\left\{\mu_A(x)\right\} &: y\in f(X) \\
0 &: y\notin f(X)
\end{array}
\right.$$
is a fuzzy set on $Y$ induced by $f$ and $\mu_A$ (See for example \cite{Shadi3}). The following theorem was obtained by Kim and Lee in \cite{Kim} in the setting of Lie algebras. We extend it to Hom-Lie algebra case.
\begin{thm}\label{CFL9}
Let $f: (L_1, [~,~]_1, \alpha_1)\rightarrow (L_2, [~,~]_2, \alpha_2)$ be a morphism of Hom-Lie algebras. If $B=\mu_B$ is a fuzzy Hom-Lie subalgebra (resp. ideal) of $L_2$, then the fuzzy set $f^{-1}(B)$ is also a fuzzy Hom-Lie subalgebra (resp. ideal) of $L_1$.
\end{thm}
{\it Proof.~}
Let $x_1, x_2\in L_1$. Then
\begin{eqnarray*}
\mu_{f^{-1}(B)}(x_1+x_2)&=&\mu_B(f(x_1+x_2))\\
&=& \mu_B(f(x_1)+f(x_2))~~~(f~\mathrm{is~linear})\\
&\geq & \mu_B(f(x_1))\wedge \mu_B(f(x_2))~(\mu_B~\mathrm{is ~a~ fuzzy ~Hom-Lie~subalgebra})\\
&=&\mu_{f^{-1}(B)}(x_1)\wedge \mu_{f^{-1}(B)}(x_2),
\end{eqnarray*}
and
\begin{eqnarray*}
\mu_{f^{-1}(B)}([x_1,x_2])&=&\mu_B(f([x_1, x_2]))\\
&=& \mu_B([f(x_1), f(x_2)])~~~(f~\mathrm{is~morphism})\\
&\geq & \mu_B(f(x_1))\wedge \mu_B(f(x_2))~(\mu_B~\mathrm{is~a~fuzzy~Hom-Lie~subalgebra})\\
&=&\mu_{f^{-1}(B)}(x_1)\wedge \mu_{f^{-1}(B)}(x_2).
\end{eqnarray*}
Let $x\in L_1$ and $c\in F$. Then
\begin{eqnarray*}
\mu_{f^{-1}(B)}(c .x)&=&\mu_B(f(c.x))\\
&=& \mu_B(c.f(x))~~~(f~\mathrm{is~linear})\\
&\geq & \mu_B(f(x))~(\mu_B~\mathrm{is ~a~fuzzy~Hom-Lie~subalgebra})\\
&=&\mu_{f^{-1}(B)}(x),
\end{eqnarray*}
and
\begin{eqnarray*}
\mu_{f^{-1}(B)}(\alpha_1(x))&=&\mu_B(f(\alpha_1(x)))\\
&=& \mu_B(\alpha_2(f(x)))~~~(f~\mathrm{is~a~morphism~of~Hom-Lie~algebras})\\
&\geq & \mu_B(f(x))~(\mu_B~\mathrm{is ~a~fuzzy~Hom-Lie~subalgebra})\\
&=&\mu_{f^{-1}(B)}(x),
\end{eqnarray*}
The case of fuzzy Hom-Lie ideal is similar to show.
$\Box$
If $f:L_1\rightarrow L_2$ is a Lie algebra homomorphism and $A=\mu_A$ is a fuzzy subalgebra of $L_1$, then the image of $A$, $f(A)$ is a fuzzy subalgebra of $f(L_1)$ (\cite{Kim}). In the following theorem we establish an analogous result for the case of Hom-Lie algebras.
\begin{thm}\label{CFL10}
Let $f:(L_1, [~,~]_1, \alpha_1)\rightarrow (L_2, [~,~]_2, \alpha_2)$ be a morphism from $L_1$ onto $L_2$. If $A=\mu_A$ is a fuzzy Hom-Lie subalgebra of $L_1$, then $f(A)$ is also a fuzzy Hom-Lie subalgebra of $L_2$.
\end{thm}
{\it Proof.~} Let $y_1, y_2\in L_2$. As $f$ is onto, there are $x_1, x_2\in L_1$ such that $f(x_1)=y_1$ and $f(x_2)=y_2$. We have
$$\{x_1+x_2~|~x_1\in f^{-1}(y_1)~\mathrm{and}~x_2\in f^{-1}(y_2)\}\subseteq \{x~|~x\in f^{-1}(y_1+y_2)\},$$
and
$$\{[x_1, x_2]_1~|~x_1\in f^{-1}(y_1)~\mathrm{and}~x_2\in f^{-1}(y_2)\}\subseteq \{x~|~x\in f^{-1}([y_1, y_2]_2)\}.$$
Now, we find
\begin{eqnarray}
\mu_{f(A)}(y_1+y_2)&=&\mathrm{sup}_{x\in f^{-1}(y_1+y_2)}\{\mu_A(x)\}\nonumber\\
&\geq& \mathrm{sup}\{\mu_A(x_1+x_2)~|~x_1\in f^{-1}(y_1)~\mathrm{and}~x_2\in f^{-1}(y_2)\}\nonumber\\
&\geq &\mathrm{sup}\{\mu_A(x_1)\wedge \mu_A(x_2)~|~x_1\in f^{-1}(y_1)~\mathrm{and}~x_2\in f^{-1}(y_2)\}\nonumber\\
&=& \mathrm{sup}_{x_1\in f^{-1}(y_1)}\{\mu_A(x_1)\}\wedge \mathrm{sup}_{x_2\in f^{-1}(y_2)}\{\mu_A(x_2)\}\nonumber\\
&=&\mu_{f(A)}(y_1)\wedge \mu_{f(A)}(y_2).\nonumber
\end{eqnarray}
Also,
\begin{eqnarray}
\mu_{f(A)}([y_1, y_2]_2)&=&\mathrm{sup}_{x\in f^{-1}([y_1, y_2]_2)}\{\mu_A(x)\}\nonumber\\
&\geq &\mathrm{sup}\{\mu_A([x_1, x_2]_1)~|~x_1\in f^{-1}(y_1)~\mathrm{and}~x_2\in f^{-1}(y_2)\}\nonumber\\
&\geq&\mathrm{sup}\{\mu_A(x_1)\wedge \mu_A(x_2)~|~x_1\in f^{-1}(y_1)~\mathrm{and}~x_2\in f^{-1}(y_2)\}\nonumber\\
&=& \mathrm{sup}_{x_1\in f^{-1}(y_1)}\{\mu_A(x_1)\}\wedge \mathrm{sup}_{x_2\in f^{-1}(y_2)}\{\mu_A(x_2)\}\nonumber\\
&=&\mu_{f(A)}(y_1)\wedge \mu_{f(A)}(y_2).\nonumber
\end{eqnarray}
For $y\in L_2$ and $c\in F$, we find
$$\{c.x~|~x\in f^{-1}(y)\}\subseteq \{x~|~x\in f^{-1}(c.y)\},$$
and
$$\{\alpha_1(x)~|~x\in f^{-1}(y)\}\subseteq \{x~|~x\in f^{-1}(\alpha_2(y))\}.$$
and so
\begin{eqnarray}
\mu_{f(A)}(c.y)&=&\mathrm{sup}_{x\in f^{-1}(c.y)}\{\mu_A(x)\}\nonumber\\
&\geq& \mathrm{sup}\{\mu_A(c.x)~|~x\in f^{-1}(y)\}\nonumber\\
&\geq& \mathrm{sup}\{\mu_A(x)~|~x\in f^{-1}(y)\}\nonumber\\
&=&\mu_{f(A)}(y),\nonumber
\end{eqnarray}
also,
\begin{eqnarray}
\mu_{f(A)}(\alpha_2(y))&=&\mathrm{sup}_{x\in f^{-1}(\alpha_2(y))}\{\mu_A(x)\}\nonumber\\
&\geq&\mathrm{sup}\{\mu_A(\alpha_1(x))~|~x\in f^{-1}(y)\}\nonumber\\
&\geq& \mathrm{sup}\{\mu_A(x)~|~x\in f^{-1}(y)\}\nonumber\\
&=&\mu_{f(A)}(y).\nonumber
\end{eqnarray}
$\Box$
Chung-Gook Kim and Dong-Soo Lee (\cite{Kim}) proved if $\varphi:L\rightarrow L'$ is a surjective Lie algebra homomorphism and $A=\mu_A$ is a fuzzy ideal of $L$, then $\varphi(A)$ is a fuzzy ideal of $L'$. We will extend the result to fuzzy Hom-Lie algebra case.
\begin{thm}\label{CFL11}
Let $f:(L_1, [~,~]_1, \alpha_1)\rightarrow (L_2, [~,~]_2, \alpha_2)$ be an onto morphism of Hom-Lie algebras. If $A=\mu_A$ is a fuzzy Hom-Lie ideal of $L_1$, then $f(A)$ is also a fuzzy Hom-Lie ideal of $L_2$.
\end{thm}
{\it Proof.~}
The proof is similar to the proof of the theorem above. We only need to show that $\mu_{f(A)}([y_1, y_2]_2)\geq \mu_{f(A)}(y_1)\vee \mu_{f(A)}(y_2)$ for all $y_1, y_2\in L_2$. Let $y_1, y_2\in L_2$, and assume, by contradiction, that $\mu_{f(A)}([y_1, y_2]_2)<\mu_{f(A)}(y_1)\vee \mu_{f(A)}(y_2)$. Then $\mu_{f(A)}([y_1, y_2]_2)< \mu_{f(A)}(y_1)$ or $\mu_{f(A)}([y_1, y_2]_2)< \mu_{f(A)}(y_2)$. We may assume, without loss of generality, that $\mu_{f(A)}([y_1, y_2]_2)< \mu_{f(A)}(y_1)$. Choose a number $t\in [0, 1]$ such that $\mu_{f(A)}([y_1, y_2]_2)<t<\mu_{f(A)}(y_1)$. There is $a\in f^{-1}(y_1)$ with $\mu_A(a)>t$. As $f$ is onto, there exists $b\in f^{-1}(y_2)$. We note that
$$f([a, b]_1)=[f(a), f(b)]_2=[y_1, y_2]_2.$$
Thus,
$$\mu_{f(A)}([y_1, y_2]_2)\geq \mu_A([a, b]_1)\geq \mu_A(a)\vee \mu_A(b)>t>\mu_{f(A)}([y_1, y_2]_2).$$
Contradiction.
$\Box$
\end{document}
|
\begin{document}
\title{Cavity-based single atom preparation and high-fidelity hyperfine state readout}
\author{Roger Gehr}
\author{J\"urgen Volz}
\author{Guilhem Dubois}
\author{Tilo Steinmetz}
\altaffiliation{Present address: Menlo Systems GmbH, Germany }
\author{Yves Colombe}
\altaffiliation{Present address: NIST, Boulder, CO-80305, USA }
\author{Benjamin L. Lev}
\altaffiliation{Present address: University of Illinois, IL-61801, USA }
\author{Romain Long}
\author{J\'er\^ome Est\`eve}
\author{Jakob Reichel}
\email{[email protected]}
\affiliation{
Laboratoire Kastler-Brossel, ENS, CNRS, UPMC, 24 rue Lhomond, 75005 Paris, France}
\date{\today}
\begin{abstract}
We prepare and detect the hyperfine state of a single $^{87}$Rb atom coupled to a fiber-based high finesse cavity on an atom chip. The atom is extracted from a Bose-Einstein condensate and trapped at the maximum of the cavity field, resulting in a reproducibly strong atom-cavity coupling. We use the cavity reflection and transmission signal to infer the atomic hyperfine state with a fidelity exceeding 99.92\% in a read-out time of 100\,$\mu$s. The atom is still trapped after detection.
\end{abstract}
\pacs{42.50.Pq, 42.50.Dv, 67.85.Hj}
\maketitle
\newcommand{\ket}[1]{\left|#1\right\rangle}
A single neutral atom with two hyperfine ground states provides a long-lived two-level system ideally suited for quantum information purposes. The collisional interaction between two atoms in the vibrational ground state is a powerful mechanism for the creation of entanglement in this system \cite{Jaksch1999,Treutlein2006a}. This has been demonstrated for atoms in the Mott insulator state in an optical lattice loaded from a Bose-Einstein condensate (BEC) \cite{Mandel2003, Anderlini2007}. However, single-site addressability is challenging in these experiments \cite{Bakr2009}. Bottom-up approaches starting with laser-cooled single atoms in easily addressable macroscopic traps \cite{Schrader2004, Volz2006, Urban2009, Wilk2010} and on atom chips \cite{Teper2006} have not yet succeeded in ground state preparation. Furthermore, read-out of the qubit state is usually destructive and does not fulfill the requirements for efficient quantum error correction \cite{Knill2001}.
In this letter, we follow an intermediate route in which a single atom is extracted from a BEC trapped inside a fiber-based high-finesse cavity on an atom chip \cite{Colombe2007}. This minimizes thermal fluctuations of the extracted atom and allows us to achieve high-fidelity read-out of the hyperfine state. An atom-cavity system in the strong coupling regime enables the efficient preparation and detection of a single atom because the presence of one atom in the hyperfine state resonant with the cavity drastically changes cavity reflection and transmission \cite{Haroche2006}. Since we can trap the entire BEC in a single antinode of the intracavity dipole trap \cite{Colombe2007}, the atom position along the cavity axis is not subject to uncertainties that lead to variations in the coupling strength \cite{Boca2004,Maunz2005a,Khudaverdyan2008}. Together with the high cooperativity of our system, this allows us to reach a read-out fidelity exceeding 99.92\% in a detection time of 100\,$\mu$s without the loss of the atom \footnote{During the process of submitting this paper, we became aware of related work by Bochmann et.al., arXiv:1002.2918v1}, on a par with ion trap experiments \cite{Hume2007, Myerson2008}, the best qubit detectors so far.
Furthermore, our cavity-assisted read-out scheme is intrinsically faster than free-space fluorescence measurements and detection times down to the sub-microsecond range are possible.
\begin{figure}
\caption{\label{fig:scheme}
\label{fig:scheme}
\end{figure}
Our experimental setup, shown in Fig.~\ref{fig:scheme}, is similar to the one described in Ref. \cite{Colombe2007}. The centerpiece of the experiment is a Fabry-Perot cavity mounted on an atom chip. The cavity with finesse 38000 and linewidth of $\kappa/2\pi$ = 53\,MHz is formed by the concave endfacets of two optical fibers with high-reflectivity coatings. The calculated maximum coupling strength between a single $^{87}$Rb atom and the cavity field is $g_{0}/2\pi$=215\,MHz for the $\ket{F=2,m_{F}=\pm 2}\rightarrow \ket{F'=3,m_{F}=\pm 3}$ transition of the $D_{2}$ line. Together with the $^{87}$Rb atomic decay rate $\gamma/2\pi$ = 3\,MHz, this positions our system in the strong coupling regime ($g_{0}\gg\kappa,\gamma$). Two avalanche photodiodes (APDs) record transmission and reflection of a probe laser. The length of the cavity is stabilized by a Pound-Drever-Hall setup using a laser at 830 nm which also serves as an intracavity standing wave dipole trap. We have determined the dipole trap-induced light shift by measuring the axial dipole trap frequency (960\,kHz). A laser beam on the repump transition is aligned transversally to the cavity axis.
The birefringence of the cavity induces a splitting of $\Delta/2\pi$=540\,MHz between its two linearly polarized eigenmodes. The dipole trap polarization is parallel to the polarization of the higher frequency cavity mode and defines the quantization axis. The pumped cavity mode is near-resonant to the atomic transition frequency, $\omega_{c}=\omega_{a}+\Delta_{ca}$, where $\omega_{a}$ is the $| F=2,m_{F}=0\rangle\rightarrow |F'=3,m_{F}=0\rangle$ transition frequency corrected for the dipole trap induced light shift of 95\,MHz, and $\Delta_{ca}$ is the cavity-atom detuning.
In a typical sequence, we prepare a small BEC of 600 atoms in a magnetic trap. We then move the trap close to one of the cavity mirrors to reduce the number of trapped atoms by surface evaporation.
After this evaporation, we are left with a small reservoir of less than 10 atoms. We load it into the central antinode of the intracavity dipole trap~\cite{Colombe2007}, ramp down the magnetic trap and apply a homogeneous magnetic field of 3.7\,G. We then use the following method to prepare a single atom. A microwave-induced adiabatic rapid passage transfers all atoms to the $\ket{F=1, m_{F}=1}$ state, where they act as a dispersive medium for the cavity, shifting its resonance by -6.1\,MHz per atom. A cavity transmission measurement allows us to approximately deduce the number of atoms in the reservoir. We post-select only runs where transmission is compatible with less than 6 atoms in the reservoir, since a larger reservoir later increases the probability to extract two rather than one atom.
We now apply a 1.9\,$\mu$s microwave (MW) pulse resonant to the $\ket{F=1,m_{F}=1}\rightarrow \ket{F=2,m_{F}=0}$ transition with a transfer probability of $4.2\%$ per atom. In order to detect a successful transfer, we measure the cavity transmission during 20\,$\mu$s with a probe laser resonant to cavity and atom. Figure~\ref{fig:preparation}a) shows the histogram of detected counts on the transmission APD following the MW pulse. The measured probability distribution is well approximated by the sum of two poissonian distributions. The low transmission peak corresponds to the presence of at least one $F=2$ atom, whereas the high transmission indicates that no atom was transferred. The distribution drops close to zero between the two peaks, justifying the choice of a threshold at 5 counts to determine whether an atom was transferred. The probability of a false positive event, i.e. a drop of cavity transmission below threshold although no atom is in $F=2$, is negligible $(\approx10^{-5})$.
We repeatedly apply this preparation cycle until a transmission level below threshold signals a successful transfer. Figure~\ref{fig:preparation}b) shows the probability distribution of the number of pulses required to prepare an atom. Sequences in which no transfer occurs after 50 trials are discarded. A poissonian reservoir atom number distribution with mean atom number $\overline{n}=1.5$ fits the data well. This measurement allows us to quantify the quality of the single atom preparation. We calculate the probability that a successful preparation leads to more than one atom to be 2.6\%.
\begin{figure}
\caption{\label{fig:preparation}
\label{fig:preparation}
\end{figure}
Since we want to detect the hyperfine state of a single atom, we now remove the remaining atoms in $F=1$. We do this by lowering the dipole trap and turning on a magnetic field gradient of 2.7\,kG$/$cm during 30\,ms, creating a strong force on the reservoir in state $\ket{F=1,m_{F}=1}$ that exceeds the restoring force of the dipole trap. The single atom in $F=2$ has a strong probability to be in the magnetic field-insensitive state $m_{F}=0$ and remains trapped, while in approximately 99$\%$ of the runs all reservoir atoms are removed. After this procedure, a probe laser pulse verifies that the atom is still trapped.
In order to show that all single atoms prepared in this way strongly couple to the cavity with similar strength, we measure the normal-mode spectrum of the atom-cavity system~\cite{Maunz2005a,Boca2004}. We probe cavity transmission at a given laser-cavity detuning $\Delta_{lc}$ during 8\,$\mu$s. To ensure that the atom is still trapped, we then apply a short repump pulse and check that the on-resonance cavity transmission is below the preparation threshold. This measurement-control cycle is repeated until the atom is lost. The resulting cavity transmission for a given detuning is obtained by averaging over approximately 20 atoms.
The resulting normal-mode spectrum is shown in Fig.~\ref{fig:rabi} together with the steady state prediction of the atom-cavity master equation. In order to account for all features of the spectrum, the model contains the full Zeeman structure of the $F=$2 and $F'=1,2,3$ manifolds as well as the magnetic field, dipole trap light shift, and coupling to both cavity modes. The probe light pumps the $\pi$-polarized higher frequency cavity mode. The coupling strength $g_{0}$ is the only free parameter in the model.
\begin{comment}
Besides the two peaks expected for a two-level atom, a third, smaller peak centered around -80\,MHz can be seen. It arises from the atomic population in $m_{F}\neq 0$. Since we pump the $\pi$-polarized cavity mode, the $F=2,m_{F}=0$ ground state mainly couples to $F=3,m_{F}=0$ excited state, whereas the $F=2,m_{F}\neq 0$ states have additional allowed transitions to $F'=2$ that lead to the third, smaller peak centered around -80\,MHz.
We simulate the system by calculating the steady state of the master equation, taking into account all excited hyperfine states as well as the two orthogonal cavity polarization modes. $g_{0}$ is used as the only variable parameter.
\end{comment}
The value of $g_{0}$ extracted from the fit is $240\pm10$\,MHz, 12$\%$ higher than expected from the calculated cavity mode geometry and within the error given by the uncertainty in the curvatures of our mirrors. The high value of the observed coupling and the absence of peak broadening are strong indications that a large fraction of the prepared atoms is well localized close to the maximum of the cavity field. This is in agreement with the observed binary transmission level during preparation (see Fig.~\ref{fig:preparation}): All atoms extracted from the ultracold reservoir couple maximally to the cavity.
\begin{figure}
\caption{\label{fig:rabi}
\label{fig:rabi}
\end{figure}
The high degree of control on the external degree of freedom of the prepared atom is an ideal starting point to establish the performance of the cavity as a qubit detector. The fidelity of the read-out is defined as $\mathcal{F}=1-\epsilon$, where $\epsilon=(\epsilon_{F1}+\epsilon_{F2})/2$, and $\epsilon_{F2}$ ($\epsilon_{F1}$) is the probability of detecting $F=1$ ($F=2$) if the atom is in $F=2$ ($F=1$) immediately before the measurement. The atomic state is inferred from the cavity reflection and transmission signals with the laser resonant to the cavity. A combination of low (high) transmission and high (low) reflection signals an atom in $F=2$ ($F=1$). A typical trace is shown in Fig.~\ref{fig:scheme}c). In this trace, the atom is probed for a time much longer than needed for detection, and the atom performs quantum jumps between the two hyperfine ground states under the action of probe light. We implement two methods to infer the atomic state from the registered counts in reflection and transmission.
The first method integrates both count rates for a given detection time and uses thresholding in the two-dimensional space $\mathcal{C}$ of all possible counts in reflection and transmission. The second method additionally makes use of the temporal evolution of the signal for a maximum likelihood estimation of the atomic state.
The detection efficiency of both schemes is determined by the lifetimes of the hyperfine states and the different count rates for the two atomic states. The hyperfine lifetime is limited by optical pumping caused by the probe light. We optimize all parameters, count rates and lifetimes, by pumping the $\perp$-polarized cavity mode, by reducing the magnetic field to 1\,G and by setting $\Delta_{ca}=-20$\,MHz. These settings lead to efficient optical pumping into the extremal Zeeman states $|F=2,m_{F}=\pm2\rangle$, see Fig.~\ref{fig:scheme}b).
Traces like the one shown in Fig.~\ref{fig:scheme}c) allow the direct measurement of both count rates and lifetimes. Individual lifetimes are distributed exponentially. A fit to the distribution gives an average lifetime of $\tau_{F2}$=52 ms ($\tau_{F1}$=26 ms) for $F=2$ ($F=1$) atoms at the cavity pump rate of $1.9\cdot10^{6}$ photons/s. The count rates in transmission (reflection) of the cavity with an atom in $F=2$ are $1.4\cdot10^{3}$/s ($8.9\cdot10^{5}$/s), with an atom in $F=1$ they become $1.9\cdot10^{5}$/s ($4.4\cdot10^{5}$/s). Optical losses and 60$\%$ APD detection efficiency account for the difference in pump power and detected flux.
For the thresholding method, we calculate $p_{F2}(c_{R},c_{T})$ ($p_{F1}(c_{R},c_{T})$) which is the probability distribution to observe $(c_{R},c_{T})$ counts in reflection and transmission for a given detection time when the atom initially is in state $F=2$ ($F=1$). The model assumes exponential decays of the hyperfine states and poissonian distributions of detected counts. Outcomes in the subspace $\mathcal{C}_{2}$ defined by the threshold $p_{F2}(c_{R},c_{T}) > p_{F1}(c_{R},c_{T})$ signal an atom in $F=2$ and vice versa. The detection errors are given by $\epsilon_{F1}$=$\sum_{\mathcal{C}_{2}} p_{F1}$ and $\epsilon_{F2}$=$\sum_{\mathcal{C}_{1}} p_{F2}$. For short integration times, these errors are dominated by photon shot noise. Increasing the integration time decreases shot noise, but the probability of a quantum jump during detection increases. An optimum detection time exists, for which the sum of both errors is minimized. Figure~\ref{fig:detection} shows the two calculated counts distributions for the optimum detection time of 60\,$\mu$s.
The simple count thresholding discards the useful information encoded in the temporal evolution of the signal. A maximum likelihood method circumvents this problem \cite{Myerson2008}. Each detection pulse is divided into $N$ time bins. The single outcome $(c_{R},c_{T})$ is replaced by the set $M$=\{$c_R^i,c_T^i$\}, where $i=1...N$ refers to the bin number. For each set $M$ we calculate $q_{F2}(M)$ ($q_{F1}(M)$), the probability of obtaining the data set $M$ if the initial state is $F=2$ ($F=1$). If $q_{F2}(M)\geq q_{F1}(M)$, we conclude that the atom was in $F=2$ before the detection and in $F=1$ otherwise. Both probabilities are calculated recursively by considering more and more bins. The maximum number of bins $N$ is chosen such that a further increase in detection time does not significantly change the outcome. For our parameters, this results in a measurement time of 100\,$\mu$s. This method generates a better state inference for atoms that change their state during the detection time.
\begin{figure}
\caption{\label{fig:detection}
\label{fig:detection}
\end{figure}
In order to experimentally determine the errors of both read-out methods, we have to prepare a single atom in a well-defined hyperfine state before applying either detection method. For this, we load a single atom into the dipole trap and continuously monitor cavity transmission until the atom is lost. The observation of transmission below a lower (above a higher) threshold signals a successful preparation in $F=2$ ($F=1$), and starts a detection. Using two separate thresholds reduces the uncertainty of the prepared state at the price of discarding results with intermediate transmission \footnote{With the chosen thresholding parameters, only about 10\% of the data is discarded. A hypothetical bias introduced by this method can therefore only have a small influence on the measured detection fidelity.}. The state preparation fidelity is limited by the finite probability of a quantum jump occurring during the preparation time, leading to errors of 2.4$\cdot 10^{-4}$ (1.2$\cdot 10^{-4}$) in the preparation of $F=2$ ($F=1$) atoms. We emphasize that we observe no time-dependence of the cavity transmission with an $F=2$ atom, except for the few microseconds preceding the loss of the atom from the trap. This allows us to use the whole time during which the atom is trapped for repeated preparation and detection. On average 1000 preparation-detection cycles are performed on each atom.
Table~\ref{table1} shows the measured and calculated detection errors. As expected, the maximum likelihood method gives the best read-out fidelity of 99.92\%. The measured thresholding method fidelity is 99.9\%, very close to the prediction of the model. The difference is mostly accounted for by state preparation errors, which decrease the measured read-out fidelity. Accidental preparation of more than one atom only affects the measured detection error on the level of $10^{-5}$.
Fig.~\ref{fig:detection} shows the comparison between the calculated and measured probability distributions, from which we extract the errors for the thresholding method. The tails of the distributions that make up the dominant contribution to the read-out error are clearly visible on both the measured data and the calculation. They are caused by a quantum jump occurring during the detection time.
The single atom preparation and high-fidelity read-out presented in this paper constitute two major steps towards single atom quantum engineering on atom chips. Together with recently realized state-dependent microwave potentials \cite{Boehi2009}, all individual elements for chip-based two-qubit gates have now been demonstrated.
Other applications may also benefit from the features of our read-out scheme and in particular its high bandwidth. Increasing probe power, we have measured 99.4$\%$ fidelity in 2\,$\mu$s, limited by APD dead time which becomes important for count rates exceeding 5\,MHz. At even higher probe powers, the dynamics of the atom-cavity system becomes non-linear; the fundamental limit is only reached for a detection time on the order of $1/\kappa$, which for our system is 3\,ns.
Sub-microsecond detection times can be useful for many experiments, e.g. the realization of loophole-free Bell tests \cite{Rosenfeld2009}. Additionally, the residual light intensity inside the cavity is weak when the atom is in the state resonant to the probe light. Compared to fluorescence measurements, the amount of spontaneous emission is thus greatly reduced. This opens up the possibility of an ideal projective measurement of the qubit state.
\begin{table}[t]
\centering
\begin{tabular}{|p{0.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.1cm}|}
\hline
& \centering TM & \centering TM &\centering MLM &\centering MLM &\centering prepa- \tabularnewline
& \centering calculated & \centering measured &\centering calculated &\centering measured &\centering ration\tabularnewline
\hline
$\centering \epsilon_{F1}$ & \centering $7.0$ &\centering $9.6\pm0.6$ &\centering $4.8$ &\centering $8.7\pm0.6$ &\centering $1.2$\tabularnewline
\hline
$\centering \epsilon_{F2}$ & \centering 9.1 &\centering $10.9\pm0.5$ &\centering $4.9$ &\centering $7.2\pm0.4$ &\centering $2.4$\tabularnewline
\hline \hline
$\centering \epsilon$ & \centering 8.0 &\centering $10.3\pm0.4$ &\centering $4.9$ &\centering $7.9\pm0.3$ & \tabularnewline
\hline
\end{tabular}
\caption{\label{table1} Calculated and measured hyperfine state read-out errors for the thresholding method (TM) and the maximum likelihood method (MLM). All numbers have to be multiplied by $10^{-4}$. Uncertainties are statistical. The last column gives preparation errors.}
\end{table}
We gratefully acknowledge financial support for this work from a EURYI award and the SCALA Integrated Project of the EU.
\end{document}
|
\begin{document}
\title[Characterizing the projective space]{Characterizing the
projective space after Cho, Miyaoka and Shepherd-Barron}
\date{May 9, 2001 }
\author{Stefan Kebekus}
\address{Stefan Kebekus, Math.~Institut, Universität Bayreuth, 95440
Bayreuth, Germany}
\email{[email protected]}
\thanks{The author gratefully acknowledges support by the
Forschungsschwerpunkt ``Globale Methoden in der komplexen Analysis''
of the Deutsche Forschungsgemeinschaft.}
\urladdr{http://btm8x5.mat.uni-bayreuth.de/\~{}kebekus}
\maketitle
\section{Introduction}
The aim of this paper is to give a short proof of the following
characterization of the projective space.
\begin{thm}\label{thm:main_char}
Let $X$ be a projective manifold of dimension $n \geq 3$, defined
over the field $\mbox{$\mathbb{C}$}$ of complex numbers. Assume that for every curve
$C\subset X$, we have $-K_X.C \geq n+1$. Then $X$ is isomorphic to
the projective space.
\end{thm}
A proof was first given in the preprint \cite{CMS00} by K.~Cho,
Y.~Miyaoka and N.~Shepherd-Barron. While our proof here is shorter,
involves substantial technical simplifications and is perhaps more
transparent, the essential ideas are taken from that preprint ---see
section~\ref{sec:attribut}.
This paper aims at simplicity, not at completeness. The methods also
yield other, more involved characterization results which we do not
discuss here. The preprint \cite{CMS00} discusses these thoroughly.
\section{Setup}
\subsection{The space of rational curves}
For the benefit of the reader who is not entirely familiar with the
deformation theory of rational curves on projective manifolds, we will
briefly recall the basic facts about the parameter space of rational
curves. Our basic reference is Kollár's book \cite{K96} on rational
curves. The reader might also wish to consider the less technical
overview in \cite{Kebekus-Habil}.
If a point $x \in X$ is given, then there exists a scheme
$\mathcal Hom_{\birat}(\mbox{$\mathbb{P}$}_1,X,[0:1]\mapsto x)$ whose geometric points
correspond to generically injective morphisms from $\mbox{$\mathbb{P}$}_1$ to $X$ which
map the point $[0:1]\in \mbox{$\mathbb{P}$}_1$ to $x$. There exists a universal
morphism
$$
\begin{array}{rccc}
\mu : & \mathcal Hom_{\birat}(\mbox{$\mathbb{P}$}_1,X,[0:1]\mapsto x) \times \mbox{$\mathbb{P}$}_1 &\to & X \\
& (f,p) & \mapsto & f(p)
\end{array}
$$
If $\mathbb B$ denotes the group of automorphisms of $\mbox{$\mathbb{P}$}_1$ which
fix the point $[0:1]$, then $\mathbb B$ acts by composition naturally
on the space $\mathcal Hom_{\birat}(\mbox{$\mathbb{P}$}_1,X,[0:1]\mapsto x)$. The quotient in
the sense of Mumford exists. We obtain a diagram as follows.
\begin{equation} \label{diag:rat_curves_x}
\xymatrix{
\mathcal Hom^n_{\birat} (\mbox{$\mathbb{P}$}_1, X,[0:1]\mapsto x)\times \mbox{$\mathbb{P}$}_1 \ar[d]
\ar[r]^(.65){U_x} \ar@/^.6cm/[rr]^{\mu} &
{\Univ^{rc}(x,X)} \ar[r]_(.6){ \iota_x} \ar[d]_{ \pi_x} & X \\
\mathcal Hom^n_{\birat} (\mbox{$\mathbb{P}$}_1, X,[0:1]\mapsto x) \ar[r]^(.55){u_x} &
{\RatCurves^n(x,X)} }
\end{equation}
Here $\mathcal Hom^n_{\birat}(\ldots)$ is the normalization of
$\mathcal Hom_{\birat}(\ldots)$, the morphisms $U_x$ and $u_x$ have the
structure of principal $\mathbb B$-bundles and $\pi_x$ is a
$\mbox{$\mathbb{P}$}_1$-bundle. The restriction of $\iota_x$ to any fiber of $\pi_x$ is
generically injective, i.e.~birational onto its image.
The space $\RatCurves^n(x,X)$ is called the ``space of rational curves
through $x$''. This name is perhaps a bit misleading because the
correspondence
$$
\begin{array}{rccc}
e: & \RatCurves^n(x,X) & \to & \{ \text{rational curves in $X$ which
contain $x$}\} \\
& h & \mapsto & \iota_x( \pi^{-1}(h))
\end{array}
$$
is not bijective in general. Although $e$ is surjective, it may
happen that the restriction of $e$ to an irreducible component $H
\subset \RatCurves^n(x,X)$ is only generically injective: several
points in $H$ may correspond to the same rational curve.
\subsection{Results of Mori's theory of rational curves}
The following theorem summarizes some of the classic results of Mori
theory, in particular Mori's famous existence theorem for rational
curves on manifolds where $K_X$ is not nef. While most statements can
been found explicitly or implicitly in the papers \cite{Mor79} and
\cite{KMM92}, some results found their final formulation only years
later. We refer to \cite{K96} for proper attributions.
\begin{thm}[Classic results on families of rational curves]\label{thm:Mori-thy}
Under the assumptions of theorem~\ref{thm:main_char}, let $x$ be a
very general point\footnote{A ``very general point'' is, by
definition, a point which is not contained in a countable union of
certain hypersurfaces.} of $X$. Then \cite[thm.~II.5.14]{K96}
there exists a rational curve $\ell \subset X$ which contains $x$
and satisfies $-K_X.\ell = n+1$.
Let $ H_x \subset \RatCurves^n(x,X)$ be the irreducible component
which contains the point corresponding to $\ell$ and consider the
restriction of the diagram~(\ref{diag:rat_curves_x}) above:
\begin{equation}
\label{eq:eval_and_proj}
\xymatrix{ { U_x} \ar[r]^{ \iota_x}
\ar[d]^{ \pi_x}_{\txt{\scriptsize $\mbox{$\mathbb{P}$}_1$-bundle}} & {X} \\
{ H_x} }
\end{equation}
Then the following holds.
\begin{enumerate}
\item The variety $ H_x$ is compact \cite[prop.~II.2.14]{K96},
smooth \cite[cor.~II.3.11.5]{K96} and has dimension $\dim H_x =
n-1$ \cite[thms.~II.3.11 and II.1.7]{K96}.
\item The evaluation morphism $ \iota_x$ is finite away from
$\iota_x^{-1}(x)$ (Mori's Bend-and-Break,
\cite[cor.~II.5.5]{K96}). In particular, $\iota_x$ is surjective.
\item If $\ell \subset X$ is a curve corresponding to a general point
of $ H_x$, then $\ell$ is smooth \cite[thm.~II.3.14]{K96} and the
restriction $T_X|_\ell$ is an ample vector bundle on $\ell$
\cite[cor.~II.3.10.1]{K96}.
\end{enumerate}
\end{thm}
\subsection{Singular rational curves}
It was realized very early by Miyaoka (\cite{Miy92}, see also
\cite[prop.~V.3.7.5]{K96}) that the singular curves in the family
$H_x$ play a pivotal rôle in the characterization problem. A
systematic study of families of singular curves, however, was not
carried out before the paper \cite{Keb00a}. In that paper, the author
gave a sharp bound on the dimension of the subvariety $H^{\Sing}_x
\subset H_x$ whose points correspond to singular rational curves and
described the singularities of those curves which are singular at $x$.
The following theorem summarizes the results of \cite{Keb00a} which
form the centerpiece of our argumentation. A singular curve is called
``immersed'' if the normalization morphism has constant rank one. A
singular curve which is not immersed is often said to be ``cuspidal''.
\begin{thm}[Singular curves in $H_x$, {\cite[thm.~3.3]{Keb00a}}]
\label{thm:bounds_for_sing}
The closed subfamily $H^{\Sing}_x \subset H_x$ of singular curves
has dimension at most one. The subfamily $H^{\Sing,x}_x \subset
H^{\Sing}_x$ of curves which are singular at $x$ is at most finite.
If $H^{\Sing,x}_x$ is not empty, then the associated curves are
immersed.
\end{thm}
In our setup, we obtain a good description of $\iota_x^{-1}(x)$ as an
immediate corollary.
\begin{cor}\label{cor:section_and_points}
The preimage $ \iota^{-1}_x(x)$ contains a section, which we call
$\sigma_\infty$, and at most a finite number of further points,
called $z_i$. The tangential morphism $T\iota_x$ has rank one along
$\sigma_\infty$.
\end{cor}
The universal property of the blow-up \cite[prop.~II.7.13]{Ha77}
therefore allows us to extend diagram~(\ref{eq:eval_and_proj}) as
follows:
$$
\xymatrix{ & & {\hat X} \ar[d]^{\txt{\scriptsize blow-up of $x$}}
\\ { U_x} \ar[rr]^{ \iota_x} \ar[d]^{
\pi_x}_{\txt{\scriptsize $\mbox{$\mathbb{P}$}_1$-bundle}} \ar@{-->}@/^/[rru]^{\hat
\iota_x} & & {X}
\\
{ H_x} }
$$
where the rational map $\hat \iota_x$ is well-defined away from the
points $z_i$.
We end with a further description of $\hat \iota_x$.
\begin{prop}[{\cite[thm.~3.4]{Keb00a}}]\label{prop:finiteness}
If $E \cong \mbox{$\mathbb{P}$}(T_X^*|_x)$ is the exceptional divisor of the
blow-up\footnote{We use Grothendieck's notation: if $V$ is a vector
space, then $\mbox{$\mathbb{P}$}(V^*) = V\setminus \{0\}/\mbox{$\mathbb{C}$}^*$. This drives the
aficionados of the older literature to the drink.}, then the
restricted morphism
$$
\hat \iota_x|_{\sigma_\infty} : \sigma_\infty \to E
$$
is finite. In particular, since $\dim \sigma_\infty = \dim H_x =
n-1$, the morphism $\hat \iota_x|_{\sigma_\infty}$ is surjective.
\end{prop}
\section{Proof of the characterization Theorem}
\subsection{The neighborhood of $\sigma_\infty$}
\label{sec:step1}
As a first step towards the proof of theorem~\ref{thm:main_char}, we
need to study the neighborhood of the section $\sigma_\infty \subset
U_x$.
\begin{prop}\label{prop:neighborhood}
If $E \cong \mbox{$\mathbb{P}$}(T_X^*|_x)$ is the exceptional divisor of the blow-up, then
\begin{enumerate}
\item The restricted morphism $\hat \iota_x|_{\sigma_\infty}$ is an
embedding. In particular, $H_x \cong \sigma_\infty \cong \mbox{$\mathbb{P}$}_{n-1}$.
\item The tangent map $T \hat \iota_x$ has maximal rank along
$\sigma_\infty$. In particular, $N_{\sigma_\infty,U_x} \cong
N_{E,\hat X} \cong \mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}}(-1)$.
\end{enumerate}
\end{prop}
The remaining part of the present section~\ref{sec:step1} is devoted
to a proof of proposition~\ref{prop:neighborhood}. Note that
statement~(2) follows immediately from statement~(1) and from
corollary~\ref{cor:section_and_points}. To show statement~(1) requires
some work.
By proposition~\ref{prop:finiteness} and by Zariski's main theorem, we
are done if we show that $\hat \iota_x|_{\sigma_\infty}$ is
birational. Assume for the moment that $\hat \iota_x|_{\sigma_\infty}$
is \emph{not} birational, let $\ell \subset X$ be a general curve
associated with $H_x$ and let $F \subset U_x$ be the corresponding
fiber of $ \pi_x$. The subvariety $ \iota_x^{-1}(\ell) \subset U_x$
will then contain a curve $B$ such that
\begin{enumerate}
\item $B$ is not contained in $\sigma_\infty$.
\item $B$ is not a fiber of the projection $\pi_x$.
\item $B\cap \sigma_\infty$ contains a point $y_1$ which is different
from $y_0 := F \cap \sigma_\infty$.
\end{enumerate}
In order to see that we can find a curve $B$ which is not a fiber of
the projection $\pi_x$, recall that the correspondence between points
in $H_x$ and curves in $X$ is generically injective and that $\ell$
was generically chosen.
Summing up, in order to show proposition~\ref{prop:neighborhood}, it
suffices to show the following claim.
\begin{claim}\label{claim1}
Let $\ell \subset X$ be a general curve associated with $H_x$ and
let $B \subset \iota_x^{-1}(\ell)$ be any curve which satisfies
items (1) and (2). Then $B$ is disjoint from $\sigma_\infty$.
\end{claim}
A proof will be given in the next few subsections.
\subsubsection{The normal bundle of $\ell$}
Since $\dim H_x = n-1 > 1$, it is a direct consequence of
theorem~\ref{thm:bounds_for_sing} that $\ell$ is smooth and therefore
isomorphic to the projective line. A standard theorem, which is
attributed to Grothendieck, but probably much older, asserts that a
vector bundle on $\mbox{$\mathbb{P}$}_1$ always decomposes into a sum of line bundles.
For the restriction of the tangent bundle $T_X$ to $\ell$, all
summands must be positive by theorem~\ref{thm:Mori-thy}(3). The
splitting type is therefore known:
$$
T_X|_\ell \cong \mbox{$\mathcal{O}$}(2)\oplus\mbox{$\mathcal{O}$}(1)^{\oplus n-1}.
$$
The normal bundle of $\ell$ in $X$ is thus isomorphic to
\begin{equation}
\label{eq:normal_bdle}
N_{\ell/X} \cong \mbox{$\mathcal{O}$}(1)^{\oplus n-1}.
\end{equation}
We will use this splitting later in section~\ref{sec:self-intersect_nmbrs}
to give an estimate on certain self-intersection numbers.
\subsubsection{Reduction to a ruled surface}
As a next step let $\tilde B$ be the normalization of $B$ and perform
a base change via the natural morphism $\tilde B \to H_x$. We obtain a
diagram as follows:
$$
\xymatrix{ { U_B} \ar@/^0.6cm/[rrr]^{ \iota_B}
\ar[rr]^{\gamma}_{\txt{\scriptsize finite base change}}
\ar[d]_{\txt{\scriptsize $\pi_B$\\ \scriptsize $\mbox{$\mathbb{P}$}_1$-bundle}} & & {
U_x} \ar[r]^{ \iota_x} \ar[d]^{ \pi_x}
& {X} \\
{\tilde B} \ar[rr] & & { H_x} }
$$
The bundle $ U_B$ will now contain two distinct distinguished
sections. Let $\sigma_{B,\infty} \subset \iota_B^{-1}(x)$ be
the section which is contracted to a point and choose a component
$\sigma_{B,0} \subset \gamma^{-1}(B)$. In order to prove
claim~\ref{claim1} we have to show that these sections are disjoint.
\begin{figure}
\caption{Reduction to a ruled surface}
\end{figure}
\subsubsection{Estimate for the self-intersection of $\sigma_{B,0}$}
\label{sec:self-intersect_nmbrs}
Let $d$ be the mapping degree of the restricted evaluation
$\iota_B|_{\sigma_{B,0}}$. We will now show that the self-intersection
number of the distinguished section $\sigma_{B,0}$ in the ruled
surface $U_B$ is at most $d$.
\begin{claim}
The natural map
$$
T \iota_B: N_{\sigma_{B,0}, U_B} \to N_{\ell,X}
$$
between the normal bundles is not trivial.
\end{claim}
\begin{proof}
Let $\hat H_x \subset H_x$ be the closed proper subvariety whose
points correspond to curves which are either not smooth or whose
normal bundle is not of the form~(\ref{eq:normal_bdle}). Since
$\ell$ was generically chosen, $\ell$ is not contained in the proper
subvariety $\iota_x(\pi_x^{-1}(\hat H_x)) \subset X$. Consequence:
if $F \subset U_B$ is a general fiber of the morphism $\pi_B$, then
$\iota_B(F)$ is a smooth curve with normal bundle $N_{\iota_B(F),X}
\cong \mbox{$\mathcal{O}$}(1)^{\oplus n-1}$, and the tangent map $T\iota_B$ has rank
two along $F \setminus (F\cap \sigma_{B,\infty})$. In particular,
$\iota_B$ has maximal rank at $F\cap \sigma_{B,0}$, and the claim is
shown.
\end{proof}
We obtain the estimate
\begin{equation}
\label{eq:estimate_for_B}
\sigma_{B,0}^2 = \deg N_{\sigma_{B,0}, U_B} \leq d \cdot
\underbrace{(\text{max. degree of sub-linebundles in $N_{\ell,X}$})}_{=1
\text{ by equation~(\ref{eq:normal_bdle})}} = d.
\end{equation}
\subsubsection{Intersection numbers on the ruled surface}
\label{sec:intersect_nmbrs}
Let $F$ be a fiber of the projection $ \pi_B$ and $H\in \mbox{$\mathbb{P}$}ic X$
be any ample line bundle. We obtain the following list of
intersection numbers.
\begin{align*}
\iota_B^*(H).\sigma_{B,\infty} & = 0 &&
\text{because $\sigma_{B,\infty}$ is contracted to a point}\\
\iota_B^*(H).\sigma_{B,0} & = d \cdot
\iota_B^*(H).F && \text{$ \iota|_F:F\to \ell$ is
birational and $ \iota|_{\sigma_{B,0}}$ is $d:1$}\\
\sigma_{B,0}.F & = 1 && \text{because $\sigma_{B,0}$ is a section}
\end{align*}
Consequence: we may write the following numerical equivalence of
divisors on $ U_B$:
$$
\sigma_{B,0} \equiv \sigma_{B,\infty} + d\cdot F.
$$
We end the proof of claim~\ref{claim1} and of
proposition~\ref{prop:neighborhood} with the calculation
\begin{align*}
\sigma_{B,0}^2 & = \sigma_{B,0}\cdot (\sigma_{B,\infty}+d\cdot F) \\
& = \sigma_{B,0} \cdot \sigma_{B,\infty} + d.
\end{align*}
The inequality~(\ref{eq:estimate_for_B}) shows that $\sigma_{B,0}
\cdot \sigma_{B,\infty} = 0$. The distinguished sections are therefore
disjoint. The proof of proposition~\ref{prop:neighborhood} is
finished.
\subsection{Factorization of $\iota_x$}
To end the proof of theorem~\ref{thm:main_char}, consider the
Stein-factorization of the morphism $ \iota_x$. We obtain a sequence
of morphisms
$$
\xymatrix{ { U_x} \ar@/^/[rr]^{ \iota_x}
\ar[r]_{\alpha} & {Y} \ar[r]_{\beta} & X}
$$
where $\alpha$ contracts the divisor $\sigma_\infty$, and $\beta$
is a finite map.
Since $R^1{\pi_x}_*(\mbox{$\mathcal{O}$}_{U_x}) = 0$, the push-forward of the twisted
ideal sheaf sequence
$$
0 \to \mbox{$\mathcal{O}$}_{U_x} \to \mbox{$\mathcal{O}$}_{U_x}(\sigma_\infty) \to
\underbrace{\mbox{$\mathcal{O}$}_{U_x}(\sigma_\infty)|_{\sigma_\infty}}_{\cong
\mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}}(-1)} \to 0
$$
gives a sequence
$$
0 \to \mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}} \to \mathcal E \to \mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}}(-1) \to 0
$$
on $H_x \cong \mbox{$\mathbb{P}$}_{n-1}$ where $\mathcal E$ is a vector bundle of
rank two and $U_x \cong \mbox{$\mathbb{P}$}(\mathcal E^*)$. Since
$\Ext^1_{\mbox{$\mathbb{P}$}_{n-1}}(\mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}}(-1),\mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}}) = 0$, the bundle
$U_x$ is thus isomorphic to
$$
U_x \cong \mbox{$\mathbb{P}$}(\mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}}(-1)\oplus \mbox{$\mathcal{O}$}_{\mbox{$\mathbb{P}$}_{n-1}}).
$$
Consequence: there exists a morphism $\alpha':U_x \to \mbox{$\mathbb{P}$}_n$ which
contracts $\sigma_\infty$. An application of Zariski's main theorem
shows that $\alpha = \alpha'$. In particular, $Y \cong \mbox{$\mathbb{P}$}_n$.
The fact that $\beta$ is an isomorphism now follows from \cite{Laz84}:
note that the $\beta$-images of the lines through $\alpha(x)$ are the
curves associated with $H_x$. Recall the adjunction formula for a
finite, surjective morphism:
$$
-K_{\mbox{$\mathbb{P}$}_n} = \beta^*(-K_X)+(\text{branch divisor})
$$
To see that $\beta$ is birational, and thus isomorphic, it is
therefore sufficient to realize that for a curve $\ell \in H_x$, we
have
$$
-K_X.\ell = -K_{\mbox{$\mathbb{P}$}_n}.(\text{line}) = n+1.
$$
This ends the proof of theorem~\ref{thm:main_char}.
\subsection{Attributions}
\label{sec:attribut}
The reduction to a ruled surface and the calculation of the
intersection numbers have already been used in \cite{Miy92} (see also
\cite[prop.~V.3.7.5]{K96}) to give a criterion for the existence of
singular rational curves. The estimate~(\ref{eq:estimate_for_B}) is
taken from \cite{CMS00} where a similar estimate is used in a more
complex and technically involved situation to prove a statement
similar to proposition~\ref{prop:neighborhood}.
The calculation that $U_x \cong \mbox{$\mathbb{P}$}(\mbox{$\mathcal{O}$}(-1)\oplus \mbox{$\mathcal{O}$})$ is modelled after
\cite{Mor79}.
\end{document}
|
\begin{document}
\author{Colin McDiarmid \\ Department of Statistics \\ Oxford University}
\title{Connectivity for random graphs from a weighted bridge-addable class}
\begin{abstract}
There has been much recent interest in random graphs sampled uniformly from the
$n$-vertex graphs in a suitable structured class, such as the class of all planar graphs.
Here we consider a general \emph{bridge-addable} class ${\mathcal A}$ of graphs --
if a graph is in ${\mathcal A}$ and $u$ and $v$ are vertices in different components
then the graph obtained by adding an edge (bridge) between $u$ and $v$ must also be in ${\mathcal A}$.
Various bounds are known concerning the probability of a random graph from such a
class being connected or having many components, sometimes under the additional assumption that bridges
can be deleted as well as added.
Here we improve or amplify or generalise these bounds (though we do not resolve the main conjecture).
For example, we see that the expected number of vertices left when we remove a largest component is less than $2$.
The generalisation is to consider `weighted' random graphs,
sampled from a suitable more general distribution,
where the focus is on the bridges.
\end{abstract}
\section{Introduction}
\label{sec.intro}
A \emph{bridge} in a graph is an edge $e$ such that the graph $G \setminus e$ obtained by deleting $e$ has
one more component.
A class ${\mathcal A}$ of graphs is {\em bridge-addable} if for all graphs $G$ in ${\mathcal A}$ and all vertices
$u$ and $v$ in distinct connected components of $G$, the graph $G +uv$ obtained by adding an
edge between $u$ and $v$ is also in ${\mathcal A}$.
The concept of being bridge-addable (or {`weakly addable'}) was introduced in
McDiarmid, Steger and Welsh~\cite{msw05} in the course of studying random planar graphs.
(For an overview on random planar graphs see the survey paper~\cite{gn09b} of Gim\'enez and Noy.)
Examples of bridge-addable classes of graphs include forests, series-parallel graphs,
planar graphs, and indeed graphs embeddable on any given surface.
In the rest of this section, we first describe what is known concerning connectedness and components for
random graphs sampled uniformly from a bridge-addable class;
then describe the new results here for such random graphs; and finally briefly discuss random rooted graphs.
Random graphs from a weighted class are introduced in Section~\ref{sec.rwg}; and new general results are presented,
which extend the results on uniform random graphs.
After that come the proofs, first for non-asymptotic results then for asymptotic results and finally for the rooted case.
\noindent
{\em Background results for uniform random graphs}
If ${\mathcal A}$ is finite and non-empty we write $R \in_{u} {\mathcal A}$ to mean that $R$ is a random graph sampled
{\bf u}niformly from ${\mathcal A}$. (We consider graphs to be labelled.)
The basic result on connectivity for a bridge-addable set of graphs is
Theorem 2.2 of~\cite{msw05}:
if ${\mathcal A}$ is a finite bridge-addable set of graphs and $R \in_u {\mathcal A}$ then
\begin{equation} \label{eqn.b-a-conn}
\mathbb P(R \mbox{ is connected}) \geq e^{-1} .
\end{equation}
Indeed, a stronger result is given in~\cite{msw05}, concerning the number $\kappa(R)$ of components of $R$;
namely that $\kappa(R)$ is stochastically at most $1+\mbox{\rm Po}(1)$
where $\mbox{\rm Po}(\lambda)$ denotes a Poisson-distributed random variable with mean $\lambda$,
that is
\begin{equation} \label{eqn.b-a-comps}
\kappa(R) \leq_s 1+ \mbox{\rm Po}(1).
\end{equation}
(Recall that $X \leq_s Y$ means that $\mathbb P(X \leq t) \geq \mathbb P(Y \leq t)$ for each $t$.)
Note that from~(\ref{eqn.b-a-comps}) we have
\[ \mathbb P(R \mbox{ is connected}) = \mathbb P(\kappa(R) \leq 1) \geq \mathbb P(\mbox{\rm Po}(1) \leq 0) = e^{-1}\]
and we obtain~(\ref{eqn.b-a-conn}).
Also $\mathbb E[\kappa(R)] \leq 2$ (see~(\ref{eqn.exp}) below).
For any set ${\mathcal A}$ of graphs, we let ${\mathcal A}_n$ denote the set of graphs in ${\mathcal A}$ on vertex set $[n]:=\{1,\ldots,n\}$;
and we write $R_n \in_{u} {\mathcal A}$ to mean that $R_n$ is uniformly distributed over ${\mathcal A}_n$.
We always assume that ${\mathcal A}_n$ is non-empty at least for large $n$.
The class ${\mathcal F}$ of forests is of course bridge-addable. For $R_n \in_u {\mathcal F}$ a result of R\'enyi~\cite{renyi59} shows that
$\mathbb P(R_n \mbox{ is connected}) \to e^{-\frac12}$ as $n \to \infty$, and indeed
$\kappa(R_n)$ converges in distribution to $1+ \mbox{\rm Po}(\frac12)$.
For background on random trees and forests see the books~\cite{drmota2009,moon70}.
It was noted in~\cite{msw06} that plausibly forests form the `least connected' bridge-addable set of graphs,
and in particular it should be possible to improve the bound in~(\ref{eqn.b-a-conn}) asymptotically.
\begin{conjecture} \cite{msw06} \label{conj.b-add}
If ${\mathcal A}$ is bridge-addable and $R_n \in_u {\mathcal A}$ then
\begin{equation} \label{eqn.b-a_conj}
\liminf_{n \to \infty} \mathbb P(R_n \mbox{ is connected}) \geq e^{-\frac12}.
\end{equation}
\end{conjecture}
Balister, Bollob{\'a}s and Gerke~\cite{bbg07,bbg10} showed that inequality~(\ref{eqn.b-a_conj}) holds
if we replace $e^{-\frac12} \approx 0.6065$ by the weaker bound $e^{-0.7983} \approx 0.4542$.
This result has recently been improved by Norine~\cite{sn2012}.
Recently Addario-Berry, McDiarmid and Reed~\cite{amr2011}, and Kang and Panagiotou~\cite{kp2011},
separately showed that~(\ref{eqn.b-a_conj}) holds with the desired lower
bound $e^{-\frac12}$, if we suitably strengthen the condition on ${\mathcal A}$.
Call a set ${\mathcal A}$ of graphs {\em bridge-alterable} if it is bridge-addable and also closed under
deleting bridges. Thus ${\mathcal A}$ is bridge-alterable exactly when it satisfies the condition that,
for each graph $G$ and bridge $e$ in $G$, the graph $G$ is in ${\mathcal A}$ if and only if $G \setminus e$ is in ${\mathcal A}$.
Observe that each of the bridge-addable classes of graphs mentioned above is in fact bridge-alterable.
If ${\mathcal A}$ is a bridge-alterable set of graphs and $R_n \in_u {\mathcal A}$ then~\cite{amr2011,kp2011}
\begin{equation} \label{eqn.b-alt}
\liminf_{n \to \infty} \mathbb P(R_n \mbox{ is connected}) \geq e^{-\frac12}.
\end{equation}
Since the class ${\mathcal F}$ of forests is bridge-alterable, this result is best-possible for a bridge-alterable
set of graphs. The full version of Conjecture~\ref{conj.b-add} (for a bridge-addable set) is still open.
Next let us consider the `fragment' of a graph $G$:
we let $\mbox{\rm frag}(G)$ be the number of vertices remaining when we remove a largest component.
For the class ${\mathcal F}$ of forests, if $R_n \in_u {\mathcal F}$ then
\begin{equation} \label{eqn.forests-frag}
\mathbb E[\mbox{\rm frag}(R_n)] \to 1 \;\; \mbox{ as } n \to \infty.
\end{equation}
It was shown in~\cite{cmcd09} that,
if ${\mathcal A}$ is a bridge-addable class of graphs which satisfies the further condition
that it is closed under forming minors (and so ${\mathcal A}$ is bridge-alterable),
then there is a constant $c=c({\mathcal A})$ such that, for $R_n \in_u {\mathcal A}$
\begin{equation} \label{eqn.b-alt-frag1}
\mathbb E[\mbox{\rm frag}(R_n)] \leq c \;\; \mbox{ for all } n.
\end{equation}
\noindent
{\em New results for uniform random graphs}
In the present paper we much improve inequality~(\ref{eqn.b-alt-frag1}) and extend all the above results to more
general distributions (similar to distributions considered in~\cite{cmcd-rwg2012}),
though we continue to consider uniform random graphs in this section.
(All the results presented here are special cases of results discussed in the following section.)
In particular we see that, if ${\mathcal A}$ is \emph{any} bridge-addable class of graphs
(with no further conditions) and $R_n \in_u {\mathcal A}$, then
\begin{equation} \label{eqn.b-add-frag}
\mathbb E[\mbox{\rm frag}(R_n)] < 2 \;\; \mbox{ for all } n;
\end{equation}
and if ${\mathcal A}$ is bridge-alterable then
\begin{equation} \label{eqn.b-alt-frag2}
\limsup_{n \to \infty} \mathbb E[\mbox{\rm frag}(R_n)] \leq 1.
\end{equation}
Observe from the limiting result~(\ref{eqn.forests-frag}) that this last bound is optimal for a bridge-alterable
set of graphs, but perhaps it holds for any bridge-addable set of graphs -- see Section~\ref{sec.concl}.
We also strengthen inequality~(\ref{eqn.b-alt}) in much the same way that~(\ref{eqn.b-a-comps})
strengthens~(\ref{eqn.b-a-conn}).
Given non-negative integer-valued random variables $X_1,X_2,\ldots$ and $Y$, we say that
$X_n$ is \emph{stochastically at most} $Y$ \emph{asymptotically}, and write $X_n \leq_s Y$ asymptotically,
if for each fixed $t \geq 0$,
\[ \limsup_{n \to \infty} \mathbb P(X_n \geq t) \leq \mathbb P(Y \geq t). \]
Our strengthening of~(\ref{eqn.b-alt}) is that, if ${\mathcal A}$ is bridge-alterable and $R_n \in_{u} {\mathcal A}$, then
\begin{equation} \label{eqn.b-alt-comps1}
\kappa(R_n) \leq_s 1+ \mbox{\rm Po}(\frac{1}{2}) \;\; \mbox{ asymptotically }.
\end{equation}
\noindent
{\em Random rooted graphs}
It may be enlightening to consider rooted graphs.
We say that a graph is {\em rooted} if each component has a specified root vertex.
We will use the notation $G^{o}$ for a rooted graph;
and given a class ${\mathcal A}$ of graphs we write ${\mathcal A}^{o}$ for the corresponding class of rooted graphs.
Thus a connected graph in ${\mathcal A}_n$ yields $n$ rooted graphs in the corresponding set ${\mathcal A}^{o}_n$;
a graph in ${\mathcal A}_n$ which has two components, with respectively $a$ and $n-a$ vertices, yields $a(n-a)$ rooted graphs
in ${\mathcal A}^{o}_n$; and so on.
We use the notations $R^{o} \in_{u} {\mathcal A}^{o}$ and $R_n^{o} \in_{u} {\mathcal A}^{o}$ as before, to indicate that
$R^{o}$ is sampled uniformly from ${\mathcal A}^{o}$ (assumed finite) and $R_n^{o}$ is uniformly sampled from ${\mathcal A}^{o}_n$.
Now let ${\mathcal A}$ be a finite bridge-addable set of graphs, and let $R^{o} \in_{u} {\mathcal A}^{o}$.
Since a graph with several non-singleton components generates many rooted graphs,
it is not immediately clear to what extent the earlier results on connectedness and components will survive.
We will see that the analogues of~(\ref{eqn.b-a-conn}) and~(\ref{eqn.b-a-comps}) both hold:
\begin{equation} \label{eqn.rooted-conn}
\mathbb P(R^{o} \mbox{ is connected}) \geq e^{-1}
\end{equation}
and indeed
\begin{equation} \label{eqn.rooted-comps}
\kappa(R^{o}) \leq_s 1+ \mbox{\rm Po}(1).
\end{equation}
Now consider the class ${\mathcal F}$ of forests, and let $R_n^{o} \in_{u} {\mathcal F}^{o}$.
Then as $n \to \infty$
\[ \mathbb P(R_n^{o} \mbox{ is connected}) = (\frac{n}{n+1})^{n-1} \to e^{-1}\]
and indeed $\kappa(R_n^{o})$ converges in distribution to $1+\mbox{\rm Po}(1)$.
Thus~(\ref{eqn.rooted-conn}) and~(\ref{eqn.rooted-comps}) are best possible, in contrast to the unrooted case.
Further,
$\mathbb E[\mbox{\rm frag}(R_n^{o})] \to \infty$ as $n \to \infty$,
so there is no analogue for~(\ref{eqn.b-add-frag}) or~(\ref{eqn.b-alt-frag2}) for rooted graphs.
In all these results the crucial feature is the behaviour of the bridges.
We shall bring this out by singling out bridges in the more general distributions we next
introduce for our random graphs.
\section{Random weighted graphs} \label{sec.rwg}
Given a graph $G$ with vertex set $V$, let $e(G)$ denote the number of edges,
let $e_0(G)$ denote the number of bridges (edges in 0 cycles) and let
$\tilde{G}$ denote the graph on $V$ obtained from $G$ by removing all bridges.
Thus $\kappa(\tilde{G})= \kappa(G)+e_0(G)$.
Let $\lambda >0$ and $\nu >0$, and let $f(G) \geq 0$ for each bridge-free graph $G$.
We call $(\lambda,\nu,f)$ a \emph{weighting} and define the \emph{weight} $\tau(G)$ of $G$
by setting
\begin{equation} \label{eqn.probdef}
\tau(G) = f(\tilde{G}) \, \lambda^{e_0(G)} \nu^{\kappa(G)}.
\end{equation}
Given a set ${\mathcal A}$ of graphs, let $\tau({\mathcal A})$ denote $\sum_{G \in {\mathcal A}} \tau(G)$.
When $0<\tau({\mathcal A}) < \infty$, we let $R \in_{\tau} {\mathcal A}$ mean that $R$ is a random graph sampled from ${\mathcal A}$ with
$\mathbb P(R=G) = \tau(G)/ \tau({\mathcal A})$ for each graph $G \in {\mathcal A}$. Similarly
$R_n \in_{\tau} {\mathcal A}$ means that $R_n$ is a random graph sampled from ${\mathcal A}_n$ with
$\mathbb P(R=G) = \tau(G)/ \tau({\mathcal A}_n)$ for each graph $G \in {\mathcal A}_n$ (and we assume that $\tau({\mathcal A}_n)>0$).
In the special case when $\lambda=\nu=1$ and $f(G) \equiv 1$, clearly $R \in_{\tau} {\mathcal A}$ and
$R_n \in_{\tau} {\mathcal A}$ mean the same as $R \in_{u} {\mathcal A}$ and $R_n \in_{u} {\mathcal A}$ respectively.
When $f(G)\equiv \lambda^{e(G)}$ we have $\tau(G) = \lambda^{e(G)} \nu^{\kappa(G)}$,
and we do not single out bridges.
Recall that the classical Erd\H{o}s-R\'enyi (or binomial) random graph $G_{n,p}$ has vertex set $[n]$,
and the ${n \choose 2}$ possible edges are included independently with probability $p$,
where $0<p<1$. Assuming that ${\mathcal A}_n$ is non-empty, for each $H \in {\mathcal A}_n$ we have
\[
\mathbb P(G_{n,p}=H | G_{n,p} \in {\mathcal A} ) =
\frac{p^{e(H)}(1-p)^{{n \choose 2}-e(H)}}
{\sum_{G \in {\mathcal A}_n}p^{e(G)}(1-p)^{{n \choose 2}-e(G)}}
=
\frac{\lambda^{e(H)}}
{\sum_{G \in {\mathcal A}_n}\lambda^{e(G)}}
\]
where $\lambda=p/(1-p)$.
Now consider the more general random-cluster model (see for example~\cite{grimmett06}),
where we are also given $\nu>0$ (we use $\nu$ rather than $q$), and
the random graph $R_n$ takes as values the graphs $H$ on $[n]$, with
\[
\mathbb P(R_n=H) \propto p^{e(H)}(1-p)^{{n \choose 2}-e(H)} \nu^{\kappa(H)}.
\]
Then for each $H \in {\mathcal A}_n$
\[
\mathbb P(R_n=H | R_n \in {\mathcal A} )
=
\frac{\lambda^{e(H)}\nu^{\kappa(H)}}
{\sum_{G \in {\mathcal A}_n}\lambda^{e(G)} \nu^{\kappa(G)}} = \frac{\tau(H)}{\tau({\mathcal A}_n)}
\]
where $\tau(H)= \lambda^{e(H)} \nu^{\kappa(H)}$, as we met above.
Suppose now that we are given a set ${\mathcal A}$ of graphs and a weighting $(\lambda,\nu,f)$,
and that $0<\tau({\mathcal A})<\infty$ or $\tau({\mathcal A}_n) >0$ as appropriate.
We generalise and sometimes amplify all the results presented in the last section.
For the asymptotic results
we need to assume that ${\mathcal A}$ is bridge-alterable rather than just bridge-addable.
We first state two non-asymptotic results; then present some results on random forests,
and consider asymptotic results; and finally we consider random rooted graphs.
The first result generalises the inequalities~(\ref{eqn.b-a-conn}) and~(\ref{eqn.b-a-comps}), and
is used several times in~\cite{cmcd-rwg2012}; and the second result generalises inequality~(\ref{eqn.b-add-frag}).
\begin{theorem} \label{prop.tauconn}
If ${\mathcal A}$ is finite and bridge-addable and $R \in_{\tau} {\mathcal A}$, then
\[ \kappa(R) \leq_s 1+ \mbox{\rm Po}(\nu/\lambda); \]
and in particular $\; \mathbb P(R \mbox{ is connected}) \geq e^{-\nu/\lambda}$, and
$\mathbb E[\kappa(R)] \leq 1+ \nu/\lambda$.
\end{theorem}
\begin{theorem} \label{prop.fragbound}
If ${\mathcal A}$ is finite and bridge-addable and $R \in_{\tau} {\mathcal A}$, then
\[ \mathbb E[\mbox{\rm frag}(R)] < \frac{2\nu}{\lambda}. \]
\end{theorem}
Before we introduce the asymptotic results for a general bridge-alterable set of graphs,
let us record some results on random forests $R_n \in_{\tau} {\mathcal F}$ which generalise the results
mentioned earlier for uniform random forests $R_n \in_{u} {\mathcal F}$ --
see for example~\cite{cmcd-rwg2012} where these results are proved in a general setting.
Observe that $\tau(F)= f(\bar{K_n}) (\lambda/\nu)^{e(F)} \nu^{n}$ for each $F \in {\mathcal F}_n$
(where $\bar{K_n}$ denotes the graph on $[n]$ with no edges):
thus $\tau(F) \propto (\lambda/\nu)^{e(F)}$,
and the only aspect of the weighting that matters is the ratio $\lambda/\nu$.
\begin{theorem}\label{prop.asympt-forests}
Consider $R_n \in_{\tau} {\mathcal F}$, where ${\mathcal F}$ is the class of forests.
Then $\; \kappa(R_n)$ converges in distribution to $1+ \mbox{\rm Po}(\frac{\nu}{2\lambda})$, so
$\mathbb P(R_n \mbox{ is connected}) \to e^{-\frac{\nu}{2 \lambda}}$;
$\mathbb E[\kappa(R_n)] \to 1 + \frac{\nu}{2 \lambda}$ as $n \to \infty$; and
$\mathbb E[ \mbox{\rm frag}(R_n)] \to \frac{\nu}{\lambda}$ as $n \to \infty$.
\end{theorem}
\noindent
Now we consider asymptotic results for a bridge-alterable set of graphs.
These results generalise and amplify inequalities~(\ref{eqn.b-alt}) and~(\ref{eqn.b-alt-frag2});
and Theorem~\ref{prop.asympt-forests} shows that each of inequalities~(\ref{eqn.asympt-comps})
to~(\ref{eqn.asympt-frag}) is best-possible for a bridge-alterable class of graphs.
\begin{theorem}\label{prop.asympt-conn}
Suppose that ${\mathcal A}$ is bridge-alterable
and $R_n \in_{\tau} {\mathcal A}$. Then
\begin{equation} \label{eqn.asympt-comps}
\kappa(R_n) \leq_s 1+ \mbox{\rm Po}(\frac{\nu}{2\lambda}) \;\; \mbox{ asymptotically},
\end{equation}
and so in particular
\begin{equation} \label{eqn.asympt-conn}
\liminf_{n \to \infty} \mathbb P(R_n \mbox{ is connected}) \geq e^{-\frac{\nu}{2\lambda}};
\end{equation}
and
\begin{equation} \label{eqn.asympt-Ekappa}
\limsup_{n \to \infty} \mathbb E[\kappa(R_n)] \leq 1+ \frac{\nu}{2\lambda}.
\end{equation}
\end{theorem}
\begin{theorem} \label{prop.fragbound2}
If ${\mathcal A}$ is bridge-alterable
and $R_n \in_{\tau} {\mathcal A}$, then
\begin{equation} \label{eqn.asympt-frag}
\limsup_{n \to \infty} \mathbb E[\mbox{\rm frag}(R_n)] \leq \frac{\nu}{\lambda}.
\end{equation}
\end{theorem}
Now consider rooted graphs, starting with rooted forests. Recall that ${\mathcal F}^{o}$ denotes the class of rooted forests.
\begin{theorem}\label{prop.rooted-forests}
Consider $R^{o}_n \in_{\tau} {\mathcal F}^{o}$.
As $n \to \infty$,
$\kappa(R^{o}_n)$ converges in distribution to $1+\mbox{\rm Po}(\frac{\nu}{\lambda})$;
and so
$\mathbb P(R_n^{o} \mbox{ is connected}) \to e^{-\frac{\nu}{\lambda}}$, and
$\mathbb E[\kappa(R_n^{o})] \to 1 + \frac{\nu}{\lambda}$ as $n \to \infty$.
In contrast, $\mathbb E[ \mbox{\rm frag}(R_n^{o})] \to \infty$ as $n \to \infty$.
\end{theorem}
Our final result here is non-asymptotic and may be compared with
Theorem~\ref{prop.tauconn}.
It generalises~(\ref{eqn.rooted-conn}) and~(\ref{eqn.rooted-comps}).
Theorem~\ref{prop.rooted-forests} on rooted forests shows that it is best possible,
and that there is no rooted-graph analogue for Theorem~\ref{prop.fragbound} (which bounds $\mathbb E[ \mbox{\rm frag}(R_n)]$).
\begin{theorem} \label{prop.rooted}
Let ${\mathcal A}$ be finite and bridge-addable, and let $R^{o} \in_{\tau} {\mathcal A}^{o}$.
Then
\[ \kappa(R^{o}) \leq_s 1+ \mbox{\rm Po}(\nu/\lambda); \]
and in particular $\; \mathbb P(R^{o} \mbox{ is connected}) \geq e^{-\nu/\lambda}$, and
$\mathbb E[\kappa(R^{o})] \leq 1+ \nu/\lambda$.
\end{theorem}
\section{Proofs for non-asymptotic results}
\label{sec.proof-non-a}
In this section we prove the non-asymptotic results above, namely Theorems~\ref{prop.tauconn}
and~\ref{prop.fragbound}, except that we leave proofs for rooted graphs to Section~\ref{sec.rooted}.
Given a graph $G$, let $\rm Bridge(G)$ denote the set of bridges, so that $|\rm Bridge(G)|=e_0(G)$;
and let $\rm Cross(G)$ denote the set of `non-edges' or `possible edges' between components, and let
$\rm cross(G)=|\rm Cross(G)|$.
We start with two basic lemmas about graphs.
The first is just an observation, and needs no proof.
\begin{lemma} \label{lem.basic1}
Let the set ${\mathcal A}$ of graphs be bridge-addable.
If $G \in {\mathcal A}$ and $e \in \rm Cross(G)$,
then the graph $G' = G +e$ obtained from $G$ by adding $e$ is in ${\mathcal A}$ and $e$ is a bridge of $G'$;
and $\tau(G) = \tau(G') \cdot (\nu/\lambda)$.
\end{lemma}
\begin{lemma} \label{lem.basic2} \cite{msw05}
If the graph $G$ has $n$ vertices, then $e_0(G) \leq n - \kappa(G)$;
and if $\kappa(G)=k+1$ then $\rm cross(G) \geq k(n-k) + {k \choose 2} \geq k(n-k)$.
\end{lemma}
\begin{proof}
Observe that $\kappa(G) + e_0(G) = \kappa(\tilde{G}) \leq n$, so $e_0(G) \leq n-\kappa(G)$.
Now consider the second inequality, and assume that $\kappa(G)=k+1$.
Since if $0<|X| \leq |Y|$ then $|X| |Y| >(|X|-1)(|Y|+1)$, we see that $\rm cross(G)$ is minimised when
$G$ consists of $k$ singleton components and one other component.
\end{proof}
Now let us recall a well-known elementary fact.
Let $X$ and $Y$ be random variables taking non-negative integer values, and suppose that $X \leq_s Y$: then
\begin{equation} \label{eqn.exp}
\mathbb E X \leq \mathbb E Y.
\end{equation}
To prove this, note that
\[ \mathbb E X = \sum_{t \geq 1} \mathbb P(X \geq t) \leq \sum_{t \geq 1} \mathbb P(Y \geq t) = \mathbb E Y. \]
The next two lemmas concern bounding a random variable by a Poisson-distributed random variable.
The first lemma is stated in a general form which quickly gives the second and which is
suitable also for using later.
\begin{lemma} \label{lem.po1}
Let the random variable $X$ take non-negative integer values.
Let $\alpha>0$ and let $Y \sim \mbox{\rm Po}(\alpha)$. Let $k_0$ be a positive integer, and suppose that
\[ \mathbb P(X=k+1) \leq \frac{\alpha}{k+1} \, \mathbb P(X=k) \;\; \mbox{ for each } k=0,1,\ldots,k_0-1. \]
Then
\[ \mathbb P(k_0 \geq X \geq k) \leq \mathbb P(Y \geq k) \;\; \mbox{ for each } k=0,1,\ldots,k_0 .\]
\end{lemma}
\begin{proof}
Observe that, for each $k=0, 1, \ldots, k_0-1$ we have
\[ \mathbb P(X=k+i) \leq \frac{\alpha^i}{(k+i)_i} \, \mathbb P(X=k) \;\; \mbox{ for each } i=1,\ldots,k_0 -k. \]
Clearly $\mathbb P(k_0 \geq X \geq 0) \leq 1 = \mathbb P(Y \geq 0)$.
Let $k_0 > k \geq 0$ and suppose that $\mathbb P(k_0 \geq X \geq k) \leq \mathbb P(Y \geq k)$.
We want to show that $\mathbb P(k_0 \geq X \geq k+1) \leq \mathbb P(Y \geq k+1)$
to complete the proof by induction. This is immediate if
$\mathbb P(X = k) \geq \mathbb P(Y = k)$, so assume that this is not the case. Then
\begin{eqnarray*}
\mathbb P(k_0 \geq X \geq k+1)
&=&
\sum_{i=1}^{k_0-k} \mathbb P(X=k+i) \\
&\leq &
\mathbb P(X=k) \sum_{i \geq 1} \frac{\alpha^i}{(k+i)_i} \\
& \leq &
\mathbb P(Y=k) \sum_{i \geq 1} \frac{\alpha^i}{(k+i)_i} \\
& = &
\mathbb P(Y \geq k+1)
\end{eqnarray*}
as required.
\end{proof}
\noindent
From the last lemma with $k_0$ large we obtain
\begin{lemma} \label{lem.po2} (see~\cite{msw05})
Let the random variable $X$ take non-negative integer values.
Let $\alpha>0$ and suppose that
\[ \mathbb P(X=k+1) \leq \frac{\alpha}{k+1} \, \mathbb P(X=k) \;\; \mbox{ for each } k=0,1,2,\ldots \]
Then $X \leq_s Y$
where $Y \sim \mbox{\rm Po}(\alpha)$.
\end{lemma}
\begin{proof}
Fix $k \geq 0$. Let $\varepsilon>0$ and choose $k_0 \geq k$ such that $\mathbb P(X>k_0)<\varepsilon$.
By Lemma~\ref{lem.po1}
\[ \mathbb P(X \geq k) = \mathbb P(k_0 \geq X \geq k) + \mathbb P(X>k_0) \leq \mathbb P(Y \geq k)+ \varepsilon,\]
and thus $ \mathbb P(X \geq k) \leq \mathbb P(Y \geq k)$.
\end{proof}
\begin{proofof} {Theorem~\ref{prop.tauconn}}
It suffices to assume that ${\mathcal A}$ is ${\mathcal A}_n$ for some $n$, since the sets
${\mathcal A}_n$ are disjoint. Let ${\mathcal A}_n^{k}$ denote the set of graphs in ${\mathcal A}_n$ with $k$ components.
Let $1 \leq k \leq n-1$.
By Lemmas~\ref{lem.basic1} and~\ref{lem.basic2}
\begin{eqnarray*}
\tau({\mathcal A}_n^{k}) \cdot (n-k)
& \geq &
\sum_{G,e} \{ \tau(G): G \in {\mathcal A}_n^{k}, e \in \rm Bridge(G)\}\\
& \geq &
\sum_{H,e} \{ \tau(H): H \in {\mathcal A}_n^{k+1}, e \in \rm Cross(H) \} \cdot (\lambda/\nu)\\
& \geq &
\tau({\mathcal A}_n^{k+1}) \cdot k(n-k) \cdot (\lambda/\nu).
\end{eqnarray*}
Therefore
\[ \tau({\mathcal A}_n^{k+1}) \leq \frac{\nu}{\lambda k} \ \tau({\mathcal A}_n^{k}). \]
Thus for $R \in_{\tau} {\mathcal A}$
\[
\mathbb P(\kappa(R) = k+1) \leq \frac{\nu}{\lambda k} \mathbb P(\kappa(R) = k) \;\; \mbox{ for each } k=1,2,\ldots.
\]
and so, writing $X=\kappa(R)-1$
\[
\mathbb P(X = k+1) \leq \frac{\nu}{\lambda (k+1)} \mathbb P(X = k) \;\; \mbox{ for each } k=0,1,2,\ldots.
\]
Hence if $\alpha= \nu/\lambda$ and $Y \sim\mbox{\rm Po}(\alpha)$ we have $X \leq_s Y$ by Lemma~\ref{lem.po2}.
Finally,
by~(\ref{eqn.exp}), $\; \mathbb E[\kappa(R)] = 1+\mathbb E[X] \leq 1+ \mathbb E[Y] = 1 + \nu/\lambda$.
\end{proofof}
To prove Theorem~\ref{prop.fragbound} we use two lemmas. The first is another basic lemma on graphs.
\begin{lemma} \label{lem.basic3} \cite{cmcd08}
If the graph $G$ has $n$ vertices, then $\rm cross(G) \geq (n/2) \cdot \mbox{\rm frag}(G)$.
\end{lemma}
\begin{proof}
An easy convexity argument shows that if $x,x_1,x_2,\ldots$ are
positive integers such that each $x_i \leq x$ and $\sum_i x_i=n$
then $\sum_i {x_i \choose 2} \leq \frac12 n (x-1)$.
For, if $n=ax+y$ where $a \geq 0$ and $0 \leq y \leq x-1$ are integers, then
\[ \sum_i {x_i \choose 2} \leq a {x \choose 2} + {y \choose 2}
\leq a {x \choose 2} + \frac{y(x-1)}{2} = \frac12 n (x-1).\]
Hence if we denote the maximum number of vertices in a component by $x$,
so that $\mbox{\rm frag}(G) = n-x$, then
\[\rm cross(G) \geq {n \choose 2} - \frac12 n (x-1)
= \frac12 n (n-x) = \frac12 n \ \mbox{\rm frag}(G)\]
as required.
\end{proof}
The next lemma is phrased generally so that it can also be used later.
\begin{lemma} \label{lem.frag1}
Let ${\mathcal A}={\mathcal A}_n$ be bridge-addable, and let $R \in_{\tau} {\mathcal A}$.
Let $\beta>0$ and assume that $\rm cross(G) \geq \beta n \cdot \mbox{\rm frag}(G)$ for each $G \in {\mathcal A}$.
Then
\[ \mathbb E [\mbox{\rm frag}(R)] \leq \frac{\nu}{\beta \lambda}. \]
\end{lemma}
\begin{proof}
Using Lemma~\ref{lem.basic1}
\begin{eqnarray*}
&& \beta n \, \sum_{G \in {\mathcal A}} \tau(G) \, \mbox{\rm frag}(G)\\
& \leq &
\sum_{G, e} \{ \tau(G): G \in {\mathcal A}, e \in \rm Cross(G) \} \\
& \leq &
\frac{\nu}{\lambda} \ \sum_{G', e} \{ \tau(G'): G' \in {\mathcal A}, e \in \rm Bridge(G')\}\\
& = &
\frac{\nu}{\lambda} \ \sum_{G \in {\mathcal A}} \tau(G) \cdot e_0(G).
\end{eqnarray*}
Thus
\[
\mathbb E[\mbox{\rm frag}(R)] \leq \frac{1}{ \beta n} \, \frac{\nu}{\lambda} \, \mathbb E[e_0(R)] < \frac{\nu}{\beta \lambda}
\]
as required.
\end{proof}
\begin{proofof} {Theorem~\ref{prop.fragbound}}
As in the proof of Theorem~\ref{prop.tauconn}
it suffices to assume that ${\mathcal A}$ is ${\mathcal A}_n$ for some $n$.
By Lemma~\ref{lem.basic3}, we may now complete the proof using Lemma~\ref{lem.frag1} with $\beta= \frac12$.
\end{proofof}
\section{Proofs of asymptotic results}
\label{sec.asympt1}
In this section we prove the asymptotic results
Theorems~\ref{prop.asympt-conn} and~\ref{prop.fragbound2}.
Assume throughout that ${\mathcal A}$ is bridge-alterable.
Let us focus first on
Theorem~\ref{prop.asympt-conn}, and in particular on~(\ref{eqn.asympt-comps}).
The proof goes roughly as follows.
We first see that it suffices to prove inequality~(\ref{eqn.impliesmain3}) below concerning $\kappa(R^{\cf})$,
where $R^{\cf}$ is a random forest on $[n]$ which we define below, with probabilities
depending on degrees.
Then we use a key result from~\cite{amr2011} which tells us about average sizes of components
of $R^{\ct}$ with an edge deleted, where $R^{\ct}$ is $R^{\cf}$ conditioned on being a tree.
We find that $\mathbb P(\kappa(R^{\cf})=2)$ is suitably smaller than $\mathbb P(\kappa(R^{\cf})=1)$;
and from this we deduce that in general $\mathbb P(\kappa(R^{\cf})=k+1)$ is suitably smaller than $\mathbb P(\kappa(R^{\cf})=k)$,
and so we can use Lemma~\ref{lem.po1}.
Now for more details.
We may define an equivalence relation on graphs by setting $G \sim H$ if $\tilde{G}=\tilde{H}$.
Let $[G]$ denote the equivalence class of $G$, that is, the set of graphs $H$ such that
$\tilde{H}=\tilde{G}$.
Let $W$ be a positive integer.
Since ${\mathcal A}$ is bridge-alterable, if $G \in {\mathcal A}_W$ then $[G] \subseteq {\mathcal A}_W$.
Thus ${\mathcal A}_W$ can be written as a disjoint union of equivalence classes.
To prove~(\ref{eqn.asympt-comps}),
we may fix a (large) positive integer $W$,
a bridgeless graph $G_0 \in {\mathcal A}_W$, an integer $t \geq 1$ and real $\eps>0$; and prove that,
if $R^{G_0} \in_{\tau} [G_0]$ then
\begin{equation} \label{eqn.impliesmain2}
\mathbb P(\kappa(R^{G_0}) \geq t+1) \leq \mathbb P(\mbox{\rm Po}(\frac{\nu}{2\lambda}) \geq t) + \eps
\end{equation}
if $W$ is sufficiently large.
Since we are now restricting attention to $[G_0]$
we may assume that $f(G_0)= 1$. Denote $\kappa(G_0)$ by $n$: we may assume that $n \geq 2$
(for otherwise the connected graph $G_0$ is the only graph in $[G_0]$).
Write $C_1,\ldots,C_n$ for the components of $G_0$, and let $w_i=|V(C_i)|$
for $i=1,\ldots,n$, so that $W = \sum_{i=1}^n w_i$.
We use the vector $w=(w_1,\ldots,w_n)$ together with the weighting $\tau$ to define a probability measure on the
set ${\mathcal F}_n$ of forests on~$[n]$.
Given $F \in {\mathcal F}_n$, let
\[ \mass{F} = \prod_{i=1}^n w_i^{d_F(i)} \cdot \lambda^{e(F)} \nu^{\kappa(F)} \]
where $d_F(i)$ denotes the degree of vertex $i$ in the forest $F$.
Also, let $K = \sum_{F \in {\mathcal F}_n} \mass{F}$, and let $R^{\cf}$ be a random element of ${\mathcal F}_n$ with
$\mathbb P(R^{\cf} =F) = \mass{F}/K$ for each $F \in {\mathcal F}_n$.
Corresponding to Lemma 2.3 of~\cite{amr2011}, we have
\begin{equation} \label{eqn.stocheq}
\kappa(R^{G_0}) =_s \kappa(R^{\cf}).
\end{equation}
\begin{proofof}{(\ref{eqn.stocheq})}
Denote $[G_0]$ by ${\mathcal B}$.
Given $H \in {\mathcal B}$, let $g(H)$ be the graph obtained from $H$ by contracting
each $C_i$ to the single vertex $i$.
Then $g(H)\in {\mathcal F}_n$ and $\kappa(H)= \kappa(g(H))$. Also,
for each $F \in {\mathcal F}_n$, the set $g^{-1}(F)$ has cardinality
$\prod_{i=1}^n w_i^{d_F(i)}$, and so $\tau(g^{-1}(F))= \mass{F}$.
It follows that
\begin{eqnarray*}
\mathbb P(\kappa(R^{G_0})=k) & = & \frac{\tau(\{H\in{\mathcal B} : \kappa(H)=k \})}{\tau({\mathcal B})}\\
& = & \frac{ \sum_{\{F\in{\mathcal F}_n: \kappa(F)=k \}}\tau(g^{-1}(F))}{\sum_{F\in{\mathcal F}_n}\tau(g^{-1}(F))} \\
& = & \frac{ \sum_{ \{ F\in{\mathcal F}_n: \kappa(F)=k \} } \mass{F} }{K} \\
& = & \mathbb P(\kappa(R^{\cf})=k)
\end{eqnarray*}
as required.
\end{proofof}
Now that we have established~(\ref{eqn.stocheq}), in order to prove~(\ref{eqn.impliesmain2}) we may show that
\begin{equation} \label{eqn.impliesmain3}
\mathbb P(\kappa(R^{\cf}) \geq t+1) \leq \mathbb P(\mbox{\rm Po}(\frac{\nu}{2\lambda}) \geq t) + \eps
\end{equation}
if $W$ is sufficiently large.
Given a graph $H$ on $[n]$, let
\[ \rm cross_w(H) = \sum_{uv \in \rm Cross(H)} w_u w_v. \]
Observe that $\rm cross_w(H)$ equals
the sum of $w(C) w(C')$ over the unordered pairs $C$ and $C'$ of
components of $H$, where $w(C)$ denotes $\sum_{i \in V(C)}w_i$.
For forests $F,F' \in {\mathcal F}_n$ such that $F$ can be obtained from $F'$ by deleting an edge $uv$,
observe that $\mass{F'} = \frac{\lambda}{\nu} \cdot \mass{F} \cdot w_uw_v$.
For such $F, F'$ we let
\begin{equation}
\varphi(F',F) = \frac{\nu}{\lambda} \cdot \frac{ \mass {F'} }{\rm cross_w(F)}. \label{eq:phi_def}
\end{equation}
For all other pairs $F,F'$, we let $\varphi(F',F)=0$.
For $i=1,\ldots,n$, let ${\mathcal F}_{n}^{i}$ be the set of forests in ${\mathcal F}_n$ with $i$ components.
For each $F \in {\mathcal F}_{n}^{i+1}$ we have
\[
\sum_{F'\in{\mathcal F}_{n}^{i}}\varphi(F',F) = \frac{\mass{F}}{\rm cross_w(F)} \sum_{uv \in \rm Cross(F)} w_uw_v = \mass{F};
\]
and thus for each $i=1,\ldots,n-1$
\begin{equation} \label{eqn.sumflow}
\sum_{F'\in{\mathcal F}_{n}^{i}}\sum_{F\in{\mathcal F}_{n}^{i+1}}\varphi(F',F) =
\sum_{F\in {\mathcal F}_{n}^{i+1}} \mass{F} = K\cdot\mathbb P(R^{\cf} \in {\mathcal F}_{n}^{i+1})
\end{equation}
as in Lemma 3.1 of~\cite{amr2011}.
Given a tree $T$ on $[n]$ and an integer $k$ with $1 \leq k \leq \lfloor W/2 \rfloor$,
let $c(T,k)$ be the number of edges $e$ in $T$ such that $T \setminus e$ has a component with weight~$k$.
Let $R^{\ct}$ be $R^{\cf}$ conditioned on being a tree, so that $R^{\ct}$ is a random tree on $[n]$
with $\mathbb P(R^{\ct} =T) \propto \mass{T}$.
The distribution of $R^{\ct}$ is exactly as in~\cite{amr2011} --
the weighting is not relevant here, since $e(T)=n-1$ and $\kappa(T)=1$ are fixed,
and thus $\mathbb P(R^{\ct} = T) \propto \prod_{i=1}^{n} w_i^{d_T(i)}$.
Hence from Section 4 of~\cite{amr2011} we see that for any $\eta>0$, for $W$ sufficiently large we have
\begin{equation} \label{claim.half}
\sum_{k \geq 1} \frac{\mathbb E{[c(R^{\ct},k)]}}{k(W-k)} \leq (1+\eta) \cdot \frac12.
\end{equation}
By~(\ref{eqn.sumflow}) with $i=1$,
corresponding to Lemma 4.1 of~\cite{amr2011} we have
\begin{eqnarray*}
\mathbb P(R^{\cf} \in {\mathcal F}_{n}^{2}) &=&
\frac{1}{K} \frac{\nu}{\lambda} \sum_{T \in {\mathcal F}_n^1} \mass{T} \sum_{e \in T} \frac{1}{\rm cross_w(T-e)}\\
&=&
\mathbb P(R^{\cf} \in{\mathcal F}_{n}^{1}) \ \frac{\nu}{\lambda} \sum_{T \in {\mathcal F}_n^1} \mathbb P(R^{\ct} =T) \sum_{k \geq 1}
\frac{c(T,k)}{k(W-k)}
\end{eqnarray*}
and so
\begin{equation} \label{eqn.fn2_bound}
\mathbb P(R^{\cf} \in {\mathcal F}_{n}^{2}) = \mathbb P(R^{\cf} \in{\mathcal F}_{n}^{1})\ \frac{\nu}{\lambda}\ \sum_{k \geq 1}
\frac{\mathbb E{[c(R^{\ct},k)]}}{k(W-k)}.
\end{equation}
From~(\ref{claim.half}) and~(\ref{eqn.fn2_bound}) we see that
for all $\eta > 0$, for $W$ sufficiently large, for all
$w_1,\ldots,w_n$ with $\sum_{j=1}^n w_j=W$,
\begin{equation} \label{eqn.ratio_1_2}
\mathbb P(R^{\cf} \in {\mathcal F}_{n}^{2}) \leq (1+\eta) \frac{\nu}{2 \lambda} \ \mathbb P(R^{\cf} \in {\mathcal F}_{n}^{1}).
\end{equation}
Now we can complete the proof of~(\ref{eqn.impliesmain3})
(and thus of~(\ref{eqn.asympt-comps}) in Theorem~\ref{prop.asympt-conn})
as follows, as in the proof of Claim 2.2 in~\cite{amr2011}.
The next lemma will allow us to assume that $n$ is large, as well as being useful later.
Using~(\ref{eqn.sumflow}) and the proof of Lemma 3.2 of~\cite{amr2011} we find:
\begin{lemma}\label{lem.smalln2.5}
For each $i=1,\ldots,n-1$
\begin{equation}\label{eqn.smalln2.5}
\mathbb P(R^{\cf} \in {\mathcal F}_{n}^{i+1}) \leq \frac{\mathbb P(R^{\cf} \in {\mathcal F}_{n}^{i})}{i} \frac{n}{W} \frac{\nu}{\lambda}.
\end{equation}
\end{lemma}
If $W \geq 2n$ then by the above result and Lemma~\ref{lem.po2},
$\kappa(R^{\cf}) \leq_s 1+ \mbox{\rm Po}(\frac{\nu}{2 \lambda})$ and so~(\ref{eqn.impliesmain3}) holds.
Thus we may assume from now on
that $W<2n$.
Next we introduce Lemma 3.3 of~\cite{amr2011}.
For each finite non-empty set $V$ of positive integers,
let ${\mathcal G}_V$ denote the set of all graphs on the vertex set $V$,
and let ${\mathcal G}^k_V$ denote the set of all graphs in ${\mathcal G}_V$ with exactly
$k$ components.
For each positive integer $n$, let $\mu_n$ be a measure on the set of all graphs
with vertex set a subset of $[n]$ which is \emph{multiplicative on components};
that is, if $G$ has components $H_1,\ldots,H_k$ then
$\mu_n(G)=\prod_{i=1}^k \mu_n(H_i)$.
Observe that we obtain such a measure
if we set $\mu_n(G) = \mass{G}$ when $G$ is a forest and $\mu_n(G)=0$ otherwise.
\begin{lemma}\label{lem.connect_1} (\cite{amr2011})
Suppose there exist $\alpha > 0$ and integers $n \geq m_0 \geq 1$ such that
\begin{equation}\label{eq.connect_1_a}
\mu_n({\mathcal G}^2_V) \leq \alpha \mu_n({\mathcal G}^1_V) \;\;\; \mbox{for all }
V \subseteq \{1,\ldots,n\} \mbox{ with } |V| \geq m_0.
\end{equation}
Then for all integers $k \geq 1$ and $n \geq km_0$
\begin{equation}
\label{eq:connect_1}
\mu_{n}({\mathcal G}_{[n]}^{k+1})\leq \frac{\alpha}{k}\mu_n({\mathcal G}_{[n]}^k).
\end{equation}
\end{lemma}
We may now complete the proof of~(\ref{eqn.impliesmain3}).
Fix $j \geq t$ large enough that
\[ \sum_{i \geq j} \left(\frac{\nu}{\lambda}\right)^i \frac{1}{i!} \leq \frac{\eps}{2}.\]
Fix $\eta > 0$ small enough that, with $\alpha=(1+\eta) \frac{\nu}{2 \lambda}$,
we have
\[ \mathbb P(\mbox{\rm Po}(\alpha) \geq t) \leq \mathbb P(\mbox{\rm Po}(\frac{\nu}{2 \lambda}) \geq t) + \eps/2. \]
By~(\ref{eqn.ratio_1_2}) and Lemma \ref{lem.connect_1}
it follows that, for $W$ large enough (recall that $n >W/2$), for all $i$ with $1\leq i \leq j$ we have
\[ \mathbb P(\kappa(R^{\cf})=i+1) \le \frac{\alpha}{i} \mathbb P(\kappa(R^{\cf})=i). \]
In terms of $X= \kappa(R^{\cf})-1$, this says that
\begin{equation} \label{eqn.krf}
\mathbb P(X=i+1) \le \frac{\alpha}{i+1} \mathbb P(X=i) \;\; \mbox{ for } i=0,1,\ldots,j-1.
\end{equation}
Also, by Lemma \ref{lem.smalln2.5}, for all $i \geq 1$
\[ \mathbb P(R^{\cf} \in {\mathcal F}_{n}^{i+1})\leq \left(\frac{n \nu}{W \lambda}\right)^i \frac{1}{i!}
\leq \left(\frac{\nu}{\lambda}\right)^i \frac{1}{i!}\]
and so it follows by our choice of $j$ that
\[ \mathbb P(X \geq j) = \mathbb P(\kappa(R^{\cf}) \geq j+1) \leq \eps/2. \]
Hence by~(\ref{eqn.krf}) and Lemma~\ref{lem.po1},
\begin{eqnarray*}
\mathbb P(X \geq t)
& = & \mathbb P(j-1 \geq X \geq t) + \mathbb P(X \geq j)\\
& \leq & \mathbb P(\mbox{\rm Po}(\alpha) \geq t) + \eps/2\\
& \leq & \mathbb P(\mbox{\rm Po}(\frac{\nu}{2\lambda}) \geq t) + \eps.
\end{eqnarray*}
This completes the proof of~(\ref{eqn.impliesmain3}) and thus of~(\ref{eqn.asympt-comps}).
The inequality~(\ref{eqn.asympt-conn}) follows directly from~(\ref{eqn.asympt-comps}),
so it remains only to prove~(\ref{eqn.asympt-Ekappa}). Let $\eps>0$. By Theorem~\ref{prop.tauconn},
if $Y \sim \mbox{\rm Po}(\nu/\lambda)$ then
\[ \mathbb E[\kappa(R_n) {\bf 1}_{\kappa(R_n) \geq t+1}] \leq \mathbb E[(1+Y) {\bf 1}_{Y \geq t}] < \eps/2\]
if $t$ is sufficiently large. Fix such a $t$.
By~(\ref{eqn.impliesmain2}) (applied for each value up to~$t$, and with $\eps$ replaced by $\frac{\eps}{2t}$), for $n$ sufficiently large
\begin{eqnarray*}
\mathbb E[\kappa(R_n) {\bf 1}_{\kappa(R_n) \leq t}]
&=&
\sum_{i=1}^{t} \mathbb P(t \geq \kappa(R_n)\geq i)\\
& \leq & \sum_{i=0}^{t-1} \mathbb P(\mbox{\rm Po}(\frac{\nu}{2 \lambda})\geq i) + \eps/2\\
& \leq & 1+ \mathbb E[\mbox{\rm Po}(\frac{\nu}{2 \lambda})] + \eps/2 = 1 + \frac{\nu}{2 \lambda} + \eps/2.
\end{eqnarray*}
Hence, for $n$ sufficiently large, $\mathbb E[\kappa(R_n)] \leq 1 + \frac{\nu}{2 \lambda}+ \eps$, and we are done.
We have now completed the proof of Theorem~\ref{prop.asympt-conn}.
\subsection{Proof of Theorem~\ref{prop.fragbound2}}
\label{sec.fragproof}
Let $\eps>0$, and let ${\mathcal A}'_n = \{G \in {\mathcal A}_n:\mbox{\rm frag}(G) \leq \eps n\}$. Then ${\mathcal A}'_n$ is bridge-addable.
Also, for each $G \in {\mathcal A}'_n$ we have $\rm cross(G) \geq (1-\eps)n {\mathcal D}ot \mbox{\rm frag}(G)$.
Hence by Lemma~\ref{lem.frag1} with $\beta=1-\eps$ we have
\[ \mathbb E[\mbox{\rm frag}(R_n) {\bf 1}_{\mbox{\rm frag}(R_n) \leq \eps n}] < (1-\eps)^{-1} (\nu/\lambda). \]
Thus it suffices for us to show that
\begin{equation} \label{eqn.frageps}
\mathbb E[\mbox{\rm frag}(R_n) {\bf 1}_{\mbox{\rm frag}(R_n) > \eps n}] \; = \; o(1) \;\; \mbox{ as } n \to \infty.
\end{equation}
We proceed as in the proof of Theorem~\ref{prop.asympt-conn}.
For a graph $G$ on $[n]$ let $\mbox{\rm wfrag}(G)$ denote $W$ minus the maximum weight $w(C)$ of a component $C$ of~$G$.
Then corresponding to~(\ref{eqn.stocheq}) we have
\[ \mbox{\rm frag} (R^{G_0}) =_s \mbox{\rm wfrag}(R^{\cf}). \]
To see this we may argue as in the proof of~(\ref{eqn.stocheq}),
recalling that for each $H \in {\mathcal B} =[G_0]$, $g(H)$ is the forest obtained by contracting the components $C_i$ of $G_0$, and
noting that we have $\mbox{\rm frag}(H)=\mbox{\rm wfrag}(g(H))$.
Thus it suffices to consider $R^{\cf}$
and show that $\mathbb E[X {\bf 1}_{X > \eps W}]$ is $o(1)$ as $W \to \infty$,
where $X = \mbox{\rm wfrag}(R^{\cf})$.
Define $\tilde{\tau}(F)$ to be $\tau(g^{-1}(F))$ for each $F \in {\mathcal F}_n$; and for ${\mathcal A} \subseteq {\mathcal F}_n$ let
$\tilde{\tau}({\mathcal A}) = \sum_{F \in {\mathcal A}} \tilde{\tau}(F) = \tau(g^{-1}({\mathcal A}))$.
Then $\tilde{\tau}({\mathcal F}_n)=\tau({\mathcal B})$, and $\tilde{\tau}({\mathcal T}_n)=\tau({\mathcal C})$ where ${\mathcal C}$ is the set of connected graphs in ${\mathcal B}$.
Thus by Theorem~\ref{prop.tauconn} we have
$\tilde{\tau}({\mathcal T}_n) \geq e^{-\alpha} \tilde{\tau}({\mathcal F}_n)$, where we let $\alpha$ denote $\nu / \lambda$.
Recall that we take $f(G) \equiv 1$ without loss of generality, and so
$\tau(H)= \alpha \lambda^n$ for each connected graph $H$ in ${\mathcal B}$.
Also
\[ |g^{-1}({\mathcal T}_n)| = \prod_{i=1}^{n} w_i \cdot W^{n-2}\]
where $W=\sum_{i=1}^n w_i$.
This counting result goes back to Moon~\cite{moon67} in 1967 and R\'enyi~\cite{renyi1970} in 1970
(see also Pitman~\cite{pitman99}) and appears in
the proof of Lemma 4.2 of~\cite{amr2011}. Hence
\[ \tilde{\tau}({\mathcal T}_n) = \alpha \lambda^n \prod_{i=1}^{n} w_i \cdot W^{n-2}.\]
For each non-empty set $A$ of positive integers, let ${\mathcal T}_A$ and ${\mathcal F}_A$ denote respectively the sets of
trees and forests on vertex set $A$, and let $W_A$ denote $\sum_{i \in A} w_i$.
Let $1 \leq a \leq n-1$ and let $A \subseteq [n]$ with $|A|=a$. Denote $[n] \setminus A$ by $\bar{A}$. Then
\begin{eqnarray*}
\tilde{\tau}({\mathcal F}_A) \tilde{\tau}({\mathcal F}_{\bar{A}})
& \leq & e^{2 \alpha} \tilde{\tau}({\mathcal T}_A) \tilde{\tau}({\mathcal T}_{\bar{A}})\\
&=& \alpha^2 \lambda^n e^{2 \alpha} (\prod_{i=1}^{n} w_i ) \cdot W_A^{a-2} (W-W_A)^{n-a-2}\\
&=& \alpha^2 \lambda^n e^{2 \alpha} (\prod_{i=1}^{n} w_i) \cdot (W_A(W-W_A))^{-2} W_A^{a} (W-W_A)^{n-a}.
\end{eqnarray*}
But $x^a(W-x)^{n-a}$ is maximised at $x=\frac{a}{n}W$, so
\[ W_A^{a} (W-W_A)^{n-a} \leq (\frac{a}{n} W)^{a} (\frac{n-a}{n} W)^{n-a} = a^a (n-a)^{n-a} \left(\frac{W}{n} \right)^n.\]
Thus
\[ \tilde{\tau}({\mathcal F}_A) \tilde{\tau}({\mathcal F}_{\bar{A}}) \leq \alpha^2 \lambda^n e^{2 \alpha} \prod_{i=1}^{n} w_i \cdot (W_A(W-W_A))^{-2} a^a (n-a)^{n-a} \left(\frac{W}{n} \right)^n.\]
Now by Stirling's formula, there are positive constants $c_1$ and $c_2$ such that for each positive integer $k$
\[ c_1 k^{k+\frac12} e^{-k} \leq k! \leq c_2 k^{k+\frac12} e^{-k}. \]
Thus
\begin{eqnarray*}
\sum_{a=1}^{n-1} {n \choose a} a^{a} (n-a)^{n-a}
&=& n! \sum_{a=1}^{n-1} \frac{a^a}{a!} \frac{(n-a)^{n-a}}{(n-a)!}\\
& \leq & c_1^{-2} n! e^n \sum_{a=1}^{n-1} (a(n-a))^{-\frac12}.
\end{eqnarray*}
But the sum in this last expression is $O(1)$, so
\[ \sum_{a=1}^{n-1} {n \choose a} a^{a} (n-a)^{n-a} \; \leq \; c_3 n! e^n \; \leq \; c_4 n^{n+\frac12} \]
for some constants $c_3$ and $c_4$. It follows that
\[ \sum_{ A \subseteq [n]} (W_A W_{\bar{A}})^2 \ \tilde{\tau}({\mathcal F}_A) \tilde{\tau}({\mathcal F}_{\bar{A}}) \leq
c_5 \alpha^2 \lambda^n e^{2 \alpha} (\prod_{i=1}^{n} w_i) W^n n^{\frac12} = c_6 \, \tilde{\tau}({\mathcal T}_n) W^2 n^{\frac12} \]
for some constant $c_5$, and $c_6 = c_6(\alpha) = c_5 \alpha e^{2 \alpha}$.
Let us introduce a piece of notation: for $z>0$ let
\[ \sum_{z \leq W_A \leq W/2} s_A := \sum \{ W_A \ \tilde{\tau}({\mathcal F}_A) \tilde{\tau}({\mathcal F}_{\bar{A}}): A \subseteq [n], z \leq W_A \leq W/2\}. \]
Then
\begin{equation} \label{eqn.end}
\sum_{z \leq W_A \leq W/2} s_A \leq z^{-1} (\frac{W}{2})^{-2} \sum_{ A \subseteq [n]} (W_A W_{\bar{A}})^2 \
\tilde{\tau}({\mathcal F}_A) \tilde{\tau}({\mathcal F}_{\bar{A}})
\leq 4 c_6 \tilde{\tau}({\mathcal T}_n) z^{-1} n^{\frac12}.
\end{equation}
Now we argue as in the proof of Proposition 5.2 of~\cite{cmcd08}.
Consider a disconnected graph $G$ on $[n]$, and denote $\mbox{\rm wfrag}(G)$ by $z$.
We claim that there is a union of components with weight in the interval $[z/2, W/2]$.
To see this let $b=W-z$, so that $b$ is the biggest weight of a component.
Note that $\lfloor \frac{W+b}{2} \rfloor - \lceil \frac{W-b}{2} \rceil +1 \geq b$,
and so there are at least $b$ integers in the list
$\lceil \frac{W-b}{2} \rceil,\ldots,\lfloor \frac{W+b}{2} \rfloor$.
Thus by considering adding components one at a time we see that there is a union of components, with vertex $A$ say,
such that $W_A$ is in this set. Then $W_A \geq \lceil \frac{W-b}{2} \rceil \geq z/2$ and
$W-W_A \geq W - \lfloor \frac{W+b}{2} \rfloor \geq z/2$. Thus $A$ or $\bar{A}$ is as required.
From the above, there is an injection from the set of forests $F \in {\mathcal F}_n$ with $\mbox{\rm wfrag}(F) \geq z$
to the set
of triples $A, F_A, F_{\bar{A}}$ where $A \subseteq [n]$, $z/2 \leq \mbox{\rm wfrag}(F)/2 \leq W_A \leq W/2$,
$F_A \in {\mathcal F}_A$, $F_{\bar{A}} \in {\mathcal F}_{\bar{A}}$ and where $\tilde{\tau}(F)= \tilde{\tau}(F_A) \tilde{\tau}(F_{\bar{A}})$.
It follows that
\begin{eqnarray*}
\tilde{\tau}({\mathcal F}_n) \, \mathbb E[X 1_{X \geq z}] &=&
\sum_{F \in {\mathcal F}_n: {\small \mbox{\rm wfrag}}(F) \geq z} \tilde{\tau}(F) \mbox{\rm wfrag}(F)\\
& \leq &
\sum_{A \subseteq [n], z/2 \leq W_A \leq W/2} \; \sum_{F_A \in {\mathcal F}_A} \sum_{F_{\bar{A}} \in {\mathcal F}_{\bar{A}}}
2 W_A \ \tilde{\tau}(F_A) \tilde{\tau}(F_{\bar{A}})\\
& = & 2 \sum_{z/2 \leq W_A \leq W/2} s_A.
\end{eqnarray*}
Hence
\[ \mathbb E[X 1_{X > \eps W}] \leq \frac{2}{\tilde{\tau}({\mathcal F}_n)} \sum_{\eps W/2 \leq W_A \leq W/2} s_A =O(W^{-\frac12}) \]
by~(\ref{eqn.end}), and the proof of Theorem~\ref{prop.fragbound2} is complete.
\section{Proofs for rooted graphs}
\label{sec.rooted}
In this section we prove Theorems~\ref{prop.rooted-forests} and~\ref{prop.rooted} on rooted graphs.
First we prove Theorem~\ref{prop.rooted},
following the lines of the proof of Theorem~\ref{prop.tauconn}.
\begin{proofof} {Theorem~\ref{prop.rooted}}
As before, it suffices to assume that ${\mathcal A}$ is ${\mathcal A}_n$ for some $n$.
Let $1\leq k \leq n-1$.
Let ${\mathcal P}$ be the set of pairs $(G^o,e)$ where $G^o$ is a rooted graph on $[n]$ with $k$ components and $e$ is a bridge
in $G^{o}$ (which we may think of as being oriented towards the root of the component).
Let ${\mathcal Q}$ be the set of pairs $(H^{o},uv)$, where $H^o$ is a rooted graph on $[n]$ with $k+1$ components, and $uv$ is an
ordered pair of vertices such that $u$ is the root of its component and $v$ is in a different component.
There is a natural bijection between ${\mathcal P}$ and ${\mathcal Q}$. Given $(G^o,e) \in {\mathcal P}$, if $u$ is
the end of $e$ further from the root, we delete the edge $e$ and make $u$ a new root: given $(H^{o},uv) \in {\mathcal Q}$,
we add an edge between $u$ and $v$ and no longer have $u$ as a root.
Further, if the $k+1$ components of $H^{o}$ have $n_1,\ldots, n_{k+1}$ vertices respectively,
then the number of pairs $uv$ such that $(H^{o},uv) \in {\mathcal Q}$ is
$\sum_{i=1}^{k+1} (n-n_i) = kn$.
Now much as in the proof of Theorem~\ref{prop.tauconn} we have
\begin{eqnarray*}
\tau({\mathcal A}_n^{k \, o}) \cdot(n\!-\!k)
& \geq &
\sum_{(G^{o},e) \in {\mathcal P}} \tau(G^{o}) \; \geq \; \frac{\lambda}{\nu}
\sum_{(H^{o},uv) \in {\mathcal Q}} \tau(H^{o})\\
& = & \frac{\lambda}{\nu} \
\tau({\mathcal A}_n^{k+1 \, o}) \cdot k n.
\end{eqnarray*}
Therefore
\begin{equation} \label{eqn.rooted}
\tau({\mathcal A}_n^{k\!+\!1 \, o}) \leq
\frac{n\!-\!k}{n} \frac{\nu}{\lambda k} \ \tau({\mathcal A}_n^{k \, o}).
\end{equation}
Thus for $R^{o} \in_{\tau} {\mathcal A}^{o}$
\[
\mathbb P(\kappa(R^{o}) = k+1) \leq \frac{\nu}{\lambda k} \mathbb P(\kappa(R^{o}) = k) \;\; \mbox{ for each } k=1,2,\ldots, \]
and we may complete the proof as for Theorem~\ref{prop.tauconn}.
\end{proofof}
\begin{proofof}{Theorem~\ref{prop.rooted-forests}}
Consider rooted forests
and let $R^{o}_n \in_{\tau} {\mathcal F}^{o}$.
Then the first two inequalities above hold at equality, so
\[
\mathbb P(\kappa(R^{o}_n) = k+1) = \frac{n-k}{n} \frac{\nu}{\lambda k} \mathbb P(\kappa(R^{o}_n) = k) \;\; \mbox{ for each } k=1,2,\ldots.
\]
Hence $\kappa(R^{o}_n)$ converges in distribution to $1+\mbox{\rm Po}(\frac{\nu}{\lambda})$ as $n \to \infty$.
Thus to complete the proof of Theorem~\ref{prop.rooted-forests}, it remains only to show that
$\mathbb E[\mbox{\rm frag}(R_n^{o})] \to \infty$ as $n \to \infty$.
By what we have just proved and the fact that $|{\mathcal T}_n^{o}|=n^{n-1}$
\[ \tau({\mathcal F}_n^{o}) \sim e^{\nu/\lambda} \ \tau({\mathcal T}_n^{o}) = \nu e^{\nu/\lambda} (\lambda n)^{n-1}. \]
We now obtain a lower bound on $\mathbb E[\mbox{\rm frag}(R_n^{o})]$ by considering forests with two components.
With sums over say $\log n < j < n/2$, and using Stirling's formula, we have
\begin{eqnarray*}
\mathbb E[\mbox{\rm frag}(R_n^{o})] & \geq & \tau({\mathcal F}_n^{o})^{-1} \cdot \sum_j {n \choose j} j \ \tau({\mathcal T}_j^{o}) \tau({\mathcal T}_{n-j}^{o})\\
& \sim & \left(\nu e^{\nu/\lambda}\right)^{-1} (\lambda n)^{-(n-1)} n!
\sum_j j \frac{(\lambda j)^{j-1}}{j!} \frac{(\lambda(n-j))^{n-j-1}}{(n-j)!}\\
&=& \Theta(1) \cdot \frac{n!}{n^{n-1}} \sum_j \frac{j^j}{j!} \frac{(n-j)^{n-j-1}}{(n-j)!}\\
&=& \Theta(1) \cdot n^{\frac32} e^{-n} \sum_j j^{-\frac12} e^j (n-j)^{-\frac32} e^{n-j}\\
&=& \Theta(1) \cdot \sum_j j^{-\frac12} \; = \; \Theta(n^{\frac12}),
\end{eqnarray*}
and we are done.
\end{proofof}
\noindent
{\em Aside on the unrooted case}
From the inequality~(\ref{eqn.rooted}) above we may quickly deduce the result in Theorem~\ref{prop.tauconn} that,
for $R \in_{\tau} {\mathcal A}$ (where ${\mathcal A}$ is finite and bridge-addable and not rooted) we have
$\; \mathbb P(R \mbox{ is connected}) \geq e^{-\nu/\lambda}$.
To see this note that we may assume as usual that ${\mathcal A}$ is ${\mathcal A}_n$,
and note also that each graph $G \in {\mathcal A}_n$ with $\kappa(G)=k+1$
yields at least $n-k$ rooted graphs in ${\mathcal A}^{o}_n$.
Now let ${\mathcal C}_n$ be the set of connected graphs in ${\mathcal A}_n$, and use~(\ref{eqn.rooted}) once and then $k-1$ further times:
we find
\begin{eqnarray*}
\tau({\mathcal A}_n^{k+1})
&\leq &
\frac1{n-k} \tau({\mathcal A}_n^{k\!+\!1 \, o})
\; \leq \;
\frac1{n} \frac{\nu}{\lambda k} \tau({\mathcal A}_n^{k \, o}) \\
&\leq &
\frac1{n} \left(\frac{\nu}{\lambda}\right)^k \frac1{k!} \tau({\mathcal C}_n^{o}) \; = \;
\left(\frac{\nu}{\lambda}\right)^k \frac1{k!} \tau({\mathcal C}_n).
\end{eqnarray*}
Thus
\[ \tau({\mathcal A}_n) \leq \sum_{k \geq 0} \left(\frac{\nu}{\lambda}\right)^k \frac1{k!} \tau({\mathcal C}_n) = e^{\nu/\lambda} \tau({\mathcal C}_n),\]
and the proof is complete.
\section{Concluding remarks}
\label{sec.concl}
Consider $R_n \in_{u} {\mathcal A}$ where ${\mathcal A}$ is bridge-addable.
Starting from the lower bound $e^{-1}$
on the probability that $R_n$ is connected and the stronger stochastic bound
$\kappa(R_n) \leq_s 1+ \mbox{\rm Po}(1)$ on the number of components,
it was natural to enquire to what extent the bounds could be improved to match known results for forests.
Our results suggest that we should think of these bounds as being out asymptotically by a factor~2
in the `parameter', in that the ratio $\lambda/\nu$ should be doubled
(though the corresponding bounds are tight in the rooted case).
The central conjecture on connectivity for a bridge-addable set of graphs is from~\cite{msw06},
and was re-stated here as Conjecture~\ref{conj.b-add}.
As we noted earlier, some asymptotic improvement has been made on the bound $e^{-1}$
\cite{bbg07,bbg10,sn2012}; and the full improvement to $e^{-\frac12}$ has been achieved,
but only when we make the stronger assumption that ${\mathcal A}$ is bridge-alterable~\cite{amr2011,kp2011}.
In the present paper we considered corresponding improvements concerning the distribution of $\kappa(R_n)$,
and introduced new bounds on $\mbox{\rm frag}(R_n)$ (the number of vertices left when we remove
a largest component)
which also match results for forests asymptotically. Further, we set these results in a general framework
emphasising the role of bridges, rather than just considering uniform distributions.
For random rooted graphs our non-asymptotic results already match those for forests.
In each other case, to achieve results asymptotically matching those for forests we have had to assume that the set
${\mathcal A}$ of graphs is bridge-alterable.
It is natural to ask whether these results actually hold under the weaker assumption that ${\mathcal A}$
is bridge-addable. Conjecture~\ref{conj.b-add} is still open.
We propose two further conjectures (for uniform distributions).
The first concerns a possible extension of inequalities~(\ref{eqn.b-alt-frag2}) and~(\ref{eqn.b-alt-comps1})
from bridge-alterable to bridge-addable.
\begin{conjecture} \label{conj.b-add-comps}
If ${\mathcal A}$ is bridge-addable and $R_n \in_u {\mathcal A}$ then
\[ \mbox{(a)} \hspace{.1in} \kappa(R_n) \leq_s 1+ \mbox{\rm Po}(\frac12) \;\;\mbox{ asymptotically},\]
and
\[ \mbox{(b)} \hspace{.1in} \limsup_{n \to \infty} \mathbb E[\mbox{\rm frag}(R_n)] \leq 1. \hspace{.9in}\]
\end{conjecture}
The work of Balister, Bollob{\'a}s and Gerke~\cite{bbg07,bbg10} mentioned earlier
gives some progress on part (a) of this conjecture: the proofs there
together with Lemma~\ref{lem.po1} here show that, with $\alpha=0.7983$,
\[ \kappa(R_n) \leq_s 1+ \mbox{\rm Po}(\alpha) \;\;\mbox{ asymptotically};\]
and this bound gives
\[ \limsup_{n \to \infty} \mathbb E[\kappa(R_n)] \leq 1+ \alpha \approx 1.7983 \]
as may be seen by arguing as at the end of the proof of Theorem~\ref{prop.asympt-conn}.
Note also that to establish part (b) of the conjecture,
it suffices to show that~(\ref{eqn.frageps}) holds whenever ${\mathcal A}$ is bridge-addable.
Finally, we propose a strengthened non-asymptotic version of the last conjecture,
along the lines of Conjecture 5.1 in~\cite{amr2011} or Conjecture 1.2 of~\cite{bbg10}.
\begin{conjecture} \label{conj.b-add-strong}
If ${\mathcal A}$ is bridge-alterable, $n$ is a positive integer, $R_n \in_u {\mathcal A}$ and $R_n^{{\mathcal F}} \in_{u} {\mathcal F}$ then
\[ \kappa(R_n) \leq_s \kappa(R_n^{{\mathcal F}})\;\; \mbox{ and } \;\; \mathbb E[\mbox{\rm frag}(R_n)] \leq \mathbb E[\mbox{\rm frag}(R_n^{{\mathcal F}})]. \]
\end{conjecture}
Establishing the stronger version of this conjecture, in which we assume only that ${\mathcal A}$ is bridge-addable, would
of course be even better!
{\small
}
\end{document}
|
\begin{document}
\title{\bf\Large A note on complex interpolation and Calder\'on
product of quasi-Banach spaces
\footnotetext{\hspace{-0.35cm} 2010 {\it
Mathematics Subject Classification}. Primary 46B70.
\endgraf
{\it Key words and phrases}.
complex interpolation, Calder\'on product, quasi-Banach lattice
\endgraf
Wen Yuan is supported by the National
Natural Science Foundation of China (Grant No. 11101038), the Specialized Research Fund for the Doctoral Program of Higher Education
of China (Grant No. 20120003110003), the Fundamental Research Funds for
Central Universities of China (Grant No. 2012LYB26) and the
Alexander von Humboldt Foundation.
}}
\date{}
\author{Wen Yuan}
\maketitle
\begin{center}
\begin{minipage}{13.5cm}{\small
{\noindent{\bf Abstract}\quad
In this paper, we prove that the inner complex interpolation of two
quasi-Banach lattices coincides with the closure of their intersection
in their Calder\'on product. This generalizes a classical result by Shestakov in 1974
for Banach lattices.
}}
\end{minipage}
\end{center}
\arraycolsep=1pt
\section{Introduction}
In this paper we consider the relation between complex interpolations and
Calder\'on products for quasi-Banach lattices. We begin with the definition of complex interpolation for quasi-Banach spaces (see, for example, \cite{ca64,km98,kmm}).
Consider a \emph{couple of quasi-Banach spaces} $X_0,X_1$,
which are continuously embedding into a large topological
vector space $Y$. The \emph{space} $X_0+X_1$ is defined as
$$X_0+X_1:=\{h\in Y:\ \exists\ h_i\in X_i,\ i\in\{0,1\},\ {\rm such\ that}\ h=h_0+h_1\},$$
with
$$\|h\|_{X_0+X_1}:=\inf\{\|h_0\|_{X_0}+\|h_1\|_{X_1}:\ h=h_0+h_1,
\ h_0\in X_0\ {\rm and}\ h_1\in X_1\}.$$
Let $U:= \{z \in {\mathbb C} : \: 0<\Re e\, z<1\}$ and
$\overline{U}:=\{z\in{\mathbb C} :\: 0\le \Re e\, z\le 1\}.$
A map
$f$: $U\to X$ is said to be \emph{analytic} if, for any given $z_0\in U$, there exists
$\eta\in(0,\infty)$ such that
$f(z)=\sum_{n=0}^\infty h_n(z-z_0)^n,\ h_n\in X$, is uniformly convergent for
$|z-z_0|<\eta$. A quasi-Banach space $X$ is said to be \emph{analytically convex} if
there exists a positive constant $C$ such that, for any analytic function
$f:\ U\to X$ which is continuous on the closed strip $\overline{U}$,
$$\max_{z\in U}\|f(z)\|_X \le C\max_{{\Re e}\,z\in\{0,1\}}\|f(z)\|_X.$$
Suppose that $X_0+X_1$ is analytically convex.
The \emph{set} ${\mathcal F}:={\mathcal F}(X_0,X_1)$ is defined to be the set
of all functions $f$:\ $U\to X_0+X_1$ satisfying that
\begin{enumerate}
\item[(i)] $f$ is analytic and \emph{bounded} in $X_0+X_1$, which means
that $f(U):=\{f(z):\ z\in U\}$
is a bounded set of $X_0+X_1$;
\item[(ii)]
$f$ is extended continuously to the closure $\overline{U}$ of the strip $U$
such that the traces
$t\mapsto f(j+it)$ are bounded continuous functions into $X_j$,
$j\in\{0,1\}$, $t\in{\mathbb R}$.
\end{enumerate}
We endow ${\mathcal F}$ with the \emph{quasi-norm}
$$\|f\|_{\mathcal F}:=\max\left\{\sup_{t\in{\mathbb R}}\|f(it)\|_{X_0},
\ \ \sup_{t\in{\mathbb R}}\|f(1+it)\|_{X_1}\right\}.$$
Let ${\mathcal F}_0:={\mathcal F}_0(X_0,X_1)$ be the closure in ${\mathcal F}$ of the set of all functions $f\in {\mathcal F}$ such that
$f(z)\in X_0\cap X_1$ for all $z\in U$.
We now recall the definition of complex interpolations.
\begin{definition}
Let $X_0,\,X_1$ be two quasi-Banach spaces such that
$X_0+X_1$ is analytically convex.
Then the \emph{outer complex interpolation space}
$[X_0,X_1]_\theta$ with $\theta\in(0,1)$ is defined by
$$[X_0,X_1]_\theta:=\{g\in X_0+X_1:\ \exists\ f\in{\mathcal F}\ {\rm such\ that}\ f(\theta)=g\}$$
and its \emph{norm} given by
$\|g\|_{[X_0,X_1]_\theta}:=\inf_{f\in{\mathcal F}}\{\|f\|_{\mathcal F}:f(\theta)=g\}.$
The \emph{inner complex interpolation space}
$[X_0,X_1]^i_\theta$ with $\theta\in(0,1)$ is defined via the same as $[X_0,X_1]_\theta$ with
${\mathcal F}$ replaced by ${\mathcal F}_0$.
\end{definition}
It easily follows from the definition that $[X_0,X_1]^i_\theta
\hookrightarrow[X_0,X_1]_\theta$ and
$X_0\cap X_1$ is dense in $[X_0,X_1]^i_\theta$.
If $X_0$ and $X_1$ are both Banach spaces, then it is known that
the inner and outer complex methods coincide (see \cite{ca64,kmm}).
For the general quasi-Banach cases, Kalton, Mayboroda and Mitrea \cite{kmm} pointed out that the inner and the outer complex methods yield the same space if $X_0$
and $X_1$ are separable analytically convex quasi-Banach spaces.
However, for quasi-Banach spaces without the separability condition,
whether these two methods
still coincide is still unclear (see \cite{kmm}).
Let $(\Omega,\mu)$ be a $\sigma$-finite measure space and $L_0$ be the collection of
all complex-valued $\mu$-measurable functions on $\Omega$. A quasi-Banach function
space $X$ on $\Omega$ is called a {\it quasi-Banach lattice}
if for every $f\in X$ and $g\in L_0$ with $|g(x)|\le |f(x)|$ for
$\mu$-a.e. $x\in \Omega$,
one has $g\in X$ and $\|g\|_X\le \|f\|_X.$
\begin{definition}
Let $X_j \subset L_0$, $j\in\{0,1\}$, be quasi-Banach lattices on $(\Omega,\mu)$
and $\theta\in(0,1)$. Then the {\it Calder\'on product $X_0^{1-\theta}X_1^\theta$}
of $X_0$ and $X_1$ is the collection of all functions $f \in L_0$ such that
\begin{eqnarray*}
\|f\|_{X_0^{1-\theta}X_1^\theta} := \inf\Bigl\{\|f_0\|_{X_0}^{1-\theta}\|f_1\|_{X_1}^\theta:\:
|f|\le |f_0|^{1-\theta}|f_1|^\theta \quad \mu \mbox{-a.e.},\ \
f_j\in X_j, \, j\in\{0,1\}\Bigr\}
\end{eqnarray*}
is finite.
\end{definition}
The first result concerning the relation between complex interpolations and
Calder\'on products is due to Calder\'on \cite{ca64}. He showed that if $X_0$ and $X_1$
are Banach lattices, then $[X_0,X_1]_\theta \hookrightarrow X_0^{1-\theta}X_1^\theta$.
Later, Shestakov \cite{she74} (see also \cite{s74,n85}) in 1974
proved that the complex interpolation of two Banach lattices $X_0$ and $X_1$
is just the closure of their intersection $X_0\cap X_1$ in their Calder\'on product, namely,
$[X_0,X_1]_\theta =\overline{X_0\cap X_1}^{\|\cdot\|_{X_0^{1-\theta}X_1^\theta}}$.
In 1998, Kalton and Mitrea \cite{km98} considered more general
quasi-Banach cases. Indeed, they proved in \cite[Theorem 3.4]{km98} that,
\emph{if $X_0$ and $X_1$ are analytically convex separable quasi-Banach lattices, then
$X_0+X_1$ is also analytically convex and
$[X_0,X_1]_\theta=X_0^{1-\theta}X_1^\theta$}. The proof of this result was noticed later in
\cite{kmm} to be also feasible for the coincidence
$[X_0,X_1]^i_\theta=X_0^{1-\theta}X_1^\theta$, and so in this case,
$$[X_0,X_1]_\theta =[X_0,X_1]^i_\theta =X_0^{1-\theta}X_1^\theta=\overline{X_0\cap X_1}^{\|\cdot\|_{X_0^{1-\theta}X_1^\theta}}.$$
Notice that in Kalton and Mitrea's result \cite[Theorem 3.4]{km98}, there is a condition
on the separability of the spaces $X_0$ and $X_1$. An interesting
question is, how is the relation between complex interpolations and Calder\'on
products of quasi-Banach lattices which are not separable?
Is Shestakov's result for Banach spaces also true for general quasi-Banach cases?
In this note we give a positive answer for the inner complex interpolation.
\begin{theorem}\label{main}
Let $\Omega$ be a Polish space, $\mu$ a $\sigma$-finite Borel measure
on $\Omega$ and $(X_0, X_1)$ a pair of quasi-Banach lattices on $(\Omega,\mu)$.
If both $X_0$ and $X_1$ are analytically convex, then
$$[X_0,X_1]_\theta^i =\overline{X_0\cap X_1}^{\|\cdot\|_{X_0^{1-\theta}X_1^\theta}},\quad \theta\in(0,1).$$
\end{theorem}
To prove Theorem \rightef{main}, we use another interpolation method, the Gagliardo-Peetre interpolation introduced by Peetre \cite{p71}. In 1985 Nilsson \cite{n85} proved
a general result concerning the
relation between Gagliardo-Peetre interpolation and Calder\'on product,
which is a key tool used in this paper. This proof is different from the one
used by Shestakov \cite{she74}
for Banach lattices.
Throughout the paper,
the \emph{symbol} $C $ denotes a positive constant
which may vary from line to line.
The \emph{meaning of $A \lesssim B$} is
given by: there exists a positive constant $C$ such that
$A \le C \,B$.
The \emph{symbol $A \sim B$} means
$A \lesssim B \lesssim A$.
\section{Proof of Theorem \rightef{main}}
Let $X$ be a quasi-Banach lattice and $p\in[1,\infty]$.
The \emph{$p$-convexification} of $X$, denoted by $X^{(p)}$,
is defined as follows: $f\in X^{(p)}$ if and only if
$|f|^p\in X$. For all $f\in X^{(p)}$, define $\|f\|_{X^{(p)}}:=\||f|^p\|_X^{1/p}$.
The lattice $X$ is called \emph{$1/p$-convex} if $X^{(p)}$ is a Banach space.
Moreover, a quasi-Banach lattice $X$ is said to \emph{be of type $\mathfrak{E}$},
if there exists an equivalent quasi-norm $|||{\mathcal D}ot|||_X$ such that $(X,|||{\mathcal D}ot|||_X)$
is {$1/p$-convex} for some $p\in[1,\infty)$; see \cite{n85}.
An important tool we used is the following equivalent characterization of
analytically convex quasi-Banach lattice; see, for example, \cite{k86,kmm}.
In what follows, $K_X$ denotes the \emph{modulus of concavity} of a
quasi-Banach space $X$, i.\,e., the smallest positive constant satisfying
$$\|x+y\|_X\le K_X(\|x\|_X+\|y\|_X),\quad x,y\in X.$$
\begin{proposition}\langlebel{r-convex}
Let $X$ be a quasi-Banach lattice. Then the following assertions are equivalent:
{\rightm(i)} $X$ is analytically convex;
{\rightm(ii)} there exists $r>0$ such that $X$ is $r$-convex, namely, $X^{(1/r)}$ is a Banach space;
{\rightm(iii)} $X$ is $r$-convex for all $0<r<(1+\log_2K_X)^{-1}$.
\end{proposition}
It follows from Proposition \rightef{r-convex} that all analytically convex
quasi-Banach lattices are of type $\mathfrak{E}$.
We now prove one direction of Theorem \rightef{main} in the following theorem, which can be proved by an argument similar to that used for \cite[Theorem 3.4]{km98}.
For the sake of convenience, we give some details here.
\begin{theorem}
Let $\Omega$ be a Polish space, $\mu$ a $\sigma$-finite Borel measure
on $\Omega$ and $(X_0, X_1)$ a pair of quasi-Banach lattices of functions on $(\Omega,\mu)$.
If both $X_0$ and $X_1$ are analytically convex, then
$$[X_0,X_1]_\theta^i \hookrightarrow \overline{X_0{\mathcal A}p X_1}^{\|{\mathcal D}ot\|_{X_0^{1-\theta}X_1^\theta}},\quad \theta\in(0,1).$$
\end{theorem}
\begin{proof}
Since the lattices $X_0$, $X_1$ are analytically convex, by Proposition
\rightef{r-convex}, we know
that there exists $r\in(0,1]$ such that $X_0$, $X_1$ are both $r$-convex lattices.
By \cite[Theorem 3.4]{km98} and its proof, $X_0+X_1$ is also $r$-convex and hence $(X_0+X_1)^{(1/r)}$ is a Banach space.
Since $X_0\cap X_1$ is dense in $[X_0,X_1]_\theta^i$, it suffices to prove
$$(X_0\cap X_1, \|\cdot\|_{[X_0,X_1]_\theta^i})\hookrightarrow \overline{X_0\cap X_1}^{\|\cdot\|_{X_0^{1-\theta}X_1^\theta}}.$$
Let $f\in X_0{\mathcal A}p X_1$. Then for any $\varepsilon>0$
there exists $F\in {\mathcal F}_0(X_0,X_1)$ such that $F(\theta)=f$ and $\|F\|_{{\mathcal F}(X_0,X_1)}\le \|f\|_{[X_0,X_1]_\theta^i}+\varepsilon$.
Since $F$ is analytic in $U$ and continuous in $\overline{U}$,
for any $z_0\in U$, there exist $R>0$
and $f_k\in X_0+X_1$ such that
$F(z)=\sum_{k\in{\mathbb N}_0} f_k (z-z_0)^k$
with uniform convergence in $X_0+X_1$ for all $|z-z_0|<R$. Moreover, due to the Cauchy-Hadamard theorem, it holds that $\limsup_{k\to\infty}\|f_k\|_{X_0+X_1}^{1/k}\le R^{-1}$.
We also know that for $\mu$-almost every $w\in \Omega$, any $\rightho<R$ and $q\le r$,
$$|F(z_0)(w)|^q\le \frac1{2\pi}\int_0^{2\pi} |F(z_0+\rightho e^{it})(w)|^q\,dt.$$
Since $z\mapsto |F(z)|^q$ is continuous into $(X_0+X_1)^{(1/q)}$,
we know that, for any positive continuous functional $\phi\in ((X_0+X_1)^{(1/q)})^*$,
\begin{eqnarray*}
\phi(|F(z_0)|^q)&&\le \frac1{2\pi}\phi\left(\int_0^{2\pi} |F(z_0+\rightho e^{it})|^q\,dt\rightight)\le\frac1{2\pi}\int_0^{2\pi} \phi\left(|F(z_0+\rightho e^{it})|^q\rightight)\,dt,
\end{eqnarray*}
hence $z \mapsto \phi(|F(z)|^q)$ is subharmonic on $U$. Then
\begin{eqnarray}\langlebel{e1}
\phi(|F(\theta)|^q)&&\le \int_{\mathbb R} P_0(\theta,t)\phi(|F(it)|^q)\,dt+\int_{\mathbb R} P_1(\theta,t)\phi(|F(1+it)|^q)\,dt,
\end{eqnarray}
where $P_0$ and $P_1$ are the components of the
Poisson kernel on $U$ satisfying
$\int_{\mathbb R} P_0(\theta,t)\,dt=1-\theta$ and $\int_{\mathbb R} P_1(\theta,t)\,dt=\theta$.
Let $f_0:=((1-\theta)^{-1}\int_{\mathbb R} P_0(\theta,t)|F(it)|^r\,dt)^{1/r}$ and $f_1:=(\theta^{-1}\int_{\mathbb R} P_1(\theta,t)|F(1+it)|^r\,dt)^{1/r}$. It follows from the
$r$-convexity of $X_0$ and $X_1$ that $f_j\in X_j$ with $\|f_j\|_{X_j}\le \|F\|_{{\mathcal F}(X_0,X_1)}$, $j\in\{0,1\}$. By \eqref{e1}, $q\le r$
and the positivity of $\phi$, we have $|F(\theta)|^q\le (1-\theta)f_0^q+\theta f_1^q$. Taking the logarithm on both sides and letting $q\to0$ then gives $|f|=|F(\theta)|\le f_0^{1-\theta}f_1^\theta$.
Thus, $\|f\|_{X_0^{1-\theta}X_1^\theta}\le \|F\|_{{\mathcal F}(X_0,X_1)}\lesssim \|f\|_{[X_0,X_1]_\theta^i}$, as desired.
\end{proof}
To show the other direction, we need the following Gagliardo-Peetre interpolation method,
which was introduced by Peetre \cite{p71}.
\begin{definition}
Let $X_0$ and $X_1$ be a pair of quasi-Banach spaces and $\theta \in (0,1)$.
We say $a\in \langlengle X_0, X_1\rightangle_\theta$ if there exists a sequence $\{a_i\}_{i\in{\mathbb Z}}
\mathop{\rightm sub \,}\nolimitsset X_0{\mathcal A}p X_1$ such that $a=\sum_{i\in{\mathbb Z}} \, a_i$ with convergence in $X_0+X_1$
and for any bounded sequence $\{\varepsilon_i\}_{i\in{\mathbb Z}}\mathop{\rightm sub \,}\nolimitsset{\mathbb C}$,
$\sum_{i\in{\mathbb Z}} \varepsilon_i \, 2^{i(j-\theta)} \, a_i$
converges in $X_j$, $j\in\{0,1\}$. We further require that
\[
\left\|\sum_{i\in{\mathbb Z}} \varepsilon_i \, 2^{i(j-\theta)} \, a_i\right\|_{X_j}\le
C \, \sup_{i\in{\mathbb Z}}|\varepsilon_i|,\quad j\in\{0,1\},
\]
for some constant $C$. As a quasi-norm on
$\langle X_0, X_1\rangle_\theta$, we use $\|a\|_{\langle X_0, X_1\rangle_\theta}:=\inf C$, where the infimum is taken over all admissible constants $C$.
\end{definition}
Applying Proposition \rightef{r-convex} and \cite[Theorem 2.1]{n85} (see \cite[(2.1)]{n85}),
we have the following conclusion.
\begin{theorem}
Let $\Omega$ be a Polish space, $\mu$ a $\sigma$-finite Borel measure
on $\Omega$ and $(X_0, X_1)$ a pair of quasi-Banach lattices of functions on $(\Omega,\mu)$.
If both $X_0$ and $X_1$ are analytically convex, then
$$\langlengle X_0,X_1\rightangle_\theta =\overline{X_0{\mathcal A}p X_1}^{\|{\mathcal D}ot\|_{X_0^{1-\theta}X_1^\theta}},\quad \theta\in(0,1),$$
and $\|{\mathcal D}ot\|_{\langlengle X_0,X_1\rightangle_\theta}$ is equivalent to $\|{\mathcal D}ot\|_{X_0^{1-\theta}X_1^\theta}$.
\end{theorem}
From this conclusion, we
deduce that $X_0{\mathcal A}p X_1$ is dense in $\langlengle X_0,X_1\rightangle_\theta$
and, to prove $\overline{X_0{\mathcal A}p X_1}^{\|{\mathcal D}ot\|_{X_0^{1-\theta}X_1^\theta}}\hookrightarrow[X_0,X_1]_\theta^i$, it
suffices to show
$\langlengle X_0,X_1\rightangle_\theta\hookrightarrow[X_0,X_1]_\theta^i$.
\begin{theorem}
Let $\Omega$ be a Polish space, $\mu$ a $\sigma$-finite Borel measure
on $\Omega$ and $(X_0, X_1)$ a pair of quasi-Banach lattices of functions on $(\Omega,\mu)$.
If both $X_0$ and $X_1$ are analytically convex, then
$$\langlengle X_0,X_1\rightangle_\theta\hookrightarrow[X_0,X_1]_\theta^i,\quad \theta\in(0,1).$$
\end{theorem}
\begin{proof} Let $D(X_0,X_1,\theta)$ be the subspace of $\langle X_0,X_1\rangle_\theta$
consisting of all $f\in \langlengle X_0,X_1\rightangle_\theta$ such that there exists
a finite set $E\mathop{\rightm sub \,}\nolimitsset {\mathbb Z}$ and $\{f_k\}_{k\in E}\mathop{\rightm sub \,}\nolimitsset X_0{\mathcal A}p X_1$ such that
$f=\sum_{k\in E} f_k$ in $X_0+X_1$, and for any bounded sequence
$\{\varepsilon_k\}_{k\in E}$ of complex numbers
$\sum_{k\in E} \varepsilon_k 2^{k(j-\theta)}f_k$ converges in $X_j$, with
$$\left\|\sum_{k\in E} \varepsilon_k 2^{k(j-\theta)}f_k\right\|_{X_j}\lesssim \|f\|_{\langlengle X_0,X_1\rightangle_\theta} \sup_{k\in E}|\varepsilon_k|, \quad j\in\{0,1\}.$$
Obviously, $X_0{\mathcal A}p X_1 \mathop{\rightm sub \,}\nolimitsset D(X_0,X_1,\theta)$, and hence $D(X_0,X_1, \theta)$
is dense in $\langlengle X_0,X_1\rightangle_\theta$. To complete the proof,
it suffices to show
$$(D(X_0,X_1,\theta),\|{\mathcal D}ot\|_{\langlengle X_0,X_1\rightangle_\theta})\hookrightarrow[X_0,X_1]_\theta^i.$$
Let $f\in D(X_0,X_1,\theta).$ Without loss of generality, we may assume that
$f=\sum_{|k|\le M} f_k$ in $X_0+X_1$ for some $M\in{\mathbb N}$ and $\{f_k\}_{|k|\le M}\mathop{\rightm sub \,}\nolimitsset X_0{\mathcal A}p X_1$, and
\begin{eqnarray}\langlebel{con1}
\left\|\sum_{|k|\le M} \varepsilon_k 2^{k(j-\theta)}f_k\right\|_{X_j}\lesssim \|f\|_{\langlengle X_0,X_1\rightangle_{\theta}} \sup_{k\in E}|\varepsilon_k|, \quad j\in\{0,1\}.
\end{eqnarray}
Define $F(z):=\sum_{|k|\le M} 2^{k(z-\theta)}f_k$ with convergence in $X_0+X_1$
for all $z\in \overline{U}$. Obviously, $F(\theta)=f$ and $F(z)\in X_0{\mathcal A}p X_1$.
Now we prove $F\in {\mathcal F}_0(X_0,X_1)$. The analyticity of $F$ is obvious.
To show $F$ is bounded in $X_0+X_1$, for $z\in \overline{U}$,
write $z=a+ib$ with $a\in[0,1]$ and $b\in{\mathbb R}$, and
\begin{eqnarray*}
F(z)=\sum_{-M\le k<0} 2^{ka+kbi} 2^{-k\theta}f_k +\sum_{0\le k\le M}2^{k(a-1)+kbi}
2^{k(1-\theta)}f_k=:F_0(z)+F_1(z).
\end{eqnarray*}
Since $\{2^{ka+kbi}\}_{-M\le k<0}$ and $\{2^{k(a-1)+kbi}\}_{0\le k\le M}$ are bounded sequences, by \eqref{con1}, we have
$$\|F_j(z)\|_{X_j}\lesssim \|f\|_{\langlengle X_0,X_1\rightangle_\theta}, \quad j\in\{0,1\}.$$
This implies $F(z)\in X_0+X_1$ and $\|F(z)\|_{X_0+X_1}\lesssim \|f\|_{\langlengle X_0,X_1\rightangle_\theta}$ for all $z\in \overline{U}$.
Similarly, since $\{2^{kti}\}_{|k|\le M}$ is a bounded sequence, applying
\eqref{con1} we obtain
\begin{eqnarray*}
\|F(j+it)\|_{X_j}&&=\left\|\sum_{|k|\le M} 2^{kit}2^{k(j-\theta)}f_k\right\|_{X_j} \\
&&\lesssim\|f\|_{\langlengle X_0,X_1\rightangle_\theta} \sup_{|k|\le M}|2^{kti}|\lesssim
\|f\|_{\langlengle X_0,X_1\rightangle_\theta}, \quad j\in\{0,1\}.
\end{eqnarray*}
Now we show $t\mapsto F(j+it)$ is a
continuous function into $X_j$, $j\in\{0,1\}$.
Fix $t_0\in{\mathbb R}$. Notice that, for any $\varepsilon>0$, we can find
$\delta=\delta(M,\varepsilon)>0$, such that
for any $|t-t_0|<\delta$ and $|k|\le M$,
$|2^{kit}-2^{kit_0}|<\varepsilon$. Hence,
\begin{eqnarray*}
\|F(j+it)-F(j+it_0)\|_{X_j}&&=\left\|\sum_{|k|\le M} [2^{kit}-2^{kit_0}]2^{k(j-\theta)}f_k\right\|_{X_j} \\
&&\lesssim\|f\|_{\langlengle X_0,X_1\rightangle_\theta} \sup_{|k|\le M}|2^{kti}-2^{kit_0}|\lesssim
\varepsilon\|f\|_{\langlengle X_0,X_1\rightangle_\theta}, \quad j\in\{0,1\},
\end{eqnarray*}
as desired.
It remains to show the extension of $F$ from $U$ to $\overline{U}$ is continuous. Since $F$ is analytic in $U$, we only need to prove that, for any $t\in{\mathbb R}$,
\begin{equation}\langlebel{con2}
\|F(a+it)-F(it)\|_{X_0+X_1}\to 0, \quad a\to 0^+
\end{equation}
and
\begin{equation}\langlebel{con3}
\|F(a+it)-F(1+it)\|_{X_0+X_1}\to 0, \quad a\to 1^-.
\end{equation}
For any $\varepsilon>0$, we can find
$\delta=\delta(M,\varepsilon)>0$, such that
for any $0<a<\delta$ and $|k|\le M$,
$|2^{ka}-1|<\varepsilon$.
Write
\begin{eqnarray*}
F(a+it)-F(it)
&&=\sum_{-M\le k<0}[2^{ka}-1]2^{kit}2^{-k\theta}f_k+\sum_{0\le k\le M} [2^{ka}-1]2^{-k}2^{kit}2^{k(1-\theta)}f_k.
\end{eqnarray*}
Since
$$\left\|\sum_{-M\le k<0}[2^{ka}-1]2^{kit}2^{-k\theta}f_k\right\|_{X_0}\lesssim \varepsilon \|f\|_{\langlengle X_0,X_1\rightangle_\theta}$$
and
$$\left\|\sum_{0\le k\le M}[2^{ka}-1]2^{-k}
2^{kit}2^{k(1-\theta)}f_k\right\|_{X_1}\lesssim \varepsilon \|f\|_{\langlengle X_0,X_1\rightangle_\theta},$$
we know that
$$\|F(a+it)-F(it)\|_{X_0+X_1}\lesssim \varepsilon \|f\|_{\langlengle X_0,X_1\rightangle_\theta}.$$
This gives \eqref{con2}. A similar argument gives \eqref{con3}.
Combining the above arguments, we know that $F\in {\mathcal F}_0(X_0,X_1)$ with
$\|F\|_{{\mathcal F}(X_0,X_1)}\lesssim \|f\|_{\langlengle X_0,X_1\rightangle_\theta}$.
Therefore, $\|f\|_{[X_0,X_1]_\theta^i}\lesssim \|f\|_{\langlengle X_0,X_1\rightangle_\theta}$.
This finishes the proof.
\end{proof}
Theorem \rightef{main} is then a consequence of the above three theorems.
Moreover, as a byproduct, we obtain the coincidence between the inner complex interpolation
and the Gagliardo-Peetre interpolation.
\begin{corollary}
Let $(X_0, X_1)$ be a pair of analytically convex
quasi-Banach lattices of functions on $(\Omega,\mu)$.
Then $\langle X_0,X_1\rangle_\theta=[X_0,X_1]_\theta^i$ for all $\theta\in(0,1).$
\end{corollary}
\begin{remark}
Recall that if $X_0$ and $X_1$ are Banach spaces, then
$\langlengle X_0,X_1\rightangle_\theta\hookrightarrow [X_0,X_1]_\theta$; see, for example, \cite{p71,j81,n85}.
The above corollary gives a generalization of this coincidence.
\end{remark}
From the relation between the inner and outer complex interpolations (see \cite{ca64} and
\cite[Theorem 7.9]{kmm}), we also have the following conclusion.
\begin{corollary}
Let $\theta\in(0,1)$ and $(X_0, X_1)$ be a pair of analytically convex
quasi-Banach lattices of functions on $(\Omega,\mu)$.
Then $\langlengle X_0,X_1\rightangle_\theta=[X_0,X_1]_\theta$ if either $X_0$, $X_1$ are both
Banach spaces or $X_0,X_1$ are both separable.
\end{corollary}
Finally we give an application of Theorem \rightef{main} to
the Morrey space, which is a typical example of non-separable spaces.
Let $0<p\le u\le\infty$ and $({\mathcal X},\mu)$
be a quasi-metric measure space. Recall that
the Morrey space $\mathcal{M}^u_{p}({\mathcal X})$
is the collection of all $p$-locally integrable
functions $f$ on ${\mathcal X}$ such that
\begin{equation*}
\|f\|_{\mathcal{M}^u_{p}({\mathcal X})} := \sup_{B\mathop{\rightm sub \,}\nolimitsset {\mathcal X}}
|B|^{1/u-1/p}\left[\int_B |f(x)|^p\,dx\right]^{1/p}<\infty\, ,
\end{equation*}
where the supremum is taken over all balls $B$ in ${\mathcal X}$.
Obviously, $\mathcal{M}^p_{p}({\mathcal X})=L_p({\mathcal X})$.
Since Morrey spaces are non-separable, we can not apply \cite[Theorem 3.4]{km98} to Morrey spaces.
By \cite[Proposition 2.1]{lyy}, we know that $[{\mathcal M}_{p_0}^{u_0}({\mathcal X})]^{1-\theta}[{\mathcal M}_{p_1}^{u_1}({\mathcal X})]^\theta={\mathcal M}_{p}^{u}({\mathcal X})$, which together with Theorem \ref{main} induces the following conclusion.
\begin{proposition}
Let $\theta\in(0,1)$, $0 < p_i \le u_i <\infty$, $i\in\{0,1\}$ and
$$\frac 1u := \frac{1-\theta}{u_0} + \frac{\theta}{u_1} \, ,
\quad \frac 1p := \frac{1-\theta}{p_0} + \frac{\theta}{p_1}.$$
If $u_0p_1=u_1p_0$, then
$$[{\mathcal M}_{p_0}^{u_0}({\mathcal X}),{\mathcal M}_{p_1}^{u_1}({\mathcal X})]_\theta^i=\langlengle {\mathcal M}_{p_0}^{u_0}({\mathcal X}),{\mathcal M}_{p_1}^{u_1}({\mathcal X})\rightangle_\theta =\overline{{\mathcal M}_{p_0}^{u_0}({\mathcal X}){\mathcal A}p
{\mathcal M}_{p_1}^{u_1}({\mathcal X})}^{\|{\mathcal D}ot\|_{{\mathcal M}_{p}^{u}({\mathcal X})}}.$$
\end{proposition}
\noindent Wen Yuan
\noindent School of Mathematical Sciences, Beijing Normal University,
Laboratory of Mathematics and Complex Systems, Ministry of
Education, Beijing 100875, People's Republic of China
\noindent {\it E-mail}: \texttt{[email protected]}
\end{document}
|
\begin{document}
\maketitle
\begin{abstract}
We prove that, for a certain class of kernels $K(y)$, viscosity solutions of
the integro-differential equation
$$
\int_{\mathbb R^n} (u(x+y) - 2 u(x) + u(x-y)) K(y) dy = f(x,u(x))
$$
are locally analytic if $f$ is an analytic function. This extends results in
\cite{Albanese2015} in which it was shown that such solutions belong to
certain Gevrey classes.
\end{abstract}
\tableofcontents
\section{Introduction}
Non-local equations play an important role in so different fields as the modeling of
American option prices, geometric repulsive potentials, the propagation of flames,
and particle physics, where the Boltzmann equation and the Kac equation are prominent
examples of fractional partial differential equations.
Though in recent years the research on non-local partial differential
equations exploded, still quite a lot of very basic questions regarding this type
of equations remain open that have long been settled in the classical setting. In
this article we address one of these questions: Is the solution to an elliptic
fractional partial differential equation with analytic right-hand side analytic?
For classical non-linear partial differential equations this is David Hilbert's 19th
problem. Shortly afterwards, Bernstein gave an answer in \cite{Bernstein1904}
for elliptic equations in two independent variables under the assumption that the solution is already
$C^3$; the result was extended by Petrowsky to systems \cite{Petrowsky1939}.
Different methods of proof and generalizations can be found in \cite{Gevrey1918, Lewy1929, Hopf1932, Friedman1958, Morrey1957,Morrey1958,Morrey1958a}.
In recent years some results on analyticity for special fractional equations on the whole space $\mathbb R^n$
or compact manifolds like $\mathcal S^1$ appeared \cite{DallAcqua2012,Barbaroux2017,Blatt2018a}.
To the best of the authors' knowledge, the findings in \cite{Albanese2015} are the only attempt to consider
analyticity of local solutions to general fractional partial differential equations. They prove that the solutions belong
to certain Gevrey classes but did not succeed in proving that the solutions are indeed analytic.
Let us formulate the main result of this article. We consider translation invariant kernels $K \in C^\infty( \mathbb R^n \setminus
\{0\}, (0,\infty)) $ close to a kernel of fractional Laplacian type in the sense that
\begin{equation} \label{eq:NearFractional}
\left|\frac{|y|^{n+s}K(y)}{2-s} - a_0 \right| \leq \eta
\end{equation}
for all $y \in \mathbb R^n \setminus \{0\}.$ Here,
$\eta> 0$ is going to be a small constant that will be determined
later on.
For such kernels and functions $u \in L^\infty(\mathbb R^n, \mathbb R)$ we define
the operator
$$
K u(x) = p.v. \int_{\mathbb R^n} (u(x+y) - 2 u(x) + u(x-y)) K(y) dy.
$$
We will furthermore assume that the kernel satisfies the estimate
\begin{equation} \label{eq:DerivativesKernel}
|\partial^\alpha_y K(y) | \leq C \frac {
H^{|\alpha|}
|\alpha| !} {|y|^{n+s+|\alpha|}} \quad \text{ on } B_1(0)
\end{equation}
for all multiindices $\alpha \in \mathbb N_0^n$. We will assume without loss of generality that $H \geq 1.$
In this short note we will prove the following result.
\begin{theorem} \label{thm:AnalyticityOfSolutions}
For $s \in (1,2)$ let us assume that $u \in L^\infty(\mathbb R^n, \mathbb R) \cap C^\infty(B_1(0))$ is a
viscosity solution of the equation
$$
K u (x) = f(x,u(x))
$$
for an analytic function $f:B_1(0) \times \mathbb R \rightarrow \mathbb R$. Then $u$ is analytic on $B_1(0).$
\end{theorem}
Note that in view of the bootstrapping argument in \cite{Barrios2014} the assumption $u \in C^\infty(B_1(0))$ is not essential. In contrast to \cite{Albanese2015} we only consider translation invariant
equations here. But this is not the reason why the result stated here
is stronger: Unfortunately some of the additional terms coming from $x$-dependence
of the kernel seem to be missing in \cite[inequality (3.2)]{Albanese2015} and hence their
proof seems to at least have a gap. Though we believe that also these additional
terms can be controlled we leave this case for a later paper as this will be technically
more involved.
As in \cite{Albanese2015}, we prove Theorem
\ref{thm:AnalyticityOfSolutions} combining the classical approaches by Friedman and
Morrey with the a-priori estimates for solution in \cite{Caffarelli2011}. In
contrast to \cite{Albanese2015} we omit the use of
incremental differences and discrete partial integration completely and directly
work with partial derivatives and partial integration. The essential new ingredient in our proof is to estimate the terms coming
from the long-range interactions of the equation in a much more sophisticated way using nested balls.
In Section \ref{sec:Preliminaries} we gather some known facts and tools for the proof of Theorem \ref{thm:AnalyticityOfSolutions}, i.e. a characterization of analyticity, the Schauder estimates of Caffarelli and Silvestre in \cite{Caffarelli2011} and an elementary estimate for the binomial. The essential estimate for higher derivatives is then derived in Section \ref{sec:APriori} before we turn to the proof of Theorem \ref{thm:AnalyticityOfSolutions} in Sections \ref{sec:Proof1} and \ref{sec:Proof2}. In Section \ref{sec:Proof1} we give the proof first for the special case that the right-hand side of our equation does not depend on $x$ and not on $u$. We do this for two reasons: To make the presentation as readable as possible and since this special case contains the major new difficulties. We will then see in Section \ref{sec:Proof2} that one can deal with the $u$-dependence by applying a higher order chain rule in a fairly standard way.
\section{Preliminaries} \label{sec:Preliminaries}
\subsection{Characterization of Analytic Functions}
The following fact is well known.
\begin{theorem} A function $u:\Omega \rightarrow \mathbb R$ is analytic on $\Omega$, $\Omega \subset \mathbb R^n$ open,
if and only if for every compact set $K \subset \Omega$ there are constants $C=C_K,A=A_K <
\infty$ such that
$$
\|\nabla^k u \|_{L^\infty(K)} \leq C A^k k!
$$
for all $k \in \mathbb N_0$.
\end{theorem}
A proof of this theorem can be found in \cite{Krantz1992}.
\subsection{A-Priori Estimates for Non-Local Integro-Differential Operators}
Caffarelli and Silvestre proved the following remarkable theorem.
\begin{theorem}[\protect{\cite[Theorem 61]{Caffarelli2011}}]
\label{thm:SchauderEstimate}
Let $s \in (1,2)$ and $u \in L^\infty(\mathbb R^n)$ be a viscosity solution of
$$
Ku (x) = f(x) \text { on } B_1(0)
$$
for an $f \in L^\infty(B_1(0))$ and let $\eta>0$ in \eqref{eq:NearFractional} be small enough.
Then for all $0 < \alpha < 1-s$ we have $u \in C^{1,\alpha}(B_{\frac 1 2}(0))$
and
$$
\|u\|_{C^{1,\alpha} (B_{\frac12}(0))}
\leq C\left(\|f\|_{L^\infty(B_1(0))}
+ \|u\|_{L^\infty(\mathbb R^n)}\right).
$$
\end{theorem}
Scaling this result, we immediately get the following.
\begin{theorem} \label{thm:ScaledSchauderEstimate}
Let $s \in (1,2)$ and $u \in L^\infty(\mathbb R^n)$ solve
$$
Ku (x) = f(x) \text{ in } B_{r}(0)
$$
for an $f \in L^\infty(B_r(0))$ and let $\eta>0$ in \eqref{eq:NearFractional} be small enough.
Then for all $0 < \alpha < 1-s$ we have $u \in
C^{1,\alpha}(B_{\frac r 2 }(0))$
and
$$
r\|\nabla u\|_{L^\infty (B_{\frac r2}(0))} +
r^{1+\alpha} \hol_{\alpha, B_{\frac r2}(0)} (\nabla u)
\leq C\left(r^{s}\|f\|_{L^\infty(B_1(0))}
+ \|u\|_{L^\infty(\mathbb R^n)}\right).
$$
\end{theorem}
\subsection{An Estimate for the Binomial} We will need the following estimate for the binomial.
\begin{lemma} \label{lem:Binomial}
We have
$$
\frac{k^k}{(k-l)^{k-l} l^l } \leq (2e)^l \binom kl
$$
for all $k \in \mathbb N$, $k>l>0$.
\end{lemma}
\begin{proof}
For $ 0< l \leq \frac k2$ we have
$$
\binom{k}{l} \geq 2^{-l} \frac{k^l}{l^l}
$$
and
$$
\frac{k^k}{(k-l)^{k-l} l^l} =\left( \frac k {k-l} \right) ^{k-l}\frac{k^l}{l^l} =
\left(
1 + \frac l {k-l} \right) ^{k-l}\frac{k^l}{l^l} \leq e^l \frac{ k^l} {l^l}.
$$
Hence,
$$
\frac{k^k}{(k-l)^{k-l}l^l} \leq (2e)^l \binom k l
$$
if $l \leq \frac k 2.$ For $l> \frac k2 $ we get applying the above to $k-l$ instead
of $k$
$$
\frac{k^k}{(k-l)^{k-l} l^l} \leq (2e)^{k-l} \binom k {k-l} \leq (2e)^l \binom k
{l}.
$$
\end{proof}
\section{The Essential A-Priori Estimate} \label{sec:APriori}
We use the estimates of Caffarelli and Silvestre to derive the following
recursive estimate for derivatives of higher order. To shorten notation we
use the shortcuts $B_R = B_R(0)$ and
$
\|u\|_{A} = \|u\|_{L^\infty (A)}
$
for a subset $A \subset \mathbb R^n. $ Furthermore, we will use
$$
\|\nabla ^k u\|_{A}:= \sup_{|\alpha| =k} \|\partial^{\alpha} u\|_{A}.
$$
\begin{theorem} \label{thm:ScaledHigherOrderEstimate}
Let $u \in L^\infty(\mathbb R^n) \cap C^\infty(\Omega)$ and $f:\Omega
\rightarrow \mathbb R$ be smooth such that
$$
Ku = f \text{ on } \Omega.
$$ If $x_0 \in \Omega$, $\sigma>0$, and $k\in \mathbb N$ are chosen such that
$B_{6\sigma(k+1)}(x_0) \subset
\Omega$, then
\begin{multline*}
\sigma \| \nabla^{k+1} u \|_{B_{\sigma}(x_0)} \leq C \bigg( \sigma^s
\|\nabla ^k f\|_{B_{2\sigma}(x_0)} + \|\nabla^k u\|_{B_{4\sigma}(x_0)} \\ + \sigma^s
\sum_{l=1}^{k-1} \frac {H^l l!
\|\nabla^{k-l} u
\|_{B_{6l\sigma+2\sigma} (x_0)}}{(6l\sigma)^{l+s}} + \sigma^s \frac {H^k
k! \| u
\|_{\mathbb R^n}} {(6k\sigma)^{k+s}} \bigg)
\end{multline*}
\end{theorem}
\begin{proof}
After a suitable translation we can assume that $x_0=0$.
We first show the statement of the theorem under the additional assumption that $u$ is
$C^\infty$ on the complete space $\mathbb R^n$ and has compact support.
For that we choose $\tilde \eta \in C^{\infty}(\mathbb R^n,[0,1])$ such that
$$
\tilde \eta \equiv 1 \text{ on } B_3 \quad \text{and} \quad \tilde
\eta \equiv 0 \text{ on } \mathbb R^n \setminus B_4
$$
and set
$$
\eta(x) = \tilde \eta (\tfrac x \sigma).
$$
For $k \in \mathbb N$ and $i_1, \ldots, i_k \in \{0, \ldots, n\}$ we decompose
$$
w= \partial_{i_k, \ldots , i_1} u = \partial_{i_k} (\eta \partial_{i_{k-1}
\ldots, i_1} u) + \partial_{i_k} ((1 - \eta) \partial_{i_{k-1}
\ldots, i_1} u) = w_1 + w_2.
$$
Applying Theorem \ref{thm:ScaledSchauderEstimate} we get
\begin{equation}\label{eq:APriori}
\sigma \|\nabla \partial_{i_1, \ldots , i_k} u\|_{B_\sigma(0)} \leq
C \left( \sigma^s\|Kw_1\|_{B_{2\sigma}} + \|w_1\|_{\mathbb R^n}
\right)
\end{equation}
We first note that
\begin{equation}\label{eq:W1}
\begin{aligned}
\|w_1\|_{\mathbb R^n} &= \|\partial_{i_k} (\eta \partial_{i_{k-1}
\ldots, i_1} u) \|_{\mathbb R^n} \leq \|\nabla^k u\|_{B_{4\sigma}} +
\|\nabla \eta \|_{\mathbb R^n} \|
\nabla^{k-1} u\|_{B_{4\sigma}}
\\
&\leq \|\nabla^k u\|_{B_{4\sigma}} + \frac C \sigma \|
\nabla^{k-1} u\|_{B_{4\sigma}}.
\end{aligned}
\end{equation}
To estimate the first term in \eqref{eq:APriori}, we use $w_1 = w - w_2$ to get
\begin{equation} \label{}
\|Kw_1\|_{B_{2\sigma}} \leq \|Kw\|_{ B_{2\sigma}} +
\|Kw_2\|_{B_ {2\sigma} } \leq \|\nabla^k f\|_{B_{2\sigma}} + \|Kw_2\|_{B_ {2\sigma} } .
\end{equation}
and observe that for $x \in B_{2\sigma}$ we have
\begin{equation} \label{eq:Kw2}
\begin{aligned}
|K w_2 &(x)| = \left|\int_{\mathbb R^n} (w_2(x+y) - 2 w_2(x) + w_2(x-y )) K (y) dy \right| \\
& = \left|\int_{\mathbb R^n} (w_2(x+y) + w_2(x-y )) K (y) dy \right| \\
& \leq 2 \left|\int_{\mathbb R^n} w_2(x+y) K (y) dy \right| \\
&= 2 \left| \int_{\mathbb R^n} (1-\eta(x+y)) \partial_{i_{k-1}, \ldots, i_1} u (x+y) \partial_{i_k} K (y) dy \right|\\
& \leq \left| \int_{B_{6\sigma}} (1-\eta(x+y)) \partial_{i_{k-1}, \ldots, i_1} u (x+y) \partial_{i_k} K (y) dy \right|
\\
& \quad \quad + \left| \int_{\mathbb R^n \setminus B_{6\sigma}} (1-\eta(x+y)) \partial_{i_{k-1}, \ldots, i_1} u (x+y) \partial_{i_k} K (y) dy \right|
\\
&= I_1 + J_1.
\end{aligned}
\end{equation}
To estimate $I_1$, we note that due to the properties of $\eta$ and the triangle
inequality $1-\eta (x+y) = 0$ if $|y| \leq \sigma$ and hence we get from the properties
of $K$ that
\begin{equation} \label{eq:I1}
I_1 \leq C H \|\nabla^{k-1} u \|_{L^\infty(B_{8\sigma})} \int_{\mathbb R^n \setminus
B_{\sigma}} \frac 1 {|y|^{n+1+s}} dy = C \frac
{H}{\sigma^{1+s}} \|\nabla^{k-1} u \|_{L^\infty(B_{8\sigma})} .
\end{equation}
For $J_1$ we use partial integration to get
\begin{align*}
J_1 &\leq \left| \int_{\mathbb R^n \setminus B_{6\sigma}(0)} \partial_{i_{k-2}, \ldots,
i_1} u (x+y) \partial_{i_k, i_{k-1}} K (y) dy \right| +
\left| \int_{\partial B_{6\sigma}(0)} |\partial_{i_{k-2}, \ldots,
i_1} u (x+y) | |\partial_{i_k} K (y) | dS(y) \right| \\
\\ &\leq C H^2 \|\nabla^{k-2} u \|_{L^\infty (B_{14\sigma})} \int_{B_{12\sigma} \setminus
B_{6\sigma}} \frac 1 {|y|^{n+2+s}} + C H \|\nabla^{k-2}\|_{L^\infty(B_{14\sigma})}
\int_{\partial B_{6\sigma}} \frac 1 {|y|^{n+s+1}} dS(y)
\\ & +
\left| \int_{\mathbb R^n \setminus B_{12\sigma}(0)} \partial_{i_{k-2},
\ldots,
i_1} u (x+y) \partial_{i_k,i_{k-1}} K (y) dy \right|
\\
& \leq\frac { C H^2 \|\nabla^{k-2} u \|_{L^\infty (B_{14\sigma})} }{(6\sigma)^{2+s}} + J_2.
\end{align*}
where
$$
J_2 = \left| \int_{\mathbb R^n \setminus B_{12\sigma}(0)} \partial_{i_{k-2},
\ldots,
i_1} u (x+y) \partial_{i_k, i_{k-1}} K (y) dy \right|.
$$
Setting
$$
J_l = \left| \int_{\mathbb R^n \setminus B_{6l\sigma}(0)} \partial_{i_{k-l},
\ldots,
i_1} u (x+y) \partial_{i_k, \ldots, i_{k+1-l}} K (y) dy \right|
$$
we obtain as above using integration by parts and \eqref{eq:DerivativesKernel}
\begin{align*}
J_l &\leq \left| \int_{\mathbb R^n \setminus B_{6l\sigma}(0)} \partial_{i_{k-l-1},
\ldots,
i_1} u (x+y) \partial_{i_k, \ldots, i_{k-l}} K (y) dy \right| \\ & \quad \quad \quad +
\left| \int_{\partial B_{6l\sigma}(0)} |\partial_{i_{k-l-1}, \ldots,
i_1} u (x+y) | |\partial_{i_k, \ldots, i_{k+1-l}} K (y) | dS(y) \right| \\
\\
&\leq C H^{l+1} (l+1)! \|\nabla^{k-l-1} u \|_{B_{6(l+1)\sigma+2\sigma}}
\int_{B_{6(l+1)\sigma} \setminus
B_{6l\sigma}} \frac 1 {|y|^{n+l+1+s}} \\ & \quad \quad \quad + C H^l l!
\|\nabla^{k-1-l}\|_{L^\infty(B_{6(l+1)\sigma+ 2\sigma})}
\int_{\partial B_{6l\sigma}} \frac 1 {|y|^{n+s+l}} dS(y) + J_{l+1}
\\
& \leq C H^{l+1} (l+1)! \|\nabla ^{k-(l+1)}\|_{B_{(6(l+1)\sigma)} } \frac 1
{(6l\sigma)^{l+1+s}} + J_{l+1}.
\end{align*}
Iterating this estimate yields
\begin{equation} \label{eq:J1}
\begin{aligned}
J_1 \leq C \sum_{l=2}^{k-1} \frac {H^l l! \|\nabla^{k-l} u
\|_{L^\infty(B_{6l\sigma+2\sigma})}}{(6l\sigma)^{l+s}} + J_{k} \\
\leq C \left( \sum_{l=2}^{k-1} \frac {H^l l! \|\nabla^{k-l} u
\|_{B_{6l\sigma+2\sigma}}}{(6l\sigma)^{l+s}} + \frac {H^k k! \| u
\|_{\mathbb R^n}} {(6k\sigma)^{k+s}} \right).
\end{aligned}
\end{equation}
Together the estimates \eqref{eq:APriori} -- \eqref{eq:J1} prove the statement of
the theorem for all $u \in C^\infty (\mathbb R^n, \mathbb R)$ with compact support.
To get the statement for $u \in L^\infty (\mathbb R^n, \mathbb R) \cap
C^\infty(\Omega, \mathbb R)$, we let $u_m $ be such that
$$
u_m = u \text{ on } B_{m} ,
$$
and $$\|u_m\|_{L^\infty} \leq
\|u\|_{L^\infty}.$$
We can then apply what we have proven so far to the function $u_m$ instead of $u$ to get
\begin{multline} \label{eq:um}
\sigma \| \nabla^{k+1} u_m \|_{B_{\sigma}(x_0)} \leq C \bigg( \sigma^s
\|\nabla ^k f_m\|_{B_{2\sigma}(x_0)} + \|\nabla^k u_m\|_{B_{4\sigma}(x_0)} \\ + \sigma^s
\sum_{l=1}^{k-1} \frac {H^l l!
\|\nabla^{k-l} u_m
\|_{B_{6l\sigma+2\sigma} (x_0)}}{(6l\sigma)^{l+s}} + \sigma^s \frac {H^k
k! \| u_m
\|_{\mathbb R^n}} {(6k\sigma)^{k+s}} \bigg)
\end{multline}
where $f_m = Ku_m$. It is obvious that due to the properties of the approximations $u_m$ we can go to
the limit in the inequality and thus obtain the inequality for $u$ once we have shown that
$$
\|\nabla ^k f_m\|_{B_{2\sigma}(x_0)} \rightarrow \|\nabla ^k f\|_{B_{2\sigma}(x_0)}
$$
for $m \rightarrow \infty$.
For $x \in B_{2\sigma}$ and $\alpha \in \mathbb N^n$ with $|\alpha|=k$ we calculate
\begin{align*}
\partial^\alpha (K u_m)(x) = \partial^\alpha Ku + \partial^\alpha K(v_m)
\end{align*}
where $v_m = u_m - u$ and using that $v_m=0$ on $B_m$
\begin{multline*}
\partial^\alpha K(v_m)(x) = \partial^\alpha \int_{\mathbb R^n - B_{\frac m2}}
v_m(y) \left( K(y+x) + K(y-x)\right) dy \\ = \int_{\mathbb R^n - B_{\frac m2}}
v_m(y) \left( \partial^{\alpha} K(y+x) + \partial^\alpha K(y-x)\right) dy
\end{multline*}
Hence,
$$
\|\nabla^k K(v_m)\|_{B_{2\sigma}} \leq C H^{|\alpha|} |\alpha|! m^{-s-|\alpha|} \|v_m\|_{\mathbb R^n}
\leq C H^{|\alpha|} |\alpha|! m^{-s-|\alpha|} \|u\|_{\mathbb R^n} \xrightarrow{m\rightarrow \infty} 0
$$
and thus
$$
\|\nabla^k K u_m\|_{B_{2\sigma}} \rightarrow \|\nabla^k K u\|_{B_{2\sigma}} = \|\nabla^k f\|_{B_{2\sigma}}
$$
\end{proof}
\section{Proof of the Theorem for $Ku(x) = f(x)$} \label{sec:Proof1}
Let us first illustrate this method for the special case that $Ku(x) =f(x)$, i.e. that the right-hand side of our equation does not depend on $u$.
\subsection{A Recursive Estimate}
Following \cite{Albanese2015} we define the quantities
\begin{gather*}
N_k = \sup_{0 < r < 1} \left( |1-r|^{k+s} \|\nabla^k f\|_{L^{\infty} (B_r)} \right)
\quad \text{ for } k \geq 0 \\
M_k = \sup_{0 < r < 1} \left( |1-r|^{k} \|\nabla^k u\|_{L^{\infty} (B_r)} \right)
\quad \text{ for } k \geq 1, \\
M_0 = \|u\|_{L^{\infty} (\mathbb R^n)}.
\end{gather*}
We will deduce the following estimate for these quantities from Theorem
\ref{thm:ScaledHigherOrderEstimate}.
\begin{theorem} \label{thm:RecursiveEstimate}
We have
$$
M_{k+1} \leq C \left(N_k + k \sum_{l=0}^{k} \binom{k}{l} M_{k-l} (2e)^l H^l l!
\right)
$$
for all $k \in \mathbb N_0$ and a constant $C$.
\end{theorem}
\begin{proof}
For $x_0 \in B_1(0)$ and $k \in \mathbb N$ we apply Theorem
\ref{thm:ScaledHigherOrderEstimate} with $\sigma = \frac {1-|x_0|} {6(k+2)}$ to get
\begin{multline*}
\| \nabla^{k+1} u \|_{B_{\sigma}} \leq C \bigg( \sigma^{s-1}
\|\nabla ^k f\|_{B_{2\sigma}} + \sigma^{-1} \|\nabla^k u\|_{B_{4\sigma}} \\ +
\sigma^{s-1} \sum_{l=2}^{k-1} \frac {H^l l!
\|\nabla^{k-l} u
\|_{B_{6l\sigma+2\sigma}}}{(6l\sigma)^{l+s}} + \sigma^{s-1} \frac {H^k k!
\| u
\|_{\mathbb R^n}} {(6k\sigma)^{k+s}} \bigg)
\end{multline*}
where we use $B_r=B_r(x_0)$ to shorten notation. Hence,
\begin{multline*}
(1-|x_0|)^{k+1}|\nabla^{k+1} u(x_0)| \leq
(1-|x_0|)^{k+1} C \bigg( \sigma^{s-1}
\|\nabla ^k f\|_{B_{2\sigma}} + \sigma^{-1} \|\nabla^k u\|_{B_{4r}} \\ +
\sigma^{s-1} \sum_{l=2}^{k-1} \frac {H^l l!
\|\nabla^{k-l} u
\|_{B_{6l\sigma+2\sigma}}}{(6l\sigma)^{l+s}} + \sigma^{s-1} \frac {H^k k!
\| u
\|_{\mathbb R^n}} {(6k\sigma)^{k+s}} \bigg).
\end{multline*}
We estimate
\begin{align*}
(1-|x_0|)^{k+1} \sigma^{s-1} \|\nabla^k f\|_{B_{2\sigma} (x_0)} = (6(k+2)
\sigma)^{k+1} \sigma ^{s-1}\|\nabla^k f\|_{B_{2\sigma} (x_0)} \\
\leq \frac{(6(k+2) \sigma)^{k+1} \sigma ^{s-1}} {((6(k+2)-2) \sigma)^{s+k}} N_k
= \frac{(k+2)^{k+1}} {(k+2-\frac 1 3)^{k+s}} N_k
\leq \left( 1 + \frac {\frac 1 3}
{k+2 - \frac 1 3} \right)^{k+1} N_k
\leq C N_k
\end{align*}
as $s > 1$ and
$$
0 < \left( 1 + \frac {\frac 1 3}
{k+2 - \frac 1 3} \right)^{k+1} < \left(1 + \frac {\frac 13} {k+\frac 5 3}
\right)^{k+\frac 53} \rightarrow e^{\frac 1 3}.
$$
Similarly,
\begin{align*}
(1-|x_0|)^{k+1} \sigma^{-1} \|\nabla^k u\|_{B_{4\sigma} (x_0)}
\leq \frac {(6(k+2)^{k+1})}{ (6(k+2)-4)^{k}} M_k
\leq C (k+2) M_k.
\end{align*}
Furthermore, we get for $1\leq l < k$
\begin{align*}
(1-|x_0|)^{k+1} & \sigma^{s-1} \frac {H^l l!
\|\nabla^{k-l} u
\|_{B_{6l\sigma+2\sigma}}}{(6l\sigma)^{l+s}}
\leq \frac{(6(k+2))^{k+1}}{(6(k+2-l) - 2)^{k-l} (6l)^{l+s}} H^l l! M_{k-l} \\
& =6 \frac{(k+2)^{k+1}}{((k-l) + \frac{10}{6})^{k-l} l^{l+s}} H^l l! M_{k-l}
\end{align*}
Note that
\begin{align*}
\frac{(k+2)^{k+1}} {(k-l+{\frac {10} 6})^{k-l} l^{l+1}}
&= \left(\frac{k+2}{k+1}\right)^{k+1} \left(\frac{k-l}{k-l + \frac {10} 6} \right)
^{k-l} \left( \frac l {l+1}\right)^{l+1} \frac{(k+1)^{k+1}} {(k-l)^{k-l}
(l+1)^{l+1}}
\\
&= \left(1 +\frac{1}{k+1}\right)^{k+1} \left(1 - \frac{\frac{10} 6}{k-l + \frac
{10} 6} \right)
^{k-l} \left( 1 - \frac{1} {l+1}\right) ^{l+1}\frac{(k+1)^{k+1}} {(k-l)^{k-l}
(l+1)^{l+1}}
\\
& \leq
C \frac{(k+1)^{k+1}} {(k-l)^{k-l} (l+1)^{l+1}}
\leq C (2e)^l\binom{k+1}{l}.
\end{align*}
In the last step we used Lemma \ref{lem:Binomial}.
Finally,
$$
\frac{(1-|x_0|)^{k+1}}{ \sigma (6k \sigma)^k} = \frac{(6(k+2))^{k+1}}{(6k)^k} = 6
(k+2) (1+\frac 2 k)^k \leq C 6(k+2).
$$
These estimates show that
$$
(1-|x_0|)^{k+1}|\nabla^{k+1} u(x_0)| \leq C \left(N_k + k \sum_{l=0}^{k} \binom{k}{l} M_{k-l} (2e)^l H^l l! \right)
$$
Taking the supremum over all $x_0 \in B_1(0)$ proves the theorem.
\end{proof}
\subsection{The Conclusion using Cauchy's Method of Majorants}
We will now conclude the proof of Theorem \ref{thm:AnalyticityOfSolutions} using
Cauchy's method of Majorants.
As being analytic is a local statement, we can assume w.l.o.g that there are constants $C_f, A_f < \infty$ such that
$$
N_k \leq C_f A_f^k k!
$$
for all $k \in \mathbb N_0$. Setting $A := \sup\{A_f, 2eH\}$, Theorem \ref{thm:RecursiveEstimate} tells us that
\begin{equation} \label{eq:RecursiveInequality}
M_{k+1} \leq C (N_k + k \sum_{l=0}^k \binom {k} {l} M_{k-l}(2eH)^l l!)
\leq C A^k k! + C k \sum_{l=0}^k \binom {k} {l} M_{k-l}A^l l!.
\end{equation}
for all $k \in \mathbb N_0.$
We will show that this recursive estimate implies that $M_k \leq C_u A_u^k k!$ for
suitably chosen constants $C_u, A_u$ by comparing it to the solution of an
analytic ordinary differential equation.
For this we put
$$
G(t) := C \sum_{k \in \mathbb N_0} A^k t^k
$$
and consider the solution to the initial value problem
$$
\begin{cases}
c'(t) = G(t) + (tG(t)c(t))' \\
c(0) = M_0.
\end{cases}
$$
Since near $t=0$ we have $1-tG(t) \not= 0$, we can rewrite this equation as
$$
c'(t) = \frac{G(t) + (G(t) + t G'(t))\, c(t)}{1-tG(t)}
$$
near $0$. Hence, the above initial value problem has a unique analytic solution on some small time interval
$(-\varepsilon, \varepsilon).$ The derivatives $\tilde M_k = c^{(k)}(0)$ satisfy
$$
\tilde M_k \leq C_u A_u^k k!
$$
for suitable constants $C_u, A_u$ and
the recursive relation
$$
\tilde M_{k+1} = C A^k k! + C k \sum_{l=0}^k \binom k l \tilde M_{k-l} A^l l !
$$
Comparing this with \eqref{eq:RecursiveInequality} we deduce by induction that
$$
M_{k} \leq \tilde M_{k} \leq C_u A_u^k k!.
$$
\section{Proof of the Theorem for $Ku(x) = f(x,u(x))$} \label{sec:Proof2}
Let us now move to the case that
$$
K(u) = f(x,u(x)) \text{ in } B_1(0).
$$
As in the last section we have
$$
M_{k+1} \leq C \left( N_k + k \sum_{l=0}^{k} \binom{k}{l} M_{k-l} (2e)^l H^l l!
\right)
$$
for all $k \in \mathbb N_0$ and a constant $A$ where now
$$
N_k = \|\nabla^k (f(x,u(x)))\|_{B_1}.
$$
We introduce the terms
$$
\tilde M_k = M_k +1
$$
and
$$
\tilde N_k = \|\nabla^k f\|_{K}
$$
where $K$ is the image of $x \rightarrow (x,u(x))$. As being analytic is a local property, we can again assume without loss of generality that
$$
\tilde N_k \leq C A_f^k k!
$$
for a constant $A_f < \infty.$ We still have
\begin{equation} \label{eq:RecursiveInequality2}
\tilde M_{k+1} \leq C \left( N_k + k \sum_{l=0}^{k} \binom{k}{l} \tilde M_{k-l} (2e)^l H^l l!
\right)
\end{equation}
We need a higher order chain rule to estimate $N_k$ in terms of $\tilde N_k$ and $\tilde M_k$.
\subsection{Higher Order Chain Rule}
\begin{proposition} \label{prop:FraaDiBruno}
Let $g:\mathbb R^{m_1} \rightarrow \mathbb R^{m_2}$ and $f: \mathbb R^{m_2} \rightarrow \mathbb R$ be two $C^k$-functions. Then for a multiindex
$\alpha \in \mathbb N ^{m_1}$ of length $|\alpha| \leq k$ and $x \in \mathbb R^{m_1}$ the derivative satisfies
$$
\partial^\alpha (f \circ g ) (x) = P_{m_1, m_2}^\alpha ( \{\partial^\gamma f(g(x))\}_{|\gamma| \leq |\alpha|}, \{\partial^\gamma g_i\}_{0 \leq \gamma \leq \alpha} )
$$
where $P^\alpha_{m_1,m_2}$ is a linear combination with positive coefficients of terms of the form
$$
\partial^k_{x_{i_1}, \ldots, x_{i_k}} f( g(x)) \, \partial^{\gamma_1}g_{i_1} \cdots \partial^{\gamma_k}g_{i_k}
$$
with $1 \leq k \leq |\alpha|$ and $|\gamma_1| + \ldots + |\gamma_k| = |\alpha|$.
\end{proposition}
For $m_1=m_2=1$ we will use the notation $P^k$ instead of $P^\alpha_{m_1,m_2}.$
We leave the easy inductive proof of this statement to the reader. Although very precise formulas of the higher order chain rule were given by Fa\`a di Bruno \cite{DiBruno1857} for the univariate case and by for example Constantine and Savits in \cite{Constantine1996} for the multivariate case, the above proposition contains all that is needed in our proof.
Let us derive an easy consequences of Proposition \ref{prop:FraaDiBruno} that allows us in a sense to reduce the multivariate case to the univariate one.
\begin{lemma} \label{lem:SingleVariable}
For constants $a_\gamma= a_{|\gamma|}, b_{|\gamma|} \in
\mathbb R $ depending only on the
length of the multiindex $\gamma$ we have
$$
P^\alpha_{m_1, m_2} (\{a_{|\gamma|}\},\{b_{|\gamma|}\}) =
P^{|\alpha|}(\{a_{|\gamma|}\}, \{b_{|\gamma|}\}).
$$
\end{lemma}
\begin{proof}
Plugging functions $g$ and $f$ of the form $$g(x_1, \ldots ,x_{m_1}) = \tilde
g( x_1+ \cdots + x_{m_1} ) \cdot (1, \ldots, 1)^t$$ and $$f(y_1, \ldots
y_{m_2})=\tilde f\left(\frac{y_1+ \cdots + y_{m_2}}{m_2}\right)$$ into the higher
order chain rule we get from
$$
f\circ g = (\tilde f \circ \tilde g) (x_1 + \cdots + x_{m_1})
$$
that
$$
P^\alpha_{m_1, m_2} (\{\partial^\gamma f(g(x))\}_{|\gamma| \leq |\alpha|}, \{\partial^\gamma g_i\}_{0 \leq \gamma \leq \alpha}) = P^{|\alpha|} (\{\partial^l \tilde f( \tilde g(x))\}_{l \leq |\alpha|}, \{\partial^l \tilde g\}_{0 \leq l \leq |\alpha|})
$$
So for constants $a_\gamma= a_{|\gamma|}, b_{\gamma} \in
\mathbb R $ depending only on the
length of the multiindex $\gamma$ we have
$$
P^\alpha_{m_1, m_2} (\{a_{|\gamma|}\},\{b_{|\gamma|}\}) =
P^{|\alpha|}(\{a_{|\gamma|}\}, \{b_{|\gamma|}\}).
$$
\end{proof}
We will use this lemma to estimate $N_k$.
\begin{lemma} \label{lem:estimateComposition}
We have
\begin{align*}
N_k
& \leq C P^{k} ( \{ \tilde N_l\}, \{ \tilde M_l\}_{l=0, \ldots, k }).
\end{align*}
\end{lemma}
\begin{proof}
Applying Fa\`a di Bruno's formula to
$f\circ g$ where
$$
g(x) = (x, u(x))
$$
we get
\begin{align*}
\partial ^\alpha (f(x,u))\ & = P^{\alpha}_{n,n+1}
(\{\partial^\gamma f \}, \{\partial^\gamma g \})
\end{align*}
where $P^{\alpha}_{n,n +1}
(\{\partial^\gamma f \}, \{\partial^\gamma g \}) $ is a linear combination with positive coefficients of terms of the form
$$
\partial^m_{x_{i_1}, \ldots, x_{i_m}}f( g(x)) \, \partial^{\gamma_1}g_{i_1}
\cdots \partial^{\gamma_m}g_{i_m}
$$
with $1 \leq m \leq |\alpha|$ and $\gamma_1 + \ldots + \gamma_m = \alpha.$ Note
that due to the special structure of $g$ we have $\partial^\gamma g_i = 1$ for
$i=1, \ldots, n$ and $|\gamma|=1$ and $\partial^\gamma g_i = 0$ for $i=1, \ldots, n$
and $|\gamma|\geq 2$.
Hence,
\begin{align*}
(1-r)^{|\alpha|+s} \|\partial^m_{x_{i_1}, \ldots, x_{i_m}} f( g(x))
\partial^{\gamma_1}g_{i_1} \cdots \partial^{\gamma_m}g_{i_m}\|_{B_r(0)}
& \leq \| \partial^m_{x_{i_1}, \ldots, x_{i_m}} f( g(x)) \|_{B_r(0)}
\tilde M_{|\gamma_1|} \cdots \tilde M_{|\gamma_m|} \\
& \leq \tilde N_k
\tilde M_{|\gamma_1|} \cdots \tilde M_{|\gamma_m|}.
\end{align*}
We hence deduce using Lemma \ref{lem:SingleVariable} that
\begin{align*}
\| (1-r)^{|\alpha|+s }\partial ^\alpha (f (x,u))\|_{B_r(0)}
& \leq P^{\alpha} ( \{ \tilde N_k\}, \{ \tilde M_k\}_{k=0, \ldots, |\alpha| }).
\end{align*}
Applying this estimate for all multiindices $\alpha \in \mathbb N_0^n$ with $|\alpha|=k$ proves the claim.
\end{proof}
\subsection{Conclusion of the Proof}
Combining \eqref{eq:RecursiveInequality2} with Lemma \ref{lem:estimateComposition} we get
\begin{multline*}
\tilde M_{k+1} \leq C (P^{k}(\{ \tilde N_l\}, \{ \tilde M_l\}) + k \sum_{l=0}^k \binom {k} {l} \tilde M_{k-l}(2eH)^l l!)
\\ \leq C (P^{k}(\{ A^l l!\}, \{ \tilde M_l\})+ C k \sum_{l=0}^k \binom {k} {l} \tilde M_{k-l} A^l l!)
\end{multline*}
where again $A:= \sup \{A_f, 2eH\}$.
As above we conclude comparing this with the solution to the initial value problem
$$
\begin{cases}
c'(t)= G(c(t)) + (tG(t)c(t))', \\
c(0) = M_0.
\end{cases}
$$
\end{document}
|
\begin{document}
\title{Closures of positive braids and the Morton-Franks-Williams inequality}
\author{J. Gonz\'alez-Meneses\footnote{Both authors partially supported by MTM2010-19355 and FEDER. First author partially supported by FQM-P09-5112 and the Australian Research Council’s Discovery Projects funding scheme (project number DP1094072).} \ and P. M. G. Manch\'on }
\date{6 August 2013}
\maketitle
\begin{abstract}
We study the Morton-Franks-Williams inequality for closures of simple braids (also known as positive permutation braids). This allows us to prove, in a simple way, that the set of simple braids is an orthonormal basis for the inner product of the Hecke algebra of the braid group defined by K\'alm\'an, who first obtained this result by using an interesting connection with Contact Topology.
We also introduce a new technique to study the Homflypt polynomial for closures of positive braids, namely resolution trees whose leaves are simple braids. In terms of these simple resolution trees, we characterize closed positive braids for which the Morton-Franks-Williams inequality is strict. In particular, we determine explicitly the positive braid words on three strands whose closures have braid index three.
\end{abstract}
\section{Introduction} \label{introS}
Let $P_L(v,z)\in \Bbb{Z}[v^{\pm 1},z^{\pm 1}]$ be the two-variable Homflypt polynomial, isotopy invariant of oriented links with normalization $P_{\raisebox{-.8mm}{\epsfysize.10in \epsffile{TrivialKnot.eps}}}(v,z)=1$ and determined by the Homflypt skein relation
$$v^{-1}P_{\raisebox{-.8mm}{\epsfysize.15in \epsffile{OrientedPositiveCrossing.eps}}}(v,z)
-vP_{\raisebox{-.8mm}{\epsfysize.15in \epsffile{OrientedNegativeCrossing.eps}}}(v,z)
=zP_{\raisebox{-.8mm}{\epsfysize.15in \epsffile{OrientedSmoothing.eps}}}(v,z).$$
In the following we will use the notation $P(L)$ instead of $P_L(v,z)$. Note that, for braids, the Homflypt skein relation can be written as $v^{-1}\sigma_i-v\sigma_i^{-1}=z$, or equivalently, as the quadratic skein relation $\sigma_i^2=vz\sigma_i+v^2$.
We first recall and fix terminology about the Morton-Franks-Williams (MFW) bounds and inequalities. If $L=\widehat{b}$ is the closure of a braid $b \in B_n$ with $n$ strands and writhe $w=\textnormal{wr}(b)$, then $w-n+1\leq \partial_v^-(P(L))$ and $\partial_v^+(P(L)) \leq w+n-1$ are the known MFW lower and upper inequalities~\cite{Morton,FW}, where $\partial_v^-(P(L))$ (resp. $\partial_v^+(P(L))$) is the lowest (resp. highest) $v$-degree of $P(L)$. We refer to $w-n+1$ (resp. $w+n-1$) as the MFW lower (resp. upper) bound of $b$. It follows that, if we define
$$
MFW(L)=\frac{\textnormal{span}_v(P(L))}{2}+1=\frac{\partial_v^+(P(L))-\partial_v^-(P(L))}{2}+1,
$$
we have the celebrated MFW inequality $MFW(L)\leq n$. In particular $MFW(L)\leq s(L)$ where $s(L)$ is the braid index (or Seifert circle index) of $L$.
Let $a,b \in B_n$ be two braids with $n$ strands. Then $\langle a,b \rangle _R$ is, by definition, the coefficient of $v^{w+n-1}$ in the two-variable polynomial $(-z)^{n-1}P(\widehat{ab^*})$, where $b^*$ is the reverse braid of $b$ and $w={\rm wr }(ab^*)$. This product can be extended to the whole of the Hecke algebra $H_n(z)$, obtaining a symmetric bilinear form. This is the inner product introduced by K\'alm\'an in \cite{Kalman}. Recall that $H_n(z)$ can be seen as the linear combinations of braids in $B_n$ with coefficients in $\Bbb{Z}[z^{\pm 1}]$, quotiented by the Homflypt skein relation with $v=1$.
Given a permutation $\alpha \in S_n$ on $\{ 1, 2, \dots , n\}$, there is exactly one positive braid~$T_{\alpha}$ which determines the permutation $\alpha$ on its endpoints, and such that every two strands of it cross at most once. The braid $T_{\alpha}$ is said to be the simple braid associated to $\alpha$ (originally called positive permutation braid in \cite{MortonElrifai}). Note that ${\rm wr}(T_{\alpha})=l(\alpha )$, the length of the permutation $\alpha$. We will write $T_{\alpha}^*$ for $(T_{\alpha})^*$. It is a well-known result that the set of simple braids on $n$ strands is a basis of $H_n(z)$. Moreover, the main theorem in \cite{Kalman} states that it is an orthonormal basis for the above inner product. The original proof is based on Contact Topology: it constructs a Legendrian representative of the link $\widehat{T_{\alpha}T_{\beta}^*}$, and uses a result by Rutherford~\cite{Rutherford} that relates the ruling polynomial of a front projection of a Legendrian link with its Homflypt polynomial.
In this paper we relate all the above notions, namely we study how simple braids behave with respect to the MFW inequalities, and we apply the obtained results to K\'alm\'an's inner product, and to closures of positive braids on three strands.
More precisely, in Section~\ref{simpleS} we prove that, among all the closures of simple braids, the MFW upper bound is reached only for the closure of the identity braid. This is used in Section~\ref{KalmanS} to give a simple proof of K\'alm\'an's result: the set of simple braids is an orthonormal basis for K\'alm\'an's inner product. In particular, our proof contains implicitly an algorithm for calculating this inner product.
Further, in Section~\ref{resolutionS} we introduce the notion of simple resolution trees, as positive resolution trees whose leaves are simple braids. By using them, we will obtain a characterization of the closed positive braids for which the MFW inequality is sharp (Theorem~\ref{tree} and Corollary~\ref{transformations}). Note that, when working with closures of positive braids, the MFW lower bound is always reached (a fact that we easily reproved by using again simple resolution trees), hence the MFW inequality is sharp if and only if the MFW upper bound is reached. In particular, this technique allows us to determine explicitly in Section~\ref{3braidS} which positive braid words on three strands have closures of braid index three.
{\bf Acknowledgements:} Part of this work was done during a stay of the first author at the Centre de Recerca Matem\`atica (CRM) in Bellaterra (Barcelona, Spain) and at the Department of Applied Mathematics-EUITI, Universidad Polit\'ecnica de Madrid (Spain), and also during a stay of the second author at the Department of Algebra, Universidad de Sevilla (Spain). We thank these institutions for their hospitality.
\section{MFW inequality for simple braids}\label{simpleS}
In this section we show the key result in this paper: the MFW upper bound is reached, among closures of simple braids, only for the identity braid.
\begin{proposition}\label{MFWforSimples}
Let $\alpha \in S_n$ be a permutation with length $w=l(\alpha )$. Then
$\partial_v^+(P(\widehat{T_{\alpha}}))=w+n-1$ if and only if $\alpha= {\rm id}$, and the coefficient of $v^{w+n-1}$ in $P(\widehat{T_{\rm id}})$ is $(-z)^{1-n}$.
\end{proposition}
\begin{proof}
If $\alpha ={\rm id} \in S_n$ then $w=0$, $T_{\alpha}=1_n=||\stackrel{n}{\dots}|$ and $\widehat{T_{\alpha}}$ is a collection of $n$ unlinked trivial knots
$\raisebox{-.8mm}{\epsfysize.15in \epsffile{TrivialKnot.eps}}\raisebox{-.8mm}{\epsfysize.15in \epsffile{TrivialKnot.eps}}\stackrel{n}{\dots}\raisebox{-.8mm}{\epsfysize.15in \epsffile{TrivialKnot.eps}}$ with Homflypt polynomial
$$
P(\widehat{T_{\alpha}})=\delta ^{n-1}=\left( \frac{v^{-1}-v}{z}\right)^{n-1}= z^{1-n}v^{1-n} + \dots +(-z)^{1-n}v^{n-1}.
$$
So the result holds for $\alpha={\rm id}$, even in the extreme case when $n=1$.
We will prove the result by induction on $n$, the number of strands of the braid~$T_{\alpha}$.
As the trivial braid is the only braid on $1$ strand, we have already shown the case $n=1$.
Assume now the statement for $\alpha \in S_k$ with $k = 1, \dots , n-1$ and suppose $\alpha \in S_n$. Consider the inclusion $i:S_{n-1} \hookrightarrow S_n$, $\omega \mapsto \omega \otimes 1$. We distinguish two cases:
$\bullet $ If $\alpha \in S_n \setminus i(S_{n-1})$, there is a unique permutation $\alpha ' \in S_{n-1}$ and a unique natural number $k<n$ such that $\alpha = \alpha ' s_{n-1} s_{n-2} \dots s_{k}$. It turns out that $T_{\alpha}=T_{\alpha '}\sigma_{n-1} \sigma_{n-2} \dots \sigma_{k}$, as shown in Figure~\ref{TalfaTalfaPrimaF} (see for example \cite{BraidGroups}, page 167).
\begin{figure}
\caption{$T_{\alpha}
\label{TalfaTalfaPrimaF}
\end{figure}
Let $b=T_{\alpha '}\sigma_{n-2} \dots \sigma_{k} \in B_{n-1}$. Clearly $\widehat{T_{\alpha}}=\widehat{b}$, hence by the MFW upper inequality applied to the Homflypt polynomial of $\widehat{b}$, one has:
$$
\partial_v^+(P(\widehat{T_{\alpha}}))
=\partial_v^+(P(\widehat{b})) \leq (w-1)+(n-1)-1=w+n-3 < w+n-1.
$$
$\bullet $ If $\alpha =\alpha ' \otimes 1$ with $\alpha ' \in S_{n-1}$, then $T_{\alpha}=T_{\alpha '} \otimes 1$, $\widehat{T_{\alpha}}=\widehat{T_{\alpha '}} \sqcup \raisebox{-.8mm}{\epsfysize.15in \epsffile{TrivialKnot.eps}}$ and $P(\widehat{T_{\alpha}})=\delta P(\widehat{T_{\alpha '}})$. As we have already shown the result when $\alpha$ is trivial, we can assume that $\alpha\neq {\rm id}$ and then $\alpha'\neq {\rm id}$. Since $\delta=\frac{v^{-1}-v}{z}$, applying the induction hypothesis to $T_{\alpha'}$ which has $n-1$ strands and $w$ crossings, it follows that
$$
\partial_v^+(P(\widehat{T_{\alpha}}))=\partial_v^+(P(\widehat{T_{\alpha '}}))+1 < (w+(n-1)-1)+1 = w+n-1.
$$
\end{proof}
At this point, one could ask for an analogous result for the MFW lower inequality for closures of simple braids. However, it is known that, for closed positive
braids (and simple braids are positive braids) the MFW lower bound is always reached (see for example \cite{KalmanMeridian}, comment after Example 1$\cdot$8). In spite of this, we will give in Section~\ref{resolutionS} a direct proof of this fact, working with simple resolution trees.
\section{Inner products and the Homflypt skein relation} \label{KalmanS}
Recall, from the introduction, the inner product $\langle \cdot \:, \cdot \rangle _R$ defined by K\'alm\'an on the Hecke algebra $H_n(z)$. The following result was first obtained by K\'alm\'an~\cite{Kalman}, who proved it by using an interesting connection with Contact Topology (more details in the introduction). Here we give a simple proof of it, based on the Homflypt skein relation and on properties of the simple braids.
\begin{theorem}[K\'alm\'an] The set of simple braids $\{ T_{\alpha} \}_{\alpha \in S_n }$ is an orthonormal basis for $\langle \cdot \:, \cdot \rangle _R$.
\end{theorem}
\begin{proof}
We want to prove that, for any permutations $\alpha, \beta \in S_n$,
$$
\langle T_{\alpha},T_{\beta}\rangle _R=
\left\{
\begin{array}{cl}
1 & \text{ if } \beta =\alpha , \\
0 & \text{ otherwise. }
\end{array}
\right.
$$
This is equivalent to showing that for all $\alpha, \beta \in S_n$, the coefficient of $v^{w+n-1}$ in $P(\widehat{T_\alpha T_\beta^*})$ is $(-z)^{1-n}$ if $\beta =\alpha$, and $0$ otherwise, where $w={\rm wr }(T_{\alpha}T_{\beta}^*)={\rm wr }(T_{\alpha}) + {\rm wr }(T_{\beta}) = l(\alpha )+l(\beta)$. Note that, by the MFW inequality, to say that the coefficient of $v^{w+n-1}$ in $P(\widehat{T_{\alpha}T_{\beta}^*})$ is zero is equivalent to saying that $\partial_v^+(P(\widehat{T_{\alpha}T_{\beta}^*}))<w+n-1$.
The proof is by induction on the length $l(\beta)$ of the permutation $\beta$. If $l(\beta )=0$, then $\beta ={\rm id}$, $T_{\beta}=1_n \in B_n$, $\widehat{T_{\alpha}T_{\beta}^*}=\widehat{T_{\alpha}}$ and $w=l(\alpha)$. Then the result follows from Proposition~\ref{MFWforSimples}. Assume now that $l(\beta ) \geq 1$. Let $\beta =\kappa s_i$ with $l(\beta )=l(\kappa )+1$, hence $T_{\beta}=T_{\kappa }\sigma_i$ and $T_{\beta }^*=\sigma_i T_{\kappa }^*$.
\begin{enumerate}
\item If $T_{\alpha}\sigma_i$ is a simple braid (equivalently $T_{\alpha}\sigma_i=T_{\alpha s_i}$), then we will see that $\alpha \not= \beta$ and $\langle T_\alpha, T_\beta \rangle_R=0$. Indeed, $\alpha =\beta$ would imply $T_\kappa \sigma_i\sigma_i=T_\beta \sigma_i =~T_\alpha \sigma_i$ to be a simple braid, a contradiction since in $T_\kappa \sigma_i\sigma_i$ the strands ending in positions $i$ and $i+1$ cross at least twice. In particular, $\alpha \not= \kappa s_i$ hence $\alpha s_i \not= \kappa$. Then (see Figure~\ref{FigureCaseOne}) $T_{\alpha}T_{\beta}^*=T_{\alpha } \sigma_i T_{\kappa}^*=T_{\alpha s_i} T_{\kappa}^*$ hence $\langle T_{\alpha}, T_{\beta}\rangle _R = \langle T_{\alpha s_i}, T_{\kappa}\rangle _R=0$ where we have applied induction in the last equality since $l(\kappa )<l(\beta )$.
\begin{figure}
\caption{Case $T_{\alpha}
\label{FigureCaseOne}
\end{figure}
\item If $T_{\alpha}\sigma_i$ is a non-simple braid, then $l(\alpha s_i)=l(\alpha)-1$ and there exists a reduced expression $\alpha =\alpha_1 s_i$ of $\alpha$ ending with $s_i$ and $T_{\alpha}=T_{\alpha_1}\sigma_i$.
\begin{figure}
\caption{Case $T_{\alpha}
\label{FigureCaseTwo}
\end{figure}
Then $T_{\alpha}T_{\beta}^*=T_{\alpha_1}\sigma_i^2T_{\kappa}^*$ (see Figure~\ref{FigureCaseTwo}) and, by the quadratic relation $\sigma_i^2=vz\sigma_i+v^2$,
$$
P(\widehat{T_{\alpha}T_{\beta}^*})
= P(\widehat{T_{\alpha_1}\sigma_i ^2T_{\kappa}^*})
= vzP(\widehat{T_{\alpha}T_{\kappa}^*})
+v^2P(\widehat{T_{\alpha_1}T_{\kappa}^*}).
$$
Multiplying the above equality by $(-z)^{n-1}$ and considering the coefficients of $v^{w+n-1}$, it follows that:
$$
\langle T_{\alpha},T_{\beta}\rangle_R
=z\langle T_{\alpha},T_{\kappa}\rangle_R + \langle T_{\alpha_1},T_{\kappa}\rangle_R.
$$
Note that $\alpha_1=\kappa \Leftrightarrow \alpha_1s_i=\kappa s_i \Leftrightarrow \alpha=\beta$. We finally distinguish two cases; induction will be applicable since $l(\kappa)<l(\beta)$:
\noindent $\bullet$ Assume $\alpha = \beta$. Then $\alpha \not= \kappa$ and $\alpha_1 =\kappa$, hence
$$
\langle T_{\alpha},T_{\beta}\rangle_R
=z\langle T_{\alpha},T_{\kappa}\rangle_R + \langle T_{\alpha_1},T_{\kappa}\rangle_R
=z\!\cdot \!0+1=1.
$$
\noindent $\bullet$ Assume $\alpha \not= \beta$. In particular $\alpha_1 \not= \kappa$. Moreover, $\alpha \not= \kappa$ since, otherwise, $T_{\alpha}\sigma_i=T_{\kappa}\sigma_i=T_{\beta}$ would be a simple braid. Hence
$$
\langle T_{\alpha},T_{\beta}\rangle_R
=z\langle T_{\alpha},T_{\kappa}\rangle_R + \langle T_{\alpha_1},T_{\kappa}\rangle_R
=z\!\cdot \!0+0=0.
$$
\end{enumerate}
\end{proof}
\section{Positive braids and the Morton-Frank-Williams inequality}\label{resolutionS}
\noindent Suppose that $b$ is a positive braid whose closure is the oriented link $L$. Based on the quadratic relation $\sigma_i^2=vz\sigma_i+v^2$, in order to calculate the Homflypt polynomial of $L$ we can construct a resolution tree of $b$. That is, a binary tree with root $b$ and where each ramification has the following form, with $P$ and $Q$ positive braids:
\begin{figure}
\caption{Parent $P\sigma_i^2Q$, left child $PQ$ and right child $P\sigma_i Q$}
\end{figure}
It is known (see the proof of Theorem~\ref{tree}) that simple braids are precisely those positive braids which cannot be written as $P\sigma_i^2Q$, with $P$ and $Q$ positive braids. This means that if a simple braid appears in a resolution tree, it must necessarily be a leaf. A resolution tree is called {\it simple} if all the leaves are simple braids. As far as we know, simple resolution trees have not been considered yet; positive resolution trees have been used for example in \cite{Nakamura}.
As an example, we show in Figure~\ref{EPP} a simple resolution tree for the braid $b=32322323$ (meaning $\sigma_3\sigma_2\sigma_3\sigma_2\sigma_2\sigma_3\sigma_2\sigma_3$), with writhe $w=8$ and $n=4$ strands. This tree shows in an explicit way that the Homflypt polynomial of~$\widehat{b}$ is a combination of Homflypt polynomials of closures of simple braids, with coefficients in $\mathbb N[z,v]$ given by the product of the edge labels in the path going from each leaf to the root $b$. Collecting the leaves which correspond to the same simple braid, in this example we obtain
$$
\begin{array}{rcl}
P(\widehat{b}) & = & (1+z^2)\cdot v^8 \cdot P(\widehat{I_4}) \\
&& \\
& + &(z+z^3)\cdot v^7 \cdot P(\widehat{\sigma_2}) + (2z+z^3) \cdot v^7 \cdot P(\widehat{\sigma_3}) \\
&& \\
& + & (2z^2+z^4) \cdot v^{6} \cdot P(\widehat{\sigma_2\sigma_3})+
(2z^2+z^4) \cdot v^{6} \cdot P(\widehat{\sigma_3\sigma_2}) \\
&& \\
& + &(z+3z^3+z^5) \cdot v^{5} \cdot P(\widehat{\sigma_3\sigma_2\sigma_3}).
\end{array}
$$
\begin{figure}
\caption{A simple resolution tree for the braid $b=32322323$}
\label{EPP}
\end{figure}
We now prove that simple resolution trees always exist and that we can directly see, from a simple resolution tree, whether a closed positive braid reaches the MFW upper bound.
\begin{theorem}\label{tree}
Let $L=\widehat{b}$ be a link obtained as the closure of a positive braid~$b$ of $n$ strands and writhe $w$. Then $b$ admits a simple resolution tree and, moreover, the MFW upper bound is sharp for $L$, that is, $\partial_v^+(P(L))=w+n-1$, if and only if at least one leaf in this simple resolution tree is the identity braid.
\end{theorem}
\begin{proof}
That any positive braid has a simple resolution tree follows from the following well known fact: a positive braid $\beta$ is not simple if and only if we can decompose it as $\beta=P\sigma_i^2 Q$, with $P$ and $Q$ positive braids (see, for example, Lemma 2.5 and following remark in \cite{MortonElrifai}). As relations in the braid group are homogeneous, the lengths of the braids $PQ$ and $P\sigma_i Q$ are strictly smaller than the length of~$\beta$. Therefore, starting with the root $b$, we can iteratively decompose every node which is not simple into two smaller nodes. Clearly this process terminates, yielding a simple resolution tree for $b$.
Let $T_{\alpha_1},\ldots,T_{\alpha_k}$ be the (not necessarily distinct) simple braids corresponding to the $k$ leaves of a simple resolution tree of $b$. For $i=1,\ldots,k$, let $z^{a_i}v^{w-l(\alpha_i)}$ be the monomial obtained by multiplying the edge labels of the path that goes from the leaf $T_{\alpha_i}$ to the root $b$. Note that $a_i$ is the number of right children in this path. Then
\begin{equation}\label{Eq}
P(L) = \sum_{i=1}^{k}{z^{a_i}\cdot v^{w-l(\alpha_i)} \cdot P(\widehat{T_{\alpha_i}})}.
\end{equation}
Since ${\rm wr}(T_{\alpha_i})=l(\alpha_i)$, by Proposition~\ref{MFWforSimples} the highest $v$-degree of each summand is $(w-l(\alpha_i))+({\rm wr}(T_{\alpha_i})+n-1)=w+n-1$ if and only if $\alpha_i={\rm id}$. Then, if no leaf is the identity braid, $\partial_v^+(P(L))<w+n-1$. Reciprocally, if at least one leaf of the simple resolution tree is trivial, then the coefficient of $v^{w+n-1}$ in $P(L)$ is, again by Proposition~\ref{MFWforSimples}, the sum of the monomials $z^{a_j}(-z)^{1-n}$ that correspond to $\alpha_j ={\rm id}$, a sum which is obviously nonzero.
\end{proof}
As stated in the introduction, it is well known that the MFW lower inequality is actually an equality for the closure of any positive braid (see for example~\cite{KalmanMeridian}, comment after Example 1$\cdot$8). Here we reprove this result by making use of Equation~(\ref{Eq}) derived from a simple resolution tree, and following the steps in the proof of Proposition~\ref{MFWforSimples}. Rather than give complete details, we prefer to recall some historical remarks about the positivity of the Homflypt polynomial. Recall that a nonzero (Laurent) polynomial in $z$ is said to be {\it positive} if all its coefficients are nonnegative. Answering positively a question by V. F. R. Jones, Cromwell and Morton \cite{MortonCromwell} proved that, for positive links, the evaluation of the Homflypt polynomial $P(L)(v,z)$ in any $v \in (0,1)$ provides a positive Laurent polynomial in $z$. If $v=1$ we obtain the Conway polynomial, also positive except that it can be zero if the original link is split.
\begin{proposition}\label{PositiveBraidsMFWlowerbound}
Let $L=\widehat{b}$ be a link obtained as the closure of a positive braid~$b$ of $n$ strands and writhe $w$. Then the MFW lower inequality is sharp for~$L$, that is, $\partial_v^-(P(L))=w-n+1$. Moreover, the coefficient of $v^{w-n+1}$ in $P(L)$ is a positive Laurent polynomial in $z$.
\end{proposition}
\begin{proof}
Following a double induction, first on the number of strands and then on the writhe, we will see that the coefficient $q_L(z)$ of $v^{w-n+1}$ in $P(L)(v,z)$ is a positive Laurent polynomial in the variable $z$. If $n=1$, then $w=0$ and the closure of the braid is the trivial knot with polynomial $1$, so the result holds.
For $n>1$ we follow the steps in the proof of Proposition~\ref{MFWforSimples} to see first that the result is true for any simple braid $T_\alpha$ with $n$ strands. If $\alpha \in S_n \setminus i(S_{n-1})$ then $\widehat{T_\alpha}=\widehat{d}$ for a positive braid $d$ with $n-1$ strands and writhe $w-1$, as given in the proof of Proposition~\ref{MFWforSimples}. Thus $P(\widehat{T_\alpha})=P(\widehat{d})$ and $(w-1)-(n-1)+1=w-n+1$. Since $d$ is positive (although non-simple) and has less than $n$ strands, induction can be applied. If $\alpha \in i(S_{n-1})$ then $\alpha =\alpha' \otimes 1$ and $P(\widehat{T_\alpha})=\delta P(\widehat{T_{\alpha '}})$ where $T_{\alpha '}$ is simple, with the same writhe as $T_\alpha$ and one less strand (again, see the proof of Proposition~\ref{MFWforSimples}). Clearly, $q_L(z)=\frac{1}{z}q_{\widehat{T_{\alpha '}}}(z)$, so the result holds for every simple braid with $n$ strands.
Finally, once we have proved the result for the closure of simple braids with $n$ strands, the result for a positive braid with $n$ strands follows from considering Equation~(\ref{Eq}), derived from a simple resolution tree.
\end{proof}
According to Proposition~\ref{PositiveBraidsMFWlowerbound}, for closures of positive braids the MFW inequality is sharp if and only if the MFW upper bound is reached. Then the following result is a nice consequence of Theorem~\ref{tree}:
\begin{corollary}\label{transformations}
The MFW inequality is sharp for a closed positive braid if and only if one (hence all) of its braid word representatives can be obtained from the empty word by a finite sequence of transformations of the following types:
\begin{enumerate}
\item Inserting $\sigma_i^2$ for some $i=1, \dots , n-1$,
\item doubling a letter $\sigma_i$ for some $i=1, \dots , n-1$, and
\item applying positive braid relations.
\end{enumerate}
\end{corollary}
\begin{proof}
Starting with the empty word, a sequence of the above transformations builds a branch of a simple resolution tree for the corresponding positive braid. Since the leaf of this branch is the identity, the MFW upper bound is sharp according to Theorem~\ref{tree}.
Reciprocally, suppose that $L=\widehat{b}$ reaches the MFW upper bound, and construct a simple resolution tree for $b$. By Theorem~\ref{tree} at least one of its leaves is the identity. Ascending in the tree from such a leaf provides the sequence of transformations of the above types which define a braid word for $b$.
\end{proof}
We now enumerate some examples which can be deduced from Corollary~\ref{transformations}:
\begin{corollary}\label{listado}
Let $w$ be a positive word representing a braid $b$. Then the MFW inequality is sharp for the oriented link $L=\widehat{b}$ if the word $w$ is in the following list:
\begin{enumerate}
\item Words which are product of positive powers of the generators, where all the exponents are greater than or equal to~two, that is, $w=\prod_k \sigma_{i_k}^{e_k}$ with $e_k \geq 2$ for all $k$. For example, $\sigma_3^2\sigma_2^5\sigma_1^2\sigma_2^3\sigma_3^3$.
\item Even positive palindromic braid words, that is, positive words with an even number of letters that reads the same backwards as forwards. For example, $\sigma_3\sigma_2\sigma_1^2\sigma_2\sigma_3$.
\item Any word of the form $uw_0v$ where $u, v$ are positive words and $w_0$ is any positive word representing the square of the half twist $\Delta \in B_n$.
\end{enumerate}
\end{corollary}
\begin{proof}
Words in the first item can be obtained by a finite number of transformations of type 1 and 2 in Corollary~\ref{transformations}. Words in the second item can be obtained by a finite number of transformations of type 1 in Corollary~\ref{transformations}.
To prove the statement for words in the third item, we first recall that the half twist or Garside element $\Delta \in B_n$ can be represented by two words which are the reverse of each other:
\begin{eqnarray*}
\Delta & = & \sigma_1(\sigma_2\sigma_1)\cdots (\sigma_{n-2}\cdots \sigma_1)(\sigma_{n-1}\cdots \sigma_1) \\
& = & (\sigma_1\cdots \sigma_{n-1})(\sigma_1\cdots \sigma_{n-2}) \cdots (\sigma_1\sigma_2)\sigma_1
\end{eqnarray*}
This means that $\Delta^2$ can be represented by an even positive palindromic braid word $w_0$, known already to be in the list. Note that any other positive word representing $\Delta^2$ is also in the list, since it can be obtained from $w_0$ by positive braid relations (transformation of type 3 in Corollary~\ref{transformations}).
The half twist $\Delta$ can be represented by a positive word ending (or starting) with any generator $\sigma_i$~\cite{MortonElrifai}. The same easily follows for $w_0$; by positive braid relations $w_0$ can be transformed into a positive word $w_i'$ (resp. $w_i$) that starts (resp. ends) with $\sigma_i$. Then, if $v=\sigma_{i_1}\cdots \sigma_{i_k}$, by positive braid relations we transform $w_0$ into $w_{i_k}$, and then double the last letter $\sigma_{i_k}$ by a transformation of type 2. Next we apply positive braid relations to transform $w_{i_k}$ into $w_{i_{k-1}}$, and double its last letter to obtain $w_{i_{k-1}}\sigma_{i_{k-1}}\sigma_{i_k}$. Iterating this process, we finally obtain $w_{i_1}\sigma_{i_1}\sigma_{i_2}\cdots \sigma_{i_k}$, which by positive braid relations can be transformed into $w_0v$. Finally we repeat the whole process on the left, using the equivalent words $w_i'$, to obtain $uw_0v$.
\end{proof}
It is probably worth rewriting the last item in Corollary~\ref{listado} (which was already shown in~\cite[Corollary 2.4]{FW}) in other words:
\begin{corollary}\label{Delta2C}
Let $a, b$ be two positive braids. Then the MFW inequality is sharp for the closure of the braid $a\Delta^2 b$.
\end{corollary}
Using the terminology from Garside theory~\cite{MortonElrifai}, the above result means that the MFW inequality is sharp for positive braids of {\it infimum} at least two. Therefore, the MFW inequality can be strict only for positive braids whose infimum is zero or one.
Recall the celebrated lower bound for the braid index $s(L)$ of an oriented link~$L$, defined as
$$
MFW(L)=\frac{\textnormal{span}_v(P(L))}{2}+1=\frac{(\partial_v^+(P(L))-\partial_v^-(P(L)))}{2}+1
$$
(see \cite{Morton}, \cite{FW}). In \cite{FW} Franks and Williams conjectured that, for a link which is closure of a positive braid, $MFW(L)=s(L)$. In \cite{MortonShort} Morton and Short showed a counterexample: for $L=\widehat{b}$ with $b=\sigma_{3}\sigma_{2}\sigma_{1}\sigma_{3}\sigma_{2}^2\sigma_{1}\sigma_{3}\sigma_{2}^2\sigma_{1}\sigma_{3}\sigma_{2} \in B_4$ we have $MFW(L)=3$ and $s(L)=4$. However, it is known that $MFW(L)=2$ if and only if $s(L)=2$, if and only if $L$ is a torus link $T(2,n)$ for certain $n\geq 2$ (see \cite[Theorem 1.2]{Nakamura}). We prove the following result:
\begin{proposition}
Let $L$ be an oriented link. Then $MFW(L)=s(L)$ if there exists a positive braid $b$ with $L=\widehat{b}$ and $b$ admits a simple resolution tree where at least one leaf is the identity braid. In this case, if $b\in B_n$, then $s(L)=n$.
\end{proposition}
\begin{proof}
Assume that $L=\widehat{b}$ where $b$ is a positive braid with $n$ strands and writhe $w$, and the identity braid with $n$ strands is one of the leaves of a simple resolution tree of $b$. By Proposition~\ref{PositiveBraidsMFWlowerbound} we have $\partial_v^-(P(L))=w-n+1$ and by Theorem~\ref{tree} $\partial_v^+(P(L))=w+n-1$. In particular $MFW(L)=n$. Since $MFW(L) \leq s(L) \leq n$, the result follows.
\end{proof}
Even if we restrict our attention to the oriented links which are closed positive braids, the converse result is not clear to us, since there are oriented links which are closures of positive braids, but with no positive braid representations of minimal number of strands \cite[Theorem 1]{Stoimenow}. The example exhibited by Stoimenow has braid index $s(L)=4$. We do not know if there are examples with $s(L)=3$.
\section{Positive braids on three strands}\label{3braidS}
We end this paper with a study of positive braid words on three strands. More precisely, we will study the braid index of their closures.
Clearly, the links of braid index one and two are precisely the trivial knot and the torus links $T(2,k)$ for $k\in \mathbb Z\setminus\{-1,1\}$. It is well known (see~\cite{BM93} and also~\cite[Theorem 1.1]{BM08}) that a braid with three strands closes to a link whose braid index is smaller than three (one of the above) if and only if it is conjugate to $\sigma_1^k\sigma_2^{\pm 1}$ for some $k\in \mathbb Z$. Hence, knowing how to solve the conjugacy problem in $B_3$ one can determine the braid index of a closed braid with three strands.
The next result, which uses the techniques introduced in this paper, avoids the need to use the conjugacy problem in the case of {\it positive} braids on three strands, as we give a complete list of positive words whose closures have braid index smaller than three.
\begin{theorem}\label{3braidsT}
Let $w$ be a positive word in $\sigma_1, \sigma_2$, and let $b$ be the braid on three strands represented by $w$. Then the braid index of $\widehat{b}$ is smaller than three if and only if $w$ is, up to cyclic permutation, one of the following words:
\begin{enumerate}
\item $\sigma_1\sigma_2^p$\quad or \quad $\sigma_2 \sigma_1^{p}$, \quad for $p\geq 0$.
\item $\sigma_1\sigma_2\sigma_1^p\sigma_2^q$ \quad or \quad $\sigma_2\sigma_1 \sigma_2^p \sigma_1^q$, \quad for $p,q>0$.
\end{enumerate}
\end{theorem}
\begin{proof}
It is known~\cite[Proposition 3.1]{Nakamura} that if $L$ is the closure of a positive braid~$b$ on $n=3$ strands, then $MFW(L)=s(L)$, the braid index of $L$. And clearly, for braids on three strands, that the MFW inequality is sharp means exactly that $MFW(L)=3$. We now examine the different possibilities.
If $w$ is the trivial word the result holds trivially, as the trivial link with three components has braid index three.
Suppose that $w=\sigma_i^k$ only involves one of the generators $\sigma_1$ or $\sigma_2$. If $k=1$, $w$ is in the list above and clearly the braid index of $\widehat{b}$ is two. If $k>1$ then $w$ is not in the list (even considering cyclic permutation) and $w$ can be obtained by inserting $\sigma_i^2$ and then doubling $\sigma_i$ as many times as needed. By Corollary~\ref{transformations}, the result follows.
We can then assume that $w$ involves $\sigma_1$ and $\sigma_2$ and, after a cyclic permutation of its letters, that there are exponents $e_i>0$ for $i=1,\ldots,2k$ with
$$
w= \sigma_1^{e_1}\sigma_2^{e_2}\sigma_1^{e_3}\sigma_2^{e_4}\cdots \sigma_1^{e_{2k-1}}\sigma_2^{e_{2k}}.
$$
Suppose $k\geq 3$. We will produce $w$ from the trivial word going up in a simple resolution tree (that is, applying the transformations from Corollary~\ref{transformations}). By Corollary~\ref{Delta2C}, we can produce $\Delta^2\sigma_1^{e_7}\sigma_2^{e_8}\cdots \sigma_1^{e_{2k-1}}\sigma_2^{e_{2k}}$. That is, we can produce $\sigma_1\sigma_2\sigma_1\sigma_2\sigma_1\sigma_2\sigma_1^{e_7}\sigma_2^{e_8}\cdots \sigma_1^{e_{2k-1}}\sigma_2^{e_{2k}}$. Now doubling the letters in $\sigma_1\sigma_2\sigma_1\sigma_2\sigma_1\sigma_2$ as many times as needed, one obtains $w$. This implies, from Corollary~\ref{transformations}, that if $k\geq 3$ the braid index of $b$ is three.
It remains to study the cases $k=1$ and $k=2$.
Suppose $k=1$, so $w=\sigma_1^{e_1}\sigma_2^{e_2}$. If both $e_i>1$ the word is not in the list and the braid index is three by Corollary~\ref{listado}, so we can assume that either $e_1=1$ or $e_2=1$. If $e_1=1$ then $w=\sigma_1\sigma_2^{e_2}$, which clearly has braid index smaller than three, as it corresponds to a stabilization of a braid on two strands. The same happens if $e_2=1$, in which case $w=\sigma_1^{e_1}\sigma_2$ is equivalent to $\sigma_2\sigma_1^{e_1}$ up to cyclic permutation of its letters.
Suppose finally that $k=2$, so $w=\sigma_1^{e_1}\sigma_2^{e_2}\sigma_1^{e_3}\sigma_2^{e_4}$. Let us suppose that $w=\sigma_1\sigma_2\sigma_1^{p}\sigma_2^{q}$ with $p,q>0$. Then $b=\sigma_1\sigma_2\sigma_1^p\sigma_2^q = \sigma_2\sigma_1\sigma_2\sigma_1^{p-1}\sigma_2^q$ which is conjugate to $\sigma_1\sigma_2\sigma_1^{p-1}\sigma_2^{q+1}$. Repeating this process, we see that $b$ is conjugate to $\sigma_1\sigma_2 \sigma_1^{0}\sigma_2^{p+q} = \sigma_1\sigma_2^{p+q+1}$, so $\widehat{b}$ has braid index smaller than three, by the previous case. Similarly, if $w=\sigma_2\sigma_1\sigma_2^p\sigma_1^q$ with $p,q>0$, the braid index of $\widehat{b}$ is smaller than three.
We know from Corollary~\ref{listado} that if $e_i>1$ for $i=1, 2, 3, 4$ the braid index of~$\widehat{b}$ is three. Hence, up to cyclic permutation of the letters, and exchange of letters $\sigma_1$ and $\sigma_2$ (which preserves the braid index), the only remaining case is $w=\sigma_1\sigma_2^{e_2}\sigma_1^{e_3}\sigma_2^{e_4}$, with $e_2,e_4>1$. But in this case $w$ can be obtained from the trivial word going up in a simple resolution tree as follows: first we produce~$\sigma_1^2$, then we insert $\sigma_2^2$ twice to produce $\sigma_1\sigma_2^2\sigma_1\sigma_2^2$, and finally we double $\sigma_2$ and the second $\sigma_1$ as many times as needed (recall that $e_2$ and $e_4$ are greater than one). This implies that, in this case, the braid index of~$\widehat{b}$ is three.
Therefore, the only words which represent a braid whose closure has braid index smaller than three are, up to cyclic permutation of their letters, the ones in the statement.
\end{proof}
A straightforward consequence of the above result is the following, which could also be derived from~\cite[Theorem 1.1]{BM08}.
\begin{corollary}
Given a positive braid $b$ on three strands, the braid index of $\widehat b$ is smaller than three if and only if $b$ is conjugate to $\sigma_1^p \sigma_2$ for some $p\geq 0$.
\end{corollary}
\begin{proof}
This result follows immediately from Theorem~\ref{3braidsT}, as all braids appearing in its statement are conjugate to $\sigma_1^p \sigma_2$ for some $p\geq 0$. More precisely, one has:
$$
\sigma_2^{-1}\Delta^{-1}(\sigma_1\sigma_2^p)\Delta\sigma_2 = \sigma_1^p \sigma_2,
$$
$$
\sigma_2^{-1}(\sigma_2\sigma_1^p)\sigma_2 = \sigma_1^p \sigma_2,
$$
$$
\sigma_2^{-1}\Delta^{-1}\sigma_2^{-p}(\sigma_1\sigma_2\sigma_1^p\sigma_2^q)\sigma_2^p\Delta\sigma_2 = \sigma_1^{p+q+1}\sigma_2,
$$
$$
\sigma_2^{-1}\sigma_1^{-p}(\sigma_2\sigma_1\sigma_2^p\sigma_1^q)\sigma_1^p\sigma_2 = \sigma_1^{p+q+1}\sigma_2.
$$
Conversely, every braid conjugate to $\sigma_1^p\sigma_2$ for some $p\geq 0$ has the same closure as $\sigma_1^p\sigma_2$, which has braid index smaller than three as it is the stabilization of the 2-strands braid $\sigma_1^p$.
\end{proof}
\begin{tabular}{ll}
Juan Gonz\'alez-Meneses & Pedro M. Gonz\'alez Manch\'on \\ & \\
Departamento de \'Algebra & Departamento de Matem\'atica Aplicada \\
Facultad de Matem\'aticas & EUITI-Universidad Polit\'ecnica de Madrid \\
Instituto de Matem\'aticas (IMUS) & Ronda de Valencia 3 \\
Universidad de Sevilla & 28012 Madrid (Spain) \\
Apdo. 1160 & {\it [email protected]} \\
41080 Sevilla (Spain) & \\
{\it [email protected]} &
\end{tabular}
\end{document}
|
\begin{document}
\title{Cumulative Stay-time Representation for Electronic Health Records in Medical Event Time Prediction}
\begin{abstract}
We address the problem of predicting when a disease will develop, i.e., medical event time~(MET), from a patient's electronic health record~(EHR).
The MET of non-communicable diseases like diabetes is highly correlated to cumulative health conditions, more specifically, how much time the patient spent with specific health conditions in the past.
The common time-series representation is indirect in extracting such information from EHR because it focuses on detailed dependencies between values in successive observations, not cumulative information.
We propose a novel data representation for EHR called cumulative stay-time representation~(CTR), which directly models such cumulative health conditions.
We derive a trainable construction of CTR based on neural networks that has the flexibility to fit the target data and scalability to handle high-dimensional EHR.
Numerical experiments using synthetic and real-world datasets demonstrate that CTR alone achieves a high prediction performance, and it enhances the performance of existing models when combined with them.
\if0
We address the problem of predicting when a disease will develop from a patient's electronic health record~(EHR), i.e., medical event time prediction (MTP).
The MTP of non-communicable diseases like diabetes is expected to be highly correlated to long-term information in EHR, especially how much time in total the patient spent at specific health conditions in the past.
Such long-term information cannot be efficiently handled with models based on ordinary time-series representation.
To address this issue, we propose a novel data representation for EHRs called \emph{cumulative stay-time representation}~(CTR).
Specifically, three different constructions of CTR are presented in order to address the problem of its computational complexity and flexibility.
The most advanced construction of CTR, CTR-N, can be trainable from data and straightforwardly combined with existing time-series models.
Numerical experiments using synthetic and real-world datasets demonstrate that CTR alone achieves a high C-index in MTP tasks, and it enhanced the performance of existing time-series models when combined with them.
We address the problem of predicting when a disease will develop from a patient's electronic health record (EHR).
When patients have high-risk health conditions for an extended period of time, such as high blood pressure, hyperglycemia, or increased body fat, diseases can develop.
Thus, using EHR to model patient's chronic health conditions, i.e., how much time they have spent with different conditions, can help improve prediction.
For modeling such cumulative features, the ordinary time-series representation, where the value of each observation variable is recorded at each time, is rather a detour.
This is because that the time-series representation is founded from the temporal locality in representation.
We therefore propose a novel representation for EHRs, i.e., the cumulative stay-time representation (CTR).
In CTR, we directly record the cumulative stay time at each combination of observed attribute values. The combination represents a patient's health condition as a state.
We derive a trainable construction of CTR based on neural networks.
Numerical experiments using synthetic and real-world datasets demonstrated the effectiveness of our method.
three types of definition for the state: the first discretely determines the state assignment for observations, and the second and third ones determine them as continuous measurements based on kernel functions and neural networks, respectively.
The state is determined by values of observation variables at each observation in the time-series, where we use bins of segmented values for observation variables as the discrete states. Since the number of the discrete states grows exponentially with the number of observation variables, we then extend the discrete one to the continuous one as kernel representation and neural network which can be learned from data. It avoids exponential increasing of the number of states and leads to smooth interpolation between states.
\fi
\end{abstract}
\section{Introduction}
\if0
\begin{figure}
\caption{Formalizing raw observations into tractable representation
}
\label{FigTime-seriesRepresentation}
\end{figure}
\fi
Predicting medical events, such as disease progression, from \emph{electronic health records} (EHR) is an important task in medical and healthcare applications~\cite{tan2020data}. The EHR represents a patient's health history. Such prediction can assist in providing detailed health guidance, e.g., for early disease detection, intervention, and the allocation of limited resources in healthcare organizations~\cite{inaguma2020increasing}.
This paper addresses a scenario in which we predict \emph{when} a patient will develop some disease after an index date, i.e., the \emph{medical event time}~(MET), from past observations in EHR, as shown in Fig.~\ref{FigProblem}~\cite{liu2018early}. This is a common task in survival analysis and time-to-event analysis, and we focus on MET, not just its occurrence.
The past observations for each patient come from a window that spans the initial observation time to the index date and contain lab test results at each time, as shown in the LHS in Fig.~\ref{FigOrdinary}.
From accumulated EHR datasets, we learn a prediction model for MET.
\begin{figure}
\caption{We predict when patient will develop disease after index date from EHR in observation window.}
\label{FigProblem}
\end{figure}
A patient's cumulative health conditions appearing in past observations in EHR are of help for MET prediction. They can be interpreted as the \emph{cumulative stay-time} in specific health states---more specifically, how much time a patient has spent with different health conditions.
For example, when a patient has high blood pressure, hyperglycemia, or high body fat for a long enough period, diseases can develop~\cite{james20142014,american20192}.
In particular, for non-communicable diseases, like diabetes,
the cumulative stay-time is strongly related to their progression and MET.
To utilize information in EHR, the common approach is to formalize the raw observations in EHR into an ordinary time-series representation~\cite{zhang2019attain,rubanova2019latent,hussain2021neural}.
In this approach, at each time, we record the value of each lab test result, as shown in the table in Fig.~\ref{FigOrdinary}. The focus is on the detailed dependencies between values in successive observations. When we handle the cumulative stay-time with this representation, prediction models, such as recurrent neural networks (RNNs)~\cite{zhang2019attain}, need to encode values in an \emph{entire time series} into the cumulative stay-time. This makes modeling the cumulative stay-time indirect.
We therefore propose directly extracting the cumulative stay-time from raw observations in EHR as a novel representation for EHR, that is, the \emph{cumulative stay-time representation} (CTR). In contrast to the time-series representation, we record the cumulative stay-time at each combination of values of lab test results that represents a state, as shown in Fig.~\ref{FigCumTime}. This explicitly represents how long a patient stays in a specific health state.
Representations for modeling the cumulative stay-time in specific states and using it in prediction have been proposed in domains other than EHR modeling, such as for the usage history of batteries~\cite{takahashi2012predicting} and GPS trajectories~\cite{liao2018trajectory}.
However, they are defined only with discrete state modeling that can be seen as bins of non-overlapping segmented values for lab test results, as shown in the table in Fig.~\ref{FigCumTime}. As such, they focus on low-dimensional observations, such as one, two, or three dimensions, and cannot handle more than several dimensions. This is because the number of states increases exponentially against the dimension of observation variables with this state definition. Since observations in EHR have many more dimensions, it is difficult to use these approaches on EHR directly.
This paper addresses the above difficulties by deriving methods for constructing CTR with enough scalability to handle EHR.
We first formally derive a general construction of CTR by using the discrete state. This formalization leads to further enhancements of CTR with states defined as continuous measurements, CTR-K and CTR-N, which have states based on kernel functions and neural networks, respectively.
They are more practical variants that avoid exponential increases in the number of states and lead to smooth interpolation between states.
In addition, CTR-N can be learned from data, which enables flexible state modeling.
\paragraph{Contributions.}
Our main contributions are the following:
\begin{itemize}
\setlength{\itemsep}{0.01cm}
\item We propose a novel representation for EHR for MET prediction, CTR, which represents how long a patient stays in a specific health state. This helps to model the cumulative health conditions of patients.
\item We derive a trainable construction of CTR based on neural networks that adapts to data flexibly and has scalability for high-dimensional EHR.
\item Extensive experiments on multiple MET prediction tasks with synthetic and real-world datasets show the effectiveness of CTR, especially for EHR with relatively longer observation periods, where cumulative health conditions are more crucial for MET.
CTR is modular enough to further improve the prediction performance when combined with other models.
\end{itemize}
\if0
We propose a method to convert time-series in EHR into cumulative stay-time at states which are determined by values of observation variables.
State is continuous.
State is determined by a function of input observation variables, where the function is determined by past observations.
cumulative stay-time is sum of products of states and durations staying in the states over observations.
The function is neural network trained with past observations.
The function is kernel function which is constructed from past observations and represents proximity between them.
The neural network is trained in end to end manner through training of prediction model.
For implementing the algorithm, we propose a stochastic optimization method.
We propose a method to convert raw observations in EHR into CTR for modeling EHR, where we use a specific patient's state as an index of the representation and a value at each index represents a cumulative stay-time at the corresponding state. The cumulative stay-time is the sum of durations staying in the state over the time-series. The state is determined by values of observation variables at each observation in the time-series, where we use bins of segmented values for observation variables as the discrete states. We record cumulative stay-time at each value of observation variable.
To the best of authors knowledge, this is the first study for modeling cumulative stay-time in EHR.
\fi
\begin{figure}
\caption{Ordinary time-series representation.}
\label{FigOrdinary}
\end{figure}
\begin{figure}
\caption{Cumulative stay-time representation.}
\label{FigCumTime}
\end{figure}
\section{Preliminary}
\subsection{Medical Event Time Prediction}
Our goal is to construct a model for predicting the medical event time~(MET), $y > 0$, after an index date on the basis of pairs of past observations and the corresponding timestamps, $\{\bX, \bt\}$, which are recorded in the EHR of a patient~\cite{liu2018early}, as shown in Fig.~\ref{FigProblem}. The past observations for each patient contain $M$ number of observations in an observation window $\bX \equiv \{\bx^{\{\!m\!\}}\}_{m=1}^{M}$, where the $m$-th observation $\bx^{\{\!m\!\}}$ is represented as a $D$ number of lab test results $\bx^{\{\!m\!\}}\in \mathbb{R}^{D}$, and $\bX$ thus forms an $M \times D$ matrix.
The timestamps are $\bt \equiv \{t^{\{\!m\!\}}\}_{m=1}^{M}$, where the $m$-th timestamp is $t^{\{\!m\!\}}>0$.
We assume that each patient has an $M \times D$ matrix $\bX$ and $M$ vector $\bt$.
Note that observation intervals can vary over time, and the length of sequence $M$ can be different over patients.
When we take a machine learning-based approach, the raw observations $\{\bX, \bt\}$ must be formalized into a tractable representation that contains enough information for the MET prediction.
We here denote the representation as a function, $\bz\equiv\bz(\bX, \bt)$, whose output forms either a vector, matrix, or tensor depending on the formalization.
Once $\{\bX, \bt\}$ is formalized into $\bz$, we use $\bz$ as the input of the prediction model, $f(\bz)$, and learn it with a general scheme for minimizing the expected loss:
\begin{align}
\label{EqPredictionError}
f^* \equiv \argmin_f E[\calL(f(\bz),y)],
\end{align}
where $f^*$ is the optimal prediction model, $\calL$ is the loss function (e.g., the squared error), and $E$ denotes the expectation over $p(y,\bX, \bt)$.
In the Experiments section, we will define a specific prediction model and loss function for each specific problem.
By using the learned $f^*$, we can predict $y$ for new data as $\hy = f^*(\bz)$.
\subsection{Property for Cumulative Stay-time Representation}
This paper focuses on how to formalize raw observations $\{\bX, \bt\}$ into a tractable representation, $\bz$.
We directly model the cumulative stay-time of a specific patient's states with the construction of $\bz$.
We would like the representation to be:
\begin{description}
\setlength{\itemsep}{0.01cm}
\item[(i) Direct to model stay time]~\\
How long a patient stays in a specific health condition can make diseases develop, particularly non-communicable diseases, like diabetes~\cite{james20142014,american20192}.
\item[(ii) Explicit to handle variable observation intervals]~\\
Observation intervals in EHR can vary over time and over patients since the observations are recorded when a patient is treated.
\item[(iii) Scalable to high-dimensional data]~\\
Since EHR has high-dimensional observations, the representation should not cause combinatorial explosion against the number of dimensions in observations.
\end{description}
\subsection{Representations for EHR}
We first discuss the conventional related representations in this subsection considering the above properties; then, we derive the cumulative stay-time representation (CTR) for EHR in Section~3.
\subsubsection{Ordinary Time-series Representation}
In the ordinary time-series representation, raw observations $\{\bX, \bt\}$ are converted into the representation of a matrix form, $\bz_{\mathrm{ts}} \in \mathbb{R}^{M \times D}$, whose two-dimensional index respectively represents timestamps and lab test names, as shown in Fig.~\ref{FigOrdinary}.
This corresponds to directly using the lab test results $\bX$ as $\bz_{\mathrm{ts}}$, $\bz_{\mathrm{ts}}(\bX, \bt) \equiv \bX$.
In the matrix, successive observations are put in adjacent rows.
Thus, such a representation helps in modeling the detailed dependencies between values in successive observations by using, for example, RNNs~\cite{xiao2017modeling,zhang2019attain}, hidden Markov models (HMMs)~\cite{alaa2019attentive,hussain2021neural}, convolutional neural networks (CNNs)~\cite{cheng2016risk,makino2019artificial}, ODERNNs~\cite{rubanova2019latent}, and Transformers~\cite{luo2020hitanet}.
However, when we handle the cumulative stay-time, we need to consider how long and what state a patient has been in \emph{over entire observations}, and similar health states are highly related to each other even if they are time-distant. In this case, models need to learn to encode values in an entire time series into the cumulative stay-time completely from data. The learning thus becomes indirect and costly, which leads to degraded performance.
These approaches can still handle variable observation intervals by inputting the timestamps or intervals between observations as additional inputs. Also, they usually have good scalability for high-dimensional data. Therefore, we will show the performance difference with the proposed method in our experiments to investigate their indirectness in modeling stay time in a specific health state.
\subsubsection{Cumulative Stay-time Representation}
As discussed in Introduction, methods for directly modeling the cumulative stay-time in specific states have been proposed in domains other than EHR, e.g., cumulative stay-time for the internal state of batteries~\cite{takahashi2012predicting}, location grids against GPS trajectories~\cite{andrienko2007visual,boukhechba2018predicting}, and indoor positioning using RFID~\cite{zuo2016prediction,jiang2018research}.
They can handle variable observation intervals more naturally than the time-series representation.
The details of this approach and its practical limitations in EHR modeling will be described in Section~3.1. In brief, these methods do not have enough scalability for high-dimensional data because of their discrete state modeling and have not been defined formally with further extendibility. In this paper, we formally derive this approach as the \emph{cumulative stay-time representation with discrete states} (CTR-D) and extend its state definition with a kernel function and trainable neural networks to address a higher dimensional case of EHR.
\subsubsection{Other Representations}
Representations for temporal observations other than time-series representations and CTR have been studied as reviewed in~\cite{fu2011review}, such as methods based on binarization~\cite{bagnall2006bit} and segmented subsequences~\cite{lovric2014algoritmic}.
Changing the domain from time into other domains, such as via the Fourier transform~\cite{agrawal1993efficient} and tensor factorization~\cite{ho2014marble,yu2016temporal,yin2019learning}, is another common way.
These methods assume high frequency and regular observation intervals, which is not the case in our scenario.
\if0
Such state-based modeling is also found in other representations~\cite{harris1954distributional,chen2012nonlinear,le2014distributed}, such as bag of words (BoW) for representing the frequency of events~\cite{salas2015cumulative} and sub-sequences of time series~\cite{oates2012exploiting,bromuri2014multi}.
CTR and BoW share a common philosophy in that the bins are filled with the sum of some kind of quantity.
The important difference is that the quantity is the cumulative stay-time for CTR, and for BoW, the quantity is the frequency. BoW totally ignores temporal information, whereas our method maintains temporal information as the cumulative stay-time, which has different information from the ordinary time series.
Methods for modeling cumulative stay-time in specific states have been proposed in multiple domains, e.g., cumulative stay-time for the internal state of batteries~\cite{takahashi2012predicting}, location grids against GPS trajectories~\cite{andrienko2007visual,scellato2011nextplace,boukhechba2018predicting,zhao2018mobile,liao2018trajectory}, points of interest~\cite{Xie2009From,Keles2017Extracting}, clustered location areas~\cite{yoon2010smart}, indoor positioning using RFID~\cite{zuo2016prediction,jiang2018research,wang2019analysis}, and cellular stations~\cite{guo2018detecting}.
They focus on low-dimensional observations, such as one, two, or three dimensions, and cannot handle more than several dimensions because of the combinatorial explosion against the dimension of observation variables, which comes from their discrete state modeling.
In addition, the discrete state modeling causes there to be a non-continuous boundary between states, which prevents generalization between adjacent states.
Some works on data visualization address the issue of discrete state modeling~\cite{Yau2020Understanding,zuo2014consumer,han2017visualization}.
They record the cumulative stay-time for states represented by a continuous function over two-dimensional space with kernel density estimation~\cite{silverman1986density,brunsdon1995estimating}, which provides smooth interpolation between states.
However, their focus is still only on two-dimensional data, and they do not use the cumulative stay-time for the input of a prediction model.
In this paper, we address a higher dimensional case and extend the representation with a trainable neural networks, which performed well in a real healthcare application.
CTR is also related to bag of words (BoW) in text analysis~\cite{harris1954distributional,le2014distributed}, which represents the frequency of each word or pattern.
Words can be seen as a state in our scenario. For time series in the healthcare domain, BoW has been used such as for modeling the counts for medical events~\cite{salas2015cumulative} and sub-sequences of time series~\cite{oates2012exploiting,wang2013bag,bromuri2014multi}.
The computing processes for CTR and BoW share a common philosophy, where we fill bins with the sum of some kind of quantity.
The important difference is that the quantity is the cumulative stay-time for CTR, and for BoW, the quantity is the frequency. The resulting representation is thus quite different.
Also, BoW for time-series usually uses sub-sequences as words, and it assumes a high frequency of observations or less importance for the timestamp and duration variations~\cite{vintsyuk1968speech}, which goes against our modeling assumption.
Some method extract or learn features directly from raw observation sequences in EHR with deep nerural networks~\cite{ravi2017deep,makino2019artificial}; examples include an autoencoders~\cite{lasko2013computational,miotto2016deep2}, and convolutional neural networks (CNN)~\cite{cheng2016risk}.
The timestamps or intervals between observations are explicitly input to the model for encoding the dependencies between successive observations. RNN models have been used for this purpose~\cite{choi2016doctor,xiao2017modeling,che2018recurrent,rajkomar2018scalable,zhang2019attain}. However, if the timestamps are not reliable, directly inputting them into the model might not be effective.
Methods have been proposed to handle ambiguous timestamps where the time intervals are variable. RNN models, which are inputted timestamps or event intervals along with other features, have been used for this purpose~\cite{choi2016doctor,xiao2017modeling}.
When we can assume the well-regulated property of constant observations at regular intervals, namely standard time-series data, there are many established models, including vector autoregressive (VAR) models~\cite{lutkepohl2005new}, hidden Markov models~\cite{baum1966statistical}, recurrent neural networks (RNN)~\cite{rumelhart1985learning} including long short-term memory (LSTM) models~\cite{hochreiter1997long}, conditional restricted Boltzmann machines (RBM)~\cite{taylor2007modeling}, spiking Boltzmann machines~\cite{hinton2000spiking}, temporal RBMs~\cite{sutskever2007learning}, and recurrent temporal RBMs~\cite{sutskever2009recurrent}, and extensions of those models. Their application area is tremendous; e.g., sensor-data analysis is the most common application in the data mining and machine learning communities, and economic and marketing applications are attracting much attention. In these applications, the timestamp is simply regarded as an index. The order of the sequence is used to represent the temporal dependencies. If the time intervals are not constant, prediction performance is degraded in return for this simpler modeling.
A number of studies have focused on irregular time intervals of event sequences. Dynamic time warping~\cite{vintsyuk1968speech} is the classical method for handling temporal irregularity. It can represent the similarities between two sequences robustly against irregular time intervals. Some methods are aimed at obtaining a representation of the sequences in order to handle their complicated nature in contrast to the well-regulated property of time-series data. Frequent pattern mining~\cite{heins2014statistical} and visualization~\cite{wongsuphasawat2012exploring,gotz2014decisionflow,liu2017coreflow} have been studied extensively for this purpose. Representation learning leveraging the graph structures of event sequences and neural networks has also been proposed~\cite{hong2017event2vec}. In the area of activity recognition, event sequences are used for recognizing input, but they cannot be used for prediction ~\cite{ordonez2013activity}. A richer dependency of successive events, such as relationships among multiple sequences, can be incorporated by explicitly inputting timestamps or event intervals into the model along with other features representing the events. RNN models have been used for this purpose~\cite{choi2016doctor,xiao2017modeling}. However, if the timestamps are not reliable, directly inputting them into the model might not be effective.
\fi
\section{CTR: Cumulative Stay-time Representation}
We propose a cumulative stay-time representation, CTR, for directly modeling the cumulative stay-time of a specific patient's states as a novel formalization of raw observations in EHR.
\subsection{CTR-D: CTR with Discrete States}
We convert raw observations $\{\bm{X}, \bm{t}\}$ into the cumulative stay-time at a finite $K$ number of states as $K$-dimensional vector $\bz$, whose $k$-th element is $z_k \geq 0$.
Each state represents a combination of observed attribute values and can be seen as a bin segmented by a lattice that defines the value range of each attribute in each state, as shown in Fig.~\ref{FigCumTime}.
We cumulatively fill each bin with the stay time during which the raw observation falls into the corresponding value ranges.
By using the state function $\bs(\bm{x}^{\{\!m\!\}})\in \{0,1\}^K$, which outputs a one-hot vector representing the current state for input observation $\bm{x}^{\{\!m\!\}}$, CTR $\bz$ is defined as
\begin{align}
\label{EqCTR}
\bm{z}(\bX, \bt) &\equiv \sum_m d^{\{\!m\!\}}\bm{s}\left(\bm{x}^{\{\!m\!\}} \right),\\\nonumber
\mathrm{where}~~&d^{\{\!m\!\}} \equiv \lambda^{t^{\{\!M\!\}}-t^{\{\!m\!\}}} (t^{\{\!m\!\}}-t^{\{m-1\}})
\end{align}
where $d^{\{\!m\!\}}$ is the stay time for the $m$-th observation, which is estimated by calculating the difference between consecutive timestamps $t^{\{\!m\!\}}$ and $t^{\{m-1\}}$ with decay for weighting newer observations. $\lambda$ is the decay rate and is optimized in training.
Since the output of the function $\bs(\bm{x}^{\{\!m\!\}})$ is a one-hot vector, only one element in the vector can become $1$, and the others are $0$, so the index for the element with value $1$ represents the current state of the patient.
Thus, for the $m$-th observation, the element in $d^{\{\!m\!\}}\bm{s}\left(\bm{x}^{\{\!m\!\}} \right)$ with the current state becomes just $d^{\{\!m\!\}}$, and the others are $0$.
Through the summation of $d^{\{\!m\!\}}\bm{s}\left(\bm{x}^{\{\!m\!\}} \right)$ over $m$, each element of $\bz$ represents the sum of the stay time in each state over the observations. Also, from Eq.~\eqref{EqCTR}, this representation can explicitly handle variable observation intervals without any additional encoding.
The algorithm is described in Algorithm~\ref{alg1}.
The state function $\bs(\bm{x}^{\{\!m\!\}})$ is defined by the indicator function $\bI$, which always outputs a $K$-dimensional one-hot vector representing the current state:
\begin{equation}
\label{EqStateDis}
\bs(\bm{x}^{\{\!m\!\}}) \equiv \bm{I}\left(\bm{x}^{\{\!m\!\}}, \bm{A} \right),
\end{equation}
where $\bm{A} \equiv \{\bm{a}_k \}_{k=1}^K$ is the $K$ number of non-overlapping collectively exhaustive value segments. The detailed definition of $\bm{a}_k$ and the $k$-th element of the function $\bm{I}$ are in the appendix.
If $\bx^{\{\!m\!\}}$ falls into the $k$-th segment, only the $k$-th element of $\bm{I}\left(\bm{x}^{\{\!m\!\}}, \bm{A} \right)$ becomes $1$, and the others are $0$ because of the non-overlapping segmentation.
An example segmentation is shown in the table in Fig.~\ref{FigCumTime}, which is based on equally spaced boundaries over the value range of $\bx^{\{\!m\!\}}$, [-1, -0.5, 0, 0.5, 1], where $x^{\{\!m\!\}}_d$ is defined in $[-1, 1)$. For example, in a $3$-dimensional case, $K=4^3=64$.
We call CTR in Eq.~\eqref{EqCTR} with the state function in Eq.~\eqref{EqStateDis} \emph{CTR with discrete states} (CTR-D).
The discretely defined state $\bs(\bm{x}^{\{\!m\!\}})$ is easy to understand.
When the number of attributes in $\bx$ is small enough, we can practically use the function $\bs(\bm{x}^{\{\!m\!\}})$ in Eq.~\eqref{EqStateDis} for computing $\bz$.
However, since the number of combinations representing states grows exponentially with the number of attributes $D$, CTR-D cannot handle more than a few variables.
Observations in EHR have many more attributes in general. For example, when we set the number of segments to $100$, $K$ becomes $100^{D}$, which quickly causes a combinatorial explosion according to the number of attributes $D$.
Also, the non-continuous boundary prevents generalization between adjacent states, though adjacent states should represent states similar to each other because of the shared boundaries between them in our definition in Eq.~\eqref{EqStateDis} (see also Appendix A).
We thus extend the function $\bs(\bm{x}^{\{\!m\!\}})$ into a more practical one in the following sections.
\begin{algorithm}[t]
\caption{Cumulative stay-time representation}
\label{alg1}
\begin{algorithmic}[1]
\Require Raw observations $\{\bX, \bt\}$ and state function $\bs(\bullet)$
\Ensure Cumulative stay-time representation, CTR, $\bz$
\Initialize{$\bm{z} \leftarrow \bm{0}$}
\For{$m=1$ \textbf{to} $M$~~(which can be parallelized over $m$)}
\State $\bs^{\{\!m\!\}} \leftarrow \bs(\bm{x}^{\{\!m\!\}})$
\State $d^{\{\!m\!\}} \leftarrow \lambda^{t^{\{\!M\!\}}-t^{\{\!m\!\}}} (t^{\{\!m\!\}}-t^{\{m-1\}})$
\State $\bm{z} \leftarrow \bm{z} + d^{\{\!m\!\}} \bs^{\{\!m\!\}}$
\EndFor
\end{algorithmic}
\end{algorithm}
\subsection{CTR-K: CTR with Kernel-defined States}
For mitigating the exponential growth in the number of states, we change the definition of states in Eq.~\eqref{EqCTR} from discrete, i.e., what variable values an observation has, to continuous, i.e., \emph{how close an observation is to some basis vectors}, as shown in Fig.~\ref{FigCumTime}.
Continuous states are no longer represented as a one-hot vector corresponding to a unique state; they are represented as a weight vector determining at what proportion we assign the current stay time to each state represented by bases.
In this case, the number of states is limited to the number of bases and does not grow exponentially.
This also leads to interpolation between states and can smoothly represent intermediate states between the states.
We use a kernel function that represents affinities to bases for observations, where we construct the continuous state vector by assigning different values to multiple elements according to the affinities.
The state function $\bs_{\mathrm{K}}(\bm{x}^{\{\!m\!\}})\in \mathbb{R}^K$ based on the kernel function $\bm{\phi}$ is defined as
\begin{equation}
\label{EqStateCon}
\bs_{\mathrm{K}}(\bm{x}^{\{\!m\!\}}) \equiv \bm{\phi}\left(\bm{x}^{\{\!m\!\}},\bm{B} \right),
\end{equation}
where $\bm{B} \equiv \{ \bm{b}^{\{\!k\!\}} \}_{k=1}^K$ is the $K$ number of bases, and $\bm{b}^{\{\!k\!\}} \in \mathbb{R}^D$ is the $k$-th basis.
For example, $\bs_{\mathrm{K}}(\bm{x}^{\{\!m\!\}}) = \{0,0.3,0.7,0,\dots,0\}$ means that we assign the stay time for the $m$-th observation with weights of $0.3$ and $0.7$ to the second and third states, respectively, in the summation in Eq.~\eqref{EqCTR}. Bases can be randomly sampled from the training set.
When the observation variables are real-valued, as in our scenario, a natural choice of $\bphi$ is an RBF kernel, whose definition is provided in Appendix B. We can also use other kernels, such as tf-idf vector $+$ cosine similarity~\cite{rajaraman2011mining}, for binary features.
\if0
When the observation variables are real-valued, as in our scenario, the choice of $\bphi$ is an RBF kernel defined as
\begin{equation}
\label{EqStateKernel}
\bm{\phi}\left(\bm{x}^{\{\!m\!\}},\bm{B} \right)\equiv \left\{ \frac{\exp(-\gamma \|\bm{x}^{\{\!m\!\}}- \bm{b}^{\{\!k\!\}}\|^2)}{Z_m} \right\}_{k=1}^K,
\end{equation}
where $\gamma$ is a bandwidth parameter to be optimized with a grid search using a validation set in training data, and $Z_m\equiv \sum_k \exp(-\gamma \|\bm{x}^{\{\!m\!\}}- \bm{b}^{\{\!k\!\}}\|^2)$ is a normalizing factor for the $m$-th observation, which comes from the requirement for using $\bs_{\mathrm{K}}$ as weights for assigning the stay time in Eq.~\eqref{EqCTR}.
We can also use other kernels, such as tf-idf vector $+$ cosine similarity~\cite{rajaraman2011mining}, for binary features.
\begin{equation}
\label{EqStateKernelBinary}
\bm{\phi}\left(\bm{x}^{\{\!m\!\}},\bm{B} \right)\equiv \left\{ \frac{\bm{\xi}(\bm{x}^{\{\!m\!\}}) \bm{\xi}(\bm{x}^{\{\!k\!\}}))}{|\bm{\xi}(\bm{x}^{\{\!m\!\}})||\bm{\xi}(\bm{x}^{\{\!k\!\}}))|} \right\}_{k=1}^K,
\end{equation}\
where $\bm{\xi}(\bullet)$ represents tf-idf function~\cite{rajaraman2011mining}.
\fi
We call CTR in Eq.~\eqref{EqCTR} with the state function in Eq.~\eqref{EqStateCon} \emph{CTR with kernel-defined states} (CTR-K).
\subsection{CTR-N: CTR with Neural Network-defined States}
Additionally, we can consider the requirement for continuous state $\bs_{\mathrm{K}}(\bm{x}^{\{\!m\!\}})$ in Eq.~\eqref{EqStateCon} to represent a similar observation with a similar weight vector.
Such a vector can also be modeled with neural networks since they are trained to produce similar outputs from similar inputs.
We thus extend $\bs_{\mathrm{K}}(\bm{x}^{\{\!m\!\}})$ to $\bs_{\mathrm{N}}(\bm{x}^{\{\!m\!\}})\in \mathbb{R}^K$ by replacing kernel function $\bphi$ with a trainable neural network, $\bg$, e.g., multilayer perceptron (MLP), that produces a state-indicating weight vector similar to $\bm{\phi}$, as
\begin{align}
\label{EqStateNeural}
\bs_{\mathrm{N}}(\bm{x}^{\{\!m\!\}}) &\equiv \bg\left(\bm{x}^{\{\!m\!\}},\bm{\theta}_g \right),
\end{align}
where $\bm{\theta}_g$ are parameters for the neural network. The outputs of the final layer for $\bg$ should be normalized, such as by the softmax function, as a weight vector, and the number of the outputs is $K$.
The specific neural network structure for $\bg$ is shown in the Experiments section.
We call CTR in Eq.~\eqref{EqCTR} with the state function in Eq.~\eqref{EqStateNeural} \emph{CTR with neural network-defined states} (CTR-N).
This representation can be learned from data and thus provides more flexibility in adjusting the state definition to target data. Also, in contrast to CTR-D and CTR-K, CTR-N does not require choosing the state boundaries or the bases.
\newtheorem{lemma}{Lemma}
Formally, the following lemma characterizes CTR-K and CTR-N with respect to the three properties: (i) direct to model stay time, (ii) explicit to handle variable observation intervals, and (iii) scalable to high-dimensional data:
\begin{lemma}
(i) Every element in $\bm{z}(\bX, \bt)$ is a linear function of stay time $d$. Hence, $\bm{z}(\bX, \bt)$ is a direct representation of stay time.
(ii) $\bm{z}(\bX, \bt)$ is a function of an observation interval $t^{\{\!m\!\}}-t^{\{m-1\}}$.
(iii) The number of dimensions in $\bs_{\mathrm{K}}(\bm{x})$ and that of the corresponding $\bm{z}(\bX, \bt)$ depend on the number of bases, not the number of attributes in $\bx$, $D$. Also, the number of dimensions in $\bs_{\mathrm{N}}(\bm{x})$ and that of the corresponding $\bm{z}(\bX, \bt)$ depend on the number of outputs of $\bg$, not $D$.
\end{lemma}
\noindent The ordinary time-series representation $\bz_{\mathrm{ts}}(\bX, \bt)$ does not satisfy (i). CTR-D does not satisfy (iii).
\paragraph{Gradients for learning model parameters.}
We minimize Eq.~\eqref{EqPredictionError} by using gradient-based optimization methods.
We learn $f$ on the basis of Eqs.~\eqref{EqPredictionError} and~\eqref{EqCTR} by using the gradients for the model parameters for $f$, $\btheta_f$, as
\begin{align}
\label{gradient_f}
\frac{\partial \calL}{\partial \btheta_f} =& E \bigg[\frac{\partial \calL} {\partial f} \frac{\partial f} {\partial \btheta_f}\bigg],
\end{align}
where we omit the inputs of the functions for simplicity.
For CTR-N, in addition to learning $f$, we learn the parameters $\btheta_g$ of neural network $\bg$ in~\eqref{EqStateNeural}, which represents the state $\bs_{\mathrm{N}}(\bm{x}^{\{\!m\!\}})$.
The gradients for $\btheta_g$ can be derived as
\begin{align}
\label{gradient_N}
\frac{\partial \calL}{\partial \btheta_g} =& E \bigg[\frac{\partial \calL} {\partial f} \frac{\partial f} {\partial \bz} \sum_m d^{\{\!m\!\}} \frac{\partial g\left(\bm{x}^{\{\!m\!\}},\bm{\theta}_g \right)} {\partial \btheta_g}\bigg].
\end{align}
\if0
Using training samples, $\{y^{\{\!n\!\}}, \bX^{\{\!n\!\}}, \bt^{\{\!n\!\}}\}_n^N$, the gradient in Eq.~\eqref{gradient} can be estimated as
\begin{align}
\label{gradient_e1}
\frac{\partial \calL(f)}{\partial \btheta}= \frac{1}{N} \sum_{n=1}^N \bigg[&\frac{\partial \calL(y^{\{\!n\!\}},\bz^{\{\!n\!\}})} {\partial f(\bz^{\{\!n\!\}})} \frac{\partial f(\bz^{\{\!n\!\}})} {\partial \bz^{\{\!n\!\}}}\\\nonumber
&\times \sum_m d^{\{n,m\}} \frac{\partial \bs(\bm{x}^{\{n,m\}})} {\partial \btheta}\bigg].
\end{align}
\fi
\section{Experiments}
\if0
\begin{figure}
\caption{Examples of generated $\bz$ and $\bw$ in experiments on synthetic data ($K=100$). Darker color means higher value.}
\label{FigExSynthetic}
\end{figure}
\begin{figure*}
\caption{Comparison of proposed method and baselines in terms of C-index on synthetic data}
\label{FigExSyntheticRes5}
\end{figure*}
\begin{figure*}
\caption{Comparison of C-index on synthetic data (higher is better).}
\label{FigExSynthetic}
\end{figure*}
\begin{figure}
\caption{Comparison of C-index on synthetic data (higher is better).}
\label{FigExSynthetic}
\end{figure}
\begin{figure}
\caption{Comparison of C-index on Physionet data (higher is better).}
\label{FigExPhysionet}
\end{figure}
\fi
\begin{figure}
\caption{Comparison of C-index on (a) Synthetic and (b) Physionet (higher is better).}
\label{FigExSynPhy}
\end{figure}
We assessed the prediction performance of our method CTR in numerical experiments to show the effectiveness of directly modeling the cumulative stay-time.
\paragraph{Evaluation metric.}
We report the mean and standard error of the concordance index (C-index)~\cite{liu2018early} across $5$-fold cross-validation, each with a different randomly sampled training-testing split. The C-index becomes a high value when regressed MET values follow the true ordering, and it can handle censored data, which is essential for our application with EHR.
For each fold of the cross-validation, we randomly sampled $20\%$ of the training set for use as a validation set to determine the best hyperparameters for each method, where hyperparameters providing the highest C-index in the validation set were chosen.
\if0
The performance of event-time prediction is measured with the concordance index (C-index)~\cite{uno2011c,liu2018early}, which has a high value when regressed event time-values follow the true ordering.
For medical treatment and health guidance based on prediction, the ordering of event times is important. The C-index is defined as
\begin{align}
\label{cIndex}
\frac{1}{N_c} \sum_{\{n,l\} \in y^{\{\!n\!\}}<y^{\{\!l\!\}}} [\hy^{\{\!n\!\}} < \hy^{\{\!l\!\}}],
\end{align}
where $\hy^{\{\!n\!\}}$ and $\hy^{\{\!l\!\}}$ are the prediction results for the $n$-th patient and $l$-th patient, respectively, and $y^{\{\!n\!\}}$ and $y^{\{\!l\!\}}$ are the corresponding true event times. Censored data is sampled only for $l$ in the condition of $y^{n} < y^{l}$~\cite{uno2011c}, and $N_c$ is the number of combinations of $\{n,l\}$.
\fi
\paragraph{Implementations for CTRs.}
We used the loss proposed in~\cite{liu2018early}, which can handle censored data well.
We used a $3$-layer MLP with ReLU~\cite{nair2010rectified} (more specifically, $D$-$100$-$1$) as the prediction model $f$ for CTR-D, CTR-K, and CTR-N. In CTR-N, we used a $4$-layer MLP with ReLU (more specifically, $D$-$100$-$100$-$100$) as $\bg$ in Eq.~\eqref{EqStateNeural}, where the final layer is the softmax function for normalization.
More details on the implementation, such as the definitions for states in CTR-D and CTR-K, are provided in the appendix.
Note that we could not use CTR-D for real-world EHR experiments since we needed to handle a large number of attributes, which would cause a combinatorial explosion for $K$. For example, one of our real-world datasets contains $D=37$ attributes; when we set the number of segments to $100$, $K$ becomes $100^{37}$.
\paragraph{Methods compared.}
The proposed method was compared with four state-of-the-art methods:
\textbf{RankSVX}~\cite{liu2018early}, \textbf{LSTM}, \textbf{Transformer}~\cite{luo2020hitanet}, and \textbf{ODERNN}~\cite{rubanova2019latent}, where the loss for each method is the same as CTRs, and the number of hidden states in each model is the same as CTR-N.
They are based on the time-series representation with the stay time $\bd$ treated as another column for the input time series. Details on their implementation are provided in the appendix.
\paragraph{Combinations of CTR-N and compared methods.}
In real-world EHR experiments, we also examined combinations of CTR-N with the compared methods LSTM, Transformer, and ODERNN (\textbf{CTR+LSTM}, \textbf{CTR+Transformer}, and \textbf{CTR+ODERNN}, respectively). In these combinations, the representation just before the final linear layer of each model was extracted, concatenated with CTR $\bz$ as a single vector, and fed into the prediction model $f(\bz)$. They were trained in an end-to-end manner.
\paragraph{Computing infrastructure.}
All of the experiments were carried out
on workstations having $128$ GB of memory, a $4.0$-GHz CPU, and an Nvidia Tesla V100 GPU.
The computational time for each method was a few hours for producing the results for each dataset, \emph{except for ODERNN, which was $10$~to~$20$~times slower than the other methods.}
\subsection{Results}
We first use a synthetic dataset to investigate \emph{1) whether the method with CTR can indeed learn to predict what cannot be learned without CTR.}
Then, real-world EHR datasets are used to show the \emph{2) practical effectiveness of CTR.}
Finally, we show that \emph{3) CTR enhances the prediction performance, especially for EHR with relatively longer observation periods}, where cumulative health conditions are more crucial for MET. Details on the datasets are provided in the appendix.
\paragraph{Synthetic.}
The Synthetic dataset was generated on the basis of our assumed nature, i.e., the cumulative stay-time for each state leads to the development of a disease.
The number of records was $N=1,000$, the observation length for each record was $M=10$, and the number of attributes was $D=2$. The observation intervals varied between records.
We addressed large ($K=100$), medium ($K=49$), and small ($K=25$) numbers of states settings in data generation.
The results are shown in Fig.~\ref{FigExSynPhy}-(a), where the bars represent the means of the C-index across $5$-fold cross-validation, and the confidence intervals are standard errors. We show the results with the small ($K=25$) number of states here, and the others are provided in the appendix.
We can see that the overall performance of the proposed method was significantly better than those of the compared methods, which demonstrates that \emph{the proposed method with CTR can learn what cannot be learned without CTR well.}
Note that we used multiple settings for CTR-D: the same number of states $K$ for the data generation (CTR-D-True), $K_d-1$ (CTR-D-Minus), and $K_d+1$ (CTR-D-Plus), where $K_d \equiv \sqrt[2]{K}$. We used CTR-D-True as the reference when we knew the true CTR; it thus should achieve the highest score. CTR-K and CTR-N were better than CTR-D with the wrong number of states even if the error was $1$, which demonstrates that CTR-K and CTR-N have a better generalization capability than CTR-D against data variation.
CTR-N performed the best, which demonstrates that CTR-N learns states from data well.
\paragraph{Physionet.}
The Physionet dataset is a publicly available real-world EHR dataset (Physionet Challenge 2012~\cite{silva2012predicting}). The number of records was $N=8,000$, and the number of attributes was $D=37$. The observation intervals varied between records.
The results for the MET prediction task for patient death are shown in Fig.~\ref{FigExSynPhy}-(b) with the same configuration as the results of the Synthetic dataset.
The performances of the methods with CTR were better than those of the methods without CTR by a sufficient margin in terms of standard error.
These results demonstrate that CTR can improve the C-index in the MET prediction problem with real-world EHR. We omitted the results with CTR-K since it was always worse than CTR-N.
CTR-N achieved the best performance on average in comparison with the single models. In addition, when looking at results for combinations of CTR-N and other models, CTR+LSTM, CTR+Transformer, and CTR+ODERNN, we can see that adding CTR-N to these models improved their performance further, which shows the high modularity of CTR to work complementarily with other models.
\emph{This shows that CTR and the time-series models captured different temporal natures in real-world EHR.}
We can automatically determine which type of temporal natures to take into account with the training dataset by training the prediction model $f$, which is put on top of these models.
\paragraph{Case study.}
The above experiments on two different datasets have shown that the methods with CTR have superior prediction performance compared with the state-of-the-art methods for MET prediction from EHR.
Here, we show a real healthcare use-case, where we predict the onset of complications with diabetes mellitus from a real-world big EHR database.
We used datasets provided by one of the largest hospitals in Japan that has maintained a big database of more than $400,000$ patients since $2004$~\cite{makino2019artificial,inaguma2020increasing}.
We worked with six datasets for six kinds of complications of diabetes mellitus: hyperosmolar (HYP), nephrology (NEP), retinopathy (RET), neuropathy (NEU), vascular disease (VAS), and other complications (OTH), each of which has over $N=15,000$ records. The number of attributes was $D=26$, and the observation intervals and lengths varied between records.
In this scenario, ODERNN, which is $10$ to $20$ times slower than the other methods, did not meet the practical needs for this large-scale dataset.
Thus, we here show a comparison between the proposed method and LSTM, which was the second-best baseline in the experiments with the Synthetic and Physionet datasets.
The results of the mean and standard error of the C-index across $5$-fold cross-validation are listed in Table~\ref{tabDiabetesResults}.
For most of the six tasks having over $15,000$ samples each, the performances of the methods with CTR were better than LSTM by a sufficient margin in terms of standard error.
Complications of diabetes mellitus are known to develop due to time-cumulative effects for vessels with an unhealthy status. The results showed that our explicit mechanisms are essential to learning such effects to achieve higher prediction performance.
\paragraph{Performance analysis on different observation periods.}
We further analyzed the performance improvements of the methods with CTR compared with LSTM over different observation periods by using the Case study dataset containing EHR with more extended periods.
We plotted the mean improvements of the C-index between them for data with different observation periods, as shown in Fig.~\ref{FigExDiffPeriod}, where the confidence intervals are standard errors of the improvements.
The right region in the figure shows the results for data with longer observation periods.
It shows that \emph{CTR improved the performance, especially for data with relatively longer observation periods}, where cumulative health conditions are more crucial for MET prediction.
\begin{table}[t]
\centering
\small
\begin{tabular}{cccc}
\toprule
{}&\!LSTM\!&\!\textbf{CTR-N}\!&\!\textbf{CTR+LSTM}\!\\
\midrule
HYP&\!0.589$\pm$0.026\!&\!$\bm{0.612}$$\pm$0.026\!&\!0.583$\pm$0.036\!\\
NEP&\!0.689$\pm$0.012\!&\!$\bm{0.739}$$\pm$0.010\!&\!0.708$\pm$0.007\!\\
RET&\!0.717$\pm$0.013\!&\!0.721$\pm$0.026\!&\!$\bm{0.745}$$\pm$0.008\!\\
NEU&\!0.569$\pm$0.023\!&\!$\bm{0.608}$$\pm$0.020\!&\!0.600$\pm$0.020\!\\
VAS&\!0.503$\pm$0.035\!&\!0.481$\pm$0.013\!&\!$\bm{0.534}$$\pm$0.022\!\\
OTH&\!0.718$\pm$0.015\!&\!$\bm{0.741}$$\pm$0.020\!&\!0.734$\pm$0.011\!\\
\midrule
Average&\!0.664\!&\!$0.687$\!&\!$\bm{0.688}$\!\\
\bottomrule
\end{tabular}
\caption{Comparison of C-index in case study (higher is better). Confidence intervals are standard errors. Best results are in bold.}
\label{tabDiabetesResults}
\end{table}
\begin{figure}
\caption{Analysis of performance improvement over different observation periods (higher is better).}
\label{FigExDiffPeriod}
\end{figure}
\section{Conclusion}
We proposed a cumulative stay-time representation, CTR, for a novel representation of EHR.
CTR can efficiently handle the situation in which the development of some disease is related to the cumulative stay-time of a specific patient's health conditions, e.g., non-communicable diseases.
We developed three variations of CTR with discrete states, continuous states with kernel functions, and continuous states with neural networks.
In particular, CTR with neural networks, CTR-N, is practical because it has scalability handling high-dimensional data and can be learned from data for flexibility in adjusting to the target data.
An experimental evaluation demonstrated that the method with CTR performed better than the methods without CTR.
Application to domains other than EHR will be an interesting avenue for future work.
\if0
\section{Ethical/societal impact}
The common concern when learning from data that is collected through experiments conducted with human participants, including healthcare and medical applications, is producing estimation models biased towards or against specific groups in a population.
Recent works on fairness in machine learning~\cite{pedreshi2008discrimination,kilbertus2017avoiding,kusner2017counterfactual,nabi2018fair,lipton2018does,zhang2018equality,locatello2019fairness,singh2019policy,bera2019fair,ding2020differentially,yan2020fairness,narasimhan2020pairwise,rezaei2020fairness} are one example of help for this, and developing efficient ways of applying them into our approach would be an interesting and useful next step of our study.
Another risk of estimation that may affect human decisions would be false alerts/reports and overlooking some important event.
We believe that the estimation should be carefully used as just one source of information, and it is better that actual decision-making based on this estimation is done from a broader perspective.
\fi
\fontsize{9.0pt}{10.0pt} \selectfont
\appendix
\section{Detailed Definition of States in CTR-D}
The state function $\bs(\bm{x}^{\{\!m\!\}})$ is defined by the indicator function $\bI$, which always outputs a $K$-dimensional one-hot vector representing the current state:
\begin{equation}
\label{EqStateDis2}
\bs(\bm{x}^{\{\!m\!\}}) \equiv \bm{I}\left(\bm{x}^{\{\!m\!\}}, \bm{A} \right),
\end{equation}
where $\bm{A} \equiv \{\bm{a}_k \}_{k=1}^K$ is the set of $K$ non-overlapping, collectively exhaustive value segments.
The segment for the $k$-th state, $\bm{a}_k\equiv \left\{\left[\zeta_{d,k}, \xi_{d,k} \right)\right\}_{d=1}^D$, represents the combination of $D$ number of value ranges, where $\zeta_{d,k}$ and $\xi_{d,k}$ respectively represent lower and higher boundaries for the $d$-th attribute $x^{\{\!m\!\}}_d$.
By using $\zeta_{d,k}$ and $\xi_{d,k}$, the $k$-th element of the function $\bm{I}$ is
\begin{equation}
\label{EqRule}
\left[\bm{I}\left(\bm{x}^{\{\!m\!\}}, \bm{A} \right)\right]_k \equiv \prod_d \mathbbl{1}\left(\zeta_{d,k} \le x^{\{\!m\!\}}_d < \xi_{d,k} \right),
\end{equation}
where $\mathbbl{1}(\bullet)$ is an indicator function that returns a value of $1$ only when the condition $\bullet$ is satisfied and otherwise returns $0$.
If $\bx^{\{\!m\!\}}$ falls into the $k$-th segment, only the $k$-th element of $\bm{I}\left(\bm{x}^{\{\!m\!\}}, \bm{A} \right)$ becomes $1$ and the others $0$ because of the non-overlapping segmentation.
An example segmentation is shown in the table in Fig.~2 in the main text, which is based on equally spaced boundaries over the value range of $\bx^{\{\!m\!\}}$, [-1, -0.5, 0, 0.5, 1], where $x^{\{\!m\!\}}_d$ is defined in $[-1, 1)$.
For example, in a $3$-dimensional case, $K=4^3=64$.
\section{Detailed Definition of Kernels in CTR-K}
When the observation variables are real-valued, as in our scenario, the choice of $\bphi$ is an RBF kernel defined as
\begin{equation}
\label{EqStateKernel}
\bm{\phi}\left(\bm{x}^{\{\!m\!\}},\bm{B} \right)\equiv \left\{ \frac{\exp(-\gamma \|\bm{x}^{\{\!m\!\}}- \bm{b}^{\{\!k\!\}}\|^2)}{Z_m} \right\}_{k=1}^K,
\end{equation}
where $\gamma$ is a bandwidth parameter to be optimized with a grid search using a validation set in training data, and $Z_m\equiv \sum_k \exp(-\gamma \|\bm{x}^{\{\!m\!\}}- \bm{b}^{\{\!k\!\}}\|^2)$ is a normalizing factor for the $m$-th observation, which comes from the requirement for using $\bs_{\mathrm{K}}$ as weights for assigning the stay time in Eq. (2) in the main text.
We can also use other kernels, such as tf-idf vector $+$ cosine similarity~\cite{rajaraman2011mining}, for binary features. It is defined as
\begin{equation}
\label{EqStateKernelBinary}
\bm{\phi}\left(\bm{x}^{\{\!m\!\}},\bm{B} \right)\equiv \left\{ \frac{\bm{\xi}(\bm{x}^{\{\!m\!\}})^\top \bm{\xi}(\bm{b}^{\{\!k\!\}})}{\|\bm{\xi}(\bm{x}^{\{\!m\!\}})\|\,\|\bm{\xi}(\bm{b}^{\{\!k\!\}})\|} \right\}_{k=1}^K,
\end{equation}
where $\bm{\xi}(\bullet)$ represents tf-idf function~\cite{rajaraman2011mining}.
\section{Details on Experiments with Synthetic Dataset}
We randomly generated $N=1,000$ samples, where the $n$-th sample was represented by $\{\bX^{\{\!n\!\}}, \bd^{\{\!n\!\}}\}$. Note that we directly generated duration $\bd$ instead of timestamp $\bt$. Each sample contained $M=10$ number of raw observations, and the $m$-th observation in the $n$-th sample, $\bx^{\{n,m\}}$, was generated from uniform distribution $\UniformDist(\bx^{\{n,m\}} | -1, 1)$. We set the number of attributes in $\bx^{\{n,m\}}$ to $D=2$. The corresponding duration $d^{\{n,m\}}$ was then generated from $\UniformDist(d^{\{n,m\}} | 0, 1)$.
After that, using $\{\bX^{\{\!n\!\}}, \bd^{\{\!n\!\}}\}$, $\bz^{\{\!n\!\}}$ was computed by using Eqs.~(2)--(4) in the main text with equally spaced state boundaries over the value range of $\bx^{\{n,m\}}$ under different settings of numbers of states $K = \{25, 49, 100\}$. Under this $D=2$ setting, $\bz^{\{\!n\!\}}$ can be viewed as a matrix, as shown in Fig.~\ref{FigExSynthetic_example} ($K = 100$).
Finally, we generated $N$ sets of true labels $\by = \{y^{\{\!n\!\}}\}_{n=1}^{N}$ from Gaussian distribution $\NormalDist(y^{\{\!n\!\}} |\tr(\bw^\top \bz^{\{\!n\!\}}), 0.1)$, where $\bw$ is the same size of matrix as $\bz$ that was generated using a Gaussian function, whose center is the center of the bins for $\bz$ and whose width is $1$, as shown in Fig.~\ref{FigExSynthetic_example}; $\tr(\bullet)$ represents the trace of the matrix $\bullet$, and $\top$ denotes the transpose.
Since $\bw$ is generated from a smooth function over states, we have a chance to generalize among states even with a small number of samples.
Since the generation process for $y$ is non-linear and there is no longer a correlation between $y$ and $\{\bX, \bd\}$, learning with this data is difficult in general.
\begin{figure}
\caption{Examples of generated $\bz$ and $\bw$ in experiments on synthetic data ($K=100$). Darker color means higher value.}
\label{FigExSynthetic_example}
\end{figure}
\begin{figure*}
\caption{Comparison of C-index on synthetic data (higher is better).}
\label{FigExSynthetic_results}
\end{figure*}
We repeatedly evaluated the proposed method for each of the following settings for the number of states $K$ in $\bz$: $K = \{25, 49, 100\}$, which are referred to as the small, medium, and large numbers of states settings in the main text.
This corresponds to dividing each attribute value into $K_d \equiv \sqrt[2]{K} = \{5, 7, 10\}$ segments in this $D=2$ setting, respectively.
We trained models with only $\bX$, $\bd$, and $y$ without $\bz$.
\paragraph{Implementation.}
We used the squared loss for the loss function $\calL$ in Eq.~(1) in the main text.
Then, using training samples, we empirically estimated the expected loss $E[\calL]$ in Eq.~(1) in the main text as
\begin{equation}
\label{EqLossSynthetic}
E[\calL(f(\bz),y)]\simeq \frac{1}{N_{\mathrm{train}}}\sum_n ( y^{\{\!n\!\}}- f(\bz^{\{\!n\!\}}))^2,
\end{equation}
where $\bz^{\{\!n\!\}}$ is $\bz(\bX^{\{\!n\!\}},\bd^{\{\!n\!\}})$ with input $\bd^{\{\!n\!\}}$ instead of $\bt^{\{\!n\!\}}$, and $N_{\mathrm{train}}$ is the number of training samples.
We used a $3$-layer MLP with ReLU~\cite{nair2010rectified} (more specifically, $D$-$100$-$1$) as the prediction model $f$ for CTR-D, CTR-K, and CTR-N. Each specific implementation for the states in CTR-D, CTR-K, and CTR-N was as follows.
\textbf{CTR-D:} We used equally spaced state boundaries with multiple settings of $K'$: the same value as $K$ for the data generation (CTR-D-True), $K_d-1$ (CTR-D-Minus), and $K_d+1$ (CTR-D-Plus).
Note that we used CTR-D-True as the reference when we knew the true CTR; it thus should achieve the highest score.
We here examined how closely the other methods performed to CTR-D-True.
\textbf{CTR-K:} We used the RBF kernel in Eq.~(5) in the main text as $\bphi$ in Eq.~(4) in the main text with $K=100$ number of bases randomly sampled from the training set and with the candidates of hyperparameter $\gamma$ as $\{10^{-2},10^{-1},10^{0},10^{1},10^{2} \}$.
\textbf{CTR-N:} We used a $4$-layer MLP with ReLU (more specifically, $D$-$100$-$100$-$100$) as $\bg$ in Eq.~(6) in the main text, where the final layer is the softmax function for normalization.
For optimization, we used Adam with the recommended hyperparameters~\cite{kingma2014adam}, and the number of samples in the mini batches was $64$. We also used a dropout~\cite{srivastava2014dropout} with a rate of $50\%$ and batch normalization~\cite{ioffe2015batch} after each fully connected layer for both MLPs in $f$ and $\bg$.
By using the learned $\hf$ and $\hat{\bg}$, we estimated $\hy = \hf(\bz(\bX,\bd))$ for the new data.
\paragraph{Methods for comparison.}
The proposed method was compared with four state-of-the-art methods.
\textbf{RankSVX} is the method proposed in~\cite{liu2018early} that uses the loss in Eq.~\eqref{EqLossEHR} and has the same prediction model $f$ as our method without CTR, i.e., $f(\bx_{\mathrm{dem}})$. For its input, we used mean, standard deviation, and $\{0.1, 0.25, 0.5, 0.75, 0.9\}$ quantiles in the input time series having the stay time $\bd$ treated as another column for the input time series.
\textbf{LSTM}, \textbf{Transformer}, and \textbf{ODERNN} have the same prediction model $f$ as our method but have outputs of LSTM, Transformer, and ODERNN as inputs for $f$ instead of $\bz$, i.e., $f(\bh_{\mathrm{LSTM}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, $f(\bh_{\mathrm{Transformer}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, and $f(\bh_{\mathrm{ODERNN}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, respectively, where the number of hidden states in each model is the same as CTR-N, and $\bz_\mathrm{TS}$ is a time-series representation with stay time.
They are based on the time-series representation.
\paragraph{Results.}
The results are shown in Fig.~\ref{FigExSynthetic_results}, where the bars represent the means of the C-index across $5$-fold cross-validation, and the confidence intervals are standard errors.
We can see that the overall performance of the proposed method was significantly better than those of the compared methods, which demonstrates that the proposed method with CTR can learn what cannot be learned without CTR well.
Note that CTR-K and CTR-N were better than CTR-D with the wrong number of states even if the error was $1$, which demonstrates that CTR-K and CTR-N have a better generalization capability than CTR-D against data variation.
We also found that CTR-N performed the best, which demonstrates that CTR-N learns states for CTR from data well.
\section{Details on Experiments with Physionet Dataset}
We applied our CTR to a publicly available real-world EHR dataset (Physionet Challenge 2012~\cite{silva2012predicting}). The dataset consists of $N=8,000$ patients' records, and each record consists of $D=37$ lab test results, such as Albumin, heart-rate, glucose etc., from the first 48 hours after admission to an \emph{intensive care unit} (ICU). Details on the lab tests are at \url{https://physionet.org/content/challenge-2012/1.0.0/}.
We used this dataset for the MET prediction task for patient death.
The input EHR for the $n$-th patient was represented by $\{\bX^{\{\!n\!\}}, \bt^{\{\!n\!\}}\}$, where the raw observations $\bX^{\{\!n\!\}}$ were real-valued results of lab tests whose $m$-th observation was $\bx^{\{n,m\}}$, and the observation length for $\bX^{\{\!n\!\}}$ was $M_n$, which differed over patients.
$\bt^{\{\!n\!\}}$ represents the corresponding timestamps. The observation intervals between them vary over time. Thus, the stay time information and the direct modeling of it as a variable is crucial for prediction with this dataset.
The MET label for the $n$-th input EHR was $y^{\{\!n\!\}} > 0$, which is minutes over which a patient died.
All features were z-normalized with the exception of binary features.
\paragraph{Implementation.}
In this experiment, since there were censored data, we used the loss of a combination of both regression and ranking of MET values for $\calL$ in Eq.~(1) in the main text, which follows the work of~\cite{liu2018early}.
The ranking loss handles censored data well since it does not require a value for label $y$ but instead is penalized if the values predicted for patients who developed a complication earlier are larger than those predicted for patients who developed the complication later or whose data were censored.
We then used training samples to empirically estimate the expected loss $E[\calL]$ in Eq.~(1) in the main text as
\begin{align}
\label{EqLossEHR}
&E[\calL(f(\bz),y)] \simeq \frac{1}{N_{\mathrm{train}}}\sum_n ( y^{\{\!n\!\}}- f(\bz^{\{\!n\!\}},\bx_{\mathrm{dem}}^{\{\!n\!\}}))^2 \nonumber\\
&- \frac{1}{N_c} \sum_{\{n,l\} \in y^{\{\!n\!\}}<y^{\{\!l\!\}}} \ln \sigma(f(\bz^{\{\!n\!\}},\bx_{\mathrm{dem}}^{\{\!n\!\}})-f(\bz^{\{\!l\!\}},\bx_{\mathrm{dem}}^{\{\!l\!\}})),
\end{align}
where $\bz^{\{\!\bullet\!\}}$ is $\bz(\bX^{\{\!\bullet\!\}},\bt^{\{\!\bullet\!\}})$, $\sigma$ is the sigmoid function, $n$ is sampled only from labeled data, $l$ is sampled from both censored and uncensored data under the condition of $y^{n} < y^{l}$, and $N_c$ is the number of combinations of $\{n,l\}$ in the training set. In this experiment, we used \textbf{CTR-N} with the same implementation as CTR-N in the previous experiment. Note that we could not use CTR-D here since we needed to handle $D=37$ attributes, which would cause a combinatorial explosion for $K$, i.e., when we set the number of segments to $100$, $K$ becomes $100^{37}$.
\if0
\subsubsection{Other implementations}
Missing observations in $\bX$ are lineally interpolated in accordance with time.
Also, we can make $d$ decay for weighting newer observations, such as $d'^{\{\!m\!\}} \equiv \lambda^{t_N-t_n} d'^{\{\!m\!\}}$, where $\lambda$ is the decay rate (but in the current implementation, we does not use this).
$z$ is always positive, and we divide it by the summation of elements for standardization.
Since we do not have all observations over patient life, we then multiply the $z$ by relative age (age$/100$) for standardizing age.
We use softmax function for standardization.
\fi
\paragraph{Methods for comparison.}
The methods for comparison were the same as in the synthetic dataset experiments.
\paragraph{Combinations of CTR and time-series models.}
We also examined combinations of CTR-N with time-series models LSTM, Transformer, and ODERNN by concatenating their representations as inputs for $f$, i.e., $f(\bz, \bh_{\mathrm{LSTM}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, $f(\bz, \bh_{\mathrm{Transformer}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, and $f(\bz, \bh_{\mathrm{ODERNN}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, respectively (\textbf{CTR+LSTM}, \textbf{CTR+Transformer}, and \textbf{CTR+ODERNN}).
\section{Details on Experiments with Case Study Dataset}
The case study dataset was a real-world EHR dataset provided by one of the largest hospitals in Japan that has maintained a big database of more than $400,000$ patients since $2004$~\cite{inaguma2020increasing}.
We worked with six datasets for six kinds of complications of diabetes mellitus: hyperosmolar (HYP), nephrology (NEP), retinopathy (RET), neuropathy (NEU), vascular disease (VAS), and other complications (OTH).
The numbers of samples were HYP: $N=15,428$, NEP: $N=15,862$, RET: $N=15,882$, NEU: $N=15,644$, VAS: $N=15,536$, and OTH: $N=15,591$.
The input EHR for the $n$-th patient was represented by $\{\bX^{\{\!n\!\}}, \bt^{\{\!n\!\}}\}$, where the raw observations $\bX^{\{\!n\!\}}$ were real-valued results of lab tests whose $m$-th observation was $\bx^{\{n,m\}}$, the number of attributes for $\bx^{\{n,m\}}$ was $D=26$, and the observation length for $\bX^{\{\!n\!\}}$ was $M_n$, which differed over patients. Details on the lab tests are summarized in Table~\ref{tabListLabTests}.
$\bt^{\{\!n\!\}}$ represents the corresponding timestamps. The observation intervals between them vary over time. Thus, the stay time information and the direct modeling of it as a variable is crucial for prediction with this dataset, as in the previous experiments.
The MET label for the $n$-th input EHR was $y^{\{\!n\!\}} > 0$, which is the number of days over which a complication developed.
We used the patients' demographic information, such as their age and sex, which are summarized in Table~\ref{tabListDemoInfo}, with their latest lab test results as $\bx_{\mathrm{dem}}^{\{\!n\!\}}$ and input it to $f$ in addition to $\bz$ as $f(\bz, \bx_{\mathrm{dem}}^{\{\!n\!\}})$. We used their actual age, ``Age," as a natural number-valued feature and also applied one-hot encoding for each generation as a binary feature, such as ``20s (binary)." For sex, we used one-hot encoding again as ``Male (binary)" or ``Female (binary)."
All features were z-normalized with the exception of binary features.
\paragraph{Implementation.}
In this experiment, since there were censored data, we used the same loss as the Physionet experiments~\cite{liu2018early}.
In this experiment, \textbf{CTR-K} and \textbf{CTR-N} had the same implementations as CTR-K and CTR-N in the previous experiments. Note that we could not use CTR-D here since we needed to handle $D=26$ attributes, which would cause a combinatorial explosion for $K$, i.e., when we set the number of segments to $100$, $K$ becomes $100^{26}$.
\if0
\subsubsection{Other implementations}
Missing observations in $\bX$ are lineally interpolated in accordance with time.
Also, we can make $d$ decay for weighting newer observations, such as $d'^{\{\!m\!\}} \equiv \lambda^{t_N-t_n} d'^{\{\!m\!\}}$, where $\lambda$ is the decay rate (but in the current implementation, we does not use this).
$z$ is always positive, and we divide it by the summation of elements for standardization.
Since we do not have all observations over patient life, we then multiply the $z$ by relative age (age$/100$) for standardizing age.
We use softmax function for standardization.
\fi
\paragraph{Methods for comparison.}
The proposed method was compared with four state-of-the-art methods.
\textbf{RankSVX} is the method proposed in~\cite{liu2018early} that uses the loss in Eq.~\eqref{EqLossEHR} and has the same prediction model $f$ as our method without CTR, i.e., $f(\bx_{\mathrm{dem}})$. For its input, we used mean, standard deviation, and $\{0.1, 0.25, 0.5, 0.75, 0.9\}$ quantiles in the input time series having the stay time $\bd$ treated as another column for the input time series.
\textbf{CNN}, \textbf{GRU}, and \textbf{LSTM} have the same prediction model $f$ as our method but have outputs of CNN~\cite{cheng2016risk,makino2019artificial,phan2021deep}, GRU~\cite{chung2014empirical,tang2017memory}, and LSTM as inputs for $f$ instead of $\bz$, i.e., $f(\bh_{\mathrm{CNN}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, $f(\bh_{\mathrm{GRU}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, and $f(\bh_{\mathrm{LSTM}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, respectively, where the number of hidden states in each model is the same as CTR-N, and $\bz_\mathrm{TS}$ is a time-series representation with stay time.
They are based on the time-series representation.
\paragraph{Combinations of CTR and time-series models.}
We also examined combinations of CTR-N with time-series models CNN, GRU, and LSTM by concatenating their representations as inputs for $f$, i.e., $f(\bz, \bh_{\mathrm{CNN}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, $f(\bz, \bh_{\mathrm{GRU}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, and $f(\bz, \bh_{\mathrm{LSTM}}(\bz_\mathrm{TS}),\bx_{\mathrm{dem}})$, respectively (\textbf{CTR+CNN}, \textbf{CTR+GRU}, and \textbf{CTR+LSTM}).
\begin{table}[t]
\caption{List of lab tests in EHR.}
\label{tabListLabTests}
\centering
\begin{tabular}{cc}
\toprule
No&Lab test\\
\midrule
1&Blood sugar level\\
2&BMI\\
3&BUN\\
4&CRP\\
5&Diastolic blood pressure\\
6&eGFR\\
7&Fe\\
8&Ferritin\\
9&HbA1c\\
10&HDL cholesterol\\
11&Hematocrit level\\
12&Hemoglobin\\
13&LDL cholesterol\\
14&MCH\\
15&MCHC\\
16&MCV\\
17&Serum albumin\\
18&Serum creatinine\\
19&Systolic blood pressure\\
20&Total cholesterol\\
21&Transferrin saturation\\
22&Triglyceride\\
23&UIBC\\
24&Uric acid\\
25&Urine occult blood\\
26&Urine protein\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[t]
\caption{List of demographic information of patients.}
\label{tabListDemoInfo}
\centering
\begin{tabular}{cc}
\toprule
No&Demographic information\\
\midrule
1&Age\\
2&0s (binary)\\
3&10s (binary)\\
4&20s (binary)\\
5&30s (binary)\\
6&40s (binary)\\
7&50s (binary)\\
8&60s (binary)\\
9&70s (binary)\\
10&80s (binary)\\
11&90s (binary)\\
12&100s (binary)\\
13&Male (binary)\\
14&Female (binary)\\
\bottomrule
\end{tabular}
\end{table}
\if0
\begin{table}[t]
\caption{List of lab tests in EHR.}
\label{tabListLabTests}
\centering
\begin{tabular}{cc|cc}
\toprule
No&Lab test&No&Lab test\\
\midrule
\midrule
1&Albumin&14&Iron Saturation\\
2&Blood glucose level&15&LDL Cholesterol\\
3&BMI&16&Maximum blood pressure\\
4&BUN&17&MCH\\
5&Creatinine&18&MCHC\\
6&CRP&19&MCV\\
7&eGFR&20&Minimum blood pressure\\
8&Ferritin&21&Neutral fat\\
9&HbA1c&22&Total cholesterol\\
10&Hematocrit value&23&UIBC\\
11&Hemoglobin&24&Uric acid\\
12&HFL cholesterol&25&Urine occult blood\\
13&Iron&26&Urine protein\\
\bottomrule
\end{tabular}
\end{table}
\fi
\paragraph{Results.}
The results of the mean and standard error of the C-index are listed in Table~\ref{tabDiabetesResults2}.
For most of the six tasks having over $10,000$ samples each, the performances of the methods with CTR were better than those of the methods without CTR by a sufficient margin in terms of standard error.
These results demonstrate that CTR can improve the C-index in the MET prediction problem with real-world EHR.
We found that CTR-N based on the neural network performed significantly better than CTR-K based on the kernel, which demonstrates that CTR-N learns states from data well even for real-world EHR.
CTR-N achieved the best performance on average in comparison with the single models. In addition, when looking at results for combinations of CTR-N and other models, CTR+CNN, CTR+GRU, and CTR+LSTM, even for tasks where the performances of CTR-N alone were worse than the others, we can see that adding CTR-N to them improved their performance further, such as results on RET and VAS. Also, these combinations almost constantly enhanced the performances of the original ones, which shows the high modularity of CTR to work complementarily with other models.
This shows that CTR and the time-series models captured different temporal natures.
We can determine which type of temporal natures to take into account with the training dataset by training $f$ and putting it on top of these models.
\if0
\begin{table*}[t]
\caption{Comparison of proposed variations and baselines in terms of C-index (higher is better) for real EHR data. We show best methods in bold and confidence intervals are standard errors.}
\label{tabDiabetesResults}
\centering
\begin{tabular}{cccccc|cc}
\toprule
{} & NEP & RET & NEU & VAS & HYP & Average \\
\midrule
Static & 0.68$ \pm $0.008 & 0.64$ \pm $0.011 & 0.53$ \pm $0.021 & 0.50$ \pm $0.011 & 0.71$ \pm $0.008 & 0.63 \\
Simple & 0.66$ \pm $0.010 & 0.66$ \pm $0.013 & 0.55$ \pm $0.020 & 0.47$ \pm $0.026 & 0.71$ \pm $0.008 & 0.63 \\
LSTM & 0.67$ \pm $0.012 & 0.72$ \pm $0.017 & 0.55$ \pm $0.008 & $\bm{0.55 \pm 0.010}$ & 0.75$ \pm $0.009 & 0.67 \\
CTR-K & 0.70$ \pm $0.014 & 0.65$ \pm $0.034 & 0.54$ \pm $0.018 & 0.48$ \pm $0.040 & 0.70$ \pm $0.026 & 0.64 \\
CTR-N & $\bm{0.75 \pm 0.017}$ & 0.72$ \pm $0.021 & $\bm{0.63 \pm 0.013}$ & 0.50$ \pm $0.009 & 0.74$ \pm $0.007 & $\bm{0.70}$ \\
CTR+LSTM & 0.70$\pm$0.012 & $\bm{0.75\pm0.014}$ & 0.57$\pm$0.007 & $\bm{0.55\pm0.010}$ & $\bm{0.76\pm0.012}$ & 0.69 \\
\bottomrule
\end{tabular}
\end{table*}
\fi
\begin{table*}[t]
\centering
\small
\begin{tabular}{ccccccc|c}
\toprule
{} & HYP & NEP & RET & NEU & VAS & OTH & Average\! \\
\hline
\!RankSVX\! & \!0.566$\!\pm\!$0.052\! & \!0.688$\!\pm\!$0.008\! & \!0.628$\!\pm\!$0.028\! & \!0.539$\!\pm\!$0.024\! & \!0.458$\!\pm\!$0.020\! & \!0.684$\!\pm\!$0.019\! & \!0.622\! \\
\!CNN\! &\! 0.608$\!\pm\!$0.033 \!&\! 0.668$\!\pm\!$0.007 \!&\! 0.697$\!\pm\!$0.012 \!&\! 0.578$\!\pm\!$0.021 \!&\! 0.469$\!\pm\!$0.015 \!&\! 0.726$\!\pm\!$0.017 \!&\! 0.651 \\
\!GRU\! & \!0.551$\!\pm\!$0.064\! & \!0.705$\!\pm\!$0.010\! & \!$\bm{0.731}$$\!\pm\!$0.009\! & \!0.559$\!\pm\!$0.022\! & \!$\bm{0.516}$$\!\pm\!$0.024\! & \!0.712$\!\pm\!$0.015\! & \!0.671\! \\
\!LSTM\! & \!0.589$\!\pm\!$0.026\! & \!0.689$\!\pm\!$0.012\! & \!0.717$\!\pm\!$0.013\! & \!0.569$\!\pm\!$0.023\! & \!0.503$\!\pm\!$0.035\! & \!0.718$\!\pm\!$0.015\! & \!0.664\! \\
\hdashline[1pt/1pt]
\!CTR-K\! & \!0.585$\!\pm\!$0.047\! & \!0.688$\!\pm\!$0.015\! & \!0.647$\!\pm\!$0.010\! & \!0.550$\!\pm\!$0.025\! & \!0.463$\!\pm\!$0.023\! & \!0.704$\!\pm\!$0.021\! & \!0.633\! \\
\!CTR-N\! & \!$\bm{0.612}$$\!\pm\!$0.026\! & \!$\bm{0.739}$$\!\pm\!$0.010\! & \!0.721$\!\pm\!$0.026\! & \!$\bm{0.608}$$\!\pm\!$0.020\! & \!0.481$\!\pm\!$0.013\! & \!$\bm{0.741}$$\!\pm\!$0.020\! & \!$\bm{0.687}$\! \\
\midrule
\!CTR+CNN\! &\! \textbf{\textit{0.614}}$\!\pm\!$0.025 \!&\! 0.682$\!\pm\!$0.012 \!&\! 0.728$\!\pm\!$0.009 \!&\! 0.579$\!\pm\!$0.014 \!&\! 0.478$\!\pm\!$0.026 \!&\! 0.730$\!\pm\!$0.012 \!&\! 0.666 \\
\!CTR+GRU\! & \!\textbf{\textit{0.612}}$\!\pm\!$0.024\! & \!0.705$\!\pm\!$0.011\! & \!\textbf{\textit{0.738}}$\!\pm\!$0.017\! & 0.592$\!\pm\!$0.020\! & \!\textbf{\textit{0.531}}$\!\pm\!$0.019\! & \!0.723$\!\pm\!$0.006\! & \!0.682\! \\
\!CTR+LSTM\! & \!0.583$\!\pm\!$0.036\! & \!0.708$\!\pm\!$0.007\! & \!\textbf{\textit{0.745}}$\!\pm\!$0.008\! & \!0.600$\!\pm\!$0.020\! & \!\textbf{\textit{0.534}}$\!\pm\!$0.022\! & \!0.734$\!\pm\!$0.011\! & \!\textbf{\textit{0.688}}\! \\
\bottomrule
\end{tabular}
\caption{Comparison of C-index on real EHR data (higher is better). Upper seven results show comparison among single models, and best results are in bold. Bottom three results are for combinations of CTR and other models, and results further improved from best results in single models are in bold italic. Confidence intervals are standard errors.}
\label{tabDiabetesResults2}
\end{table*}
\if0
array([ 33, 467, 487, 249, 141, 196])
\fi
\paragraph{Performance analysis on different observation periods with LSTM, GRU, and CNN.}
Over different observation periods, we analyzed the performance improvements of CTR and CTR+LSTM compared with LSTM and the same comparison with GRU and CNN.
We plotted the mean improvements in the C-index by CTR for data having different minimum observation periods, as shown in Figs.~\ref{FigExDiffPeriod2}--\ref{FigExDiffPeriod_CNN}, where the confidence intervals are standard errors of the improvements.
The right region in the figure shows the results for data with longer observation periods.
These results demonstrate that CTR could improve the performance, especially for data with relatively longer observation periods, where cumulative health conditions are more crucial for MET prediction.
\begin{figure}
\caption{Analysis of performance improvement by CTR compared with \emph{LSTM}.}
\label{FigExDiffPeriod2}
\end{figure}
\begin{figure}
\caption{Analysis of performance improvement by CTR compared with \emph{GRU}.}
\label{FigExDiffPeriod_GRU}
\end{figure}
\begin{figure}
\caption{Analysis of performance improvement by CTR compared with \emph{CNN}.}
\label{FigExDiffPeriod_CNN}
\end{figure}
\if0
\section{Possible Variant: Decaying $d$}
To incorporate local temporal dependency into CTR, we can make the stay time $d$ decay for weighting newer observations as
\begin{align}
d'^{\{\!m\!\}} \equiv \lambda^{t^{\{\!M\!\}}-t^{\{\!m\!\}}} d^{\{\!m\!\}},
\end{align}
where $\lambda$ is the decay rate. However, in the current implementation, we did not use the decaying $d'$ because it did not improve the performance in our experiments. We put finding applications where the variant based on this decaying $d'$ works well as interesting future work.
\fi
\section{Ethical/Societal Impact}
The common concern when learning from data that is collected through experiments conducted with human participants, including healthcare and medical applications, is producing estimation models biased towards or against specific groups in a population.
Recent works on fairness in machine learning~\cite{pedreshi2008discrimination,kilbertus2017avoiding,kusner2017counterfactual,nabi2018fair,lipton2018does,zhang2018equality,locatello2019fairness,singh2019policy,bera2019fair,ding2020differentially,yan2020fairness,narasimhan2020pairwise,rezaei2020fairness} are one example of help for this, and developing efficient ways of applying them to our approach would be an interesting and useful next step of our study.
Another risk of estimation that may affect human decisions would be false alerts/reports and overlooking important events.
We believe that the estimation should be carefully used as just one source of information, and it is better that actual decision-making based on this estimation is done from a broader perspective.
\end{document}
|
\begin{document}
\begin{titlepage}
\vskip 1.5 true cm
\begin{center}
{\LARGE\bfseries Geometric structures on Lie groups\\[.5em] with flat bi-invariant metric}
\vskip 1.0 true cm
{\large Vicente Cort\'es$^1$ and Lars Sch\"afer$^2$} \\[3pt]
{\normalsize Department Mathematik$^1$ \\
Universit\"at Hamburg \\ Bundesstra{\ss}e 55 \\ D-20146 Hamburg, Germany \\
[email protected]
Institut f\"ur Differentialgeometrie$^2$ \\ Leibniz Universit\"at Hannover \\
Welfengarten 1\\ D-30167 Hannover, Germany \\
[email protected]}\\[1em]
October 23, 2008
\end{center}
\vskip 1.0 true cm
\baselineskip=18pt
\begin{abstract}
\noindent
Let $L\subset V=\mathbb{R}^{k,l}$ be a maximally isotropic subspace.
It is shown that any simply connected Lie group with a bi-invariant flat
pseudo-Riemannian metric of signature $(k,l)$ is
2-step nilpotent and
is defined by an element $\eta \in \Lambda^3L\subset \Lambda^3V$.
If $\eta$ is of type $(3,0)+(0,3)$ with respect to a skew-symmetric
endomorphism $J$ with $J^2=\epsilon\, Id$, then the Lie group
$\mathcal{L}(\eta)$ is endowed with a left-invariant nearly K\"ahler
structure if $\epsilon =-1$ and with a left-invariant nearly para-K\"ahler
structure if $\epsilon =+1$. This construction
exhausts all complete simply connected
flat nearly (para-)K\"ahler manifolds. If $\eta \neq 0$ has rational
coefficients with respect to some basis, then $\mathcal{L}(\eta)$
admits a lattice $\Gamma$, and the quotient
$\Gamma\setminus \mathcal{L}(\eta)$ is a compact
inhomogeneous nearly (para-)K\"ahler manifold. The first non-trivial
example occurs in six dimensions. \\
\emph{MSC(2000):} 53C50, 53C15. \\
\emph{Keywords:} Flat Lie groups, bi-invariant metrics, nearly para-K\"ahler manifolds, flat almost (para-)Hermitian manifolds, almost (para-)complex structures.
\end{abstract}
\hrule width 3.0 cm
{\small \noindent This work was supported by the SFB 676 of the Deutsche Forschungsgemeinschaft. }
\end{titlepage}
\tableofcontents
\section*{Introduction}
A pseudo-Riemannian manifold $(M,g)$ endowed with a skew-symmetric
almost complex structure
$J$ is called \emph{nearly K\"ahler} if the Levi-Civita
covariant derivative $DJ$ is skew-symmetric, that is, $(D_XJ)X=0$ for all
$X\in TM$. Nearly K\"ahler manifolds with a positive definite metric
are by now well studied, see \cite{N} and
references therein. Replacing the equation $J^2=-Id$ by $J^2=Id$ one arrives
at the definition of a \emph{nearly para-K\"ahler} manifold, see
\cite{I}. This generalises the notion of a para-K\"ahler (or bi-Lagrangian)
manifold. Such manifolds occur naturally in super-symmetric field theories
over Riemannian rather than Lorentzian space-times, see \cite{CMMS}.
In \cite{I} Ivanov and
Zamkovoy ask for examples of
Ricci-flat nearly para-K\"ahler manifolds
in six dimensions with $DJ\neq 0$. In this
paper we will give a classification of flat nearly para-K\"ahler manifolds.
In particular, we will show that there exists a
compact six-dimensional such manifold with $DJ\neq 0$.
It is noteworthy that flat nearly para-K\"ahler manifolds $M$
also provide solutions of the
so-called tt*-equations, see \cite{S} and references therein.
As a consequence, they give rise to a (para-)pluriharmonic map from $M$ into
the pseudo-Riemannian symmetric space $SO_0(n,n)/GL(n)$.
Let $V$ be a pseudo-Euclidian vector space and $\eta\in \Lambda^3V$.
Contraction with $\eta$ defines a linear map $\Lambda^2V^* \rightarrow V$.
The image of that map is denoted by $\Sigma_\eta$ and is called
the \emph{support} of $\eta$. In the first section we will show that
any 3-vector $\eta\in \Lambda^3V$
with isotropic support defines a simply connected 2-step nilpotent
Lie group $\mathcal{L}(\eta)$ with a flat bi-invariant
pseudo-Riemannian metric $h$ of the same signature as $V$. We prove that
this exhausts all simply connected Lie groups with a flat bi-invariant metric,
see Theorem \ref{Thm2}. After completion of our article, Oliver Baues
has kindly informed us about the paper \cite{W}, which already
contains a version of that result.
It is shown that the groups $(\mathcal{L}(\eta),h)$ admit a
lattice $\Gamma \subset \mathcal{L}(\eta)$ if $\eta$ has rational coefficients
with respect to some basis and that the quotient $M(\eta, \Gamma) :=
\Gamma\setminus \mathcal{L}(\eta)$ is a \emph{flat compact homogeneous
pseudo-Riemannian manifold}, see Corollary \ref{Cor}.
Compact homogeneous flat pseudo-Riemannian manifolds
were recently classified in independent work by Baues, see \cite{B}.
It follows from this classification that the above examples
exhaust all compact homogeneous flat pseudo-Riemannian manifolds.
Assume now that $\dim V$ is even and that we fix $J\in \mathfrak{so}(V)$
such that $J^2= -Id$ or $J^2= Id$. We denote the corresponding
left-invariant endomorphism field on the
group $\mathcal{L}(\eta)$ again by $J$.
Assume that $\eta\in \Lambda^3V$ has isotropic support and satisfies,
in addition,
\[ \{ \eta_X,J\} := \eta_XJ + J\eta_X=0 \quad\mbox{for all}\quad X\in V,\]
or, equivalently, that $\eta$ has type $(3,0) + (0,3)$.
Then $(\mathcal{L}(\eta),h,J)$ is a flat nearly K\"ahler manifold
if $J^2=-Id$ and a flat nearly para-K\"ahler manifold if $J^2=Id$.
This follows from the results of \cite{CS} for the former case and is proven
in the second section of this paper for the latter case, see Theorem
\ref{1stThm}.
Moreover it is shown that any complete simply
connected flat nearly (para-)K\"ahler manifold is of this form, see
Corollary \ref{lastCor} and \cite{CS}.
To sum up, we have shown that any simply connected complete flat nearly
(para-)K\"ahler manifold is a Lie group $\mathcal{L}(\eta)$
with a left-invariant nearly
(para-)K\"ahler structure and bi-invariant metric.
Conversely, it follows from unpublished work of Paul-Andi Nagy and
the first author that a Lie group with a left-invariant nearly
(para-)K\"ahler structure and bi-invariant metric is necessarily
flat and is therefore covered by one of our groups $\mathcal{L}(\eta)$.
The proof of this statement uses the unicity of the connection
with totally skew-symmetric torsion preserving the nearly
(para-)K\"ahler structure and the Jacobi identity.
Suppose now that $\Gamma \subset \mathcal{L}(\eta)$ is a lattice. Then
the almost (para-)complex structure $J$ on the group $\mathcal{L}(\eta)$
induces an almost (para-)\linebreak[3] complex structure $J$
on the compact manifold
$M=M(\eta,\Gamma )= \Gamma \setminus \mathcal{L}(\eta)$. Therefore $(M,h,J)$
is a compact nearly (para-)K\"ahler manifold. However, the
(para-)complex structure is not $\mathcal{L}(\eta)$-invariant, unless
$\eta=0$. Moreover, for $\eta\neq 0$, $(M,h,J)$ is an inhomogeneous
nearly (para-)\linebreak[3] K\"ahler manifold, that is, it
does not admit any transitive group of automorphisms of the nearly
(para-)K\"ahler structure.
Since $J$ is not right-invariant, this follows from the fact that $\mathrm{Isom}_0(M)$
is obtained from the
action of $\mathcal{L}(\eta)$
by right-multiplication on $M$,
see Corollary \ref{Cor}. The first such non-trivial flat compact nearly
para-K\"ahler nilmanifold $M(\eta)= \Gamma \setminus \mathcal{L}(\eta)$ is
six-dimensional and
is obtained from a non-zero element $\eta \in \Lambda^3V^+\cong \mathbb{R}$, where
$V^+\subset V=\mathbb{R}^{3,3}$ is the $+1$-eigenspace of $J$.
\section{A class of flat pseudo-Riemannian Lie groups} \label{simplytransSec}
Let $V=(\mathbb{R}^n,\langle \cdot , \cdot \rangle )$ be the standard
pseudo-Euclidian vector space of signature $(k,l)$, $n=k+l$.
Using the (pseudo-Euclidian) scalar product we shall identify
$V\cong V^*$ and $\Lambda^2V\cong \mathfrak{so}(V)$. These
identifications provide the inclusion
$\Lambda^3V \subset V^*\otimes \mathfrak{so}(V)$. Using it
we consider a three-vector $\eta\in \Lambda^3V$ as an
$\mathfrak{so}(V)$-valued one-form. Further we denote by
$\eta_X\in \mathfrak{so}(V)$ the evaluation of this one-form on a vector
$X\in V$.
\noindent
The \emph{support} of $\eta \in \Lambda^3V$ is defined by
\begin{equation} \Sigma_{\eta}:= \mathrm{span}\{\eta_XY \,|\, X,Y \in V\} \subset V.\end{equation}
\begin{Th}\ \ \label{2stepnp_Thm}
Each
\[\eta \in \mathcal{C}(V):= \{\eta \in \Lambda^3V \,|\, \Sigma_\eta \mbox{
(totally) isotropic} \}=\underset{L\subset V}{\bigcup} \Lambda^3 L\]
defines a 2-step nilpotent simply transitive subgroup
$\mathcal{L}(\eta) \subset \mathrm{Isom}(V),$ where the union runs over
all maximal isotropic subspaces.
The subgroups $\mathcal{L}(\eta)$, $\mathcal{L}(\eta')
\subset \mathrm{Isom}(V)$
associated to $\eta, \eta' \in \mathcal{C}(V)$
are conjugated if and only if $\eta' = g\cdot \eta$ for some element
$g \in O(V).$
\end{Th}
\pf
It is easy to see that any three-vector $\eta \in \Lambda^3V$ satisfies
$\eta \in \Lambda^3\Sigma_\eta$, cf.\ \cite{CS} Lemma 7.
This implies the equation
$\mathcal{C}(V)=\underset{L\subset V}{\bigcup} \Lambda^3 L$.
Let an element $\eta\in \mathcal{C}(V)$ be given.
One can easily show that $\Sigma_\eta$ is isotropic if and only if
the endomorphisms $\eta_X \in\mathfrak{so}(V)$
satisfy $\eta_X \circ \eta_Y =0$ for all $X, Y\in V$, cf.\ \cite{CS}
Lemma 6. The 2-step nilpotent group
\[\mathcal{L}(\eta) := \left. \left\{g_X:=\exp \left(
\begin{array}{cc}
\eta_X & X \\
0 &0
\end{array}
\right)=\left(
\begin{array}{cc}
\mathrm{id} + \eta_{X} & X \\
0 &1
\end{array}
\right) \,\right| \, X \in V \right\} \]
acts simply transitively on $V\cong V\times \{ 1\} \subset V\times \mathbb{R}$
by isometries:
\[ \left(
\begin{array}{cc}
\mathrm{id} + \eta_{X} & X \\
0 &1
\end{array}
\right)
\left(
\begin{array}{c} 0 \\ 1\end{array}\right) = \left(
\begin{array}{c} X \\ 1\end{array}\right).\]
Let us check that $\mathcal{L}(\eta)$ is a group: Using $\eta_X \circ
\eta_Y=0$ we obtain
\begin{eqnarray*}
g_X \cdot g_Y&=&
\left( \begin{array}{cc}
\mathrm{id} + \eta_{X} & X \\
0 &1
\end{array}
\right)
\left(\begin{array}{cc}
\mathrm{id} + \eta_{Y} & Y \\
0 &1
\end{array}
\right)= \left(\begin{array}{cc}
\mathrm{id} + \eta_{X}+\eta_Y+ \eta_X\eta_Y & X+Y+\eta_XY \\
0 &1
\end{array}
\right)\\
&=&
\left(\begin{array}{cc}
\mathrm{id} + \eta_{X+Y} & X+Y+\eta_XY \\
0 &1
\end{array}
\right) = g_{X+Y+\eta_XY}.
\end{eqnarray*}
In the last step we used $\eta_{\eta_XY}=0,$ which follows from
$\langle\eta_{\eta_XY}Z,W \rangle= \langle\eta_ZW, \eta_XY\rangle$ for all
$X,Y,Z,W \in V.$ Next we consider $\eta,\eta' \in \mathcal{C}(V)$, $g\in O(V)$.
The computation
\[g\mathcal{L}(\eta)g^{-1}=\left\{ \left. \left(
\begin{array}{cc}
\mathrm{id} + g\eta_{X}g^{-1} & gX \\
0 &1
\end{array}
\right) \,\right| \, X \in V \right\} = \left\{ \left. \left(
\begin{array}{cc}
\mathrm{id} + g\eta_{g^{-1}Y}g^{-1} & Y \\
0 &1
\end{array}
\right) \,\right| \, Y \in V \right\}
\]
shows that $g\mathcal{L}(\eta)g^{-1}= \mathcal{L}(\eta')$ if and only if
$\eta'_X = (g \cdot \eta)_X=g \, \eta_{g^{-1}X} \, g^{-1}$ for all $X\in V$.
\qed
\noindent
Let $\mathcal{L} \subset \mathrm{Isom}(V)$ be a simply transitive group. Pulling back
the scalar product on $V$ by the orbit map
\begin{equation} \mathcal{L}\ni g\mapsto
g0\in V \label{def_obitmap} \end{equation} yields a left-invariant flat pseudo-Riemannian metric $h$
on $\mathcal{L}$. A pair $(\mathcal{L},h)$ consisting of a Lie group $\mathcal{L}$
and a flat left-invariant
pseudo-Riemannian metric $h$ on $\mathcal{L}$ is called
a \emph{flat pseudo-Riemannian Lie group}.
\begin{Th}\ \ \label{Thm2} \begin{itemize}
\item[(i)]
The class of flat pseudo-Riemannian Lie groups $(\mathcal{L}(\eta),h)$
defined in Theorem \ref{2stepnp_Thm} exhausts all simply connected flat
pseudo-Riemannian Lie groups with bi-invariant metric.
\item[(ii)] A Lie group with
bi-invariant metric is flat if and only if it is 2-step nilpotent.
\end{itemize}
\end{Th}
\pf (i) The group $\mathcal{L}(\eta)$ associated to
a three-vector $\eta \in \mathcal{C}(V)$ is diffeomorphic to $\mathbb{R}^n$ by
the exponential map. We have to show that the flat pseudo-Riemannian
metric $h$ on $\mathcal{L}(\eta)$ is bi-invariant. The Lie algebra
of $\mathcal{L}(\eta)$ is identified with the vector space $V$ endowed
with the Lie bracket
\begin{equation} \label{LiebracketEqu} [X,Y] := \eta_XY-\eta_YX= 2\eta_XY,\quad X,Y\in V.\end{equation}
The left-invariant metric $h$ on $\mathcal{L}(\eta)$ corresponds to the
scalar product $\langle \cdot ,\cdot \rangle$ on $V$.
Since $\eta\in \Lambda^3V$, the endomorphisms $\eta_X=\frac{1}{2}\mathrm{ad}_X$
are skew-symmetric. This shows that $h$ is bi-invariant.
Conversely, let $(V,[\cdot , \cdot ])$
be the Lie algebra of a pseudo-Riemannian Lie group of dimension $n$
with bi-invariant metric $h$. We can assume that the bi-invariant metric
corresponds to the standard scalar product $\langle \cdot ,\cdot \rangle$
of signature $(k,l)$ on $V$. Let us denote by $\eta_X \in \mathfrak{so}(V)$,
$X\in V$, the skew-symmetric endomorphism of $V$ which corresponds to the
Levi-Civita covariant derivative $D_X$ acting on left-invariant vector
fields. {}From the bi-invariance and the Koszul formula we obtain
that $\eta_X=\frac{1}{2}\mathrm{ad}_X$ and, hence, $R(X,Y)= -\frac{1}{4}\mathrm{ad}_{[X,Y]}$
for the curvature. The last formula shows that $h$ is flat if and only if
the Lie group is 2-step nilpotent. This proves (ii). To finish the
proof of (i) we have to show that, under this assumption,
$\eta$ is completely skew-symmetric
and has isotropic support. The complete skew-symmetry follows from
$\eta_X=\frac{1}{2}\mathrm{ad}_X$ and the bi-invariance. Similarly, using the
bi-invariance, we have
\[4\langle \eta_XY,\eta_ZW\rangle =\langle [X,Y],[Z,W]\rangle = -\langle Y,[X,[Z,W]]\rangle =0 ,\]
since the Lie algebra is 2-step nilpotent. This shows that
$\Sigma_\eta$ is isotropic.
\qed
\begin{Cor}\ \ With the above notations, let
$L\subset V$ be a maximally isotropic subspace.
The correspondence $\eta \mapsto \mathcal{L}(\eta)$ defines a
bijection between the points of the orbit space $\Lambda^3L/GL(L)$ and
isomorphism classes of pairs $(\mathcal{L}, h)$ consisting of a
simply connected Lie group
$\mathcal{L}$ endowed with a flat
bi-invariant pseudo-Riemannian metric $h$ of signature $(k,l)$.
\end{Cor}
\begin{Cor}\ \ Any simply connected Lie group $\mathcal{L}$ with a flat bi-invariant metric
$h$ of signature
$(k,l)$ contains a normal subgroup of dimension $\geq \max (k,l)\geq
\frac{1}{2}\dim V$ which acts by translations on the pseudo-Riemannian
manifold $(\mathcal{L},h)\cong \mathbb{R}^{k,l}$.
\end{Cor}
\pf
Let $\mathfrak{a}:= \ker (X\mapsto \eta_X) \subset V$ be the kernel of
$\eta$. Then $\mathfrak{a} = \Sigma_\eta^\perp$ is co-isotropic and defines
an Abelian ideal $\mathfrak{a}\subset \mathfrak{l}:=\mathrm{Lie}\, \mathcal{L}\cong V
\cong \mathbb{R}^{k,l}$. The corresponding normal subgroup
$A\subset \mathcal{L}=\mathcal{L}(\eta)$
is precisely the subgroup of translations. So we have shown that
$\dim A \geq \max (k,l)\geq \frac{1}{2}\dim V$.
\qed
\noindent
\textbf{Remarks} 1) The number $\dim \Sigma_\eta$ is an isomorphism invariant of
the groups $\mathcal{L}=\mathcal{L}(\eta)$, which is independent of the metric.
We will denote it by $s(\mathcal{L})$.
Let
$L_3\subset L_4 \subset \cdots \subset L$ be a filtration, where
$\dim L_j = j$ runs from $3$ to $\dim L$.
The invariant $\dim \Sigma_\eta$
defines a decomposition of $\Lambda^3L/GL(L)$ as a union
\[\{0 \}\cup \bigcup_{j=3}^{\dim L}\Lambda^3_{reg}L_j/GL(L_j),\]
where $\Lambda^3_{reg}\mathbb{R}^j \subset \Lambda^3\mathbb{R}^j$ is the open subset
of 3-vectors with $j$-dimensional support. The points of the stratum
$\Lambda^3_{reg}L_j/GL(L_j)\cong \Lambda^3_{reg}\mathbb{R}^j/GL(j)$ correspond
to isomorphism classes of pairs $(\mathcal{L},h)$ with $s(\mathcal{L})=j$.\\
2) Since in the above classification $\Sigma_\eta$ is isotropic,
it is clear that a flat (or 2-step nilpotent) bi-invariant metric on a
Lie group is indefinite, unless $\eta=0$ and the group is Abelian.
It follows from Milnor's classification of Lie
groups with a flat left-invariant Riemannian metric \cite{Mi} that
a 2-step nilpotent Lie group with a flat left-invariant Riemannian metric
is necessarily Abelian.\\
Since a nilpotent Lie group with rational structure constants has a
(co-compact) lattice \cite{Ma}, we obtain:
\begin{Cor}\ \ \label{Cor} The groups $(\mathcal{L}(\eta),h)$ admit
lattices $\Gamma \subset \mathcal{L}(\eta)$,
provided that $\eta$ has rational coefficients with
respect to some basis.
$M=M(\eta, \Gamma ):= \Gamma \setminus \mathcal{L}(\eta)$ is a
\emph{flat compact homogeneous
pseudo-Riemannian manifold}. The connected
component of the identity in the isometry group of $M$
is the image of the natural group homomorphism $\pi$ from
$\mathcal{L}(\eta)$ into the isometry group of
$M$.
\end{Cor}
\pf
First we remark that the bi-invariant metric $h$ induces an
$\mathcal{L}(\eta)$-invariant metric on the homogeneous space
$M= \Gamma \setminus \mathcal{L}(\eta)$. We shall identify
the group $\Gamma$ with a subgroup of the isometry group of
$\widetilde{M}:=(\mathcal{L}(\eta),h)$ using the action of $\Gamma$ on
$\mathcal{L}(\eta)$ by left-multiplication.
Let $G$ be the connected component of the identity in the isometry group of
$\widetilde{M}$. It is clear that any element of $G$ which commutes
with the action of $\Gamma$ induces an isometry of $M$. Therefore
we have a natural homomorphism $Z_G(\Gamma ) \rightarrow \mathrm{Isom}(M)$ from
the centraliser $Z_G(\Gamma )$ of $\Gamma$ in $G$ into $\mathrm{Isom}(M)$.
In particular, the connected group $Z_G(\Gamma )_0$ is mapped into
$\mathrm{Isom}_0(M)$.
Conversely, the action of $\mathrm{Isom}_0(M)$ on $M$ can be lifted
to the action of a connected Lie
subgroup $H\subset G$ on $\widetilde{M}$,
which maps cosets of $\Gamma$ to cosets of $\Gamma$. The latter property
implies that $H$ normalises the subgroup $\Gamma \subset
\mathrm{Isom}(\widetilde{M})$.
Since $\Gamma$ is discrete and $G$ is connected, we can conclude that
$H$ is a subgroup of the centraliser $Z_G(\Gamma )$ of $\Gamma$ in $G$.
As $H$ is connected, we obtain $H\subset Z_G(\Gamma )_0$.
By the previous argument, we have also the opposite inclusion
$Z_G(\Gamma )_0 \subset H$ and, hence,
$H=Z_G(\Gamma )_0$.
Now the statement about the isometry group of $M$ follows from the fact that
the centraliser in $G$
of the left-action of $\Gamma \subset
\mathcal{L}(\eta)$ on $\mathcal{L}(\eta)$
is precisely the right-action of $\mathcal{L}(\eta)$ on $\mathcal{L}(\eta)$,
since $\Gamma \subset \mathcal{L}(\eta)$ is Zariski-dense, see \cite{R}
Theorem 2.1. In fact, this shows that $H$ coincides with the
group $\mathcal{L}(\eta)$
acting by right-multiplication on $\widetilde{M}=\mathcal{L}(\eta)$
and that $\mathrm{Isom}_0(M)$ coincides with $\mathcal{L}(\eta)$
acting by right-multiplication on $M=\Gamma \setminus \mathcal{L}(\eta)$.
\qed
\begin{Ex}
We consider $V=(\mathbb{R}^{3,3}, \langle \cdot , \cdot \rangle)$ and a basis
$(e_1,e_2,e_3,f_1,f_2,f_3)$
such that $\langle e_i , f_j \rangle = \delta_{ij}$ and $\langle e_i , e_j \rangle = \langle f_i , f_j \rangle = 0.$
Then the three-vector $\eta := f_1\wedge f_2\wedge f_3 \in \wedge^3V$ has
isotropic support $\Sigma_{\eta}=\mbox{span}\{f_1,f_2,f_3\}$.
The non-vanishing components of the Lie bracket defined
by \eqref{LiebracketEqu} are
$$[e_1,e_2]=2 f_3,\quad [e_2,e_3]=2f_1,\quad [e_3,e_1]=2 f_2.$$
We have seen above that the bi-invariant metric $h$ was obtained by
pulling back the scalar product
$\langle \cdot , \cdot \rangle$ by the orbit map
\eqref{def_obitmap} which identifies $\mathcal{L}(\eta)$ with $V$
via $\mathcal{L}(\eta) \ni g_X \mapsto g_X 0 = X \in V.$ The inverse map is
$V \ni X \mapsto g_X \in \mathcal{L}(\eta).$ This identifies
the pseudo-Riemannian
manifolds $(\mathcal{L}(\eta),h)$ and $(V, \langle \cdot , \cdot \rangle).$
In consequence the isometry group
of $\mathcal{L}(\eta)$ is isomorphic to the full affine pseudo-orthogonal
group operating by $g_X \mapsto g_{AX+v}$ with $A \in O(V)$ and $v \in V.$
Next we consider the lattice
\[ \Gamma := \{ g_Y \mid Y\in \mathbb{Z}^6\}, \]
where $\mathbb{Z}^6 \subset V$ is the lattice of integral vectors with
respect to the basis $(e_1,e_2,e_3,f_1,f_2,f_3)$.
An element
$g_Y \in \Gamma$ operates from the left on $\mathcal{L}(\eta)\cong V$ as
$$ X \mapsto (\mathrm{id} + \eta_{Y})X + Y. $$
Let us determine the centraliser of this $\Gamma$-action in the
isometry group of $\mathcal{L}(\eta).$ A short calculation shows that
an affine isometry $(A,v)$ with linear part
$A \in O(V)$ and translational part $v\in V$ belongs to the centraliser
of $\Gamma$ if and only if
\[ [\eta_Y,A]X+\eta_Y v - AY + Y = 0\]
for all $X\in V$, $Y\in \mathbb{Z}^6$. For $X=0$ we get
$AY=\eta_Y v+Y=(\mathrm{id} - \eta_v)Y$ and, hence, $A=\mathrm{id} - \eta_v$. This
shows that the affine transformation $(A,v)$ corresponds to
the right action of the element $g_v$, which obviously belongs to the
centraliser. Therefore, in this example,
we have proven by direct calculation that the
centraliser in the isometry group of $\mathcal{L}(\eta)$
of $\Gamma$ acting by left-multiplication on $\mathcal{L}(\eta)$
is precisely the group $\mathcal{L}(\eta)$ acting by right-multiplication.
This fact was proven for arbitrary groups $\mathcal{L}(\eta)$
and lattices $\Gamma$ in the proof of Corollary \ref{Cor}.
\end{Ex}
\section{Flat nearly para-K\"ahler manifolds}
In this section we give a constructive classification of
flat nearly para-K\"ahler manifolds and show that such manifolds
provide a class of examples for the flat Lie groups discussed in section
\ref{simplytransSec}. The structure of the section is as follows. In the
first subsection we give a short introduction to para-complex geometry. For
more information the reader is referred to \cite{CMMS}. The second part
discusses nearly para-K\"ahler manifolds and derives some consequences of
the flatness. In the third subsection we give a local classification which
relates a flat nearly para-K\"ahler manifold to an element of a certain subset
$\mathcal{C}_\tau(V)$ of the cone $\mathcal{C}(V)\subset \wedge^3V$ defined
in Theorem \ref{2stepnp_Thm}. The structure of $\mathcal{C}_\tau(V)$ is studied
in the last subsection and global classification results are derived.
\subsection{Para-complex geometry}
The idea of para-complex geometry is to replace the complex structure $J$
satisfying $J^2=-Id$ on a finite-dimensional vector space $V$ by a \emph{para-complex structure} $\tau$ satisfying $\tau^2 = Id$
and to require that the two eigenspaces of $\tau,$ i.e. $V^{\pm}:= \ker(Id \mp \tau),$ have the same dimension. A
\emph{para-complex vector space} $(V,\tau)$ is a vector space endowed with a para-complex structure.
Para-complex, para-Hermitian and para-K\"ahler geometry was first studied in \cite{L}. We
invite the reader to consult \cite{CFG} or the more recent article \cite{AMT}
for a survey on this subject.
\begin{Def}\ \
An \emph{almost para-complex structure} $\tau$ on a smooth manifold $M$ is an
endomorphism field $\tau \in \Gamma(\mbox{End}(TM)),\ p \mapsto \tau_p,$ such that
$\tau_p$ is a para-complex structure on $T_pM$ for all points $p\in M.$ A manifold endowed
with an almost para-complex structure is called an \emph{almost para-complex manifold}. \\
An almost para-complex structure is called \emph{integrable}
if its eigendistributions
$T^{\pm}M:= \ker(Id \mp \tau)$ are both integrable. A manifold endowed
with an integrable almost para-complex structure is called a \emph{para-complex manifold}.
\end{Def}
We remark that the obstruction to integrability (cf.\ Proposition 1 of \cite{CMMS}) of an almost para-complex structure is the \emph{Nijenhuis tensor} of $\tau$, which is the tensor field defined by
$$ N_\tau (X,Y):= [X,Y] +[\tau X,\tau Y]- \tau[X,\tau Y] - \tau[\tau X,Y],$$
for all vector fields $X, Y$ on $M$.
\begin{Def}\ \
Let $(V,\tau)$ be a para-complex vector space. A \emph{para-Hermitian scalar
product} $g$
on $(V,\tau)$ is a pseudo-Euclidian scalar product, such that
$\tau^*g(\cdot,\cdot) = g(\tau\cdot,\tau\cdot) = -g(\cdot,\cdot).$\\
A \emph{para-Hermitian vector space} is a para-complex vector space endowed with a
para-Hermitian scalar product. The pair $(\tau,g)$ is called a
\emph{para-Hermitian structure} on the vector space $V.$
\end{Def}
The next two examples give two frequently used models of para-Hermitian structures:
\begin{Ex}
Let us consider the vector space $\mathbb{R}^{2n}=\mathbb{R}^{n}\oplus\mathbb{R}^{n}$
and denote by $e^+_i=e_i\oplus 0$ and $e^-_i=0\oplus e_i$ its standard basis.
Its standard para-complex structure is given by $\tau e^\pm_i=\pm e^\pm_i.$ A para-Hermitian
scalar product $g$ is given by $g(e^\pm_i,e^\pm_j)=0$ and $g(e^\pm_i,e^\mp_j)=\delta_{ij}.$
We call the pair $(\tau,g)$ the \emph{standard para-Hermitian structure} of $\mathbb{R}^{2n}.$
\end{Ex}
\begin{Ex}
We denote by $C=\mathbb{R}[e]\cong \mathbb{R} \oplus \mathbb{R}$, $e^2=1$, the ring of
para-complex numbers.
Consider the real vector space $C^n=\mathbb{R}^n\oplus \, e\mathbb{R}^n$ with
standard basis given by
$(e_1,\ldots,e_n,f_1,\ldots,f_n),$ where $f_i=ee_i$, and its standard para-complex structure which
is defined by $\tau e_i = f_i$ and $\tau f_i = e_i.$ Then we can define a para-Hermitian scalar
product by $g(e_i,e_j)=-g(f_i,f_j)=\delta_{ij}$ and $g(e_i,f_j)=0.$ We
call this pair $(\tau,g)$ the \emph{standard para-Hermitian structure} of $C^{n}.$
\end{Ex}
The decomposition of the cotangent bundle $T^*M= (T^*M)^+ \oplus (T^*M)^-$
with respect to the dual para-complex structure induces a bi-grading on the bundle of
exterior forms $\Lambda^k T^*M = \oplus_{k=p+q}\, \Lambda^{p,q}T^*M.$
An element of $\Lambda^{p,q}T^*M$ will be called of \emph{type} $(p,q).$ The corresponding
decomposition on differential forms is denoted by $\Omega^k(M)= \oplus_{k=p+q} \,\Omega^{p,q}(M).$
\begin{Def}\ \
An \emph{almost para-Hermitian manifold} $(M,\tau ,g)$
is an almost para-complex
manifold $(M,\tau)$ which is endowed with a pseudo-Riemannian metric $g$
which is \emph{para-Hermitian}, i.e.\ it satisfies $\tau^*g(\cdot,\cdot) = g(\tau\cdot,\tau\cdot)=-g(\cdot,\cdot).$
\end{Def}
Note that the condition on the metric to be para-Hermitian forces it to have split signature $(n,n).$
\subsection{Basic facts and results about nearly para-K\"ahler manifolds}
The notion of a \emph{nearly para-K\"ahler manifold} was recently introduced by Ivanov and Zamkovoy \cite{I}.
\begin{Def}\ \
An almost para-Hermitian manifold $(M,\tau,g)$ is called a \emph{nearly
para-K\"ahler} manifold, if its Levi-Civita connection $D$ satisfies the
equation
\begin{equation} (D_X\tau)Y = -(D_Y\tau)X, \quad \forall X,Y \in
\Gamma(TM). \label{NK_def} \end{equation}
A nearly para-K\"ahler manifold is called \emph{strict}, if $D\tau \ne 0.$
\end{Def}
Like for a nearly K\"ahler manifold there exists a canonical para-Hermitian
connection with totally skew-symmetric torsion.
\begin{Prop}\ \ [Prop. 5.1 in \cite{I}] \label{Can_con} Let $(M,\tau,g)$ be a nearly para-K\"ahler manifold.
Then there exists a unique connection $\nabla$ with totally skew-symmetric
torsion $T^{\nabla}$ (i.e. $g(T^{\nabla}(\cdot,\cdot),\cdot)$ is a three-form) satisfying $\nabla g=0$ and $\nabla \tau =0.$ \end{Prop}
\noindent
More precisely, this connection is given by
\begin{equation} \nabla_XY = D_XY - \eta_XY\mbox{ with } \eta_XY = -\frac{1}{2} \tau (D_X \tau)Y
\mbox{ and } X,Y \in \Gamma(TM) \end{equation}
and in consequence the torsion is
\begin{equation} T^{\nabla} = -2 \eta \end{equation}
and one has $\{\eta_X,\tau\} =0$ for all vector fields $X.$
In the same reference \cite{I}, Theorem 5.3, it is shown that, as in the nearly
K\"ahler case, the torsion of $\nabla$ is parallel, i.e.
\begin{equation} \label{tor_n_para} \nabla \eta=0 \mbox{ and } \nabla (D\tau)=0. \end{equation}
\begin{Prop}\ \ \label{eta_D_nabla_prop}
Let $(M,g,\tau)$ be a flat nearly para-K\"ahler manifold, then
\begin{enumerate}
\item[1)] $\eta_X \circ \eta_Y =0$ for all $X,Y,$
\item[2)] $D\eta =\nabla \eta =0.$
\end{enumerate}
\end{Prop}
\pf
On a nearly para-K\"ahler manifold one has the identity $$R^D(X,Y,Z,W)
+R^D(X,Y,\tau Z,\tau W) = g((D_X\tau)Y,(D_Z\tau)W),$$ cf. \cite{I}, Proposition
5.2. For a flat nearly para-K\"ahler manifold it follows
\begin{eqnarray}
g((D_X\tau)Y,(D_Z\tau)W)=0 \quad \forall X,Y,Z,W.
\end{eqnarray}
\noindent
With this identity and $D\tau \circ \tau = -\tau \circ D \tau$ we obtain
\begin{eqnarray*}
0=g((D_X\tau )Y,(D_Z\tau )W)&=&-g((D_Z\tau )(D_X\tau )Y,W)
= 4g(\eta_Z\circ \eta_X Y,W).
\end{eqnarray*}
This shows $\eta_X \circ \eta_Y =0$ for all $X,Y$ and finishes the proof of
part 1.). \\
2.) With two vector fields $X,Y$ we calculate
\begin{eqnarray*}
(D_X\eta)_Y &=& D_X(\eta_Y)- \eta_{D_XY} \overset{D= \nabla +\eta}{=}\nabla_X(\eta_Y) + [\eta_X ,\eta_Y] - \eta_{D_XY}\\
&=& (\nabla_X \eta)_Y + \eta_{[\nabla_XY -D_XY]} + [\eta_X,\eta_Y]= (\nabla_X \eta)_Y - \eta_{\eta_XY} + [\eta_X,\eta_Y]\\
&\overset{\eqref{NK_def}}{=}& (\nabla_X \eta)_Y + \eta_{\cdot} \eta_XY + [\eta_X,\eta_Y] \overset{1.)}{=} (\nabla_X \eta)_Y\overset{\eqref{tor_n_para}}{=}0.
\end{eqnarray*}
This is part 2).
\qed
\subsection{Local classification of flat nearly para-K\"ahler manifolds}
We consider $(C^n,\tau_{can})$ endowed with the standard $\tau_{can}$-anti-invariant pseudo-Euclidian scalar product $g_{can}$ of signature $(n,n).$
Let $(M,g,\tau)$ be a flat nearly para-K\"ahler manifold. Then there exists for each point $p \in M$ an open set
$U_p \subset M$ containing the point $p,$ a connected open set $U_0$ of $C^n$ containing the origin $0 \in C^n$ and an isometry
$\Phi\,:\, (U_p,g) \tilde{\rightarrow} (U_0,g_{can}),$ such that in $p \in M$ we have $\Phi_* \tau_p = \tau_{can} \Phi_*.$
In other words, we can suppose, that locally $M$ is a connected open subset
of $C^n$ containing the origin $0$ and that $g=g_{can}$ and $\tau_0=\tau_{can}.$
Summarizing Propositions \ref{Can_con} and \ref{eta_D_nabla_prop} we obtain the
next corollary.
\begin{Cor}\ \ \label{class_cor}
Let $M \subset C^n$ be an open neighborhood of the origin endowed with a nearly para-K\"ahler structure $(g,\tau)$ such
that $g=g_{can}$ and $\tau_0=\tau_{can}$. The $(1,2)$-tensor $$ \eta := -\frac{1}{2} \tau D\tau $$ defines a constant three-form on $M \subset C^n =\mathbb{R}^{n,n}$ given
by $\eta(X,Y,Z)= g(\eta_XY,Z)$ and satisfying
\begin{enumerate}
\item[(i)] $\eta \in \mathcal{C}(V),$ i.e. $\eta_X \, \eta_Y =0,\quad \forall X,Y,$
\item[(ii)] $\{\eta_X,\tau_{can}\}=0, \quad \forall X.$
\end{enumerate}
\end{Cor}
The rest of this subsection is devoted to the local classification
result. In subsection \ref{glob_class} we study the
structure of the subset of $\mathcal{C}(V)$ given by the condition (ii) in more
detail and give global classification results. The converse statement of Corollary \ref{class_cor} is given in the next lemma.
\begin{Lem}\ \ \label{Lemma_J_from_eta}
Let $\eta$ be a constant three-form on an open connected neighborhood $M \subset C^n$ of $0$ satisfying (i) and (ii) of Corollary \ref{class_cor}. Then there
exists a unique para-complex structure $\tau$ on $M$ such that
\begin{enumerate}
\item[a)] $\tau_0=\tau_{can},$
\item[b)] $\{\eta_X,\tau\}=0,\quad \forall X,$
\item[c)] $D\tau =-2\tau \eta,$
\end{enumerate}
where $D$ is the Levi-Civita connection of the pseudo-Euclidian vector space $C^n.$ \\
Let $\nabla:=D-\eta$ and assume b); then c) is equivalent to
\begin{enumerate}
\item[c)'] $\nabla \tau =0.$
\end{enumerate}
Furthermore, this para-complex structure $\tau$ is skew-symmetric with respect to $g_{can}.$
\end{Lem}
\pf
One proves the equivalence of c) and c)' by an easy computation. \\
Let us show the uniqueness: Given two almost para-complex structures
satisfying a)-c) we deduce $(\tau -\tau')_0=0$ and $\nabla \tau =\nabla \tau' =0.$ This shows $\tau \equiv \tau'.$ To show the existence we define
\begin{eqnarray}
\tau &=& \exp\left( 2 \sum_{i=1}^{2n} x^i\, \eta_{\partial_i}\right) \tau_{can} \overset{(i)}{=} \left( Id + 2 \sum_{i=1}^{2n} x^i \, \eta_{\partial_i}\right)\tau_{can}, \label{def_I}
\end{eqnarray}
where $x^i$ are linear coordinates of $C^n =\mathbb{R}^{n,n} =\mathbb{R}^{2n}$ and
$\partial_i =\frac{\partial}{\partial x^i}.$ \\
{\bf Claim:} $\tau$ defines a para-complex structure which satisfies a)-c). \\
a) From $x^i(0)= 0$ we obtain $\tau_0 =\tau_{can}.$\\
b) Follows from the definition of $\tau$ (cf. equation \eqref{def_I}) and the properties (i) and (ii). \\
c) One computes
\begin{eqnarray*}
D_{\partial_j} \tau &=& 2 \exp \left( 2 \sum_{i=1}^{2n} x^i \, \eta_{\partial_i} \right) \eta_{\partial_j} \tau_{can}\overset{(ii)}{=} -2 \underbrace{\exp \left( 2 \sum_{i=1}^{2n} x^i \, \eta_{\partial_i} \right)\, \tau_{can}}_\tau\, \eta_{\partial_j}=-2 \tau\, \eta_{\partial_j}.
\end{eqnarray*}
It holds $\tau= \tau_{can} + \left( 2 \sum_{i=1}^{2n} x^i\, \eta_{\partial_i}\right) \tau_{can},$
where $\{\eta_{\partial_i},\tau_{can}\} =0$ and $\eta_{\partial_i}$ is $g$-skew-symmetric. This implies that $\tau$ is $g$-skew-symmetric. It remains to prove $\tau^2=Id$:
\begin{eqnarray*}
\tau^2 &=& \left( Id + 2 \sum_{i=1}^{2n} x^i \, \eta_{\partial_i}\right)\tau_{can} \left( Id + 2 \sum_{i=1}^{2n} x^i \, \eta_{\partial_i}\right)\tau_{can}\\&=& \left( Id + 2 \sum_{i=1}^{2n} x^i \, \eta_{\partial_i}\right) \left( Id - 2 \sum_{i=1}^{2n} x^i \, \eta_{\partial_i}\right) = \left[ Id -4 \left(\sum_{i=1}^{2n} x^i \, \eta_{\partial_i}\right)^2\right] \overset{(i)}{=} Id.
\end{eqnarray*}
This finishes the proof of the lemma.
\qed
\begin{Th}\ \ \label{1stThm}
Let $\eta$ be a constant three-form on a connected open set $U \subset C^n$ containing the origin $0$
which satisfies (i) and (ii) of Corollary \ref{class_cor}. Then there exists a unique almost para-complex structure
\begin{equation}
\tau= \exp\left( 2 \sum_{i=1}^{2n} x^i\, \eta_{\partial_i}\right) \tau_{can}
\end{equation}
on $U$ such that a) $\tau_0 =\tau_{can},$ and b) $M(U,\eta):= (U,g=g_{can},\tau)$ is
a flat nearly para-K\"ahler manifold. Any flat nearly para-K\"ahler manifold is locally isomorphic to a flat nearly
para-K\"ahler manifold of the form $M(U,\eta).$
\end{Th}
\pf
$(M,g)$ is a flat pseudo-Riemannian manifold. Due to Lemma \ref{Lemma_J_from_eta}, $\tau$
is a skew-symmetric almost para-complex structure on $M$ and $\tau_0=\tau_{can}.$ From Lemma \ref{Lemma_J_from_eta} c) and the skew-symmetry of $\eta$ it follows the skew-symmetry of $D\tau.$ Therefore $(M,g,\tau)$ is a nearly para-K\"ahler manifold.
The remaining statement follows from Corollary \ref{class_cor} and Lemma \ref{Lemma_J_from_eta}.
\qed
\subsection{The variety $\mathcal{C}_{\tau}(V)$} \label{glob_class}
Now we discuss the solution of (i) and (ii) of Corollary \ref{class_cor}.
In the following we shall freely
identify the real vector space $V:=C^n=\mathbb{R}^{n,n}=\mathbb{R}^{2n}$ with
its dual $V^*$ by means of the pseudo-Euclidian scalar product $g=g_{can}$.
The geometric interpretation is given in terms of an affine variety $\mathcal{C}_{\tau}(V)\subset \Lambda^3 V.$
\begin{Prop}\ \ \label{char_i_prop}
A three-form $\eta \in \Lambda^3 V^*\cong \Lambda^3 V$
satisfies (i) of Corollary \ref{class_cor}, i.e. $\eta_X \circ \eta_Y=0$ for all $X,Y \in V,$ if and only if
there exists an isotropic subspace $L\subset V$
such that $\eta \in \Lambda^3 L \subset \Lambda^3 V$.
If $\eta$ satisfies (i) and (ii) of Corollary \ref{class_cor} then
there exists a $\tau_{can}$-invariant isotropic subspace $L\subset V$
with $\eta \in \Lambda^3 L$.
\end{Prop}
\pf The proposition follows from the next lemma by taking $L=\Sigma_\eta$.
\qed
\begin{Lem}\ \ \label{Lemma_cond_I}
\begin{enumerate}
\item
$\Sigma_{\eta}$ is isotropic if and only if $\eta$ satisfies (i) of
Corollary \ref{class_cor}. If $\eta$ satisfies (ii) of Corollary
\ref{class_cor}, then
$\Sigma_{\eta}$ is $\tau_{can}$-invariant.
\item
Let $\eta \in \Lambda^3V$. Then $\eta \in \Lambda^3\Sigma_{\eta}.$
\end{enumerate}
\end{Lem}
\pf
The proof of the first part is analogous to Lemma 6 in \cite{CS}. The
second part is Lemma 7 of \cite{CS}.
\qed\\\noindent
Any three-form $\eta$ on $(V,\tau_{can})$ decomposes with respect to the
grading induced by the decomposition $V = V^{1,0} \oplus V^{0,1}$
into $\eta = \eta^+ + \eta^-$ with $\eta^+ \in \Lambda^+V := \Lambda^{2,1}V + \Lambda^{1,2}V$ and $\eta^- \in \Lambda^-V := \Lambda^{3,0}V + \Lambda^{0,3}V.$
\begin{Th}\ \ \label{2ndThm}
A three-form $\eta \in \Lambda^3 V^*\cong \Lambda^3 V$ satisfies (i)
and (ii) of Corollary \ref{class_cor} if and only if there exists an
isotropic $\tau_{can}$-invariant subspace $L$
such that $\eta \in \Lambda^-L=\Lambda^{3,0}L + \Lambda^{0,3}L \subset \Lambda^3L\subset \Lambda^3 V$
(the smallest such subspace $L$ is $\Sigma_\eta$).
\end{Th}
\noindent
We need the following general lemma.
\begin{Lem}\ \
It is
\begin{eqnarray*} \Lambda^-V =
\{ \eta \in \Lambda^3 V\,|\, \eta(\cdot,\tau\cdot,\tau\cdot) = \eta(\cdot,\cdot,\cdot) \}
= \{ \eta \in \Lambda^3 V\,|\,\{\eta_X,\tau\}=0, \; \forall X \in V \}.
\end{eqnarray*}
\end{Lem}
\pf (of Theorem \ref{2ndThm}) By Proposition \ref{char_i_prop}, the conditions (i)
and (ii) of Corollary \ref{class_cor} imply the existence
of an isotropic $\tau_{can}$-invariant subspace $L\subset V$ such that
$\eta \in \Lambda^3L$. The last lemma shows that the condition (ii)
is equivalent to $\eta \in \Lambda^-V.$ Therefore
$\eta \in \Lambda^3L\cap \Lambda^-V = \Lambda^-L$.
The converse statement follows from the same argument. \qed
\begin{Cor}\ \ \label{cor_Th1_th2}
\begin{itemize}
\item[(i)] The conical affine variety $\mathcal{C}_\tau(V):= \{ \eta\, | \,
\eta \mbox{ satisfies } (i) \mbox{ and } (ii) \mbox{ in Corollary
\ref{class_cor} }\}\subset \Lambda^3V$
has the following description
$\mathcal{C}_\tau(V)= \underset{L\subset V}{\bigcup} \Lambda^-L= \underset{L\subset V}{\bigcup} (\Lambda^3L^+ + \Lambda^3L^-),$
where the union is over all $\tau$-invariant maximal isotropic subspaces.
\item[(ii)]
If $\dim V < 12$ then it holds
$\mathcal{C}_\tau(V) = \Lambda^3V^+ \cup \Lambda^3V^-.$
\item[(iii)]
Any flat nearly para-K\"ahler manifold $M$ is locally of the form $M(U,\eta)$, for some $\eta \in \mathcal{C}_\tau(V)$
and some open subset $U \subset V.$
\item[(iv)]
There are no strict flat nearly para-K\"ahler manifolds of
dimension less than 6.
\end{itemize}
\end{Cor}
\pf (i) follows from Theorem \ref{2ndThm}. \\
(ii) Let $L \subset V$ be a $\tau$-invariant isotropic subspace. If $\dim V < 12,$ then
$\dim L < 6$ and, hence, either $\dim L^+ < 3$ or $\dim L^- < 3.$ In the first case we have
$$ \Lambda^-L = \Lambda^3L^+ + \Lambda^3L^- =\Lambda^3L^- \subset \Lambda^3V^-,$$
in the second case it is $\Lambda^-L = \Lambda^3L^+ + \Lambda^3L^- =\Lambda^3L^+ \subset \Lambda^3V^+.$\\
(iii) is a consequence of (i) and Theorems \ref{1stThm} and \ref{2ndThm}. \\
(iv) By (iii) the strict nearly para-K\"ahler manifold $M$ is locally of the form $M(U,\eta),$ which is strict if and only
if $\eta\neq 0$. This is only possible for
$\dim L \ge 3$, i.e.\ for $\dim M\ge 6$. \qed
\begin{Ex}
We have the following example which shows that part (ii) of
Corollary \ref{cor_Th1_th2} fails in dimension $\ge 12$: Consider $(V,\tau)=(C^6,e)=\mathbb{R}^6\oplus \, e\mathbb{R}^6$ with a basis given by
$(e^+_1,\ldots,e^+_6,e^-_1,\ldots,e^-_6),$ such that $e^\pm_i$ form a
basis of $V^\pm$ with $g(e_i^+,e_j^-)= \delta_{ij}.$ Then the form $\eta := e^+_1\wedge e^+_2\wedge e^+_3 + e^-_4\wedge e^-_5\wedge e^-_6$
lies in the variety $\mathcal{C}_\tau(V).$
\end{Ex}
\begin{Th}\ \ \label{3rdThm} Any strict flat nearly para-K\"ahler manifold
is locally a pseudo-Riemannian product $M=M_0\times M(U,\eta)$ of a flat
para-K\"ahler factor $M_0$
of maximal dimension and a flat nearly para-K\"ahler
manifold $M(U,\eta)$, $\eta\in C_\tau (V)$, of signature $(m,m)$,
$2m=\dim M(U,\eta)\ge 6$, such that $\Sigma_\eta$ has dimension $m$.
\end{Th}
\pf By Theorems \ref{1stThm} and \ref{2ndThm}, $M$ is locally isomorphic
to an open subset of a manifold of
the form $M(V,\eta)$, where $\eta \in \Lambda^3V$ has a $\tau_{can}$-invariant
and isotropic support $L=\Sigma_\eta$. We choose a $\tau_{can}$-invariant
isotropic subspace $L'\subset V$ such that $V':=L+L'$ is nondegenerate and
$L\cap L'=0$ and put $V_0 = (L+L')^\perp$. Then $\eta \in \Lambda^3V'
\subset \Lambda^3V$ and $M(V,\eta) = M(V_0,0)\times M(V',\eta)$.
Notice that $M(V_0,0)$ is simply the flat para-K\"ahler manifold $V_0$
and that $M(V',\eta)$ is strict
of split signature $(m,m)$, where
$m=\dim L\ge 3$. \qed
\begin{Cor}\ \ \label{lastCor} Any simply connected nearly para-K\"ahler manifold with a
(geodesically) complete flat metric
is a pseudo-Riemannian product $M=M_0\times M(\eta)$ of a flat
para-K\"ahler factor $M_0=\mathbb{R}^{l,l}$
of maximal dimension and a flat nearly para-K\"ahler
manifold $M(\eta):=M(V,\eta)$, $\eta\in C_\tau (V)$, of signature $(m,m)$
such that $\Sigma_\eta$ has dimension
$m=0,3,4,\ldots$.
\end{Cor}
\noindent
Next we wish to describe the moduli space of (complete simply connected) flat
nearly para-K\"ahler manifolds $M$ of dimension $2n$ up to isomorphism.
Without
restriction of generality we will assume that $M=M(\eta)$
has no para-K\"ahler
de Rham factor, which means that $\eta\in C_\tau (V)$ has maximal
support $\Sigma_\eta$, i.e.\ $\dim \Sigma_\eta = n$.
We denote by $C_\tau^{reg}(V)\subset C_\tau(V)$ the open
subset consisting of elements with maximal support.
The group
\[ G:=\mathrm{Aut}(V,g_{can},\tau_{can}) \cong GL(n)\]
acts on $C_\tau(V)$ and preserves $C_\tau^{reg}(V)$.
Two nearly para-K\"ahler manifolds $M(\eta)$ and $M(\eta')$ are
isomorphic if and only if $\eta$ and $\eta'$ are related by an element
of the group $G$.
For $\eta\in C_\tau(V)$ we denote by $p$, $q$ the dimensions of
the eigenspaces of $\tau$ on $\Sigma_\eta$ for the eigenvalues $1, -1$,
respectively. We call the pair $(p,q)\in \mathbb{N}_0\times \mathbb{N}_0$
the \emph{type} of $\eta$. We will also say that the
corresponding flat nearly para-K\"ahler manifold $M(\eta)$ has type $(p,q)$.
We denote by $C^{p,q}_\tau(V)$ the
subset of $C_\tau (V)$ consisting of elements of type $(p,q)$.
Notice that $p+q\le n$ with equality if and
only if $\eta \in C_\tau^{reg}(V)$. We have the following
decomposition
$$C_\tau^{reg}(V) = \underset{(p,q)\in \Pi}{\bigcup}C^{p,q}_\tau (V),$$
where $\Pi := \{ (p,q)\,|\, p,q \in \mathbb{N}_0\setminus \{1,2\},\ p+q=n\}$.
The group $G=GL(n)$ acts on the
subsets $C^{p,q}_\tau(V)$ and we are interested in the orbit space
$C^{p,q}_\tau(V)/G$.
Fix a $\tau$-invariant maximally isotropic subspace $L\subset V$
of type $(p,q)$ and put $\Lambda^-_{reg}L:=\Lambda^-L\cap C_\tau^{reg}(V)
\subset C^{p,q}_\tau (V)$. The stabilizer $G_L\cong GL(L^+)\times GL(L^-)
\cong GL(p)\times GL(q)$ of
$L=L^++L^-$ in $G$ acts on $\Lambda^-_{reg}L$.
\begin{Th}\ \
There is a natural one-to-one correspondence between complete simply connected
flat nearly para-K\"ahler manifolds of type $(p,q)$, $p+q=n$,
and the points of the following orbit space:
$$C^{p,q}_\tau (V)/G\cong \Lambda^-_{reg}L/G_L \subset \Lambda^-L/G_L =
\Lambda^3L^+/GL(L^+) \times \Lambda^3L^-/GL(L^-).$$
\end{Th}
\pf
Consider two complete simply connected
flat nearly para-K\"ahler manifolds $M$, $M'$. By the previous
results we can assume that $M=M(\eta)$, $M'=M(\eta')$ are
associated with $\eta, \eta' \in C^{p,q}_\tau (V)$. It is clear that
$M$ and $M'$ are isomorphic if $\eta$ and
$\eta'$ are related by an element of $G$. To prove the converse
we assume that $\varphi : M \rightarrow M'$ is an isomorphism of
nearly para-K\"ahler manifolds. By the results of Section \ref{simplytransSec},
$\eta$ defines a simply transitive group of isometries. This group
preserves also the para-complex structure $\tau$, which is
$\nabla$-parallel and hence left-invariant. This shows that $M$ and $M'$
admit a transitive group of automorphisms. Therefore, we can assume that
$\varphi$ maps the origin in $M=V$ to the origin in $M'=V$.
Now $\varphi$ is an isometry of pseudo-Euclidian vector spaces
preserving the origin. Thus $\varphi$ is an element of $O(V)$
preserving also the para-complex structure $\tau$ and hence
$\varphi \in G$.
The identification of orbit spaces can be easily checked using Lemma \ref{Lemma_cond_I} 2.\ and the fact
that any $\tau$-invariant isotropic subspace $\Sigma = \Sigma^++\Sigma^-$ can be mapped onto $L$ by an element of $G$.
\qed
\begin{thebibliography}{XXXX}
\bibitem[AMT]{AMT} D.\ V.\ Alekseevsky, C.\ Medori, A.\ Tomassini, {\it Homogeneous para-K\"ahler Einstein manifolds},
math.DG/0806.2272.
\bibitem[B]{B} O.\ Baues, {\it Prehomogeneous Affine Representations and Flat
Pseudo-Riemannian Manifolds}, to appear in Handbook of Pseudo-Riemannian
Geometry and Supersymmetry, European Mathematical Society, ed.\
V.\ Cort\'es.
\bibitem[CMMS]{CMMS} V.\ Cort\'es, C.\ Mayer, T.\ Mohaupt and F.\ Saueressig,
{\it Special Geometry of Euclidean Supersymmetry I: Vector Multiplets,}
J.\ High Energy Phys.\ {\bf 2004}, no.\ 3, 028, 73 pp.
\bibitem[CFG]{CFG} V.\ Cruceanu, P.\ Fortuny
and P.\ M.\ Gadea, {\it A survey on paracomplex geometry},
Rocky Mountain J.\ Math.\ {\bf 26} (1996), no.\ 1, 83--115.
\bibitem[CS]{CS} V.\ Cort\'es and L.\ Sch\"afer, {\it Flat nearly K\"ahler manifolds,} Ann.\ Glob.\ Anal.\ Geom.\ {\bf 32} (2007), 379--389.
\bibitem[IZ]{I} S.\ Ivanov and S.\ Zamkovoy, {\it Para-Hermitian and Para-Quaternionic manifolds}, Diff.\ Geom.\ Appl.\ {\bf 23} (2005), 205--234.
\bibitem[L]{L} P.\ Libermann, {\it Sur le probl\`eme d'\'equivalence de
certaines structures infinit\'esimales}, Ann.\ Mat.\ Pura Appl.\ {\bf 4}
(1954), no.\ 36, 27--120.
\bibitem[Ma]{Ma} A.\ Malcev, {\it On a class of homogeneous spaces},
Amer.\ Math.\ Soc.\ Translation {\bf 1951} (1951), no.\ 39, 33 pp.
\bibitem[Mi]{Mi} J.\ Milnor, {\it Curvatures of left invariant metrics on
Lie groups}, Advances in Math.\ {\bf 21} (1976), no.\ 3, 293--329.
\bibitem[N]{N} P.\ A.\ Nagy, {\it Connections with totally skew-symmetric
torsion and nearly-K\"ahler geometry}, to appear in Handbook of Pseudo-Riemannian Geometry and Supersymmetry, European Mathematical Society, ed.\
V.\ Cort\'es.
\bibitem[R]{R} M.\ S.\ Raghunathan, {\it Discrete subgroups of Lie groups},
Ergebnisse der Mathematik und ihrer Grenzgebiete, Band 68, Springer-Verlag,
New York-Heidelberg, 1972.
\bibitem[S]{S} L.\ Sch\"afer, {\it Para-tt*-bundles on the tangent
bundle of an almost
para-complex manifold}, Ann.\ Glob.\ Anal.\ Geom.\ {\bf 32} (2007), 125--145.
\bibitem[W]{W} J.\ A.\ Wolf, {\it On the geometry and classification of
absolute parallelisms I}, J.\ Diff.\ Geom.\ {\bf 6} (1971/72),
317--342.
\end{thebibliography}
\end{document}
|
\begin{document}
\title{Exploring Lie theory with {\mathfrak{s}f GAP}}
\author{Willem A. de Graaf}
\address{
Dipartimento di Matematica\\
Universit\`{a} di Trento\\
Italy}
\email{[email protected]}
\thanks{The author was partially supported by an Australian Research Council
grant, identifier DP190100317.}
\subjclass{17B45, 20G05}
\keywords{Lie groups, Lie algebras, nilpotent orbits, computational methods}
\begin{abstract}
We illustrate the Lie theoretic capabilities of the computational algebra
system {{\sf GAP}}4 by reporting on results on nilpotent orbits of simple Lie
algebras that have been obtained using computations in that system.
Concerning reachable elements in simple Lie algebras we show by computational
means that the simple Lie algebras of exceptional type have the Panyushev
property. We computationally prove two propositions on the dimension
of the abelianization of the centralizer of a nilpotent element in simple Lie
algebras of exceptional type. Finally we obtain the closure ordering of the
orbits in the null cone of the spinor representation of the group
$\mathrm{Spin}_{13}(\mathbb{C})$. All input and output of the relevant {\sf GAP}\ sessions
is given.
\end{abstract}
\maketitle
\section{Introduction}
This paper has two purposes. Firstly, it serves to introduce and advertise
the capabilities of the computer algebra system {{\sf GAP}}4 \cite{GAP4} to perform
computations related to various aspects of Lie theory. The main objects
related to Lie theory that {{\sf GAP}} can deal with directly are Lie algebras and
related finite structures such as root systems and Weyl groups.
But Lie algebras play
an important role in the study of the structure and representations of
linear algebraic groups. So the algorithms implemented in {\sf GAP}\
can also be used to perform computations regarding those objects.
The second purpose of the paper is to describe the results of three
computational projects that I have been involved in. The first of these
is the subject of Section \ref{sec:reach} and concerns reachable nilpotent
orbits in Lie algebras of exceptional type. Let $\mathop{\mathfrak{g}}$ be a semisimple complex Lie
algebra and let $e\in \mathop{\mathfrak{g}}$ be nilpotent. By $\mathop{\mathfrak{g}}_e$ we denote the centralizer of
$e$ in $\mathop{\mathfrak{g}}$. The element $e$ is said to be {\em reachable} if $e\in [\mathop{\mathfrak{g}}_e,
\mathop{\mathfrak{g}}_e]$. A nilpotent $e$ lies in a so-called $\mathfrak{\mathop{sl}}_2$-triple, which defines
a grading on $\mathop{\mathfrak{g}}$. Panyushev \cite{panyushev4} proposed a characterization of
reachable nilpotent elements in terms of this grading; here we call this the
Panyushev property of $\mathop{\mathfrak{g}}$. In \cite{panyushev4} this property was proved for
Lie algebras of type $A$. Yakimova \cite{yakimova2} showed that the Lie
algebras of type $B$, $C$, $D$ also have the Panyushev property. In Section
\ref{sec:reach} we show by calculations in {\sf GAP}\ that the simple Lie algebras
of exceptional type also have the Panyushev property.
The second project concerns the quotients $\mathop{\mathfrak{g}}_e/[\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e]$ where again $e$ is
a nilpotent element in a simple complex Lie algebra $\mathop{\mathfrak{g}}$. These play an
important role in \cite{pretop}. In Section \ref{sec:Qe} we show that a
statement proved in
\cite{pretop} for the simple Lie algebras of classical type also holds
for the exceptional types, albeit with a few explicitly listed exceptions.
The results of Sections \ref{sec:reach}, \ref{sec:Qe} have also appeared in the
arxiv preprint \cite{slapaper}, without giving the details of the
computations.
In Section \ref{sec:clos0} we look at the null cone of the spinor representation
of the group $\mathrm{Spin}_{13}(\mathbb{C})$. The orbits of this group in the null
cone were first listed in \cite{gavin}. A' Campo and Popov
\cite[Example (f), p. 348]{dekem3} observed, also by computational means, that
these orbits coincide with the strata of the null cone (and they corrected
the dimensions given in \cite{gavin}). Here we show how an algorithm given
in \cite{gravinya} can be extended to this case to obtain the closure
ordering of these orbits. We give a simple implementation in {\sf GAP}\ and
obtain the closure diagram. Furthermore, we use {\sf GAP}\ to study the stabilizers
of the elements of the null cone.
In this paper we will not give a full introduction into working with Lie
algebras in {\sf GAP}\ but refer to the reference manual of {\sf GAP}\ which can be
found on its website, and to the manuals of the various packages that are
listed in the next section. The website of {\sf GAP}\ also has various introductory
materials of a more general nature. The topics that we discuss in this paper all
involve semisimple Lie algebras. For a general introduction to the theory
of these algebras we refer to the book by Humphreys, \cite{hum}.
We will give all input and output of the {\sf GAP}\ sessions. Most commands return
very quickly. If a command takes markedly longer then we display the runtime,
by using the {\sf GAP}\ function \verb+time+; this command displays the runtime
in milliseconds, so that a value of, for example, \verb+23345+ means 23.3
seconds.
\noindent{\bf Acknowledgements.} I thank Alexander Elashvili for suggesting the
topic of Section \ref{sec:reach} and Alexander Premet for suggesting the
computations reported on in Section \ref{sec:Qe}. I thank the anonymous referee
for many comments which helped to improve the exposition of the paper.
\section{Preliminaries}
{{\sf GAP}}4 \cite{GAP4} is an open source computational algebra system.
Its mathematical functionality is contained in a ``core system''
(which consists of a small kernel written in C and a library of functions
written in the {\sf GAP}\ language) and a rather large number of packages which
can be loaded separately. The {\sf GAP}\ library has a number of functions
for constructing and working with Lie algebras and their representations.
For an overview we refer to the reference manual of {\sf GAP}. Furthermore
there are the following packages that deal with various aspects of Lie theory:
\begin{itemize}
\item {\sf CoReLG} \cite{corelg}, for working with real semisimple Lie algebras.
\item {\sf FPLSA} \cite{fplsa}, for dealing with finitely presented Lie
algebras.
\item {\sf LieAlgDB} \cite{liealgdb}, which contains various databases of
small dimensional Lie algebras.
\item {\sf LiePRing}, \cite{liepring} containing a database and algorithms
for Lie $p$-rings.
\item {\sf LieRing} \cite{liering}, for computing with Lie rings.
\item {\sf NoCK} \cite{nock}, for the computation of Tolzano's obstruction
for compact Clifford-Klein forms.
\item {\sf QuaGroup} \cite{quagroup}, for computations with quantum groups.
\item {\sf SLA} \cite{sla}, for computations with various aspects of semisimple
Lie algebras.
\item {\sf Sophus} \cite{sophus}, for computations in nilpotent Lie algebras.
\end{itemize}
We also mention the package {\sf CHEVIE} for dealing with groups of Lie type
and related structures such as Weyl groups and Iwahori-Hecke algebras.
This package is built on {\sf GAP} 3, not {\sf GAP} 4. We refer to its website
\url{https://webusers.imj-prg.fr/~jean.michel/chevie/chevie.html} for
more information.
The projects discussed in this paper mainly use the {\sf GAP}\ core system and
the package {\sf SLA}. In the next two subsections we briefly look at how
simple Lie algebras and their modules are constructed in {\sf GAP}\ and how
{\sf SLA} deals with nilpotent orbits in simple Lie algebras.
\subsection{Simple Lie algebras in {\sf GAP}}
{\sf GAP}\ has a function \verb1SimpleLieAlgebra1 for creating the simple split
Lie algebras over fields of characteristic 0. (The semisimple Lie algebras
can be constructed by the function \verb1DirectSumOfAlgebras1.)
They are given by a
multiplication table with respect to a Chevalley basis (for the latter
concept we refer to \cite[Theorem 25.2]{hum}). For the base field we
usually take the rational numbers because often the computations
with these algebras are entirely rational, that is, require no solutions
to polynomial equations. The $i$-th basis element of such a Lie algebra is
written as \verb1v.i1. In the next example we construct the simple Lie
algebra of type $E_8$, a basis of it and two of its elements.
\begin{verbatim}
gap> L:= SimpleLieAlgebra( "E", 8, Rationals );
<Lie algebra of dimension 248 over Rationals>
gap> b:= Basis(L);; b[123];
v.123
gap> b[2]-3*b[5]+1/7*b[100];
v.2+(-3)*v.5+(1/7)*v.100
\end{verbatim}
Such simple Lie algebras come with a lot of data like a Chevalley basis
and a root system. Again we refer to the reference manual for more details.
There also is a function for constructing the irreducible modules
of a semisimple Lie algebra. Such a module is given by a highest weight,
which is a nonnegative integral linear combination of the fundamental weights.
This linear combination is just given by its coefficient vector.
(The order of the fundamental weights is given by the Cartan matrix of
the root system of the Lie algebra.) The action of an element of the Lie
algebra on an element of its module is computed by the infix caret operator
\verb1^1. In the next example we construct the irreducible 3875-dimensional
module of the Lie algebra of type $E_8$. We see that the computation in
{\sf GAP}\ takes about 174 seconds. We also compute the action of an element of
the Lie algebra on an element of the module.
\begin{verbatim}
gap> L:= SimpleLieAlgebra( "E", 8, Rationals );;
gap> V:= HighestWeightModule( L, [1,0,0,0,0,0,0,0] ); time;
<3875-dimensional left-module over <Lie algebra of dimension 248
over Rationals>>
174425
gap> bL:= Basis(L);; bV:= Basis(V);;
gap> bL[1]^bV[263];
-1*y112*v0
\end{verbatim}
(For an explanation of the notation of the basis elements of these modules
we again refer to the reference manual.)
\subsection{Nilpotent orbits in {\sf GAP}}\label{sec:gapnilo}
Here we recall some definitions and facts on nilpotent orbits.
For more background information we refer to the book by Collingwood and
McGovern (\cite{colmcgov}). Secondly we show how the package {\sf SLA}
deals with nilpotent orbits.
Let $\mathop{\mathfrak{g}}$ be a semisimple Lie algebra over $\mathbb{C}$ (or over an algebraically
closed field of characteristic 0). Let $G$ denote the adjoint group of $\mathop{\mathfrak{g}}$;
this is the identity component of the automorphism group of $\mathop{\mathfrak{g}}$. An $e\in\mathop{\mathfrak{g}}$
is said to be nilpotent if the adjoint map $\mathrm{ad}\, e : \mathop{\mathfrak{g}}\to\mathop{\mathfrak{g}}$ is nilpotent.
If $e\in \mathop{\mathfrak{g}}$ is nilpotent then the entire orbit $Ge$ consists of nilpotent
elements, and is therefore called a nilpotent orbit.
By the Jacobson-Morozov theorem a nilpotent $e\in \mathop{\mathfrak{g}}$ lies in an
$\mathfrak{sl}_2$-triple $(f,h,e)$ (where $[e,f]=h$, $[h,e]=2e$, $[h,f]=-2f$).
Let $\mathop{\mathfrak{h}}\subset\mathop{\mathfrak{g}}$ be a Cartan subalgebra containing $h$. Let $\Phi$ be the root
system of $\mathop{\mathfrak{g}}$ with respect to $\mathop{\mathfrak{h}}$. Then there is a basis of simple roots
$\Delta = \{\alpha_1,\ldots,\alpha_\ell\}$ of $\Phi$, such that $\alpha_i(h) \in \{0,1,2\}$.
The Dynkin diagram of $\Delta$,
where the node corresponding to $\alpha_i$ is labeled $\alpha_i(h)$, is called a
weighted Dynkin diagram. It uniquely determines the orbit $Ge$.
The nilpotent orbits of the simple Lie algebras have been classified,
see \cite{colmcgov}. In the {\sf SLA} package they can be constructed with the
command \verb1NilpotentOrbits1. The output is a list of objects that
carry some information such as the weighted Dynkin diagram of the orbit and
an $\mathfrak{sl}_2$-triple containing a representative. Here is an example for the
Lie algebra of type $E_7$, where we inspect the weighted Dynkin diagram and
the third element of an $\mathfrak{sl}_2$-triple of the 37-th orbit (that is, a
representative of the nilpotent orbit).
\begin{verbatim}
gap> L:= SimpleLieAlgebra("E",7,Rationals);;
gap> no:= NilpotentOrbits(L);;
gap> Length(no);
44
gap> WeightedDynkinDiagram( no[37] );
[ 2, 0, 0, 2, 0, 0, 2 ]
gap> SL2Triple( no[37] )[3];
v.8+v.11+v.13+v.15+v.22+v.23+v.24
\end{verbatim}
Now we briefly describe the concept of {\em induced} nilpotent orbit.
A subalgebra of $\mathop{\mathfrak{g}}$ is said to be {\em parabolic} if it contains a Borel
subalgebra (i.e., a maximal solvable subalgebra). Let $\mathop{\mathfrak{h}}$ be a fixed
Cartan subalgebra of $\mathop{\mathfrak{g}}$. Let $\Phi$ denote the root system of $\mathop{\mathfrak{g}}$ with
respect to $\mathop{\mathfrak{h}}$, and let $\Delta$ be a fixed set of simple roots.
For a root $\alpha$ we denote the corresponding root space in $\mathop{\mathfrak{g}}$ by
$\mathop{\mathfrak{g}}_\alpha$. For a subset $\Pi\subset\Delta$ we define $\mathop{\mathfrak{p}}_\Pi$ to be the
subalgebra generated by $\mathop{\mathfrak{h}}$, $\mathop{\mathfrak{g}}_{-\alpha}$ for $\alpha\in\Pi$ and
$\mathop{\mathfrak{g}}_\alpha$ for all positive roots $\alpha$. Then $\mathop{\mathfrak{p}}_\Pi$ is a parabolic
subalgebra. Furthermore, every parabolic subalgebra is $G$-conjugate to
a subalgebra of the form $\mathop{\mathfrak{p}}_\Pi$.
Let $\mathop{\mathfrak{p}}=\mathop{\mathfrak{p}}_\Pi$ for a subset $\Pi\subset \Delta$. Let $\Psi \subset \Phi$
be the root subsystem that consists of the roots that are linear combinations
of the elements of $\Pi$. Then $\mathop{\mathfrak{p}} = \mathfrak{l}\oplus\mf{n}$ where $\mathfrak{l}$ is the
subalgebra spanned by $\mathop{\mathfrak{h}}$ and $\mathop{\mathfrak{g}}_\alpha$ for $\alpha\in\Psi$. Secondly,
$\mf{n}$ is spanned by $\mathop{\mathfrak{g}}_\alpha$ for positive $\alpha$ that do not lie in
$\Psi$. The decomposition $\mathop{\mathfrak{p}} = \mathfrak{l}\oplus\mf{n}$ is called the Levi
decomposition of $\mathop{\mathfrak{p}}$ and the subalgebra $\mathfrak{l}$ is called a (standard)
Levi subalgebra of $\mathop{\mathfrak{g}}$. We observe that $\mathfrak{l}$ is a reductive Lie algebra.
In the sequel nilpotent orbits in Levi subalgebras appear. The definitions of
their properties are the obvious analogues of the definitions concerning
semisimple Lie algebras.
Now let $\mf{p}\subset \mathop{\mathfrak{g}}$ be a parabolic subalgebra, with Levi decomposition
$\mf{p} = \mathfrak{l}\oplus \mf{n}$. Let $L\subset G$ be the
connected subgroup of $G$ with Lie algebra $\mathfrak{l}$. Let $Le'$ be a nilpotent
orbit in $\mathfrak{l}$. Lusztig and Spaltenstein (\cite{lusp}) have shown that there
is a unique nilpotent orbit $Ge\subset \mathop{\mathfrak{g}}$ such that
$Ge\cap (Le'\oplus \mf{n})$ is open and nonempty in $Le'\oplus \mf{n}$.
The orbit $Ge$ is said to be {\em induced} from the orbit $Le'$. Nilpotent
orbits which are not induced are called {\em rigid}.
Let $n$ be a non-negative integer. The irreducible components of the locally
closed set
$$A^n = \{ x \in \mathop{\mathfrak{g}} \mid \dim Gx = n\}$$
are called {\em sheets} of $\mathop{\mathfrak{g}}$ (see \cite{borho}, \cite{borhokraft}). A sheet
is $G$-stable and contains a {\em unique} nilpotent orbit. Sheets in general
are not disjoint, and different sheets may contain the same nilpotent orbit.
The sheets of $\mathop{\mathfrak{g}}$ are indexed by $G$-classes of pairs $(\mathfrak{l}, Le')$, where
$\mathfrak{l}$ is a Levi subalgebra, and $Le'$ is a rigid nilpotent orbit in $\mathfrak{l}$,
see \cite{borho}. The nilpotent orbit that is contained in the corresponding
sheet is equal to the nilpotent orbit induced from $Le'$.
The {\em rank} of the sheet corresponding to the pair $(\mathfrak{l}, Le')$ is defined
to be the dimension of the centre of $\mathfrak{l}$.
In the {\sf SLA} package a sheet is represented by a {\em sheet diagram}.
We first explain how this is defined.
Consider a parabolic subalgebra $\mathop{\mathfrak{p}}=\mathop{\mathfrak{p}}_\Pi$ with corresponding Levi subalgebra
$\mathfrak{l}$. Let $Le'$ be a rigid nilpotent orbit in $\mathfrak{l}$, then the pair
$(\mathfrak{l},Le')$ corresponds to a sheet. Now we label the Dynkin diagram of
$\Phi$ in the following way. Write $\Delta = \{\alpha_1,\ldots,\alpha_\ell\}$.
If $\alpha_i\not\in\Pi$ then node $i$ has label 2. The subdiagram consisting
of the nodes $i$ such that $\alpha_i\in \Pi$ is the Dynkin diagram of the
semisimple part of $\mathfrak{l}$. To these nodes we attach the labels of the
weighted Dynkin diagram of $Le'$. It is known that the weighted Dynkin diagram
of a rigid nilpotent orbit only has labels 0,1. So from a sheet diagram we
can identify $\mathfrak{l}$ and $Le'$ and hence the corresponding sheet.
The {\sf SLA} package has a function \verb1InducedNilpotentOrbits1 for
computing the induced nilpotent orbits of a simple Lie algebra. This function
returns a list of records that is in bijection with the sheets of the
Lie algebra. Each record has two components: \verb1norbit1 which is the
nilpotent orbit contained in the sheet, and \verb1sheetdiag1 which is the
list of labels of the sheet diagram of the sheet. Here is an example for
the simple Lie algebra of type $E_7$.
\begin{verbatim}
gap> L:= SimpleLieAlgebra( "E", 7, Rationals );;
gap> ind:= InducedNilpotentOrbits( L );;
gap> Length( ind );
46
gap> ind[34];
rec( norbit := <nilpotent orbit in Lie algebra of type E7>,
sheetdiag := [ 2, 0, 0, 1, 0, 2, 2 ] )
gap> WeightedDynkinDiagram( ind[19].norbit );
[ 0, 0, 0, 2, 0, 0, 2 ]
gap> WeightedDynkinDiagram( ind[22].norbit );
[ 0, 0, 0, 2, 0, 0, 2 ]
gap> WeightedDynkinDiagram( ind[34].norbit );
[ 0, 0, 0, 2, 0, 0, 2 ]
\end{verbatim}
The numbering of the nodes of the Dynkin diagram of the Lie algebra
of type $E_7$ follows \cite[\S 11.4]{hum}. Hence the sheet diagram of the
34-th sheet is
$$2~~~~0~~~~\overset{\text{\normalsize 0}}{1}~~~~0~~~~2~~~~2$$
We obtain the Dynkin diagram of the corresponding Levi subalgebra $\mathfrak{l}$
by removing the nodes labeled 2; we see that its semisimple part is of
type $D_4$. The weighted Dynkin diagram of the rigid nilpotent orbit
in $\mathfrak{l}$ has a 1 on the central node and zeros elsewhere. The rank of the
sheet is the dimension of the centre of $\mathfrak{l}$; this is the number of 2's
in the diagram, and we see that it is 3.
Furthermore we see that sheets 19 and 22 contain the same nilpotent orbit.
By inspection it can be verified that there are no other sheets that contain
this nilpotent orbit. Hence this is a nilpotent orbit lying in three sheets.
\section{Reachable elements}\label{sec:reach}
For $e\in \mathop{\mathfrak{g}}$ we denote its centralizer
in $\mathop{\mathfrak{g}}$ by $\mathop{\mathfrak{g}}_e$. In \cite{panyushev4} an $e$ in $\mathop{\mathfrak{g}}$ is defined to be
{\em reachable} if $e \in [\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e]$. Such an element has to be nilpotent.
It is obvious that $e$ is reachable if and only if all elements in its
orbit are reachable. Hence if $e$ is reachable then we also say that its
orbit $Ge$ is reachable.
In \cite{elashgre}, Elashvili and Gr\'elaud listed the reachable orbits in
simple complex Lie algebras $\mathop{\mathfrak{g}}$ (in that paper reachable elements are
called {\em compact}, in analogy with \cite{blabry}).
For a given semisimple Lie algebra we can easily obtain this classification
in {\sf GAP}4, using the {\sf SLA} package. Here is an example for the simple
Lie algebra of type $E_6$.
\begin{verbatim}
gap> L:= SimpleLieAlgebra( "E", 6, Rationals );;
gap> nL:= NilpotentOrbits( L );;
gap> reach:= [ ];;
gap> for o in nL do
> e:= SL2Triple( o )[3]; ge:= LieCentralizer( L, Subalgebra(L,[e]) );
> if e in LieDerivedSubalgebra( ge ) then Add( reach, o ); fi;
> od;
gap> Length( reach );
6
gap> WeightedDynkinDiagram( reach[3] );
[ 0, 0, 0, 1, 0, 0 ]
\end{verbatim}
This simple procedure obtains six reachable nilpotent orbits. For each such
orbit we can look at its weighted Dynkin diagram to identify it in the known
lists of nilpotent orbits as in \cite[\S 8.4]{colmcgov}. The third element of
our list of reachable orbits corresponds to the orbit with label $3A_1$ in
the list in \cite[\S 8.4]{colmcgov}.
Let $e\in\mathop{\mathfrak{g}}$ be nilpotent, lying in the $\mathfrak{sl}_2$-triple $(f,h,e)$. The
subalgebra spanned by $(f,h,e)$ acts on $\mathop{\mathfrak{g}}$ (by restricting the adjoint
representation of $\mathop{\mathfrak{g}}$). By the representation theory of $\mathfrak{sl}_2(\mathbb{C})$
the eigenvalues of $\mathrm{ad}\, h$ are integers. Hence we get a grading
$$ \mathop{\mathfrak{g}} = \bigoplus_{k\in\mathbb{Z}} \mathop{\mathfrak{g}}(k)$$
where $\mathop{\mathfrak{g}}(k) = \{ x \in \mathop{\mathfrak{g}}\mid [h,x]=kx\}$. Now set $\mathop{\mathfrak{g}}(k)_e = \mathop{\mathfrak{g}}(k)\cap
\mathop{\mathfrak{g}}_e$, and let $\mathop{\mathfrak{g}}(\geq 1)_e$ denote the subalgebra spanned by all
$\mathop{\mathfrak{g}}(k)_e$, $k\geq 1$.
Panyushev (\cite{panyushev4}) showed that, for $\mathop{\mathfrak{g}}$ of type $A_n$,
$e$ is reachable if and only if $\mathop{\mathfrak{g}}(\geq 1)_e$ is generated as Lie algebra
by $\mathop{\mathfrak{g}}(1)_e$. Here we call this the {\em Panyushev property} of $\mathop{\mathfrak{g}}$.
In \cite{panyushev4} it is stated that this property also holds for the
other classical types and the question is posed whether it holds for the
exceptional types. In \cite{yakimova2} a proof is given that the Panyushev
property holds in types $B_n$, $C_n$, $D_n$. Computations in {\sf GAP}
show that it also holds for the exceptional types.
\begin{proposition}\label{prop:pan}
Let $\mathop{\mathfrak{g}}$ be a simple Lie algebra of exceptional type. Then $\mathop{\mathfrak{g}}$ has the
Panyushev property.
\end{proposition}
\begin{proof}
One direction is easily seen to hold in general. Indeed,
suppose that $\mathop{\mathfrak{g}}(\geq 1)_e$ is generated as Lie algebra by $\mathop{\mathfrak{g}}(1)_e$.
Since $e\in \mathop{\mathfrak{g}}(2)_e$ it immediately follows that $e$ is reachable.
The converse is shown by case by case computations in {\sf GAP}.
Here we show this for the Lie algebra of type $E_6$. We let {\tt reach} be
the list of reachable nilpotent orbits, as computed above.
\begin{verbatim}
gap> for o in reach do
> e:= SL2Triple( o )[3]; ge:= LieCentralizer( L, Subalgebra(L,[e]) );
> h:= SL2Triple( o )[2]; gr:= SL2Grading( L, h );
> gegeq1:= Intersection( ge, Subspace( L, Concatenation( gr[1] ) ) );
> ge1:= Intersection( ge, Subspace( L, gr[1][1] ) );
> Print( Subalgebra( L, Basis(ge1) ) = gegeq1, " " );
> od;
true true true true true true
\end{verbatim}
The identifier {\tt gr} contains the grading corresponding to the
$\mathfrak{sl}_2$-triple. This is a list consisting of three lists. The first of these
has bases of the subspaces $\mathop{\mathfrak{g}}(1),\mathop{\mathfrak{g}}(2),\ldots$. So $\mathop{\mathfrak{g}}(\geq 1)_e$ is the
intersection of $\mathop{\mathfrak{g}}_e$ and the subspace spanned by all elements in the union
of the lists in {\tt gr[1]}; this space is assigned to the identifier
\verb1gegeq11. Secondly, $\mathop{\mathfrak{g}}(1)_e$ is the intersection of
$\mathop{\mathfrak{g}}_e$ and the subspace spanned by the first element of {\tt gr[1]}; this
space is assigned to \verb1ge11.
The penultimate line instructs {\sf GAP} to print {\tt true} if the
subalgebra generated by $\mathop{\mathfrak{g}}(1)_e$ is equal to $\mathop{\mathfrak{g}}(\geq 1)_e$.
\end{proof}
Yakimova (\cite{yakimova2}) studied the stronger condition $\mathop{\mathfrak{g}}_e =
[\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e]$. In this paper we call elements $e$
satisfying this condition {\em strongly reachable}. She showed that
for $\mathop{\mathfrak{g}}$ of classical type, $e$ is strongly reachable if and only if
the nilpotent orbit of $e$ is rigid. By an explicit example
this is shown to fail for $\mathop{\mathfrak{g}}$ of exceptional type.
For the exceptional types we can show the following.
\begin{proposition}\label{prop:reachrig}
Let $\mathop{\mathfrak{g}}$ be a simple Lie algebra of exceptional type. Let $e\in \mathop{\mathfrak{g}}$ be
nilpotent. Then $e$ is strongly reachable if and only if $e$ is both
reachable and rigid.
\end{proposition}
\begin{proof}
If $e$ is strongly reachable then it is reachable, but also rigid by
\cite{yakimova2}, Proposition 11. As the {\sf SLA} package has a function for
determining the rigid nilpotent orbits, the converse can easily be shown
by direct computation.
But it also follows from Proposition \ref{prop:pan}.
Indeed, if $e$ is rigid then $\mathop{\mathfrak{g}}(0)_e$ is semisimple, so
$[\mathop{\mathfrak{g}}(0)_e,\mathop{\mathfrak{g}}(0)_e]=\mathop{\mathfrak{g}}(0)_e$. Furthermore, $[\mathop{\mathfrak{g}}(0)_e,\mathop{\mathfrak{g}}(1)_e]=
\mathop{\mathfrak{g}}(1)_e$ by \cite[Lemma 8]{yakimova2} (where this is shown to hold for
all nilpotent $e$).
By the Panyushev property this implies that $[\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e]= \mathop{\mathfrak{g}}_e$.
\end{proof}
\begin{remark}
We can easily compute the rigid nilpotent orbits that are not strongly reachable.
Here is an example for the Lie algebra of type $E_8$.
\begin{verbatim}
gap> L:= SimpleLieAlgebra( "E", 8, Rationals );;
gap> rig:= RigidNilpotentOrbits( L );;
gap> exc:= [ ];;
gap> for o in rig do
> e:= SL2Triple( o )[3]; ge:= LieCentralizer( L, Subalgebra(L,[e]) );
> if ge <> LieDerivedSubalgebra(ge) then Add( exc, o ); fi;
> od;
gap> Length( exc );
3
gap> WeightedDynkinDiagram( exc[1] );
[ 0, 0, 0, 0, 0, 1, 0, 1 ]
\end{verbatim}
We see that we have obtained three nilpotent orbits that are rigid but
not strongly reachable. Comparing the weighted Dynkin diagram of the first
of those orbits with the tables in \cite{colmcgov} we see that its Bala-Carter
label is $A_3+A_1$. Table \ref{tab:rignsr} contains the rigid but not strongly
reachable orbits in the Lie algebras of exceptional type;
it is used in the proof of \cite[Lemma 3.7]{preste}. For
an explanation of the notation used for the labels we refer to
\cite[\S 8.4]{colmcgov}.
\begin{table}[htb]
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
type & $E_7$ & $E_8$ & $E_8$ & $E_8$ & $F_4$ & $G_2$\\
\hline
label \phantom{{\huge B}}
& {\small $(A_3+A_1)'$} & {\small $A_3+A_1$} & {\small $D_5(a_1)+A_2$} &
{\small $A_5+A_1$} & {\small $\widetilde{A}_2+A_1$} & {\small $A_1$} \\
\hline
{\small $(\dim \mathop{\mathfrak{g}}_e, \dim [\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e])$} & (41,40) & (84,83) & (46,45) & (46,45) &
(16,15) & (6,5) \\
\hline
\end{tabular}
\caption{Rigid but not strongly reachable nilpotent orbits}\label{tab:rignsr}
\end{table}
From the last line we see that in all cases $[\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e]$ is of codimension 1
in $\mathop{\mathfrak{g}}_e$. Taking Proposition \ref{prop:reachrig} into account we see that
this implies that $\mathop{\mathfrak{g}}_e = \langle e \rangle \oplus [\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e]$. In
\cite{preste} the $e$ with this property are called {\em almost reachable}.
\end{remark}
\section{The quotients $\mf{c}_e$}\label{sec:Qe}
Let $\mathop{\mathfrak{g}}$ be a simple Lie algebra, and $e$ a representative of a nilpotent orbit.
As before we denote its centralizer by $\mathop{\mathfrak{g}}_e$. In this section we consider the
quotient $\mf{c}_e = \mathop{\mathfrak{g}}_e/[\mathop{\mathfrak{g}}_e,\mathop{\mathfrak{g}}_e]$. These have been studied by Premet and
Topley \cite{pretop} in relation to finite $W$-algebras. In \cite{pretop}
it is shown that the statement of Proposition \ref{prop:sht} holds without
exceptions for the classical Lie algebras. Proposition \ref{prop:sht}, as well as
the tables of \cite[Section 3]{slapaper}, are used in \cite{pretop} for showing
that for $\mathop{\mathfrak{g}}$ of exceptional type, $U(\mathop{\mathfrak{g}},e)^{\mathrm{ab}}$ (the abelianization of
a finite $W$-algebra $U(\mathop{\mathfrak{g}},e)$) is isomorphic to a polynomial ring (with the
same six exceptions as Proposition \ref{prop:sht}).
\begin{proposition}\label{prop:sht}
Let $\mathop{\mathfrak{g}}$ be a simple Lie algebra of exceptional type. Let $e\in \mathop{\mathfrak{g}}$ be a
representative of an induced nilpotent orbit lying in a unique sheet. Then the
rank of that sheet is equal to $\dim \mf{c}_e$, except the
cases listed in Table \ref{tab:prop1}.
\end{proposition}
\begin{table}[htb]
\begin{tabular}{|l|l|l|c|c|}
\hline
$\mathop{\mathfrak{g}}$ & label & weighted Dynkin diagram & rank & $\dim \mf{c}_e$ \\
\hline
$E_6$ & $A_3+A_1$ & $0~~~~1~~~~\overset{\text{\normalsize 1}}{0}~~~~1~~~~0$
& 1 & 2 \\
$E_7$ & $D_6(a_2)$ & $0~~~~1~~~~\overset{\text{\normalsize 1}}{0}~~~~1~~~~0~~~~2$
& 2 & 3\\
$E_8$ & $D_6(a_2)$ & $0~~~~1~~~~\overset{\text{\normalsize 1}}{0}~~~~0~~~~0~~~~1~~~~0$
& 1 & 3\\
$E_8$ & $E_6(a_3)+A_1$ & $1~~~~0~~~~\overset{\text{\normalsize 0}}{0}~~~~1~~~~0~~~~1~~~~0$
& 1 & 3\\
$E_8$ & $E_7(a_2)$ & $0~~~~1~~~~\overset{\text{\normalsize 1}}{0}~~~~1~~~~0~~~~2~~~~2$
& 3 & 4\\
$F_4$ & $C_3(a_1)$ & 1~~~0~~~1~~~0 & 1 & 3\\
\hline
\end{tabular}
\caption{Table of exceptions to Proposition \ref{prop:sht}.}\label{tab:prop1}
\end{table}
\begin{proof}
The proof is obtained by explicit computations in {\sf GAP}\ with the {\sf SLA}
package loaded. We show the computation for the Lie algebra of type $E_8$.
First we compute the list of sheets (as explained in Section
\ref{sec:gapnilo}). For each sheet we compute $\dim \mf{c}_e$, where
$e$ is a representative of the unique nilpotent orbit in the sheet. These
dimensions are stored in the list \verb1dims1. Secondly, for each sheet we
compute the number of sheets having the same nilpotent orbit as the given
sheet. This number is stored in the list \verb1nr1.
\begin{verbatim}
gap> L:= SimpleLieAlgebra( "E", 8, Rationals );;
gap> shts:= InducedNilpotentOrbits( L );;
gap> nr:= [ ];; dims:= [ ];;
gap> for s in shts do
> e:= SL2Triple( s.norbit )[3];
> ge:= LieCentralizer( L, Subalgebra( L, [e] ) );
> Add( dims, Dimension(ge)-Dimension(LieDerivedSubalgebra(ge)) );
> Add( nr, Length( Filtered( shts, t -> t.norbit = s.norbit ) ) );
> od;
\end{verbatim}
For each sheet whose nilpotent orbit lies in no other sheet (that is, the
corresponding element of \verb1nr1 is 1) we compute its rank, which is
equal to the number of 2's in its sheet diagram (see Section \ref{sec:gapnilo}).
If the rank is not equal to $\dim \mf{c}_e$ then we store the sheet
in the list \verb1exc1. At the end this list contains the elements of Table
\ref{tab:prop1}.
\begin{verbatim}
gap> exc:= [ ];;
gap> for i in [1..Length(shts)] do
> if nr[i]=1 then
> rk:= Length( Filtered( shts[i].sheetdiag, x -> x = 2 ) );
> if rk <> dims[i] then Add( exc, shts[i] ); fi;
> fi; od;
gap> WeightedDynkinDiagram( exc[1].norbit );
[ 0, 1, 1, 0, 1, 0, 2, 2 ]
gap> Length( Filtered( exc[1].sheetdiag, x -> x = 2 ) );
3
gap> Position( shts, exc[1] );
8
gap> dims[8];
4
\end{verbatim}
So we have obtained the data of the penultimate line of Table \ref{tab:prop1}.
\end{proof}
\begin{proposition}
Let $\mathop{\mathfrak{g}}$ be a simple Lie algebra of exceptional type, and let $e\in \mathop{\mathfrak{g}}$ be a
nilpotent orbit that lies in more than one sheet. Then the maximal rank of
such a sheet is strictly smaller than $\dim \mf{c}_e$.
\end{proposition}
\begin{proof}
Also this proposition is proved by direct computation. Again we show the
computation for the simple Lie algebra of type $E_8$. We assume that the first
part of the computation explained in the proof of the previous proposition
has been done. In this case, for each sheet such that the corresponding number
in \verb1nr1 is greater than one, we first determine all sheets that have the
same nilpotent orbit. This list is assigned to the identifier \verb1sh1.
Then we compute the rank of all sheets in \verb1sh1. If the maximum of
those ranks is not strictly smaller than $\dim \mf{c}_e$ (where $e$ is a
representative of the nilpotent orbit in the considered sheet) then
we print a \verb1?1; otherwise we print a \verb1!1. Since we only obtain
\verb1!1, the proposition is proved in this case.
\begin{verbatim}
gap> for i in [1..Length(shts)] do
> if nr[i] > 1 then
> sh:= Filtered( shts, t -> t.norbit = shts[i].norbit );
> rks:= List( sh, r -> Length( Filtered( r.sheetdiag, x -> x=2 ) ) );
> if Maximum( rks ) >= dims[i] then Print("?"); else Print("!"); fi;
> fi; od;
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
\end{verbatim}
\end{proof}
\begin{remark}
Let $e\in \mathfrak{g}$ be nilpotent lying in the $\mathfrak{sl}_2$-triple $(f,h,e)$. The
Jacobi identity implies that the adjoint map $\operatorname{ad} h : \mathfrak{g}\to \mathfrak{g}$ stabilizes
$\mathfrak{g}_e$ and $[\mathfrak{g}_e,\mathfrak{g}_e]$. Hence it induces a map $\operatorname{ad} h : \mf{c}_e
\to \mf{c}_e$. The representation theory of $\mathfrak{sl}_2(\mathbb{C})$ implies that
$\operatorname{ad} h$ acts with non-negative integral eigenvalues on $\mf{c}_e$.
The paper \cite{slapaper} contains tables listing those eigenvalues for
the nilpotent orbits of exceptional simple Lie algebras.
\end{remark}
\section{Closures of nilpotent orbits of $\mathrm{Spin}_{13}$}\label{sec:clos0}
Let $G$ be a reductive complex algebraic group and let $V$ be a
finite-dimensional rational $G$-module. Then the invariant ring
$\mathbb{C}[V]^G$ is finitely generated by homogeneous elements. The null cone
$N_G(V)$ is defined to be the zero locus of the homogeneous invariants
of positive degree. The null cone is stable under the action of $G$ but in
general consists of an infinite number of orbits. Hesselink \cite{hesselink2}
constructed a stratification of the null cone, by which it is possible to
study its geometric properties. In \cite[\S 5.5, 5.6]{povin} Popov and
Vinberg gave a version
of this theory in characteristic 0 that works with certain elements,
called characteristics, in the Lie algebra of $G$. Popov \cite{popov}
developed an algorithm to compute these characteristics.
The $G$-module $V$ is
said to be {\em visible} (or {\em observable}) if the null cone has a finite
number of orbits. Kac \cite{kacnilp} classified the visible representations of
reductive algebraic groups. It turns out that irreducible visible
representations of connected simple groups either
arise as so-called $\theta$-groups or as the spinor modules of
$\mathrm{Spin}_{11}(\mathbb{C})$ and $\mathrm{Spin}_{13}(\mathbb{C})$. For an algorithm for
determining the closures of the nilpotent orbits of a $\theta$-group we
refer to \cite{gravinya}. The orbits of the spinor module
$\mathrm{Spin}_{11}(\mathbb{C})$ have been determined by Igusa \cite{igusa}.
It is likely that the closures of the nilpotent orbits can be determined
in the same way as is done below.
Kac and Vinberg \cite{gavin} classified the orbits of the group
$\mathrm{Spin}_{13}(\mathbb{C})$ on its 64-dimensional spinor module. It turns out
that the null cone has 13 orbits (excluding 0). A' Campo and Popov
\cite[Example (f), p. 348]{dekem3}, using their implementation of Popov's
algorithm \cite[Appendix C]{dekem3} for computing the characteristics
of the strata, observed that there are also 13 strata in the null cone. This
implies that the strata are orbits. Moreover, their computations gave the
dimensions of the orbits in the null cone, which were not all
correctly given in \cite{gavin}.
The package {\sf SLA} also has an implementation of Popov's algorithm.
So we can recover these observations by a computation using that
package. In this section we give an algorithm, which is similar to an
algorithm given in \cite{gravinya}, to determine when the (Zariski-) closure
of a stratum contains a given other stratum. This algorithm works under
some hypotheses that are shown to be satisfied by the spinor module of
$\mathrm{Spin}_{13}(\mathbb{C})$. We discuss a simple implementation of this
algorithm in {\sf GAP} and we obtain the Hasse diagram of the closures of
the orbits in the null cone of the spinor module of $\mathrm{Spin}_{13}(\mathbb{C})$.
\subsection{Preliminaries on the strata of the nullcone}
Everything we will say here works for reductive groups, but for simplicity we
consider a simple algebraic group $G$ over $\mathbb{C}$. We let $\mathfrak{g}$ be its Lie
algebra and $(~,~) : \mathfrak{g}\times \mathfrak{g} \to \mathbb{C}$ the Killing form (so
$(x,y) = \operatorname{Tr}((\operatorname{ad} x)(\operatorname{ad} y))$). We say that a semisimple element $h\in \mathfrak{g}$
is {\em rational} if the eigenvalues of $\operatorname{ad} h$ lie in $\mathbb{Q}$. This is equivalent
to saying that the eigenvalues of $h$ on any $\mathfrak{g}$-module are rational.
Let $\mathfrak{h}\subset \mathfrak{g}$ be a Cartan subalgebra. Then by $\mathfrak{h}_\mathbb{Q}$ we denote the
set of its rational elements, which is a vector space over $\mathbb{Q}$ of dimension
$\dim_\mathbb{C} \mathfrak{h}$. We define the norm of $h\in \mathfrak{h}_\mathbb{Q}$ by $\|h\| = \sqrt{(h,h)}$.
Now we let $V$ be a rational $G$-module and consider the null cone $N_G(V)$.
By the Hilbert-Mumford criterion a $v\in V$ lies in $N_G(V)$ if and
only if there is a cocharacter $\chi : \mathbb{C}^*\to G$ such that
$\lim_{t\to 0} \chi(t)\cdot v = 0$ (see \cite[Section III.2]{kraft}).
Setting $h = d \chi(1)$ we have that $h$ is
a rational semisimple element and writing $v$ as a sum of $h$-eigenvectors
we get that the corresponding eigenvalues are all positive.
For a rational semisimple $h\in \mathfrak{g}$ and $\tau\in \mathbb{Q}$ we let $V_\tau$ be
the $\tau$-eigenspace of $h$. Furthermore, we set
\[ V_{\geq 2} (h) = \bigoplus_{\tau\geq 2} V_\tau(h). \]
Let $v\in V$. Then a {\em characteristic} of $v$ is a shortest rational
semisimple element $h\in \mathfrak{g}$ such that $v\in V_{\geq 2}(h)$.
We have the following facts concerning characteristics
(see \cite[\S 5.5, 5.6]{povin}, \cite[\S 7.4.1, 7.4.2]{gra16}):
\begin{enumerate}
\item $v$ has a characteristic if and only if $v\in N_G(V)$.
\item If $h\in \mathop{\mathfrak{g}}$ is a characteristic of $v\in V$ and $g\in G$ then
$\mathop{\mathrm{Ad}}(g)(h)$ is a characteristic of $gv$.
\item Let $\mathop{\mathfrak{h}}$ be a fixed Cartan subalgebra of $\mathop{\mathfrak{g}}$. Then there are a finite
number of characteristics $h_1,\ldots,h_s$ in $\mathop{\mathfrak{h}}$, up to the action of $G$.
\item For $1\leq i\leq s$ let $S(h_i)$ be the set of all $v\in N_G(V)$
such that $v$ has a characteristic that is $G$-conjugate to $h_i$.
The set $S(h_i)$ is called the stratum corresponding to $h_i$.
\item The stratification of $N_G(V)$ is $N_G(V) = S(h_1)\cup
\cdots \cup S(h_s)$ (disjoint union).
\end{enumerate}
Popov \cite{popov} (see also \cite[\S 7.4.3]{gra16})
devised an algorithm to compute the characteristics
$h_1,\ldots,h_s$ in $\mathop{\mathfrak{h}}$. The algorithm also computes the dimensions of
the corresponding strata.
\mathfrak{s}ubsection{Closures of the strata}\label{sec:clos}
The topological notions (closed sets, open sets, closure,...) that we use here
are relative to the Zariski topology.
Let $\mathop{\mathfrak{h}}$ be a fixed Cartan subalgebra of $\mathop{\mathfrak{g}}$. For a rational $h\in \mathop{\mathfrak{h}}$
we let $Z(h) = \{ g\in G \mid \mathop{\mathrm{Ad}}(g)(h) = h\}$; then
$\mathop{\mathfrak{z}}(h) = \{ x\in \mathop{\mathfrak{g}} \mid [x,h]=0\}$ is the Lie algebra of $Z(h)$.
Both $Z(h)$ and $\mathop{\mathfrak{z}}(h)$ stabilize the spaces $V_\tau(h)$ for $\tau\in \mathbb{Q}$.
Let $h_1,\ldots,h_s\in\mathop{\mathfrak{h}}$ be the characteristics of the strata of the
nullcone of $V$.
Here we assume two things:
\begin{enumerate}
\item Each $V_2(h_i)$ has an open $Z(h_i)$-orbit.
\item The strata coincide with the $G$-orbits in the nullcone.
\end{enumerate}
\begin{remark}\label{rem:vinopen}
Let $h$ be one of the characteristics. A $v\in V_2(h)$ lies in the
open $Z(h)$-orbit if and only if $\mathop{\mathfrak{z}}(h)\cdot v = V_2(h)$.
\end{remark}
Under these hypotheses we can generalize a few results from \cite{gravinya}.
\begin{lemma}\label{lem:char1}
Let $h$ be one of the characteristics. Then the open
$Z(h)$-orbit in $V_2(h)$ is equal to $V_2(h)\cap S(h)$. Moreover, $h$
is a characteristic of every element in $V_2(h)\cap S(h)$.
\end{lemma}
\begin{proof}
Let $u$ be an element of the open $Z(h)$-orbit in $V_2(h)$.
From Theorem 5.4 in \cite{povin} it follows that the set of elements of
$V_2(h)$ with characteristic $h$ is open and nonempty. As nonempty
open sets intersect, there is a $g\in Z(h)$ such that
$g\cdot u$ has characteristic $h$. But then the characteristic of
$u=g^{-1}\cdot (gu)$ is $\mathop{\mathrm{Ad}}(g^{-1})(h)=h$. It follows that $h$ is a
characteristic of $u$, and in particular that $u \in S(h)$.
For $\tau\in \mathbb{Q}$ and $w\in V_2(h)$ set
$${\mathop{\mathfrak{g}}}_w = \{ x\in \mathop{\mathfrak{g}} \mid x\cdot w = 0\}, \text{ and } {\mathop{\mathfrak{g}}}_{\tau,w} =
\{ x\in {\mathop{\mathfrak{g}}}_{w} \mid [h,x]=\tau x\}.$$
Let $v\in V_2(h)\cap S(h)$.
Since $v$ lies in the closure of $Z(h)u$, we have that $\dim \mathfrak{g}_{\tau,v}\geq
\dim \mathfrak{g}_{\tau, u}$, for all $\tau\in \mathbb{Q}$. Because $u,v\in S(h)$ and our
assumption that the strata are $G$-orbits, $v$ and $u$ lie in the same
$G$-orbit. Hence $\dim \mathop{\mathfrak{g}}_{v} = \dim \mathop{\mathfrak{g}}_{u}$. But $\mathop{\mathfrak{g}}_v$ is the
direct sum of the various $\mathop{\mathfrak{g}}_{\tau,v}$, and similarly for $\mathop{\mathfrak{g}}_{u}$. It
follows that $\dim \mathop{\mathfrak{g}}_{\tau,v} = \dim \mathop{\mathfrak{g}}_{\tau, u}$ for all $\tau$. But
$${\mathop{\mathfrak{g}}}_{0,v} = \{ x\in \mathop{\mathfrak{z}}(h) \mid x\cdot v = 0\},$$
and similarly for $\mathop{\mathfrak{g}}_{0,u}$. This implies that $\dim \mathop{\mathfrak{z}}(h)v = \dim \mathop{\mathfrak{z}}(h)u$.
So also the orbit $Z(h)v$ is open in $V_2(h)$ by Remark \ref{rem:vinopen}.
In particular, $v$ lies in the open $Z(h)$-orbit in $V_2(h)$.
\end{proof}
\begin{lemma}
Let $W$ denote the Weyl group of the root system of $\mathop{\mathfrak{g}}$.
Let $h,h'$ be two of the characteristics. Then $S(h')$ is
contained in the closure of $S(h)$ if and only if there is a
$w\in W$ such that $U=V_2(h')\cap V_{\geq 2}(wh)$ contains a point of
$S(h')$. Furthermore, the intersection of $U$ and $S(h')$
is open in $U$.
\end{lemma}
\begin{proof}
Here we use the fact that $\overline{S(h)} = GV_{\geq 2}(h)$
(\cite{povin}, Theorem 5.6). This immediately implies the ``only if'' part.
Let $P(h)$ denote the parabolic subgroup with Lie algebra $\oplus_{\tau \geq 0}
\mathfrak{g}_{\tau}(h)$. Using the Bruhat decomposition we then have
\[ \overline{S(h)} = \bigcup_{w\in W} P(h') wP(h) (V_{\geq 2}(h))
= \bigcup_{w\in W} P(h')w(V_{\geq 2}(h)). \]
Suppose that $S(h') \subset \overline{S(h)}$. Let $v'\in
V_2(h')\cap S(h')$.
Then it follows that
there are $p\in P(h')$, $w\in W$, $v\in V_{\geq 2}(h)$ with $v'=pw\cdot v$,
or $p^{-1}\cdot v'= w\cdot v$.
We have that $P(h') = Z(h')\ltimes N$, where $N$ is the unipotent subgroup of
$G$ with Lie algebra $\oplus_{\tau >0} \mathfrak{g}_\tau(h')$. So $p^{-1} = zn$ with
$z\in Z(h')$, $n\in N$. As $v'\in V_2(h')$, we see that $nv' = v'+v''$ with
$v''\in V_{>2}(h')$. So $p^{-1}\cdot v' = zv'+zv''$ with $zv'\in V_2(h')$,
$zv''\in V_{>2}(h')$. In particular, $p^{-1}\cdot v' \in V_{\geq 2}(h')$.
But $w\cdot v \in V_{\geq 2}(wh)$. So $p^{-1}\cdot v' \in V_{\geq 2}(h')\cap
V_{\geq 2}(wh)$. Denote the latter space by $\widetilde{U}$.
Since $h'$ and $wh$ commute, $\widetilde{U}$ is stable under $h'$. So
$\widetilde{U}$ is the direct sum of $h'$-eigenspaces. Hence $zv'\in
\widetilde{U}$. So, in fact, $zv' \in U$, and obviously, $zv'\in S(h')$.
The last statement follows from \cite{povin}, Theorem 5.4.
\end{proof}
These lemmas underpin a direct method for checking whether $S(h') \subset
\overline{S(h)}$:
\begin{enumerate}
\item For all $w\in W$ compute the space $U_w = V_2(h')\cap V_{\geq 2}(wh)$.
\item Take a random point $u\in U_w$. If $\dim \mathfrak{z}(h')\cdot u = \dim
V_2(h')$, then conclude that $S(h')\subset \overline{S(h)}$.
\end{enumerate}
If in Step 2, the equality does not hold, then it is very likely that $U_w$
contains no point of $S(h')$. However, we still need to prove it. One method
for that is described in \cite[Section 5]{gravinya}, based on
computing the generic rank of a matrix with polynomial entries. It also works
here. However, a different approach is also possible: Compute the weights
$\mu_1,\ldots,\mu_r\in \mathfrak{h}^*$
of the weight spaces whose sum is $U_w$. By using the form $(~,~)$
we obtain an isomorphism $\nu : \mathfrak{h}\to \mathfrak{h}^*$ by $\nu(x)(y) = (x,y)$.
We consider the Euclidean space $\mathfrak{h}_\mathbb{R} = \mathbb{R}\otimes \mathfrak{h}_\mathbb{Q}$ with inner product
$(~,~)$. Let $C$ be the convex hull in $\mathfrak{h}_\mathbb{R}$ of the points
$\hat h_i=\nu^{-1}(\mu_i)$. Note that all $\hat h_i$ lie in the affine space
$H_2$ consisting of all $x\in \mathfrak{h}_\mathbb{R}$ with $(h',x)=2$. So also $C\subset H_2$.
Let $\tau\in \mathbb{Q}$ be
such that $(h',\tau h')=2$, then also $\tau h'\in H_2$. Now if $\tau h'$
does not lie in $C$ then $U_w$ has no point of $S(h')$.
This follows from the following fact: let $u\in U_w$, and
let $C'$ be the convex hull of $\nu^{-1}(\mu)$, where $\mu$ ranges over
the weights involved in an expression of $u$ as sum of weight vectors, and let
$\tilde h$ be the point on $C'$ closest to 0, and let $\hat h$ be such that
$(\tilde h,\hat h)=2$, then $\hat h$ is a characteristic of $u$, or
$\mathfrak{h}$ does not contain a characteristic of $u$ (see \cite[Section 5.5]{povin}
or \cite[Lemma 7.4.16]{gra16}).
\subsection{Implementation for $\mathrm{Spin}_{13}$}
The Lie algebra of $G=\mathrm{Spin}_{13}(\mathbb{C})$ is the simple Lie algebra of
type $B_6$. We can construct this Lie algebra in {\sf GAP}. The nodes
of the Dynkin
diagram of the root system of this Lie algebra are numbered in the
usual way (see, for example, \cite[\S 11.4]{hum}). Denoting the corresponding
fundamental weights by $\lambda_1,\ldots,\lambda_6$ we have that the
highest weight of the spinor module is $\lambda_6$. The {\sf SLA} package
contains the function \verb1CharacteristicsOfStrata1 which implements
Popov's algorithm. On input a semisimple Lie algebra and a dominant weight
it returns a list of two lists: the first is the list of characteristics,
the second is the list of dimensions of the corresponding strata.
In the next example we compute the characteristics of the strata of the spinor
module of $G$ (which takes about 87 seconds).
With \verb1SortParallel1 we sort the list of dimensions, and
apply the same permutation to the list of characteristics.
We display the list of dimensions and the first characteristic,
which is an element of $\mathop{\mathfrak{g}}$. Comparing with \cite[Example (f), p. 348]{dekem3}
we see that we get the same dimensions as A'Campo and Popov.
\begin{verbatim}
gap> L:= SimpleLieAlgebra("B",6,Rationals);;
gap> st:= CharacteristicsOfStrata( L, [0,0,0,0,0,1] );; time;
86818
gap> chars:= st[1];; dims:= st[2];;
gap> SortParallel( dims, chars );
gap> dims;
[ 22, 32, 35, 42, 43, 43, 46, 50, 50, 53, 56, 58, 62 ]
gap> chars[1];
(2/3)*v.73+(4/3)*v.74+(2)*v.75+(8/3)*v.76+(10/3)*v.77+(2)*v.78
\end{verbatim}
Above we already argued that the strata are $G$-orbits. In order to be
able to apply the algorithm of the previous section we need to show that
for each characteristic $h$ the space $V_2(h)$ has an open $Z(h)$-orbit.
For this we first construct the spinor module $V$ (this is done with the
{\sf GAP} function {\tt HighestWeightModule}). If {\tt x}, {\tt v} are
elements of the Lie algebra {\tt L} and the module {\tt V} respectively, then
\verb1x^v1 is the result of acting with {\tt x} on \verb1v1. Since
the basis elements of the module that is output by \verb1HighestWeightModule1
are weight vectors relative to the Cartan subalgebra of \verb1L1 that contains
the characteristics, the following function can be used to find a basis of
$V_2(h)$:
\begin{verbatim}
V2:= function( V, h )
return Filtered( Basis(V), v -> h^v = 2*v );
end;
\end{verbatim}
Let $h$ be a characteristic, say the fifth one. We show that $V_2(h)$ has an
open $Z(h)$-orbit:
\begin{verbatim}
gap> V:= HighestWeightModule( L, [0,0,0,0,0,1] );;
gap> h:= chars[5];;
gap> v2:= V2( V, h );;
gap> v:= Sum( v2, x -> Random([-100..100])*x );;
gap> zh:= LieCentralizer( L, Subalgebra( L, [h] ) );;
gap> zhv:= Subspace( V, List( Basis(zh), x -> x^v ) );;
gap> Dimension( zhv ) = Length(v2);
true
\end{verbatim}
Here we take a random point $v$ of $V_2(h)$. We let \verb1zh1, \verb1zhv1
be the centralizer $\mathop{\mathfrak{z}}(h)$ and the space $\mathop{\mathfrak{z}}(h)\cdot v$ respectively.
The last line shows that $\dim \mathop{\mathfrak{z}}(h)\cdot v=
\dim V_2(h)$. This implies that the orbit of $v$ is open in $V_2(h)$
(Remark \ref{rem:vinopen}). We have executed this procedure for all
characteristics, and hence both hypotheses of the previous section are
satisfied.
Now in order to execute the procedure of the previous section we need
functions for computing $V_{\mathop{\mathfrak{g}}eq 2}(h)$ and $wh$ for $w$ in the Weyl group
$W$. The function for the former is straightforward:
\begin{verbatim}
Vgeq2:= function( V, h )
local m,i;
m:= MatrixOfAction( Basis(V), h );
i:= Filtered( [1..Length(m)], i -> m[i][i] >= 2 );
return Basis( V ){i};
end;
\end{verbatim}
That is, we take the matrix of \verb1h1 (which is diagonal) and return the
list of basis vectors that correspond to an eigenvalue which is at least 2.
In order to compute $wh$ we consider a Chevalley basis of $L$,
\cite[Theorem 25.2]{hum}. Such a basis consists of elements $x_\alpha$ for
$\alpha$ in the root system, and $h_1,\ldots,h_\ell$ that lie in the
Cartan subalgebra. We refer to the cited theorem for the multiplication
table with respect to this basis. For a root $\alpha$ we set $h_\alpha =
[x_\alpha,x_{-\alpha}]$. Then we have $wh_\alpha = h_{w\alpha}$. Furthermore, if
$\alpha_1,\ldots,\alpha_\ell$ are the simple roots then $h_{\alpha_i} = h_i$.
A simple Lie algebra in {\sf GAP}, constructed with the function
\verb+SimpleLieAlgebra+, has a stored Chevalley basis. This is
a list consisting of three lists. In the first list we have the $x_\alpha$ for
$\alpha$ a positive root. In the second list we have the $x_\alpha$ for
$\alpha$ a negative root. The third list has the elements $h_1,\ldots,h_\ell$.
The ordering that is used on the positive roots is height compatible
(cf. \cite[\S 10.1]{hum}). This means that the $x_{\alpha_i}$ for
$1\leq i\leq \ell$ come first. Denote the positive roots, as ordered by
{\sf GAP}, by $\alpha_1,\ldots,\alpha_n$. For $n+1\leq i\leq 2n$ set $\alpha_i =
-\alpha_{i-n}$. The {\sf SLA} package has a function,
\verb1WeylGroupAsPermGroup1, that gives the Weyl group as a permutation
group on $1,\ldots ,2n$. If $w$ is an element of this group then the
corresponding element of the Weyl group acts as $\alpha_i\mapsto \alpha_{i^w}$.
These considerations yield the following function for computing $wh$,
where $w$ is given as a permutation and $h$ lies in the given Cartan
subalgebra. Here the first two input parameters are the following: \verb1BH1 is
the basis of the Cartan subalgebra with basis vectors $h_1,\ldots,h_l$;
\verb1hs1 is the list $h_{\alpha_i}$ for $1\leq i\leq 2n$.
\begin{verbatim}
wh:= function( BH, hs, w, h )
local cf, i;
cf:= Coefficients( BH, h );
i:= List( [1..Length(cf)], j -> j^w );
return cf*hs{i};
end;
\end{verbatim}
With this preparation we can give the implementation of the algorithm
described in Section \ref{sec:clos}. Here we give the simplified probabilistic
version, where we do not prove the non-inclusions. (The complete version
is longer as it includes an implementation of a function to check membership
of a convex hull. It has been used to prove the correctness of the diagram
in Figure \ref{fig:spin13}, and is available from the author upon request.)
We start by defining a number of global variables
that will be accessed by the function. Most of these have been explained above.
The list \verb1eW1 contains all elements of the Weyl group. The function
\verb1inc1 is a straightforward implementation of the algorithm
given in Section \ref{sec:clos}.
\begin{verbatim}
L:= SimpleLieAlgebra("B",6,Rationals);
st:= CharacteristicsOfStrata( L, [0,0,0,0,0,1] );
chars:= st[1];; dims:= st[2];;
SortParallel( dims, chars );
V:= HighestWeightModule(L,[0,0,0,0,0,1]);
R:= RootSystem(L);
ch:= ChevalleyBasis(L);
hs:= List( [1..36], i -> ch[1][i]*ch[2][i] );
hs:= Concatenation( hs, -hs );
h:= ch[3];
BH:= Basis( CartanSubalgebra(L), h );
eW:= Elements( WeylGroupAsPermGroup(R) );
inc:= function( h1, h2 )
local v2, zh1, w, vgeq2, U, u;
v2:= Subspace( V, V2( V, h1 ) );
zh1:= BasisVectors( Basis( LieCentralizer( L, Subalgebra( L, [h1]))));
for w in eW do
vgeq2:= Subspace( V, Vgeq2( V, wh( BH, hs, w, h2 ) ) );
U:= Intersection( v2, vgeq2 );
if Dimension(U) > 0 then
u:= Sum( Basis(U), x -> Random([-30..30])*x );
if Subspace( V, List( zh1, x -> x^u)) = v2 then
return true;
fi;
fi;
od;
return false;
end;
\end{verbatim}
We now give a short example of the usage of this function.
\begin{verbatim}
gap> inc( chars[6], chars[9] ); time;
true
228
gap> inc( chars[6], chars[7] ); time;
false
3923358
\end{verbatim}
Here we see that the orbit with the sixth characteristic is contained in
the closure of the orbit with the ninth characteristic, but not in the
closure of the orbit with the seventh characteristic. The first computation
takes 0.2 seconds whereas the second computation takes 3923.3 seconds.
This is explained by the fact that for the second computation the entire
Weyl group is traversed, which has 46080 elements, whereas the
first computation is decided after considering just one element of the
Weyl group.
\subsection{Closure diagram and stabilizers}
By applying the implementation of the previous section we arrive at the
Hasse diagram in Figure \ref{fig:spin13} that displays the closure relation
of the orbits in the null cone.
\unitlength=1cm
\begin{figure}
\caption{Hasse diagram of the closures of the orbits of
$\mathrm{Spin}_{13}$ in the null cone.}
\label{fig:spin13}
\end{figure}
Using Lemma \ref{lem:char1} it is straightforward to find representatives
of the orbits in the null cone. We illustrate this by an example:
\begin{verbatim}
gap> h:=chars[5];;
gap> v2:=V2( V, h );;
gap> zh:= Basis( LieCentralizer( L, Subalgebra(L,[h])) );;
gap> Length(v2);
32
gap> v:= v2[1]+v2[32];;
gap> Dimension( Subspace( V, List( zh, x -> x^v ) ) );
32
\end{verbatim}
This computation shows that the constructed element \verb1v1 is a
representative of the orbit corresponding to the fifth characteristic.
(We have found it by systematically trying sums of elements of \verb1v21;
here we do not go into that.)
Given an element $v\in V$ we can consider its stabilizer in $\mathop{\mathfrak{g}}$:
$${\mathop{\mathfrak{g}}}_{v} = \{ x\in \mathop{\mathfrak{g}} \mid x\cdot v = 0\},$$
which is the Lie algebra of the stabilizer in $G$.
The {\sf SLA} package does not contain a function for computing this stabilizer,
but it is easily written:
\begin{verbatim}
stab:= function( v ) # v in V, we return its stabilizer in L
local m, sol;
m:= List( Basis(L), x -> Coefficients( Basis(V), x^v ) );
sol:= NullspaceMat( m );
return List( sol, x -> x*Basis(L) );
end;
\end{verbatim}
We can then use {\sf GAP} functionality to study the structure of the
stabilizer. We use the function \verb1LeviMalcevDecomposition1 which for a
Lie algebra \verb1K1 returns a list of two subalgebras. The first of these is
semisimple, the second is solvable and \verb1K1 is their semidirect sum.
In our example this goes as follows.
\begin{verbatim}
gap> K:= Subalgebra( L, stab(v) );;
gap> ld:=LeviMalcevDecomposition(K);;
gap> SemiSimpleType(ld[1]);
"A4"
gap> Dimension(ld[2]);
11
\end{verbatim}
We see that the stabilizer is the semidirect product of a simple Lie algebra
of type $A_4$ and an 11-dimensional solvable ideal. By inspecting the basis
elements of this ideal it is easily seen that it is spanned by root vectors
corresponding to positive roots. Hence the ideal is unipotent. We indicate
this by saying that the stabilizer is of type $A_4\ltimes U_{11}$.
By doing similar computations for all 13 characteristics we arrive at Table
\ref{tab:stab}.
\begin{table}[htb]
\begin{tabular}{|r|c|r|}
\hline
nr & dim & type of stabilizer\\
\hline
1 & 22 & $A_5\ltimes U_{21}$\\
2 & 32 & $A_2+G_2\ltimes U_{24}$\\
3 & 35 & $A_1+B_3\ltimes U_{19}$\\
4 & 42 & $B_2+T_1\ltimes U_{25}$\\
5 & 43 & $A_4\ltimes U_{11}$\\
6 & 43 & $C_3\ltimes U_{14}$\\
7 & 46 & $B_2\ltimes U_{22}$\\
8 & 50 & $A_1+A_2\ltimes U_{17}$\\
9 & 50 & $A_2\ltimes U_{20}$\\
10 & 53 & $A_1+A_1\ltimes U_{19}$\\
11 & 56 & $A_1+A_1\ltimes U_{16}$\\
12 & 58 & $B_2\ltimes U_{10}$\\
13 & 62 & $A_1\ltimes U_{13}$\\
\hline
\end{tabular}
\caption{Stabilizers of the orbits in the null cone of the spinor
representation of $\mathrm{Spin}_{13}$}\label{tab:stab}
\end{table}
We see that the sum of the dimension in the second column and the dimension
of the stabilizer is always $78=\dim\mathop{\mathfrak{g}}$ (which should be the case as
$\dim \mathop{\mathfrak{g}}_v + \dim \mathop{\mathfrak{g}}\cdot v = \dim \mathop{\mathfrak{g}} =78$).
\newcommand{\etalchar}[1]{$^{#1}$}
\def\cprime{$'$}
\def\Dbar{\leavevmode\lower.6ex\hbox to
0pt{\hskip-.23ex \accent"16\hss}D}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{%
  \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}%
}
\providecommand{\href}[2]{#2}
\end{document}
|
\begin{document}
\title{A quantitative notion of redundancy for infinite frames}
\author[J. Cahill]{Jameson Cahill}
\address{Department of Mathematics, University of Missouri, Columbia, MO 65211-4100, USA}
\email{[email protected]}
\author[P. G. Casazza]{Peter G. Casazza}
\address{Department of Mathematics, University of Missouri, Columbia, MO 65211-4100, USA}
\email{[email protected]}
\author[A. Heinecke]{Andreas Heinecke}
\address{Department of Mathematics, University of Missouri, Columbia, MO 65211-4100, USA}
\email{[email protected]}
\thanks{The authors were supported by NSF DMS 0704216 and 1008183.}
\begin{abstract}
Bodmann, Casazza and Kutyniok introduced a quantitative notion
of redundancy for finite frames - which they called
{\em upper and lower redundancies} - that match better with
an intuitive understanding of redundancy for finite frames \gk{ in a Hilbert
space}.
The objective of this paper is to see how much of this
theory generalizes to infinite frames.
\end{abstract}
\keywords{Frames, Linearly Independent Sets, Redundancy,
Spanning Sets.}
\subjclass{Primary: 94A12; Secondary: 42C15, 15A04, 68P30}
\maketitle
\section{Introduction}
The customary notion of redundancy for a finite frame $\{\phi_i\}_{i=1}^N$ in $\mathcal{H}_n$ is
to use $\frac{N}{n}$. Many people have felt for a long time that this was not really
satisfactory since it assigns {\em redundancy 2} to each of the following frames (where
$\{e_i\}_{i=1}^n$ is an orthonormal basis for $\mathcal{H}_n$):
\[ \Phi_{1} = \{e_1,e_1,e_2,e_2,\ldots,e_n,e_n\};\]
\[ \Phi_2 = \{e_1,\ldots,e_1,e_2,e_3,\dots,e_n\},\ \ \mbox{where $e_1$ occurs $(n+1)$-times,}\]
\[ \Phi_3 = \{e_1,0,e_2,0,\ldots,e_n,0\}.\]
The frame $\Phi_1$ has redundancy 2 and is a disjoint union of two spanning sets and
a disjoint union of two linearly independent sets. This description of redundancy is
informative. But for $\Phi_2$, the frame is heavily concentrated in one dimension of the
space. In particular, this frame is made up of just one spanning set and it requires
$(n+1)$ linearly independent sets to represent it. Finally, the frame $\Phi_3$ is made
up of one orthonormal basis and a collection of zero vectors. Assigning this frame
redundancy 2 is quite misleading. Although zero vectors are important in some
areas of frame theory, such as filter bank theory, counting them in redundancy gives
no useful information. What is important, is to keep track of the number of zero vectors
while not letting them artificially increase redundancy.
In this paper, we generalize the results of \cite{BCK} by applying their quantitative
notion of redundancy for finite frames \gk{in a Hilbert space $\mathcal{H}$}, to infinite frames.
Most of the results carry over easily, but a few fail in this setting.
Concerning infinite-dimensional Hilbert spaces,
much work has been done on the idea of {\em deficits, excesses
and redundancy} \cite{BCHL03, BCHL03a,BCHL06,BCHL06a,BCHL06b, BCL09}.
In \cite{BL07}, the authors provide
a meaningful quantitative notion of redundancy which applies to general
infinite frames. In their work, redundancy
is defined as the reciprocal of a so-called frame measure function, which is a
function of certain averages of inner products of frame elements with their
corresponding dual frame elements. More recently, in \cite{BCL09}, it is shown that
$\ell_1$-localized frames satisfy
several properties intuitively linked to redundancy: any frame with
redundancy \gk{greater} than one contains a frame with redundancy arbitrarily
close to one, the redundancy of any frame for the whole space is greater than
or equal to one, and the redundancy of a Riesz basis is exactly one.
Our approach is slightly different in that we are interested
in how many spanning sets or linearly independent sets are in the frame. However,
our notion does not capture much information about infinite frames whose frame
vectors are not bounded. We will give examples to show the problems with
our notion of redundancy for unbounded frames.
\subsection{Review of Frames}
We start by fixing our terminology while briefly reviewing the basic definitions
related to frames. \gk{Let} $\mathcal{H}_n$ denote an $n$-dimensional
real or complex Hilbert space and let $\mathcal{H}$ denote
a finite or infinite dimensional Hilbert space. A {\em frame} for a Hilbert
space $\mathcal{H}$ is a family of vectors $\{\phi_i\}_{i\in I}$
(with $|I|$ finite or infinite) for which there exist constants $0 < A \le B < \infty$
such that
\[
A\norm{x}^2 \leq \sum_{i\in I} |\langle x, \varphi_i \rangle |^2 \leq B\norm{x}^2
\quad \mbox{for all } x \in \mathcal{H}.
\]
When $A$ is chosen as the largest possible value and $B$ as the smallest
for these inequalities to hold,
then we call them the {\em (optimal) frame constants}.
If $A$ and $B$ can be chosen as $A=B$, then the frame is called {\em $A$-tight}, and if
$A=B=1$ is possible, $\Phi$ is a {\em Parseval frame}. A frame is called
{\em equal-norm}, if there exists some $c>0$ such that $\|\varphi_i\|=c$ for all
$i=1,\ldots,N$, and it is {\em unit-norm} if $c=1$.
Apart from providing redundant expansions, frames can also serve as an analysis tool.
In fact, they allow the analysis of data by studying the associated {\em frame coefficients}
$(\langle x, \varphi_i \rangle)_{i\in I}$, where the operator $T_\Phi$
defined by
$T_\Phi: \mathcal{H} \to \ell_2(I)$, $x \mapsto (\langle x,\varphi_i\rangle)_{i\in I}$
is called the \emph{analysis operator}. The adjoint $T^*_\Phi$ of the analysis operator is typically
referred to as the {\em synthesis operator} and satisfies $T^*_\Phi((c_i)_{i\in I}) = \sum_{i\in I}
c_i\varphi_i$.
The main operator associated with a frame, which provides a stable reconstruction process, is the
{\em frame operator}
\[
S_\Phi=T^*_\Phi T_\Phi : \mathcal{H} \to \mathcal{H}, \quad x \mapsto \sum_{i\in I} \langle x,\varphi_i\rangle \varphi_i,
\]
a positive, self-adjoint invertible operator on $\mathcal{H}$. In the case of a Parseval frame, we have $S_\Phi=\mbox{\rm Id}_{\mathcal{H}}$.
In general, $S_\Phi$ allows reconstruction of a signal $x \in \mathcal{H}$ through the
reconstruction formula
\begin{equation} \label{eq:expansion}
x = \sum_{i\in I}\langle x,S_\Phi^{-1} \varphi_i\rangle \varphi_i.
\end{equation}
The sequence $(S_\Phi^{-1} \varphi_i)_{i\in I}$, which can be shown to form a frame itself, is often
referred to as the {\em canonical dual frame}.
We note that the choice of coefficients in the expansion \eqref{eq:expansion}
is generally not the only possible one. If the frame is linearly dependent --
which is typical in applications --
then there exist infinitely many choices of coefficients $(c_i)_{i\in I}$ leading to expansions of
$x \in \mathcal{H}$ by
\begin{equation} \label{eq:sparseexpansion}
x = \sum_{i\in I} c_i \varphi_i.
\end{equation}
This fact, for instance, ensures resilience to erasures and noise. The particular choice of coefficients
displayed in \eqref{eq:expansion} is the smallest in $\ell_2$ norm \cite{Chr03}, hence contains the least energy.
A different paradigm has recently received rapidly increasing attention \cite{BDE09}, namely to choose
the coefficient sequence to be sparse in the sense of having only few non-zero entries, thereby allowing
data compression while preserving perfect recoverability.
For a more extensive introduction to frame theory, we refer the interested reader to the books
\cite{Dau92,Mal98,Chr03} as well as to the survey papers \cite{KC07a,KC07b}.
\subsection{\gk{An Intuition-Driven Approach to Redundancy}}
\label{subsec:intuition}
In order to properly define a quantitative notion
of redundancy, the authors of \cite{BCK} first gave a list of desiderata that a notion is required
to satisfy.
\subsection{Desiderata}
\label{subsec:desiderata}
Summarizing and analyzing the requirements we have discussed, we state the following list of
desired properties for an upper redundancy ${\mathcal{R}}^+_\Phi$ and a lower redundancy ${\mathcal{R}}^-_\Phi$
of a frame $\Phi = (\varphi_i)_{i\in I}$ for a finite or infinite dimensional
real or complex Hilbert space $\mathcal{H}$.
\renewcommand{\labelenumi}{{\rm [D\arabic{enumi}]}}
\begin{enumerate}\item\label{D0}{\em Zero Vectors.} Redundancy should not count
zero vectors.
\item\label{DEE} {\em Generalization.} If $\Phi$ is an equal-norm Parseval frame, then
in this special case the customary notion of redundancy shall be attained, i.e., ${\mathcal{R}}^-_\Phi = {\mathcal{R}}^+_\Phi $.
\item\label{DNyquist} {\em Nyquist Property.} The condition ${\mathcal{R}}^-_\Phi = {\mathcal{R}}^+_\Phi$ shall characterize
tightness of a normalized version of $\Phi$, thereby supporting the intuition that upper and lower
redundancy being different implies `non-uniformity' of the frame. In particular, ${\mathcal{R}}^-_\Phi =
{\mathcal{R}}^+_\Phi = 1$ shall be equivalent to orthogonality as the `limit-case'.
\item\label{Duplow} {\em Upper and Lower Redundancy.} Upper and lower redundancy shall be
`naturally' related by $0 < {\mathcal{R}}^-_\Phi \le {\mathcal{R}}^+_\Phi < \infty$.
\item\label{Dadditiv} {\em Additivity.} Upper and lower redundancy shall be subadditive and superadditive,
respectively, with respect to unions of frames. They shall be additive provided that the redundancy
is uniform, i.e., ${\mathcal{R}}^-_\Phi={\mathcal{R}}^+_\Phi$.
\item\label{DInvariance} {\em Invariance.} Redundancy shall be invariant under the action of a unitary
operator on the frame vectors, under scaling of single frame vectors, as well as under permutation,
since intuitively all these actions should have no effect on, for instance, robustness against erasures,
\gk{which is one property redundancy shall intuitively measure.}
\item\label{Daverob} {\em Spanning Sets.} The lower redundancy shall measure the maximal number of
spanning sets of which the frame consists. This immediately implies that the lower
redundancy is a measure for robustness of the frame against erasures in the sense that any set of
a particular number of vectors can be deleted yet leave a frame.
\item\label{Dmaxrob} {\em Linearly Independent Sets.} The upper redundancy shall measure the minimal
number of linearly independent sets of which the frame consists.
\end{enumerate}
It is straightforward to verify that for the special type of frames consisting of orthonormal basis vectors, each
repeated a certain number of times, the upper and lower redundancies given by the maximal or minimal
number of repetitions satisfy these conditions. The challenge is now to extend this definition to all
frames in such a way that as many of these properties as possible are preserved.
\renewcommand{\labelenumi}{{\rm (\roman{enumi})}}
\section{Defining Redundancy}
\subsection{Definitions}
As explained before, we first introduce a local redundancy given in \cite{BCK}, which
encodes the concentration of frame vectors around one point. Since the
norms of the frame vectors do not matter for concentration, we normalize
the given frame and also consider only points on the unit sphere $\mathbb{S}=\{x \in \mathcal{H}: \|x\|=1\}$
in $\mathcal{H}$. Hence another way to view local redundancy is to consider
it as some sort of density function on the sphere. A consequence of normalizing the
frame vectors is that our new set may no longer be Bessel.
We now define a notion of local redundancy. \gk{For this, we remark that} throughout the paper, we
let \gk{$\langle y \rangle$ denote the span of some $y \in \mathcal{H}$ and $P_{\langle y \rangle}$
the orthogonal projection onto $\langle y \rangle$.}
\begin{definition}
Let $\Phi = (\varphi_i)_{i\in I}$ be a frame for $\mathcal{H}$. For
each $x \in \mathbb{S}$, the {\em redundancy function}
${\mathcal{R}}_\Phi : \mathbb{S} \to \mathbb{R}^+$ is defined by
\[
{\mathcal{R}}_\Phi(x) = \sum_{i\in I} \|P_{\langle \varphi_i \rangle} (x)\|^2
.
\]
\end{definition}
We might think about the function $\mathcal R_\Phi$ as a redundancy pattern on the sphere,
which measures redundancy at each single point. Also notice that this notion is reminiscent
of the fusion frame condition \cite{CKL08}, here for rank-one projections.
In contrast to \cite{BCK}, this redundancy function may not assume its maximum or
minimum on the unit sphere and in general both the max and min of this function
could be infinite.
We now define:
\begin{definition} \label{def:upplowred}
Let $\Phi = (\varphi_i)_{i\in I}$ be a frame for $\mathcal{H}$.
Then the {\em upper redundancy of $\Phi$} is defined by
\[
{\mathcal{R}}^+_\Phi = \sup_{x \in \mathbb{S}} {\mathcal{R}}_\Phi(x)
\]
and the {\em lower redundancy of $\Phi$} by
\[
{\mathcal{R}}^-_\Phi = \inf_{x \in \mathbb{S}} {\mathcal{R}}_\Phi(x).
\]
Moreover, $\Phi$ has a {\em uniform redundancy}, if
\[
{\mathcal{R}}^-_\Phi = {\mathcal{R}}^+_\Phi.
\]
\end{definition}
This notion of redundancy hence equals the upper and lower frame bounds of the
normalized version of the frame---which could now be infinite.
\section{The Case of Infinite Redundancy}
\subsection{Main Result}
With the previously defined quantitative notion of upper and lower redundancy, we can
now verify the properties from Subsection \ref{subsec:desiderata} which hold (and those
which do not hold) in the infinite dimensional setting.
\begin{theorem}
\label{T1}
Let $\Phi = (\varphi_i)_{i\in I}$ be a frame for an infinite-dimensional real or complex Hilbert space $\mathcal{H}$ and assume that ${\mathcal{R}}^+_\Phi < \infty$.
\begin{enumerate}
\item[{\rm [D1]}] {\em Generalization.} If $\Phi$ is an equal-norm Parseval frame, then
\[
{\mathcal{R}}^-_\Phi = {\mathcal{R}}^+_\Phi.
\]
\item[{\rm [D2]}] {\em Nyquist Property.} The following conditions are equivalent:
\begin{itemize}
\item[{\rm (i)}] We have ${\mathcal{R}}^-_\Phi = {\mathcal{R}}^+_\Phi$.
\item[{\rm (ii)}] The normalized version of $\Phi$ is tight.
\end{itemize}
Also the following conditions are equivalent.
\begin{itemize}
\item[{\rm (i')}] We have ${\mathcal{R}}^-_\Phi = {\mathcal{R}}^+_\Phi = 1$.
\item[{\rm (ii')}] $\Phi$ is orthogonal.
\end{itemize}
\item[{\rm [D3]}] {\em Upper and Lower Redundancy.} We have
\[
0 < {\mathcal{R}}^-_\Phi \le {\mathcal{R}}^+_\Phi < \infty.
\]
\item[{\rm [D4]}] {\em Additivity.} For each orthonormal basis $(e_i)_{i=1}^n$,
\[
{\mathcal{R}}^\pm_{\Phi\cup(e_i)_{i=1}^n} = {\mathcal{R}}^\pm_\Phi + 1.
\]
Moreover, for each frame $\Phi'$ in $\mathcal{H}$,
\[
{\mathcal{R}}^-_{\Phi\cup\Phi'} \ge {\mathcal{R}}^-_\Phi + {\mathcal{R}}^-_{\Phi'}
\quad \mbox{and} \quad
{\mathcal{R}}^+_{\Phi\cup\Phi'} \le {\mathcal{R}}^+_\Phi + {\mathcal{R}}^+_{\Phi'}.
\]
In particular, if $\Phi$ and $\Phi'$ have uniform redundancy, then
\[
{\mathcal{R}}^-_{\Phi\cup\Phi'} = {\mathcal{R}}_\Phi + {\mathcal{R}}_{\Phi'} = {\mathcal{R}}^+_{\Phi\cup\Phi'}.
\]
\item[{\rm [D5]}] {\em Invariance.}
Redundancy is invariant under application of a unitary operator $U$ on $\mathcal{H}$, i.e.,
\[
{\mathcal{R}}^\pm_{U(\Phi)} = {\mathcal{R}}^\pm_{\Phi},
\]
under scaling of the frame vectors, i.e.,
\[
{\mathcal{R}}^\pm_{(c_i \varphi_i)_{i=1}^N} = {\mathcal{R}}^\pm_{\Phi}, \quad c_i \mbox{ scalars},
\]
and under permutations, i.e.,
\[
{\mathcal{R}}^\pm_{(\varphi_{\pi(i)})_{i=1}^N} = {\mathcal{R}}^\pm_{\Phi}, \quad \pi \in S_{\{1,\ldots,N\}},
\]
\item[{\rm [D6]}] {\em Spanning Sets.} In the finite setting, $\Phi$ contains
$\lfloor {\mathcal{R}}^-_\Phi\rfloor$ disjoint spanning sets.
In the infinite dimensional setting, this property fails as we will show with
an example.
\item[{\rm [D7]}] {\em Linearly Independent Sets.}
$\Phi$
can be partitioned into $\lceil {\mathcal{R}}^+_\Phi\rceil$ linearly independent sets.
\end{enumerate}
\end{theorem}
\begin{proof}
Property [D1] is true because redundancy is
the upper and lower frame bounds of the normalized version
of the frame.
The first part of [D2] is true by definition and for the
second part of [D2], it is well known that a unit norm Parseval frame must
be an orthonormal basis.
Property [D4] follows easily from the argument in \cite{BCK}.
Property [D5] is obvious.
Property [D6] fails as we will see in the next section.
[D7] follows from Theorem 4.2 of \cite{CKLV} which states that: Every Bessel sequence
$\{\varphi_i\}_{i\in I}$ with Bessel bound $B$ and $\|\varphi_i\|\ge c$ for all $i\in I$
(in our case $c=1$), can be decomposed into $\lceil B/c^2\rceil$ linearly independent
sets.
\end{proof}
The redundancy function gives little information near the extreme
cases---as was true in the finite dimensional case---as the following example shows.
\begin{example}
\label{example:F3}
{\rm We add an example in which the frame is not merely composed of vectors from the unit basis
$\{e_1, \ldots, e_n\}$. Letting $0 < \varepsilon < 1$, we choose $\Phi_4 = (\varphi_i)_{i\in I}$
as
\[
\varphi_i = \left\{\begin{array}{rcl}
e_1 & : & i=1,\\
\sqrt{1-\varepsilon^2} e_1 + \varepsilon e_i & : & 2 \le i \le N,\\
e_i&:& i>N.
\end{array} \right.
\]
This frame is strongly concentrated around the vector $e_1$. We first observe that
\[
{\mathcal{R}}_{\Phi_4}(e_1) = \sum_{i=1}^N \|P_{\langle \varphi_i \rangle}(e_1)\|^2
= 1 + \sum_{i=2}^N \absip{e_1}{\sqrt{1-\varepsilon^2} e_1 + \varepsilon e_i}^2
= 1 + (N-1)(1-\varepsilon^2).
\]
However, this is not the maximum, which is in fact attained at the average point of the
frame vectors. But in order to avoid clouding the intuition by technical details,
we omit this analysis, and observe that
\[
1 + (N-1)(1-\varepsilon^2) \le {\mathcal{R}}_{\Phi_4}^+ < N.
\]
Since
\[
{\mathcal{R}}_{\Phi_4}(e_2) = \sum_{i=1}^N \|P_{\langle \varphi_i \rangle}(e_2)\|^2
= \sum_{i=2}^N \absip{e_2}{\sqrt{1-\varepsilon^2} e_1 + \varepsilon e_i}^2
= \varepsilon^2,
\]
we can conclude similarly, that
\[
0 < {\mathcal{R}}_{\Phi_4}^- \le \varepsilon^2.
\]}
\end{example}
The frame $\Phi_4$ shows that the new redundancy notion gives little information
near the extreme cases: ${\mathcal{R}}^- \approx 0$ and ${\mathcal{R}}^+$ large,
but becomes increasingly more accurate as ${\mathcal{R}}^-$ and ${\mathcal{R}}^+$ become closer to one another.
By [D2], the frame $\Phi_4$ is not orthogonal, nor is it tight. [D6] is not applicable
for this frame, since $\lfloor {\mathcal{R}}_{\Phi_4}^- \rfloor = 0$ although there does exist a
partition into one spanning set. Now, [D7] implies that this frame can be partitioned into $N-1$ linearly
independent sets. Again, we see that we can do better than this by merely taking the
whole frame which happens to be linearly independent. As before, we observe that [D7]
is not sharp for large values of ${\mathcal{R}}^+$. However, these become increasingly accurate
as ${\mathcal{R}}^-$ and ${\mathcal{R}}^+$ approach each other.
\section{Infinite Equal Norm Parseval Frames}
It is easy to construct infinite equal norm
Parseval frames for which the norms of the vectors are arbitrarily
close to one.
\vskip12pt
\begin{theorem}
For any $0 < r\le 1$, there is an equal norm Parseval frame $\{\varphi_i\}_{i=1}^{\infty}$
for $\ell_2$ with $\|\varphi_i\|^2 = r$, for all $i=1,2,\ldots$.
\end{theorem}
\begin{proof}
Given the orthonormal basis $\{e^{2\pi int}\}_{n\in \mathbb{Z}}$, let $E\subset [0,1]$ be
a measurable set for which $|E|=r$. Then
\[ \left \{ e^{2\pi int}\chi_E \right \}_{n\in \mathbb{Z}}\]
is a Parseval frame of norm $r$ vectors.
\end{proof}
\section{Infinite Parseval Frames}
Since {\em linear independence} is so weak in the infinite dimensional setting,
we will now see that (equal norm Parseval) frames can have some surprising
properties. This will affect our work in this area.
\begin{example}\label{E1}
For every natural number $k\in \mathbb{N}$
there is an equal norm Parseval frame for $\ell_2$ which can be written as
$j$-linearly independent and disjoint spanning sets, for all $j=1,2,\ldots,k$.
\end{example}
\begin{proof}
It is straightforward to choose families of vectors $\{f_{ij}\}_{i,j=1}^{\infty}$ satisfying:
\vskip12pt
(1) The vectors $\{f_{ij}\}_{i,j=1}^{\infty}$ are linearly independent.
\vskip12pt
(2) For each $j=1,2,\ldots$, we have that span $\{f_{ij}\}_{i=1}^{\infty}$ is dense in $\ell_2$.
\vskip12pt
It follows that if we apply Gram--Schmidt to $\{f_{ij}\}_{i=1}^{\infty}$
for each $j=1,2,\ldots$, we get a sequence of orthonormal bases
$\{g_{ij}\}_{i=1}^{\infty}$ for $\ell_2$, with the property that $\{g_{ij}\}_{i,j=1}^{\infty}$
is a linearly independent set.
Fix $k\in \mathbb{N}$ and consider the family:
\[ \left \{ \frac{1}{\sqrt{k}}g_{ij}\right \}_{i=1,j=1}^{\ \infty,\ k}.\]
This family clearly has the desired properties.
\end{proof}
\begin{example}
There is a Parseval frame for $\ell_2$ which can be written as
$j$-linearly independent and disjoint spanning sets, for all $j=1,2,\ldots,\infty$.
(Note that $j=\infty$ is included here).
\end{example}
\begin{proof}
We use the family $\{g_{ij}\}_{i,j=1}^{\infty}$ from Example \ref{E1} and form
the Parseval frame
\[ \left \{ \frac{1}{2^j}g_{ij} \right \}_{i,j=1}^{\infty}.\]
This is the required family.
\end{proof}
\section{More on the Infinite Version of Property [D6]}
In this section we will further examine property [D6]. Unfortunately, it is
dangerously
close to the Kadison--Singer problem. First, we give an alternative proof of Corollary 2.4
of \cite{BCPS09}.
\begin{definition}
A family of vectors $\{\varphi_i\}_{i=1}^{\infty}$ is $\omega$-{\bf independent}
if whenever
\[ \sum_{i=1}^{\infty}a_i\varphi_i =0,\]
it follows that $a_i=0$, for all $i=1,2,\ldots$. If we have this property only for
all $\{a_i\}_{i=1}^{\infty}\in \ell_2$, we say the family of vectors is $\ell_2$-{\bf independent}.
\end{definition}
\begin{theorem}
Let $\{Pe_i\}_{i=1}^{\infty}$ be a Parseval frame in $\mathcal{H}$. If $I\subset \mathbb{N}$, the following
are equivalent:
1. The family $\{Pe_i\}_{i\in I}$ spans $P(\mathcal{H})$.
2. The family $\{(I-P)e_i\}_{i\in I^c}$ is $\ell_2$-independent.
\end{theorem}
\begin{proof}
\noindent $(1)\Rightarrow (2)$:
Assume that $\{(I-P)e_i\}_{i\in I^c}$ is not $\ell_2$-independent. Then there exists
scalars $\{b_i\}_{i\in I^c}\in \ell_2$ so that
\[ \sum_{i\in I^c}b_i(I-P)e_i =0.\]
It follows that
\[ f = \sum_{i\in I^c}b_ie_i = \sum_{i\in I^c}b_iPe_i \in P(\mathcal{H}).\]
Thus,
\[ \langle f,Pe_j\rangle = \langle Pf,e_j\rangle = \sum_{i\in I^c}b_i\langle e_i,e_j\rangle
=0,\ \ \mbox{for all $j\in I$, since then $j\not= i$ for every $i\in I^c$}.\]
So $f \perp span\ \{Pe_i\}_{i\in I}$, and this family is not spanning for $P(\mathcal{H})$.
\vskip12pt
\noindent $(2)\Rightarrow (1)$:
First assume there is an $f\in P(\mathcal{H})$ so that $f \perp span\ \{Pe_i\}_{i\in I}$.
Then, $f=\sum_{i\in I}a_iPe_i$. Also,
\[ \langle f,Pe_i\rangle = \langle Pf,e_i\rangle = \langle f,e_i\rangle =0,
\ \ \mbox{for all $i\in I$}.\]
Hence, $f=\sum_{i\in I^c}b_ie_i$, with not all $b_i=0$ and $\{b_i\}_{i\in I^c}\in \ell_2$. Thus,
\[ \sum_{i\in I^c}b_ie_i = f = Pf = \sum_{i\in I^c}b_iPe_i.\]
i.e.
\[ \sum_{i\in I^c}b_i(I-P)e_i =0.\]
That is, $\{(I-P)e_i\}_{i\in I^c}$ is not $\ell_2$-independent.
\end{proof}
\begin{corollary}
The property [D6] is true for unit norm 2-tight frames
if and only if whenever $\{\varphi_i\}_{i=1}^{\infty}$ is a
unit norm 2-tight frame then there is a partition $\{I_1,I_2\}$ of $\mathbb{N}$ so that
$\{\varphi_i\}_{i\in I_j}$ is $\ell_2$-independent for $j=1,2$.
\end{corollary}
If we examine the proof above, we see that what is proved is really
the following:
\begin{corollary}
Let $\{e_i\}_{i\in I}$ be an orthonormal basis for
$\mathcal{H}$ and $\{Pe_i\}_{i\in I}$ be a Parseval frame for $P(\mathcal{H})$.
If
\[ \varphi \in span\ \{e_i\}_{i\in J}\cap P(\mathcal{H}),\]
then $\varphi \perp span\ \{Pe_i\}_{i\in J^c}$.
In particular,
the following are equivalent for a subset $J\subset I$:
(1) We have
\[ span\ \{e_i\}_{i\in J}\cap P(\mathcal{H}) \not= \{0\},\]
(2) We have
\[ span\ \{Pe_i\}_{i\in J^c} \not= \mathcal{H}.\]
(3) The family $\{(I-P)e_i\}_{i\in J}$ is not $\ell_2$-independent.
\end{corollary}
\begin{remark}
Note that the above corollary unifies the finite linearly
independent result with the infinite one, i.e., the same theorem above
holds with $|I|$ finite and {\em linearly independent} for part (3).
\end{remark}
\begin{remark}
The above also raises the question if there is an infinite dimensional
Rado-Horn Theorem. But we are not sure what it should say at this time.
\end{remark}
We note that property [D6] is an infinite dimensional version of a result
from \cite{CFMT}. In this paper, using variations of the discrete
Fourier transform matrices, the authors construct families of unit norm
2-tight frames for $\mathcal{H}_n$, so that whenever you partition the frame vectors into two subsets,
the lower Riesz bound of one of the subsets is on the order of $1/n$.
For a counter-example to [D6], we are
looking for unit norm 2-tight frames for $\ell_2$ so that whenever you divide the
frame vectors into two sets, one of them is not $\ell_2$-independent.
Finally, let us observe that we can find an equal norm Parseval frame with
the above properties. This example is due to Bodmann, Casazza, Paulsen,
and Speegle.
\begin{example}
For any $E\subset [0,1]$ measurable, the family
\[ \left \{ e^{2\pi int}\chi_E \right \}_{n\in \mathbb{Z}},\]
can be written as $k$, $\ell_2$-independent spanning sets for all
$k=1,2,\ldots,\infty$.
\end{example}
\section{Some Notes}
It is possible that there is a better notion of redundancy than that given in
\cite{BCK}. The problem with that notion is that if we apply
an invertible operator
to a frame, we get different redundancy. Intuitively,
this should not give a different
value. A possible alternative definition is:
\begin{definition}
Given a frame $\Phi=\{\varphi_i\}_{i=1}^N$ in $\mathcal{H}_n$ with frame operator $S$,
for each $x \in \mathbb{S}$, the {\em redundancy function}
${\mathcal{R}}_\Phi : \mathbb{S} \to \mathbb{R}^+$ is defined by
\[
{\mathcal{R}}_\Phi(x) = \sum_{i=1}^N \|P_{\langle S^{-1/2}(\varphi_i) \rangle} (x)\|^2
.
\]
\end{definition}
\begin{definition} \label{def:upplowred2}
Let $\Phi = (\varphi_i)_{i\in I}$ be a frame for $\mathcal{H}$.
Then the {\em upper redundancy of $\Phi$} is defined by
\[
{\mathcal{R}}^+_\Phi = \sup_{x \in \mathbb{S}} {\mathcal{R}}_\Phi(x)
\]
and the {\em lower redundancy of $\Phi$} by
\[
{\mathcal{R}}^-_\Phi = \inf_{x \in \mathbb{S}} {\mathcal{R}}_\Phi(x).
\]
Moreover, $\Phi$ has a {\em uniform redundancy}, if
\[
{\mathcal{R}}^-_\Phi = {\mathcal{R}}^+_\Phi.
\]
\end{definition}
This notion of redundancy equals the upper and lower frame bounds of the
normalized version of the canonical Parseval frame to $\Phi$.
This definition seems to lose some of the properties of the original definition - which
needs to be checked - such as
[D4] Do these hold? Especially, if we add a Parseval frame to a frame,
do these
redundancies increase by 1?
[D5] Does \[
{\mathcal{R}}^\pm_{(c_i \varphi_i)_{i=1}^N} = {\mathcal{R}}^\pm_{\Phi}, \quad c_i
\mbox{ scalars},
\]
Everything else seems to hold. But we do pick up a new result that
redundancy is
invariant under application of an invertible operator.
\begin{theorem}
If $\Phi = \{\varphi_i\}_{i=1}^N$ is a frame for $\mathcal{H}_n$ and $T$ is an
invertible
operator on $\mathcal{H}_n$, then for all $x\in \mathcal{H}_n$ we have
\[ {\mathcal{R}}_{T(\Phi)}^+(x)= {\mathcal{R}}_{\Phi}^+(x),\ \ \mbox{and}\ \
{\mathcal{R}}_{T(\Phi)}^-(x) =
{\mathcal{R}}_{\Phi}^-(x).\]
\end{theorem}
\begin{proof}
Let $S$ (resp. $S_T$) be the frame operator for $\Phi$ (resp. $T(\Phi)$).
Then
$S^{-1/2}\Phi$ is equivalent to $\Phi$ which is equivalent to $T(\Phi)$
which is
equivalent to $S_T^{-1/2}(T\Phi)$. Since both of these frames are
Parseval, it follows
from \cite{CKov} that there is a unitary operator $U$ satisfying:
\[ U[S^{-1/2}(\Phi)] = S_T^{-1/2}(T\Phi).\]
The result is obvious from here.
\end{proof}
\noindent {\bf Alert}: Unfortunately, this new idea for a definition
does not work. We will now give an example to show that the upper
frame bound of the normalized version of a Parseval frame is not
related to the number of linearly independent sets we can partition
the family into.
\vskip12pt
\begin{example} Fix $N$ and let $\{e_i\}_{i=1}^N$ be an orthonormal
basis for $\mathcal{H}_N$. We will build a Parseval frame for $\mathcal{H}_N$ in
pieces. First, we build the sets
\[ \frac{1}{\sqrt{2N}}e_1+\frac{1}{\sqrt{2N}}e_i,\ \
\frac{1}{\sqrt{2N}}e_1-\frac{1}{\sqrt{2N}}e_i\ \ \mbox{for all
$i=2,3,\ldots,N$}.\]
This family is a frame for $\mathcal{H}_N$ with frame operator having
eigenvectors $\{e_i\}_{i=1}^N$ and respective eigenvalues
\[ \left\{\frac{N-1}{N},\frac{1}{N},\frac{1}{N},\cdots,\frac{1}{N}\right\}.\]
Hence, if we add to this family the vectors
\[ \{\frac{1}{\sqrt{N}}e_1\} \cup \left \{ \sqrt{1-\frac{1}{N}}e_i \right \}_{i=2}^N,\]
then we will have a Parseval frame which can clearly be divided into
3 linearly independent sets. Namely, divide the first set into
\[ \left \{\frac{1}{\sqrt{2N}}e_1+\frac{1}{\sqrt{2N}}e_i \right \}_{i=2}^N\]
and
\[ \left \{\frac{1}{\sqrt{2N}}e_1-\frac{1}{\sqrt{2N}}e_i \right \}_{i=2}^N\]
and the third set is already linearly independent. However, if we
normalize the vectors, we get a frame:
\[ \left \{\frac{1}{\sqrt{2}}e_1 \pm \frac{1}{\sqrt{2}}e_i\right \}_{i=2}^N,\]
plus $\{e_i\}_{i=2}^N$. For this family, if we check the
frame bound at say $e_1$ we get:
\[ \sum_{i=2}^N |\langle e_1,\frac{1}{\sqrt{2}}e_1\pm \frac{1}{\sqrt{2}}
e_i \rangle |^2 = \frac{N-1}{2}.\]
That is, the upper frame bound of the normalized version of this
Parseval frame is unrelated to the number of linearly independent
sets we can divide it into.
\end{example}
\section{Concluding Remarks}
In the case of infinite redundancy, it is possible that our upper frame bound
is infinity. It is not clear at this time if anything can be concluded from this
case.
\end{document}
|
\begin{document}
\title{Sequential weak measurement}
\author{Graeme \surname{Mitchison}}
\email[]{[email protected]} \affiliation{Centre for
Quantum Computation, DAMTP,
University of Cambridge,
Cambridge CB3 0WA, UK}
\author{Richard \surname{Jozsa}}
\email[]{[email protected]} \affiliation{Department of Computer
Science, University of Bristol, Bristol, BS8 1UB, UK}
\author{Sandu \surname{Popescu}}
\email[]{[email protected]} \affiliation{
H.H. Wills Physics Laboratory, University of Bristol, Tyndall Avenue,
Bristol BS8 1TL, UK}
\affiliation{
Hewlett-Packard Laboratories, Stoke Gifford, Bristol
BS12 6QZ, UK}
\begin{abstract}
The notion of weak measurement provides a formalism for extracting
information from a quantum system in the limit of vanishing
disturbance to its state. Here we extend this formalism to the
measurement of sequences of observables. When these observables do not
commute, we may obtain information about joint properties of a quantum
system that would be forbidden in the usual strong measurement
scenario. As an application, we provide a physically compelling
characterisation of the notion of counterfactual quantum computation.
\end{abstract}
\pacs{03.67.-a, 02.20.Qs}
\maketitle
\pagestyle{plain}
\section{Introduction}
Quantum mechanics is still capable of giving us surprises. A good
example is the concept of weak measurement discovered by Aharonov and
his group \cite{AAV88,ABPRT01}, which challenges one of the canonical
dicta of quantum mechanics: that non-commuting observables cannot be
simultaneously measured.
Standard measurements yield the eigenvalues of the measured
observables, but at the same time they significantly disturb the
measured system. In an ideal von Neumann measurement the state of the
system after the measurement becomes an eigenstate of the measured
observable, no matter what the original state of the system was. On
the other hand, by coupling a measuring device to a system weakly it
is possible to read out certain information while limiting the
disturbance to the system. The situation becomes particularly
interesting when one post-selects on a particular outcome of the
experiment. In this case the eigenvalues of the measured observable
are no longer the relevant quantities; rather the measuring device
consistently indicates the {\em weak value} given by the AAV formula
\cite{AAV88,AharonovRohrlich05}:
\begin{equation} \label{AAV-formula}
A_w=\frac{\bra{\psi_f}A\ket{\psi_i}}{\braket{\psi_f}{\psi_i}}
\end{equation}
where $A$ is the operator whose value is being ascertained,
$\ket{\psi_i}$ is the initial state of the system, and $\ket{\psi_f}$
is the state that is post-selected (e.g. by performing a
measurement). The significance of this formula is that, if we couple a
measuring device whose pointer has position coordinate $q$ to the
system $\cS$, and subsequently measure $q$, then the mean value
$\langle q \rangle$ of the pointer position is given by
\begin{equation} \label{q-average} \langle q \rangle = g\ Re [A_w], \end{equation}
where $Re$ denotes the real part. This formula requires the initial
pointer wavefunction to be real and of zero mean, but these
assumptions will be relaxed later. The coupling interaction is also
taken to be the standard von Neumann measurement interaction
$H=gAp$. The coupling constant $g$ is assumed to be small, but we can
determine $A_w$ to any desired accuracy if enough repeats of the
experiment are carried out.
The formula (\ref{AAV-formula}) implies that, if the initial state
$\ket{\psi_i}$ is an eigenstate of a measurement operator $A$, then
the weak value post-conditioned on that eigenstate is the same as the
classical (strong) measurement result. When there is a definite
outcome, therefore, strong and weak measurements agree. However, weak
measurement can yield values outside the normal range of measurement
results, e.g.\ spins of 100 \cite{Spin100}. It can also give complex
values, whose imaginary part corresponds to the pointer momentum. In
fact, the mean of the pointer momentum is given by
\begin{equation} \label{p-average}
\langle p \rangle = 2gv\ Im [A_w],
\end{equation}
where $Im$ denotes the imaginary part and $v$ is the variance in
the initial pointer momentum.
The fact that one hardly disturbs the system in making weak
measurements means that one can in principle measure different
variables in succession. We follow this idea up in this paper.
\section{A new paradox}\label{new}
Weak measurement has proved to be a valuable tool in analysing
paradoxical quantum situations, such as Hardy's paradox
\cite{Hardy92,ABPRT01}. To illustrate the idea of sequential weak
measurement and its potential applications we first construct a new
quantum paradox. Consider the {\em double interferometer}, the
optical circuit shown in Figure \ref{weak2}, where a photon passes
through two successive interferometers. This configuration has been
considered previously by Bl\"{a}si and Hardy \cite{BH95} in another
context. Using the labels of the paths shown in the figure, and
denoting the action of the $i$-th beam-splitter by $U_i$, the system
evolves as follows:
\begin{align}
U_1\ket{A}&=(\ket{B}+\ket{C})/\sqrt{2},\label{evolution1} \\
U_2\ket{B}&=(\ket{E}+\ket{F})/\sqrt{2},\ \ U_2\ket{C}=(\ket{E}-\ket{F})/\sqrt{2},\label{evolution2}\\
U_3\ket{E}&=(-\ket{D}+\ket{D'})/\sqrt{2},\ \ U_3\ket{F}=(\ket{D}+\ket{D'})/\sqrt{2}.\label{evolution3}
\end{align}
(The signs here are determined by the fact that reflection on the
silvered outer surface of a beam-splitter gives a phase of $\pi$
whereas transmission or reflection by the inner surface gives zero
phase.)
\begin{figure}[hbtp]
\centerline{\epsfig{file=weakfigure4.eps,width=0.4\textwidth}}
\caption{The double interferometer: an optical circuit in which a
photon, injected along path $A$, passes through two
interferometers, represented by paths $B$ and $C$ and paths $E$
and $F$. Finally, the photon is post-selected at the detector $D$.
The beam-splitters are shown with their reflecting surface marked
in black. \label{weak2}}
\end{figure}
Suppose now that we select a large number $N$ of successful runs of
our experiment, i.e. those runs where the photon is detected by the
detector $D$.
We can now make the following statements about this situation:
(1) {\it All photons go through path $E$.}
Indeed, equations (\ref{evolution1}) and (\ref{evolution2}) tell us
that if a photon is injected along path A, it must exit the first
interferometer along path $E$. Consequently, if we measure the
observable $P_E$, the projector for path $E$, we find the total number
of photons detected is $N_E=N$ with certainty.
(2) {\it All photons go through path $C$.}
Indeed, the second interferometer is arranged in such a way that any
photon entering along path $B$ will end up at $D'$. Hence, a very
simple calculation shows that if, instead of measuring $N_E$, we
measure $N_C$, the number of photons going along path $C$ in all $N$
runs of the experiment, we will obtain with certainty $N_C=N$.
(3) {\it When photons go through path $C$, a subsequent
measurement reveals that half of them must go through path $E$ and
half through path $F$.}
Indeed, if we measure the position of the photons in the first
interferometer and find that all go via $C$, then a subsequent
measurement of $N_E$ and $N_F$ must yield $N/2$ in each case, up to
statistical fluctuations. (In fact this is true regardless of whether or
not all photons end up eventually at $D$).
(4) {\it When photons go through path $E$, a subsequent
measurement reveals that half of them must have
come via path $B$ and half via path $C$.}
This last statement is similar to point (3) above.
The above four statements seem to imply a paradoxical situation. On
the one hand, statement (2) tells us, when we pool all the results,
that all $N$ photons go via path $C$; together with statement (3) this
implies that the number of photons that go along path $E$ must be
$N/2$. On the other hand, statement (1) tells us that all $N$ photons
actually go along path $E$! A similar contradiction arises in
connection with the number of photons going along path $C$. On the one
hand, statement (1) tells us that all photons go via $E$; together
with statement (4) this implies that the number of photons that go
along path $C$ must be only $N/2$. On the other hand, statement (2)
tells us that all $N$ photons actually go along path $C$!
The usual way of resolving this paradox is to say that the above
statements refer to measurements that cannot all be made
simultaneously. Indeed, it is true that if we measure $P_E$ we find
it is 1 with certainty, but {\it only} if we do not also measure
$P_C$. If we also measure $P_C$ in the same experiment, then it is no
longer the case that $P_E=1$. Similarly, it is true that $P_C=1$ with
certainty, but {\it only} if we do not also measure $P_E$. If we also
measure $P_E$ in the same experiment, then it is no longer the case
that $P_C=1$. So, we are told, the statements (1)-(4) above have no
simultaneous meaning, for they do not refer to the same
experiment. Hence there is no paradox: In formulating the paradox
presented above we made use of facts that are not all simultaneously
true.
\begin{figure}[hbtp]
\centerline{\epsfig{file=paths.eps,width=0.4\textwidth}}
\caption{Paths through the double interferometer, and the number of
photons that follow the indicated path. Thus for instance
$N_{BE}=N/2$. Note however the curious prediction $N_{BF}=-N/2$.
\label{paths}}
\end{figure}
On the other hand, as is emphasised in \cite{AharonovRohrlich05}, one
should not dismiss such paradoxes too lightly. Indeed it is possible
to make a trade-off: By accepting some imprecision in measuring $P_E$,
$P_C$, etc., we can limit the disturbance these measurements
produce. The way to do this is to weaken the coupling of the measuring
devices to the photons.
Since the disturbance is now small, we can make all the measurements
in the same experiment, and we expect all the statements (1)-(4) to be
true. Hence we expect $N_E=N$, $N_C=N$ and obviously $N_F=0$ and
$N_B=0$. On the other hand, we also expect that $N_{CE}$, and
$N_{CF}$, the total numbers of photons that went along $C$ and
subsequently along $E$ or $F$, respectively, should both be equal to
$N/2$; this is because all the $N$ photons go via $C$ and half of them
should continue along $E$ and half along $F$. Also we expect $N_{CE}$,
the number of photons that went along $C$ and subsequently along $E$,
to be $N_{CE}=N/2$. Similarly we expect that $N_{CE}$ and $N_{BE}$
should both be $N/2$, since all $N$ photons go along $E$ and half of
them must come via $B$ and half via $C$.
While all the above predictions seem reasonable, here is the surprise:
Overall we have only $N$ photons. They could have moved along four
possible trajectories: $BE$, $BF$, $CE$ or $CF$. Since
$N_{BE}+N_{BF}+N_{CE}+N_{CF}=N$ and since $N_{BE}=N_{CE}=N_{CF}=N/2$
it must be the case that $N_{BF}=-N/2$! Furthermore, our prediction
has a remarkable internal consistency. We know that the total number
of photons that go along $F$ must be zero. They can arrive at $F$ in two
ways, either by $BF$ or $CF$. Thus $N_F=N_{BF}+N_{CF}$. As noted above,
$N_{CF}=N/2$, but no photons are supposed to go through $F$. This is due
to the fact that $N_{BF}$ is negative, i.e. $N_{BF}=-N/2$.
The above predictions seem totally puzzling, no less puzzling than the
original paradox. However, what we have now is not a mere
interpretation that can simply be dismissed. These are now predictions
about the results of real measurements - in particular the weak
measurement of the number of photons that passes along path B and then
along path F. This is a {\it two-time} measurement.
In general, by ensuring that the measurement interaction is weak, we
can consider {\em sequences} of measurements. Describing such
measurements is the main subject of our paper. In the process, we will
formally derive the strange predictions made above for the double
interferometer, and will discuss the interpretation of weak
measurements. Finally, we apply these ideas to counterfactual
computation, which is a catch-all for numerous counterfactual
phenomena including, for example, interaction-free measurement
\cite{ElitzurVaidman93}.
\section{Sequential weak measurements}\label{section:seq}
The situation we shall consider is where a system $\cS$ evolves
unitarily from an initial state $\ket{\psi_i}$ to a final
post-selected measurement outcome $\bra{\psi_f}$. At various points,
observables may be measured weakly. Here we consider the scenario
where there is a single copy of the system, with the measuring device
weakly coupled to it. Generally, reliable information will only be
obtained after many repeats of the given experiment.
In the simplest case where there is just one observable, $A$ say, we
assume the evolution from $\ket{\psi_i}$ to the point where $A$ is
measured is given by $U$, and from this point to the post-selection
the evolution is given by $V$. Then we can rewrite (\ref{AAV-formula})
as:
\begin{equation} \label{fullAAV}
A_w=\frac{\bra{\psi_f}VAU\ket{\psi_i}}{\bra{\psi_f}VU\ket{\psi_i}},
\end{equation}
and the mean of the pointer is given by (\ref{q-average}) as before.
Consider next the case of two observables, $A_1$ and $A_2$, measured
at different times on a system $\cS$. We assume the system evolves
under $U$ from $\ket{\psi_i}$ to the point where $A_1$ is measured,
then under $V$ to the point where $A_2$ is measured, and finally under
$W$ to $\ket{\psi_f}$. Our strategy is to use two measuring devices
for measuring $A_1$ and $A_2$. Let the positions of their pointers be
denoted by $q_1$ and $q_2$, respectively. We couple them to the system
at successive times, measure $q_1$ and $q_2$, and then take the
product $q_1q_2$.
We begin, therefore, with the weak coupling of system and pointers,
with the usual von Neumann-type Hamiltonians for measuring $A_1$ and
$A_2$. The state of system and pointers after this coupling is:
\begin{equation} \label{ABinitial}
\Psi_{\cS\cM_1\cM_2}=e^{-ig p_2 A_2}Ve^{-ig p_1 A_1} U\ket{\psi_i}_{\cS}
\phi(q_1)\phi(q_2),
\end{equation}
where $p_1$ and $p_2$ are the two pointer momenta (the label
$\cS$ refers to the system and $\cM_1$, $\cM_2$ to the pointers). Here
$\phi(q)$ is the initial pointer distribution, and we have assumed,
for simplicity, that the two pointers have identical initial
distributions and equal coupling constants $g$. Post-selecting on
$\bra{\psi_f}$ gives the state of the pointers as
\begin{equation} \label{ABpointer-only}
\Psi_{\cM_1\cM_2}=\bra{\psi_f}We^{-ig p_2 A_2}Ve^{-ig p_1 A_1} U\ket{\psi_i} \phi(q_1)\phi(q_2).
\end{equation}
As $g$ is small, we can approximate the state as:
\begin{equation} \label{ABexpansion}
\Psi_{\cM_1\cM_2}=\bra{\psi_f}\left(W(1 -igp_2A_2 -\frac{g^2}{2}p_2^2A_2^2 + \ldots)V
(1 -igp_1A_1 -\frac{g^2}{2}p_1^2A_1^2 + \ldots)U\right)\ket{\psi_i} \phi(q_1)\phi(q_2).
\end{equation}
Putting $p=-i\partial/\partial q$, we get
\begin{align}\label{ABstate}
\Psi_{\cM_1\cM_2} ={}& F\,\big[\phi(q_1)\phi(q_2)-g(A_1)_w\phi^\prime(q_1)\phi(q_2)-g(A_2)_w\phi(q_1)\phi^\prime(q_2)
+\frac{g^2}{2}(A_1^2)_w\phi^{\prime\prime}(q_1)\phi(q_2)\\
&+\frac{g^2}{2}(A_2^2)_w\phi(q_1)\phi^{\prime\prime}(q_2)+g^2(A_2,A_1)_w\phi^\prime(q_1)\phi^\prime(q_2)+O(g^3)\big]\nonumber
\end{align}
where $F=\bra{\psi_f}WVU\ket{\psi_i}$,
$(A_1)_w=\bra{\psi_f}WVA_1U\ket{\psi_i}/F$,
$(A_1^2)_w=\bra{\psi_f}WVA_1^2U\ket{\psi_i}/F$,
$(A_2)_w=\bra{\psi_f}WA_2VU\ket{\psi_i}/F$,
$(A_2^2)_w=\bra{\psi_f}WA_2^2VU\ket{\psi_i}/F$ and $(A_2,A_1)_w$ is defined by
\begin{equation} \label{ABweakvalue}
(A_2,A_1)_w=\frac{\bra{\psi_f}WA_2VA_1U\ket{\psi_i}}{\bra{\psi_f}WVU\ket{\psi_i}}.
\end{equation}
Following measurement of $q_1$ and $q_2$, the expected value of their
product is given by
\begin{equation} \label{product-formula} \langle q_1q_2 \rangle = \frac{\int
q_1q_2|\Psi_{\cM_1\cM_2}|^2 dq_1\,dq_2}{\int |\Psi_{\cM_1\cM_2}|^2 dq_1\,dq_2}. \end{equation}
For simplicity, let us make the following assumption (we will discuss
the general case later):\\
\newline
{\bf Assumption A}: {\em The initial pointer distribution $\phi$ is
real-valued, and its mean is zero, i.e. $\int q \phi^2(q)dq=0$}.
\newline
\newline
We also assume, without loss of generality, that $\phi$ is normalised
so that $\int \phi^2=1$. With these assumptions, all the terms in
(\ref{product-formula}) of order 0 and 1 in $g$ vanish, and we are
left with
\begin{align}\label{qAqB}
\langle q_1q_2 \rangle=g^2 \mbox{\big [}
(A_2,A_1)_w+\overline{(A_2,A_1)}_w+\overline{(A_1)}_w(A_2)_w+(A_1)_w\overline{(A_2)}_w \mbox{\big ]} \left(\int q\phi(q)\phi^\prime(q)dq\right)^2,
\end{align}
where bars denote complex conjugates. Integration by parts implies
$\int q \phi(q) \phi^\prime(q)dq=-\frac{1}{2}$, so we get the final
result
\begin{equation} \label{ABmean} \langle q_1q_2 \rangle=\frac{g^2}{2}\ Re \left[
(A_2,A_1)_w+(A_1)_w\overline{(A_2)}_w \right]. \end{equation}
Here $(A_2,A_1)_w$ is the sequential weak value given by
(\ref{ABweakvalue}); note the reverse order of operators, to fit with
the convention of operating on the left.
\section{The sequential weak value}\label{sequential-weak-value}
In the section above we considered two measurements -- a measurement
of $A_1$ at time $t_1$ and of $A_2$ at $t_2$ -- and we looked at the
product of the outcomes $q_1q_2$ in the limit when the coupling of the
measuring devices with the measured system was weak. This procedure
was motivated by our example of the double interferometer: we wanted
to check whether the photon followed a given path, say the path that
goes along $C$ in the first interferometer and then along $E$ in the
second interferometer. In that case the variables of interest are
$P_C$, the projector on path C and $P_E$, the projector on path
$E$. When the photon follows this path, the value of the
{\it product} of these projectors is $1$ while in all other situations
the product is $0$. We wanted to see what the behavior of the photon
was when the measurements did not disturb it significantly.
Since $q_1$ measures $A_1$ and $q_2$ measures $A_2$, it seems obvious
that the quantity that represents the product of the two observables
is $\langle q_1q_2 \rangle$ given in (\ref{qAqB}) above. However, the
situation is more subtle, as we show below.
Consider the simpler case of two {\it commuting} operators $A_1$ and
$A_2$, and suppose we are interested in the value of the product
$A_2A_1$ at some time $t$. (Note that we are now talking about operators at
one given time, not at two different times.) We can measure this
product in two different ways. First, we can measure the product
directly, by coupling a measuring device directly to the product via
the interaction Hamiltonian $H=gpA_2A_1$. When we make the coupling
weaker, we find that the pointer indicates the value
\begin{align}
\langle
q\rangle=g\,Re\,(A_2A_1)_w =g\,Re\,
\frac{\bra{\psi_f}A_2A_1\ket{\psi_i}}{\braket{\psi_f}{\psi_i}}.
\end{align}
This is straightforward: it is simply the weak value of the operator
$A_2A_1$. On the other hand, we could attempt to measure the product
in the same way that we measured the sequential product. That is, we
can use two measuring devices with pointer position variables $q_1$
and $q_2$, couple the first measuring device to $A_1$ and the second
to $A_2$, and then look at the product $q_1q_2$. The latter method was
proposed by Resch and Steinberg \cite{RS04} for the
simultaneous measurement of two operators. They showed that in this case
\begin{align} \label{RSmean}
\langle q_1q_2 \rangle=\frac{g^2}{2}\ Re\
\left[(A_1A_2)_w+(A_1)_w\overline{(A_2)}_w \right].
\end{align}
We see that the value indicated by $ \langle q_1q_2 \rangle$ is {\it
not} equal to the weak value of the product, but contains a
supplementary term, $Re(A_1)_w\overline{(A_2)}_w $. In other words,
although we expected the two methods to be equivalent, it is not the
case. To obtain the true weak value of the product we must subtract
this second term. This second term is an artifact of the method of
using two separate measuring devices rather than coupling one
measuring device directly to the product operator.
In the case of sequential measurement there is no product operator to
start with, for we are interested in the product of the values of
operators at two different times. Hence the first method, of coupling
directly to the product operator, makes no sense, and we must use two
independent couplings. In order to obtain the quantity of interest,
i.e. the quantity that is relevant to situations such as the double
interferometer of Section \ref{new}, we must subtract the term
$Re(A_1)_w\overline{(A_2)}_w $ from (\ref{ABmean}). We thus conclude
that the quantity of interest is the sequential weak value given in
(\ref{ABweakvalue}).
\section{General sequential weak measurement}\label{general}
Sequential weak measurement can be easily extended to $n$ measurements
of Hermitian operators $A_i$ with intervening unitary evolution steps
$U_i$. The weak values are given by
\begin{align} \label{sequential-value}
(A_n, \ldots ,A_1)_w=\frac{\bra{\psi_f}U_{n+1}A_nU_n \ldots A_1U_1\ket{\psi_i}}{\bra{\psi_f}U_{n+1}U_n \ldots U_1\ket{\psi_i}},
\end{align}
and the expected values $\langle q_1q_2 \ldots q_n \rangle$ can be
expressed in terms of these weak values. For example, with
Assumption A
\begin{equation} \label{123mean}
\langle q_1q_2q_3 \rangle=\frac{g^3}{4} \ Re \left[
(A_3,A_2,A_1)_w+(A_2,A_1)_w\overline{(A_3)}_w+(A_3,A_1)_w\overline{(A_2)}_w+(A_3,A_2)_w\overline{(A_1)}_w \right],
\end{equation}
and the case of general $n$ is given in the Appendix. Similarly, we
can express expected values for products of momenta in terms of the
weak values (see Appendix). For instance
\begin{align} \label{special}
\langle p_1p_2 \rangle = 2(gv)^2Re \left[-(A_2,A_1)_w+(A_1)_w\overline{(A_2)}_w \right].
\end{align}
Mixed products of positions and momenta give similar formulae. For instance
\begin{align} \label{mixed}
\langle q_1p_2 \rangle=-g^2v \ Im
\left[(A_2,A_1)_w+\overline{(A_1)}_w(A_2)_w \right].
\end{align}
The foregoing examples illustrate a general pattern, which is that
expectations of products of $p$'s and $q$'s depend on the real part of
sequential weak values if there is an even number of $p$'s in the
product and on the imaginary part if there is an odd number of $p$'s.
The sequential weak values satisfy the following rules:
1) {\bf Linearity in each variable separately:}
\[
(A_n, \ldots , A_i , \ldots , A_1)_w+(A_n, \ldots , A_i^\prime , \ldots , A_1)_w=(A_n, \ldots , (A_i+A_i^\prime) , \ldots , A_1)_w,
\]
for any $1 \le i \le n$.
2) {\bf Agreement with strong measurement:}
Suppose that, with preselection by $\ket{\psi_i}$ and post-selection
by $\ket{\psi_f}$, strong measurements of $A_1$, $A_2$, $\ldots$ ,
$A_n$ always give the same outcomes $a_1, a_2, \ldots, a_n$; then
$(A_n, \ldots, A_1)_w=a_1a_2 \ldots a_n$.
3) {\bf Marginals:} Inserting the identity operator $I$ at any
location $i$ leaves the weak value unchanged:
\[
(A_n, \ldots A_{i+1}, I, A_{i-1}, \ldots , A_1)_w=(A_n, \ldots
A_{i+1}, A_{i-1}, \ldots , A_1)_w.
\]
We can illustrate some of these rules with the double interferometer
experiment (figure \ref{weak2}). The measurements we consider are
projectors that detect the presence of a photon on various edges; for
instance, the projector $P_B$ indicates whether a photon is present on
the edge $B$. For simplicity we write $B_w$ for the weak value
$(P_B)_w$, etc., and we use the same convention for sequential weak
values. Then using (\ref{fullAAV}) we find $C_w=1$, $B_w=0$, $E_w=1$
and $F_w=0$. Using (\ref{ABweakvalue}) we find $(E,B)_w=1/2$,
$(F,B)_w=-1/2$, $(E,C)_w=1/2$ and $(F,C)_w=1/2$. Since $P_E+P_F=I$,
rule 1) implies $(E,B)_w+(F,B)_w=(I,B)_w$, and then rule 3) implies
$(I,B)_w=B_w$. Thus we expect $(E,B)_w+(F,B)_w=B_w$, which holds if we
substitute the values above. Similarly $(E,C)_w+(F,C)_w=1/2+1/2=C_w$,
and so on. As for rule 2), we have seen (Section \ref{new}) that
strong measurement of $P_C$ and $P_E$ yields 1, so we expect the weak
values to be the same, as is the case.
There is a further rule that applies when one of the operators being
measured is a projector. We illustrate it with the double
interferometer. We can write
\begin{align}\label{ratio-rule}
\frac{(E,C)_w}{(F,C)_w}=\frac{\bra{D}U_3P_EU_2\ket{C}\
\bra{C}U_1\ket{A}\ }{\bra{D}U_3P_FU_2\ket{C}\ \bra{C}U_1\ket{A}\
}=\frac{\bra{D}U_3P_EU_2\ket{C}}{\bra{D}U_3P_FU_2\ket{C}}=\frac{E_w}{F_w}.
\end{align}
Here $E_w$ and $F_w$ in the final ratio are calculated assuming that
$\ket{\psi_i}=\ket{C}$, in other words, as though we were calculating
weak values for the second interferometer treated separately from the
rest of the system, with initial state $\ket{C}$ and post-selection by
$\ket{D}$ (Figure \ref{small}). If we only knew the
single-measurement weak values $E_w$, $F_w$ and $C_w$, we could
calculate $(E,C)_w$ and $(F,C)_w$ using this rule and the relationship
$(E,C)_w+(F,C)_w=C_w$ derived above.
\begin{figure}[hbtp]
\centerline{\epsfig{file=small.eps,width=0.4\textwidth}}
\caption{The double interferometer restricted to its second
interferometer. According to (\ref{ratio-rule}), the ratio of the weak
values $E_w/F_w$ in the second interferometer, with photons injected
along $C$, is the same as the ratio of the sequential weak values
$(E,C)_w/(F,C)_w$ in the double interferometer with photons injected
along $A$.
\label{small}}
\end{figure}
\section{The meaning of weak values}\label{meaning:seq}
Consider some experiment in which we inject some kind of particle and
weakly measure the projector onto some location $X$. Suppose we
collect some large number $N$ of runs of the experiment that satisfy
the post-selection criterion. We interpret the fact that the projector
at $X$ has weak value $X_w$ to mean that, for any appropriate physical
property we test, due for instance to the charge, gravitational field,
etc. of the particle, it is as though $NX_w$ particles (up to a
binomial distribution error) passed along $X$. Thus in the double
interferometer experiment we expect all physical tests to give
outcomes appropriate to there being, in all $N$ runs of the
experiment, a total of $N_E=NE_w=N$ photons passing along $E$,
$N_{CE}=N/2$ photons passing along $C$ then $E$, and so on.
Can we justify the foregoing interpretation of weak values? For weak
measurements of a single operator, there is a body of work showing
that weak values, even when they lie in an unexpected range, can be
treated as though they were the actual values in the underlying
physical theory and will then yield correct predictions. Examples of
this include weakly measured negative kinetic energies when a particle
is in a classically forbidden region \cite{RAPV95}, and weakly
measured faster-than-light velocities that are associated with
Cerenkov radiation \cite{RA02}. If a measure is entirely consistent
with physics in this fashion, then we are entitled to say that it is
telling us a true physical fact. For sequential weak values, we can
make a similar argument. The physical meaning of sequential weak
values needs to be explored in many physical situations to give the
kind of justification that single weak values enjoy. However, the
internal consistency is already clear from the double interferometer
example, and, more generally, from the rules in Section \ref{general}.
\section{Broadening the concept: weak interactions} \label{broad}
So far, we have considered ideal weak measurements, in which the
pointer distribution is real and has zero mean (Assumption A). If
we drop these assumptions, we find in place of (\ref{q-average})
that
\begin{align}\label{complex-version}
\langle q \rangle=\mu+g(Re[A_w]+Im[A_w]y),
\end{align}
where $y=\int \bar \phi(pq+qp)\phi dq - 2\mu \nu$, with $\mu=\int \bar \phi
q \phi dq$, $\nu=\int \bar \phi p \phi dq$.
The expectation $\langle r_1r_2 \ldots r_n\rangle$ for a general
initial pointer distribution, where each $r_i$ is either $q_i$ or
$p_i$, is a very complicated expression, but, so far as the system
goes, depends only on the real and imaginary parts of sequential weak
values up to $(A_n,\ldots A_1)_w$. Thus we can write
\begin{align}\label{poly-form}
\langle r_1r_2 \ldots r_n \rangle=\Phi(Re(A_n,\ldots A_1)_w,Im(A_n,\ldots A_1)_w, \ldots ,Re(A_n)_w,Im(A_n)_w, \ldots , Re(A_1)_w,Im(A_1)_w),
\end{align}
for some polynomial function $\Phi$. The coefficients in $\Phi$
are themselves polynomials in expectations $\int \bar f
\gamma(p_i,q_i)f dq$ for polynomials $\gamma$, as we see in the
case of equation (\ref{complex-version}), where $y$ has this form.
In the next section, we shall want to consider the most general
possible type of {\em weak interaction} which allows any sort of
(suitably weak) coupling between the system and an ancilla followed by
any further evolution or measurement of the ancilla alone (the pointer
in our previous discussion and its von Neumann measurement interaction
$gpA$ will be a special case of such an ancilla and weak
interaction). Our notion of general weak interaction is the following:
Consider the system and ancilla initially in product state
$\ket{\psi_i}\ket{\xi}$. Let $H_{\rm S,anc}$ be any Hamiltonian of the
joint system, and $g$ a coupling constant. For a single interaction
event, and to first order in $g$, the state becomes
\begin{align}\label{eq1}
(I-igH_{\rm S,anc})\ket{\psi_i}\ket{\xi}.
\end{align}
Any joint Hamiltonian may be expressed as a sum of products of
individual Hamiltonians
\begin{align}
H_{\rm S,anc}= \sum_k H^k_{\rm S} \otimes H^k_{\rm anc}.
\end{align}
Post-selecting the system state in equation (\ref{eq1}) with
$\ket{\psi_f}$ gives
\begin{align}
\Psi_{\rm anc}=\braket{\psi_f}{\psi_i}[I_{\rm anc}-ig\sum_k(H^k_{\rm
S})_wH^k_{\rm anc}]\ket{\xi};
\end{align}
So the system Hamiltonians $H^k_{\rm S}$ have been effectively
replaced by their weak values $(H^k_{\rm S})_w$. The important point
here is that all subsequent manipulations of the ancilla will depend
on the pre- and post-selected system only through {\em weak values} of
suitably chosen observables. A similar result clearly holds for any
sequential weak interactions and suitably associated sequential weak
values, and also for terms of any higher order in $g$.
As a simple illustrative example, suppose that the ancilla is the
pointer system of a von Neumann measurement interaction with
Assumption A in force, and that this same pointer is weakly coupled
twice for the sequential measurement of both $A_1$ and $A_2$. If this
pointer has position $q$ and momentum $p$, the pointer state after
post-selection is
\begin{align}
\Psi_{\cM}&=\bra{\psi_f} \left(U_3e^{-igpA_2}U_2e^{-igpA_1}U_1\right) \ket{\psi_i} \phi(q),
\end{align}
yielding
\[
\langle q \rangle=g\ Re \ \left[(A_1)_w+(A_2)_w \right].
\]
The effect in this instance is therefore the same as adding the
individual post-measurement results, and it depends on the system
only through associated weak values.
\section{Counterfactuality and weak measurement}
Counterfactual computation \cite{Jozsa98,MitJozsa} provides a general
framework for looking at counterfactual phenomena, including
interaction-free measurement as a special case. We consider arbitrary
protocols, at various points of which a quantum computer can be
inserted. The computer has a switch qubit (with $\ket{0}$=off and
$\ket{1}$=on) and an output qubit. A special case of this formalism is
where the protocol is represented by an optical circuit, and a computer
insertion means that the computer (or a copy of it) is placed in some
path of the circuit and is switched on by a photon passing along that
path.
We assume that the computer is programmed ready to perform a
computational task with answer $0$ or $1$ which will be written into
the output qubit if the switch is turned on. In addition to the switch
and output qubits, the protocol will in general have additional
qubits, and will involve some measurements. We say that an outcome of
these measurements {\em determines the computer output} if that
outcome only occurs when the computer output has a specific value,
$\ket{0}$ or $\ket{1}$. Such an outcome is said to be counterfactual
if its occurrence also implies that the computer was never switched
on, i.e. its switch was never set to $\ket{1}$, during the protocol.
To make this precise, note first that one can always produce an
equivalent protocol in which the state is entangled with extra qubits
and the measurement deferred to the end of the protocol. Thus the
protocol can be assumed to consist of a period of unitary evolution
followed by a measurement, which can be assumed (again by adding extra
qubits) to be a projective measurement. Let $\ket{\psi_i}$ be the
initial state of the protocol, and let $\ket{\psi_f}$ be a measurement
outcome that determines some specific computer output, in the sense
defined above. Suppose the computer is inserted $n$ times. Let $\cF$
(for ``oFf'') denote the projection $\ketbra{0}$ onto the off value of
the computer switch and $\cN$ (for ``oN'') denote the complementary
projector $\ketbra{1}$, and let $\xi$ be one of the $2^n$ possible
strings of $\cF$'s or $\cN$'s of length $n$; we call this a {\em
history}. Let $U_i$ denote the unitary evolution in the protocol
between the $(i-1)$th and $i$th insertions of the computer.
\begin{definition}[Counterfactuality by histories \cite{MitJozsa}]
\label{histories}
The measurement outcome $\ket{\psi_f}$ is a {\em counterfactual
outcome} if
1) $\ket{\psi_f}$ determines the computer output.
2) The amplitude of any history $\xi$ containing an $\cN$ vanishes. In
other words, for all histories $\xi$ other than the all-$\cF$ history,
$\bra{\psi_f}U_{n+1}\xi_nU_n \ldots U_2\xi_1 U_1\ket{\psi_i}=0$.
\end{definition}
One may question whether this is the ``correct'' definition of a
notion of counterfactual computation or whether alternative
definitions might be convincingly plausible. Condition 1) is
uncontroversial but condition 2) might seem less immediately
compelling. It is evidently equivalent to obtaining a null result if
we carry out a strong non-demolition measurement of {\cal N} at each
computer insertion. However the disturbance that such a measurement
causes might lead one to question the suitability of this
condition. Indeed recently Hosten et al. \cite{Hosten06} proposed an
alternative definition of counterfactual computation that violates
condition 2) of Definition~\ref{histories} and sparked a controversy
\cite{MJ06} over the relative merits and validity of the two
notions. We will now develop some alternative characterisations of our
Definition~\ref{histories} in terms of {\em weak} measurements, thereby
addressing the disturbance issue. We will argue that these new
characterisations considerably strengthen the credibility of the
original definition as the ``correct'' one.
Let us therefore consider carrying out a weak measurement of $\cN$ at
each insertion. A non-zero weak value implies that there is a
detectable physical effect that can only occur if the computer is
switched on. Vaidman's treatment of the three-box paradox
\cite{Vaidman06} gives a good example of this reasoning.
\begin{figure}
\centerline{\epsfig{file=weakfigure6.eps,width=0.4\textwidth}}
\caption{The double interferometer of Figure \ref{weak2} treated as a
protocol with computer insertions (black rectangles) in paths $B$ and
$F$. If a photon passes down either of these paths, the computer runs.
\label{weak3}}
\end{figure}
Our two-interferometer example shows that it does not suffice to
consider the individual weak values at each insertion. For suppose the
computer is inserted in paths $B$ and $F$, as shown in Figure
\ref{weak3}. Then we have seen that the weak values $B_w$ and $F_w$
are zero, yet the sequential weak value $(F,B)_w$ is non-zero. The
non-vanishing of the sequential weak value implies that a photon
passes along {\em both} path $B$ and $F$, since there is a physical
effect that causes correlated deflections of pointers at both sites.
There is a subtlety here, because it could be argued that, because
sequential pairwise weak measurements give second-order effects in $g$
(see (\ref{ABmean})), we might detect a departure from zero in the weak
measurements for each operator individually, i.e. in the deflections
of the pointers at $B$ and $F$, if we looked at second or higher order
terms in $g$. However, if $A$ is any projector and $A_w=0$, then the
von Neumann interaction $e^{-igpA}$ reduces to $Ae^{-igp}+I-A$, which
is the identity to all orders in $g$ in the weak measurement
calculation. Thus we truly need to carry out the sequential weak
measurement here to identify the physical effect due to the photon.
In general, we need to consider all possible sequential weak
measurements to obtain an adequate test of counterfactuality. This is
why we must use weak rather than strong measurements. As we have seen
in Section \ref{sequential-weak-value}, there is no strong measurement
corresponding to sequential weak measurements.
We therefore propose the following:
\begin{definition}[Counterfactuality by weak values]\label{weak-values}
The measurement outcome $\ket{\psi_f}$ is a {\em counterfactual
outcome} if
1) $\ket{\psi_f}$ determines the computer output.
2) $(\cN_{i_k}, \cN_{i_{k-1}}, \ldots \cN_{i_1})_w=0$,
for any $1 \le i_1 < i_2 < \ldots < i_k \le n$, where $n$ is the number of insertions of the computer.
\end{definition}
By (\ref{sequential-value}), conditions 2) of Definitions \ref{histories} and
\ref{weak-values} are equivalent, using the fact that $\cF+\cN=1$
together with the linearity and marginal rules. For instance, with two
insertions of the computer, condition 2) of Definition \ref{histories}
amounts to $(\cN_1,\cN_2)_w=0$, $(\cF_1,\cN_2)_w=0$ and
$(\cN_1,\cF_2)_w=0$, and these imply $(\cN_1)_w=0$, $(\cN_2)_w=0$ and
$(\cN_1,\cN_2)_w=0$, which constitute condition 2) for Definition
\ref{weak-values}.
We can try to strengthen the requirements for counterfactuality by
demanding that a zero response is obtained for any conceivable
weak interaction, in the sense of the preceding section. In our
present application we must further restrict the weak interaction
to take place only if the switch has the property of being ``on'',
i.e. the interaction Hamiltonian must have the form $(\cN \otimes
I_{\rm anc})H_{\rm s, anc} (\cN \otimes I_{\rm anc})$. We say that
such an interaction is a weak interaction involving the projector
$\cN$. Since $\cN$ is a one-dimensional projector, this implies
that the interaction Hamiltonian has the form $\cN \otimes H_{\rm
anc}$. In a more general scenario the projector $\tilde \cN$ for
counterfactuality (analogous to the switch being ``on'') may have
rank larger than 1 and then the interaction Hamiltonian may have
the more general form $(\tilde \cN \otimes I_{\rm anc})M_{\rm s,
anc} (\tilde \cN \otimes I_{\rm anc})$ for any Hermitian $M$. For
example, the switch may be a photon with both path and
polarisation properties. Then a weak interaction restricted to its
presence on a path would correspond to a two-dimensional projector
on its polarisation state-space associated to that path.
\begin{definition}[Counterfactuality by general weak interactions]\label{universal}
The measurement outcome $\ket{\psi_f}$ is a {\em counterfactual
outcome} if
1) $\ket{\psi_f}$ determines the computer output.
2) Any possible weak interaction involving the projections $\cN_1,
\ldots \cN_n$ yields a null result.
\end{definition}
By a null result, we mean the same result that would be obtained
for $g=0$. It is not difficult to show that this apparently much
broader concept is in fact equivalent to Definition
\ref{weak-values}. In one direction, we know from the last section
that any expectation depends only on the sequential weak values,
involving the projectors $\cN_i$, so when these weak values vanish
we obtain a null result. In the other direction, we have only to
show that we can choose particular weak interactions whose null
results will imply the vanishing of all sequential weak values.
However, if we first obtain a null value of $\langle q_i \rangle$
and $\langle p_i \rangle$ for the standard von Neumann measurement
weak interaction for every $i$, then we know by (\ref{q-average})
and (\ref{p-average}) that both real and imaginary parts of all
the weak values $(\cN_i)_w$ are zero. Then by obtaining null
values of $\langle q_iq_j \rangle$ and $\langle p_iq_j \rangle$
for all $i < j$, we infer from (\ref{ABmean}) and (\ref{mixed})
that the real and imaginary parts of all $(\cN_j,\cN_i)_w$ are
zero. We continue this way, using the fact that expectations of
products of $p$'s and $q$'s with an even number of $p$'s depend on
the real part of sequential weak values, whereas those with an odd
number of $p$'s depend on their imaginary parts (see Appendix).
We have therefore proved:
\begin{theorem}
All three definitions, \ref{histories}, \ref{weak-values} and
\ref{universal}, are equivalent.
\end{theorem}
\section{Discussion}
Sequential weak values are a natural generalisation of the weak value
of a single measurement operator \cite{AAV88}. Resch and Steinberg's
simultaneous measurement of two operators \cite{RS04} gives the same
result in the special case where these operators commute, but it
does not address the case where we have a succession of measurements
with unitary evolution between them.
One can argue that both single and sequential weak measurements tell
us what the physical situation is. In the double interferometer, for
instance, $C_w=1$ really means that all the photons go via $C$, and
$(E,C)_w=1/2$ really means that approximately half the photons go via
$C$ followed by $E$. This is of course a matter of interpretation, and
may be disputed; but at least it seems to be true that weak values can
be fitted into the framework of physics without contradiction, and
give illuminating explanations of many phenomena.
Our application of weak measurement to counterfactuals does not depend
on the foregoing interpretation. The most straightforward part of our
claim is that, if a weakly coupled measuring device indicates a
displacement of pointers in some region of an apparatus, then one
cannot claim that the state of the system was unaltered in that
region; for example, in the case of an optical device, such a shift
would indicate that a photon was present. The importance of sequential
weak measurements in this context is illustrated by the double
interferometer (Figure \ref{weak2}). If two pointers are coupled to
the paths $B$ and $F$ in this apparatus, each pointer individually
will show no displacement on average after many runs of the
experiment. However, the product of the positions of the pointers will
show a shift. Thus the photon reveals its presence only when
information from both pointers is suitably combined.
The other part of our claim about counterfactuals can be summed up by
what we might call the principle of weak detectability:
{\em An event that cannot be detected by any possible weak
interaction does not take place.}
This means that we learn a fact $X$ about an event
counterfactually from a certain experiment if (1) the outcome of
the experiment implies $X$, and (2) no possible weak interaction
can detect the occurrence of this event during the experiment. It
seems as though part (2) might be hard to confirm, because there
is a great variety of possible weak interactions. However, this
condition proves to be equivalent to the vanishing of all
sequential weak values associated to the event in question, and
this will often be much easier to check.
Finally, we mention the striking fact that sequential weak values are
formally closely related to amplitudes. Consider the case where we
measure $n$ projectors $P_{X_1}, \ldots P_{X_n}$ that define a path
$\pi_x$ between the initial and post-selected states $\ket{\psi_i}$
and $\ket{\psi_f}$, respectively. We can write
\begin{align}
(P_{X_n}, \ldots , P_{X_1})_w= \frac{\bra{\psi_f}U_{n+1}\ket{X_n}\
\bra{X_n}U_n\ket{X_{n-1}} \ \ldots
\bra{X_1}U_1\ket{\psi_i}}{\bra{\psi_f}U_{n+1}U_n \ldots U_1\ket{\psi_i}}=\frac{\mathrm{Amplitude}(\pi_x)}{\sum_i \mathrm{Amplitude}(\pi_i)},
\end{align}
where $\pi_i$ runs over all paths between $\ket{\psi_i}$ and
$\ket{\psi_f}$. Nonetheless, weak values are like measurement results
rather than amplitudes! This way of looking at sequential weak values
suggests a close connection with path integrals that remains
to be explored.
\section{Calculation of general correlations}
With Assumption A, we show here that the general version of
(\ref{ABmean}) is
\begin{align} \label{sequential-product}
\langle q_1q_2 \ldots q_n \rangle=\frac{g^n}{2^{n-1}}\ Re \sum_{r
\ge s} \sum_{\bf i,j} (A_{i_r}, \ldots ,A_{i_1})_w\overline{(A_{j_s}, \ldots ,A_{j_1})}_w,
\end{align}
where the weak values in this formula are given by (\ref{sequential-value}).
In (\ref{sequential-product}) the sum is over all ordered indices
${\bf i}=(i_1, \ldots i_r)$ with $i_p < i_{p+1}$ for $1 \le p \le
r-1$, and ordered indices ${\bf j}=(j_1, \ldots j_s)$ that make up the
complement of ${\bf i}$ in the set of integers from $1$ to $n$,
i.e. that satisfy $(i_1, \ldots i_r)\cup (j_1, \ldots j_s)= (1,2,
\ldots n)$ and $(i_1, \ldots i_r)\cap (j_1, \ldots j_s)=
\emptyset$. We include the empty set $\emptyset$ as a possible set of
indices. In order not to count indices twice, we require $r \ge s$,
and when $r=s$ we require $i_1=1$.
For instance, with $n=2$, the possible indices are ${\bf i} =(1,2)$,
${\bf j} =\emptyset$; ${\bf i} =(1)$, ${\bf j} =(2)$, which yields
\begin{equation} \label{12mean}
\langle q_1q_2 \rangle=\frac{g^2}{2} \ Re \left[
(A_2,A_1)_w+(A_1)_w\overline{(A_2)}_w \right].
\end{equation}
This is just equation (\ref{ABmean}). For $n=3$ we have ${\bf i}
=(1,2,3)$, ${\bf j} =\emptyset$; ${\bf i} =(1,2)$, ${\bf j} =(3)$;
${\bf i} =(1,3)$, ${\bf j} =(2)$; ${\bf i} =(2,3)$, ${\bf j} =(1)$,
giving (\ref{123mean}). Equation (\ref{sequential-product}) is proved
in the same way as (\ref{ABmean}), the state of the $n$ pointers after
post-selection being:
\begin{align} \label{q-expansion} \Psi_{\cM_1 \ldots
\cM_n}&=\bra{\psi_f}\left(U_{n+1}e^{-igp_nA_n}U_n \ldots
U_2e^{-igp_1A_1}U_1\right)\ket{\psi_i} \phi(q_1) \ldots \phi(q_n),
\\\nonumber
&=\bra{\psi_f}\left(U_{n+1}\left(\phi(q_n)-gA_n\phi^\prime(q_n)+ \ldots
\right)U_n \ldots U_2\left(1-gA_1\phi^\prime(q_1) + \ldots
\right)U_1\right)\ket{\psi_i}, \\\nonumber &=
\bra{\psi_f}U_{n+1}U_n \ldots U_1\ket{\psi_i} \ \left(1 + g\sum_i
\frac{\phi^\prime(q_i)}{\phi(q_i)}(A_i)_w + g^2\sum_{i<j}
\frac{\phi^\prime(q_i)\phi^\prime(q_j)}{\phi(q_i)\phi(q_j)}(A_j,A_i)_w +
\ldots \right)\phi(q_1) \ldots \phi(q_n).\nonumber \end{align}
Assumption A implies that only the terms in $q_1q_2 \ldots q_n$ in
$|\Psi_{\cM_1 \ldots \cM_n}|^2$ need to be taken into account in
calculating
\[
\langle q_1q_2 \ldots q_n \rangle = \frac{\int q_1q_2 \ldots q_n
|\Psi_{\cM_1 \ldots \cM_n}|^2 dq_1 \ldots dq_n}{\int |\Psi_{\cM_1
\ldots \cM_n}|^2 dq_1 \ldots dq_n},
\]
and this leads to (\ref{sequential-product}).
We can also calculate $\langle p_1p_2 \ldots p_n \rangle$, the product
of the momenta of the pointers. To do this, it is convenient to move
to the momentum basis, replacing $\phi(q)$ by its Fourier transform
$\tilde \phi(p)$
and carrying out an expansion in the $p_i$:
\begin{align} \label{p-expansion}
\Psi_{\cM_1 \ldots \cM_n}&=\bra{\psi_f}\left(U_{n+1}e^{-igp_nA_n}U_n \ldots U_2e^{-igp_1A_1}U_1\right)\ket{\psi_i} \tilde \phi(p_1) \ldots
\tilde \phi(p_n), \\\nonumber
&=\bra{\psi_f}U_{n+1}U_n \ldots U_1\ket{\psi_i} \ \left( 1 -ig\sum_i
p_i(A_i)_w + (-ig)^2\sum_{i<j} p_ip_j(A_j,A_i)_w + \ldots
\right)\tilde \phi(p_1) \ldots \tilde \phi(p_n).\nonumber
\end{align}
Assumption A implies that only the terms in $p_1p_2 \ldots p_n$ in
$|\Psi_{\cM_1 \ldots \cM_n}|^2$ need be considered in calculating
\begin{align}
\langle p_1p_2 \ldots p_n \rangle= \frac{\int
\overline{\Psi_{\cM_1 \ldots \cM_n}} p_1 \ldots p_n \Psi_{\cM_1
\ldots \cM_n} dp_1 \ldots dp_n}{\int |\Psi_{\cM_1 \ldots \cM_n}|^2
dp_1 \ldots dp_n}.
\end{align}
It is simplest to treat the cases of $n$ even and odd separately. For
the even case we have
\begin{equation} \label{even-p-product} \langle p_1p_2 \ldots p_{2m}
\rangle=2(-1)^m (gv)^{2m} \ Re \sum_{r \ge s} \sum_{\bf
i,j} (-1)^r(A_{i_r}, \ldots ,A_{i_1})_w\overline{(A_{j_s}, \ldots
, A_{j_1})}_w, \end{equation}
and for the odd case:
\begin{align} \label{odd-p-product}
\langle p_1p_2 \ldots p_{2m+1}
\rangle=2(-1)^{m+1} (gv)^{2m+1} \ Im \sum_{r > s} \sum_{\bf
i,j} (-1)^r(A_{i_r}, \ldots ,A_{i_1})_w\overline{(A_{j_s}, \ldots ,A_{j_1})}_w,
\end{align}
where $v=\int p^2{\tilde \phi}^2(p)dp$.
The case of mixed products of positions and momenta are treated
similarly, and they depend only on the real or imaginary parts of the
sequential weak values given by (\ref{sequential-value}). For example,
to calculate $\langle q_1p_2 \rangle$ we express the first variable in
the position basis and the second in the momentum basis:
\[
\Psi_{\cM_1,\cM_2}=\bra{\psi_f}U_3U_2U_1\ket{\psi_i} \
\left( \phi(q_1)\tilde \phi(p_2)+g(A_1)_w\phi^\prime(q_1)\tilde \phi(p_2)-ig(A_2)_w\phi(q_1)p_2\tilde \phi(p_2)
+ig^2(A_2,A_1)_w\phi^\prime(q_1)p_2\tilde \phi(p_2) \right),
\]
which yields (\ref{mixed}). For these mixed products, since there
is a factor of $i$ for each $p$ in the product, we take the
imaginary part of weak values when there is an odd number of $p$'s
present and the real part otherwise.
Thus all possible expectations of products of position or momentum can
be obtained from the sequential weak values.
\begin{thebibliography}{15}
\expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi
\expandafter\ifx\csname bibnamefont\endcsname\relax
\def\bibnamefont#1{#1}\fi
\expandafter\ifx\csname bibfnamefont\endcsname\relax
\def\bibfnamefont#1{#1}\fi
\expandafter\ifx\csname citenamefont\endcsname\relax
\def\citenamefont#1{#1}\fi
\expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi
\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\providecommand{\bibinfo}[2]{#2}
\providecommand{\eprint}[2][]{\url{#2}}
\bibitem[{\citenamefont{Aharonov
et~al.}(1988{\natexlab{a}})\citenamefont{Aharonov, Albert, and
Vaidman}}]{AAV88}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Aharonov}},
\bibinfo{author}{\bibfnamefont{D.~Z.}~\bibnamefont{Albert}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Vaidman}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{60}},
\bibinfo{pages}{1351} (\bibinfo{year}{1988}{\natexlab{a}}).
\bibitem[{\citenamefont{Aharonov et~al.}(1991)\citenamefont{Aharonov, Botero,
Popescu, Reznik, and Tollaksen}}]{ABPRT01}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Aharonov}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Botero}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Popescu}},
\bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Reznik}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Tollaksen}}, in
\emph{\bibinfo{booktitle}{Proceedings of NATO ARW Mykonos 2000 Decoherence
and its implications in quantum computation and information transfer:
[proceedings of the NATO advanced research workshop, Mykonos, Greece,
25-30.06.2000]}}, edited by
\bibinfo{editor}{\bibfnamefont{A.}~\bibnamefont{Gonis}} \bibnamefont{and}
\bibinfo{editor}{\bibfnamefont{P.}~\bibnamefont{Turchi}}
(\bibinfo{publisher}{IOS Press}, \bibinfo{year}{1991}).
\bibitem[{\citenamefont{Aharonov and Rohrlich}(2005)}]{AharonovRohrlich05}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Aharonov}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Rohrlich}},
\emph{\bibinfo{title}{Quantum Paradoxes}} (\bibinfo{publisher}{Wiley-VCH},
\bibinfo{address}{Weinheim, Germany}, \bibinfo{year}{2005}).
\bibitem[{\citenamefont{Aharonov
et~al.}(1988{\natexlab{b}})\citenamefont{Aharonov, Albert, and
Vaidman}}]{Spin100}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Aharonov}},
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Albert}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Vaidman}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{60}},
\bibinfo{pages}{1351} (\bibinfo{year}{1988}{\natexlab{b}}).
\bibitem[{\citenamefont{Hardy}(1992)}]{Hardy92}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Hardy}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{68}},
\bibinfo{pages}{2981} (\bibinfo{year}{1992}).
\bibitem[{\citenamefont{Bl\"{a}si and Hardy}(1995)}]{BH95}
\bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Bl\"{a}si}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Hardy}},
\bibinfo{journal}{Phys. Lett. A} \textbf{\bibinfo{volume}{207}},
\bibinfo{pages}{119} (\bibinfo{year}{1995}).
\bibitem[{\citenamefont{Elitzur and Vaidman}(1993)}]{ElitzurVaidman93}
\bibinfo{author}{\bibfnamefont{A.~C.} \bibnamefont{Elitzur}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Vaidman}},
\bibinfo{journal}{Foundations of Physics} \textbf{\bibinfo{volume}{23}},
\bibinfo{pages}{987} (\bibinfo{year}{1993}).
\bibitem[{\citenamefont{Resch and Steinberg}(2004)}]{RS04}
\bibinfo{author}{\bibfnamefont{K.~J.} \bibnamefont{Resch}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{A.~M.} \bibnamefont{Steinberg}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{92}},
\bibinfo{pages}{130402} (\bibinfo{year}{2004}).
\bibitem[{\citenamefont{Rohrlich et~al.}(1995)\citenamefont{Rohrlich, Aharonov,
Popescu, and Vaidman}}]{RAPV95}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Rohrlich}},
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Aharonov}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Popescu}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Vaidman}},
\bibinfo{journal}{Ann. N.Y. Acad. Sci.} \textbf{\bibinfo{volume}{755}},
\bibinfo{pages}{394} (\bibinfo{year}{1995}).
\bibitem[{\citenamefont{Rohrlich and Aharonov}(2002)}]{RA02}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Rohrlich}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Aharonov}},
\bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{66}},
\bibinfo{pages}{042102} (\bibinfo{year}{2002}).
\bibitem[{\citenamefont{Jozsa}(1999)}]{Jozsa98}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Jozsa}},
\bibinfo{journal}{Chaos solitons fractals} \textbf{\bibinfo{volume}{10}},
\bibinfo{pages}{1657} (\bibinfo{year}{1999}).
\bibitem[{\citenamefont{Mitchison and Jozsa}(2001)}]{MitJozsa}
\bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Mitchison}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Jozsa}},
\bibinfo{journal}{Proc. R. Soc. Lond. A} \textbf{\bibinfo{volume}{457}},
\bibinfo{pages}{1175} (\bibinfo{year}{2001}).
\bibitem[{\citenamefont{Hosten et~al.}(2006)\citenamefont{Hosten, Rakher,
Barreiro, Peters, and Kwiat}}]{Hosten06}
\bibinfo{author}{\bibfnamefont{O.}~\bibnamefont{Hosten}},
\bibinfo{author}{\bibfnamefont{M.~T.} \bibnamefont{Rakher}},
\bibinfo{author}{\bibfnamefont{J.~T.}~\bibnamefont{Barreiro}},
\bibinfo{author}{\bibfnamefont{N.~A.} \bibnamefont{Peters}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{P.~G.}
\bibnamefont{Kwiat}}, \bibinfo{journal}{Nature}
\textbf{\bibinfo{volume}{439}}, \bibinfo{pages}{949} (\bibinfo{year}{2006}).
\bibitem[{\citenamefont{Mitchison and Jozsa}(2006)}]{MJ06}
\bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Mitchison}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Jozsa}}
(\bibinfo{year}{2006}), \eprint{quant-ph/0606092}.
\bibitem[{\citenamefont{Vaidman}(2006)}]{Vaidman06}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Vaidman}}
(\bibinfo{year}{2006}), \eprint{quant-ph/0610174}.
\end{thebibliography}
\end{document}
|
\begin{document}
\title{The optimal bound of quantum erasure with limited means}
\author{Filippo M. Miatto$^{1}$, Kevin Pich\'e$^{1}$, Thomas Brougham$^{2}$, and Robert W.~Boyd$^{1,2,3}$}
\affiliation{$^1$Dept.~of Physics, University of Ottawa, Ottawa, Canada}
\affiliation{$^2$School of Physics and Astronomy, University of Glasgow, Glasgow (UK)}
\affiliation{$^3$Institute of Optics, University of Rochester, Rochester, USA}
\date{\today}
\begin{abstract}
In practical applications of quantum information science, quantum systems can have non-negligible interactions with the environment, and this generally degrades the power of quantum protocols as it introduces noise. Counteracting this by appropriately measuring the environment (and therefore projecting its state) would require access to all the necessary degrees of freedom, which in practice can be far too hard to achieve.
To better understand one's limitations, we calculate the upper bound of optimal quantum erasure (i.e. the highest recoverable visibility, or ``coherence''), when erasure is realistically limited to an accessible subspace of the whole environment. In the particular case of a two-dimensional accessible environment, the bound is given by the sub-fidelity of two particular states of the \emph{inaccessible} environment, which opens a new window into understanding the connection between correlated systems. We also provide an analytical solution for a three-dimensional accessible environment. This result also provides an interesting operational interpretation of sub-fidelity.
We end with a statistical analysis of the expected visibility of an optimally erased random state and we find that \emph{i}) if one picks a random pure state of 2 qubits, there is an optimal measurement that allows one to distill a 1-qubit state with almost 90\% visibility and \emph{ii}) if one picks a random pure state of 2 qubits in an inaccessible environment, there is an optimal measurement that allows one to distill a 1-qubit state with almost twice its initial visibility.
\end{abstract}
\maketitle
\section{Introduction}
Complementarity is one of the jewels of quantum mechanics. It was first introduced by Bohr \cite{Bohr1}, as a consequence of the uncertainty principle. However, it took several decades to establish that its origin is really due to quantum correlations \cite{Scully1991,Wiseman1995,Durr1998}. The principle of complementarity gained its modern form through the works of several authors \cite{WZ1972,GreenbergerYasin,Scully1989,Sanders1989,Durr1998}.
In particular, Englert gave a very lucid exposition of the connection between complementarity and the working principles of a two-way interferometer \cite{Englert1996}. As the state of a quantum system in a two-way interferometer can be described as a simple qubit, an effective way of studying complementarity is through our familiarity with the Bloch sphere. This intuition is the key to also understand quantum erasure, i.e. the ability of restoring coherence in a system by appropriately projecting another system that is correlated to it and that is preventing the occurrence of interference \cite{Scully1982,Kwiat1992,Herzog1995}.
We now describe the situation that we are considering and the concepts that we will adopt. A state that lives in a 2-dimensional Hilbert space can be described in the language of quantum information as a qubit. Due to the possible embedding of this Hilbert space in a larger one (which in our choice of language represents the ``environment''), correlations of both classical and quantum nature can exist between the two. In this situation, the reduced state of the qubit is not pure, i.e. it has a certain degree of mixedness.
Complete knowledge of a quantum state implies that such state is pure and in fact, a possible strategy to restore coherence is to gather the necessary knowledge from the environment by way of a suitable measurement. When the environment is measured, the qubit is projected on the state that is relative to the outcome of the measurement, and for an optimal choice of measurement, the projected state can be pure. However, there can be different choices of optimal measurements, that give rise to different final results. In particular, if we fix a preferred basis in the 2-dimensional space of the qubit, we can pick a measurement that maximizes the degree of superposition of the two basis vectors or one that maximizes the amplitude of one basis vector over the other. These two measurement choices are both optimal in the sense that they maximize some criterion, and we will refer to them as \emph{quantum erasure} and the \emph{which-alternative} measurements, respectively \cite{Englert2000}.
In the Bloch sphere picture, where the preferred basis is represented by the two poles, a quantum erasure measurement on the environment projects the qubit states towards the equator, while the which-alternative measurement projects the qubit states towards one of the poles.
\begin{figure}
\caption{\label{fig1}}% NOTE(review): caption text appears lost in extraction; duplicate \label removed
\end{figure}
\begin{figure}
\caption{\label{alicebob}}% NOTE(review): caption text appears lost in extraction; duplicate \label removed
\end{figure}
From this introduction it would seem rather feasible to control the qubit by way of measurements on the environment, but this operation is limited by two factors. The first is of physical nature: we can control the qubit to a degree that depends on how strong the correlations with the environment are. As a limiting case, if the two systems were independent we would have no control over the qubit by manipulating the environment. The second is of technical nature: in order to have the highest degree of control allowed by the strength of the correlations, one would need to be able to perform the \emph{desired} measurement, i.e. to project on the desired axes of the Hilbert space of the environment, which implies the ability of manipulating all the necessary degrees of freedom. This can be very hard to achieve, and in the case of an environment with too many degrees of freedom it is practically impossible.
In this work, we want to quantify the degree of control in the limiting case of minimal access to the environment: we consider a 1-qubit state embedded in an arbitrary-dimensional environment, and we split the Hilbert space of the environment into an accessible part $\mathcal H_B$ and an inaccessible one $\mathcal H_C$ (as we will be referring to these three systems rather often, we will indicate them by $A$, $B$ and $C$ throughout the paper). Then, we quantify the highest average visibility that one can retrieve on the qubit $A$ by appropriately measuring the accessible environment $B$. Geometrically, it is the largest \emph{average} distance of all the outcomes from the line connecting the poles of the Bloch sphere, see Fig.~\ref{fig1}. We find a surprising analytical answer to this problem, in terms of the sub-fidelity of conditional states of the inaccessible environment $C$, Eq.~\eqref{result}.
\section{Quantifying visibility and predictability}
In our analysis we do not allow for selective measurements, the reason is that selective measurements (i.e. postselection) allow one to achieve a considerable flexibility at the expense of probability of success, whereas we are interested in ``one-shot'' measurements, which cannot rely on postselection. These would occur, for instance, when a measurement takes place too far into a quantum algorithm and it would be too inconvenient to start over, or if two parties cannot communicate, as in a remote state preparation scheme. In general, if we had complete access to the environment, the strength of correlations between the qubit and the environment would be the only limitation on our ability to indirectly prepare the qubit. However, if we could perform a measurement over and over until the desired outcome occurs (say, if we had an inexpensive source of identically prepared states), we would be able to eventually prepare the qubit regardless of the strength of the correlations (as long as they are not zero). On the other hand, if selective measurements were not allowed, the states that the qubit could reach after a measurement of the environment would be restricted by the strength of the correlations.
Regarding as ``environment'' the whole set of quantum systems that are correlated to the qubit (so that the state of qubit+environment is pure), we now prove that a successful measurement of a rank-1 projector in the whole environment space projects the qubit in a conditional pure state which can reach any point in the Bloch sphere (the price to pay is a probability of success which in general is less than 1):
start with the joint qubit+environment state
\begin{align}
\label{fullstate}
|\rho\rangle=\sqrt{p_0}|0,e_0\rangle+\sqrt{p_1}e^{i\phi}|1,e_1\rangle,
\end{align}
where the qubit is in the computational basis.
If $|\rho\rangle$ is non-separable, it must hold that $|e_0\rangle\neq|e_1\rangle$, so it is possible to write $|e_0\rangle=\alpha|e_1\rangle+\beta|e_1^\bot\rangle$ for an appropriate choice of $|e_1^\bot\rangle$ orthogonal to $|e_1\rangle$ which implies $|e_1\rangle=\alpha^*|e_0\rangle+\beta e^{i\theta}|e_0^\bot\rangle$, with $|e_0^\bot\rangle\neq|e_1^\bot\rangle$. Consider then a successful measurement of the environment in the state $a|e_0^\bot\rangle+b|e_1^\bot\rangle$. This projects the qubit in the conditional state $\frac{\beta}{\sqrt{P_s}}(\sqrt{p_0}b^*|0\rangle+\sqrt{p_1}a^* e^{i(\theta+\phi)}|1\rangle)$ with a success probability $P_s=(p_0|b|^2+p_1|a|^2)|\beta|^2$. As $|0\rangle$ and $|1\rangle$ are orthogonal and as $a$ and $b$ can be chosen freely, one can reach any pure state on the surface of the Bloch sphere. An immediate generalization allows one to conclude that using elements of a probability operator measure (POM) (also known as positive operator-valued measure, POVM), one can reach any state also in the interior of the Bloch sphere, and the proof is done.
The freedom to indirectly prepare a state is quite different for a non-selective measurement, i.e. one that does not allow one to wait until the desired result appears. In this case, it is no longer possible to obtain any desired conditional state. At this point we need to introduce the concepts of visibility and predictability. We present here only the necessary introduction to these concepts, for an in-depth description we refer to Bergou and Englert's work \cite{Englert2000}.
Consider a POM, composed of a number $N$ of probability operators $\hat \pi_k$, each corresponding to one of the possible outcomes of a measurement on the environment. We recall that these operators are hermitian, positive, they sum to the identity, but need not be mutually orthogonal. To each measurement outcome corresponds a conditional state of the qubit:
\begin{align}
\hat\rho_k=\frac{\mathrm{Tr_E}[(\mathbb{\hat 1}\otimes\hat\pi_k)\hat\rho]}{p_k},
\end{align}
where $p_k=\mathrm{Tr}[(\mathbb{\hat 1}\otimes\hat\pi_k)\hat\rho]$ is the probability of the $k$-th outcome and the partial trace is calculated over the environment. This state is at some location in the Bloch sphere, at a distance $\mathcal V_k$ from the N-S line, and at a distance $\mathcal P_k$ from the equatorial plane, see Fig.~\ref{definitions}. In the language of the Bloch vector $\mathbf{r}=(x,y,z)$, i.e. if one writes the state in the form $\hat\rho=\frac{1}{2}(\mathbb{\hat 1}+\mathbf{r}\cdot\boldsymbol{\hat\sigma})$, one could write $\mathcal{V}=\sqrt{x^2+y^2}$ and $\mathcal{P}=|z|$. These two distances are called ``visibility'' and ``predictability'' and clearly depend on the choice of basis: if we considered two different opposite points on the surface as the new North and South poles, the distance of the state $\hat\rho_k$ from the new N-S line and the new equatorial plane would change (the only exception being for the maximally mixed state at the center of the sphere, for which $\mathcal V=\mathcal P=0$ regardless of the choice of basis). The visibility $\mathcal V_k$ is a measure of the degree of coherence of the two alternatives that define the North and South poles. The predictability $\mathcal P_k$ is a measure of our ability to predict which of the two will occur upon a measurement of the qubit in that specific basis. Our POM identifies $N$ conditional qubit states, each of which displays its own visibility and predictability. One can then calculate the statistical average of these quantities:
\begin{align}
\bar{\mathcal V}&=\sum_kp_k\mathcal {V}_k=\sum_k \left|\mathrm{Tr}\left[((\hat\sigma_x+i\hat\sigma_y)\otimes\hat\pi_k)\hat\rho\right]\right|\\
\bar{\mathcal P}&=\sum_kp_k\mathcal {P}_k=\sum_k \left|\mathrm{Tr}\left[(\hat \sigma_z\otimes\hat\pi_k)\hat\rho\right]\right|,
\end{align}
where the sums run from 1 to $N$ and where the absolute value of $\hat\sigma_x+i\hat\sigma_y$ measures the distance from the N-S line and the absolute value of $\hat\sigma_z$ measures the distance from the equatorial plane. We stress that $\bar{\mathcal V}$ and $\bar{\mathcal P}$ are not the expectation values of some operators, because of the absolute value which wraps the trace. There is also a deeper reason why there is no observable which corresponds to these quantities, and it is that if it existed, one could violate the no-signalling principle.
\begin{figure}
\caption{\label{definitions}}% NOTE(review): caption text appears lost in extraction; duplicate \label removed
\end{figure}
It is very simple to prove that the values of $\bar{\mathcal V}$ and $\bar{\mathcal P}$ that a POM allows us to infer on the qubit are going to be greater or equal than those obtained by ignoring the environment:
\begin{align}
\bar{\mathcal P}&=\sum_k \left|\mathrm{Tr}\left[(\hat \sigma_z\otimes\hat\pi_k)\hat\rho\right]\right|\geq\left| \sum_k\mathrm{Tr}\left[(\hat \sigma_z\otimes\hat\pi_k)\hat\rho\right]\right|=\mathcal P,
\end{align}
and analogously for the visibility. Here we used the fact that $\sum_k\hat\pi_k=\mathbb{\hat 1}$. Therefore, $\mathcal V$ is the lower bound of the average visibility and it is achieved when ignoring the environment. Similarly, $\mathcal P$ is the lower bound of the average predictability and it is achieved when ignoring the environment. What about the upper bounds? One defines the coherence $\mathcal C\leq1$ as the upper bound of the average visibility and the distinguishability $\mathcal D\leq1$ as the upper bound of the average predictability, which are achieved by employing the optimal POMs on the \emph{whole} environment: not having access to the whole environment will inevitably hinder the possibility of reaching $\mathcal C$ and $\mathcal D$. Lastly, note that in general, the POM that maximizes $\bar{\mathcal P}$ does not automatically maximize $\bar{\mathcal V}$ and vice versa. With this in mind, we can write the following hierarchies:
\begin{subequations}
\begin{align}
\mathcal P&\leq\bar{\mathcal P}\leq\mathcal D,\\
\mathcal V&\leq\bar{\mathcal V}\leq\mathcal C.
\end{align}
\end{subequations}
The standard duality relation $\mathcal P^2+\mathcal V^2\leq 1$ can be justified geometrically by interpreting $\mathcal P$ and $\mathcal V$ as in Fig.~\ref{definitions}. It contains the lower bounds of predictability and visibility, and therefore it refers to a situation in which the environment is not taken into account. If the environment is measured, one has to replace those lower bounds with the averages: $\bar{\mathcal P}^2+\bar{\mathcal V}^2\leq 1$. If one implements a which-alternative measurement, $\bar{\mathcal P}$ will reach the distinguishability, and one obtains ${\mathcal D}^2+\bar{\mathcal V}^2\leq 1$. Complementarily, if one implements an erasure measurement, $\bar{\mathcal V}$ will reach the coherence, and one obtains $\bar{\mathcal P}^2+{\mathcal C}^2\leq 1$. However, we note that as in general these two optimal measurements differ, the quantity ${\mathcal D}^2+{\mathcal C}^2$ can exceed the value of 1.
Therefore with a non-selective measurement one obtains an ensemble of conditional states whose values of $\bar{\mathcal P}$ and $\bar{\mathcal V}$ are limited by the bounds given above. This explains why one does not have the freedom to indirectly prepare the qubit in any desired state. In contrast, we saw that in case selective measurements were allowed, one would eventually (given nonzero correlations between qubit and environment) obtain a state anywhere on or in the Bloch sphere.
\section{Optimal erasure bound}
\begin{figure}
\caption{\label{fig1}}
\end{figure}
Let's now consider the situation described in Fig.~\ref{fig1}. We are facing the task of erasing the information about the alternatives of $A$ that is stored in $B$, by projecting $B$ in the most appropriate basis. We are looking at how well we can perform this task, and how much the state of $C$ matters.
We start by considering the purification $|\rho_{ABC}\rangle$ of the qubit plus the environment.
After we fix the computational basis on the Bloch sphere of $A$, we can write the (unnormalized) conditional states of $B$ and $C$ as
\begin{align}
\tilde \rho_{B|k}&=\mathrm{Tr}_{AC}[(|k\rangle\langle k|\otimes\mathbb{\hat 1}_B\otimes \mathbb{\hat 1}_C)\hat \rho_{ABC}],\\
\tilde \rho_{C|k}&=\mathrm{Tr}_{AB}[(|k\rangle\langle k|\otimes\mathbb{\hat 1}_B\otimes \mathbb{\hat 1}_C)\hat \rho_{ABC}],
\end{align}
where the vertical bar notation is intended to mean ``given the qubit $A$ in the computational state $k=0,1$'' and $|k\rangle\langle k|$ is the projector on the computational states of $A$. We use a tilde as a reminder that the state is unnormalized; to normalize it we would have to divide it by the probability of measuring the projector $|k\rangle\langle k|$, i.e. $\hat \rho_{C|k}=\tilde \rho_{C|k}/\mathrm{Tr}(\tilde \rho_{C|k})$. Using unnormalized states simplifies the equations below, so we will postpone normalization factors to the end. Note that the state of $A$ and $B$ is given by the density matrix
\begin{align}
\hat\rho_{AB}=\begin{pmatrix}
\tilde\rho_{B|0}&\tilde\chi_B\\
\tilde\chi_B^\dagger&\tilde\rho_{B|1}
\end{pmatrix}
\end{align}
From this matrix, we need two operators: the unnormalized off-diagonal block and the unnormalized difference between the diagonal blocks
\begin{align}
\tilde \chi_B=\mathrm{Tr}_{AC}[(|1\rangle\langle0|\otimes\mathbb{\hat 1}_B\otimes \mathbb{\hat 1}_C)\hat \rho_{ABC}&],\nonumber\\
\tilde\rho_{B|0}-\tilde\rho_{B|1}=\mathrm{Tr}_{AC}[(\hat\sigma_z\otimes\mathbb{\hat 1}_B\otimes \mathbb{\hat 1}_C)\hat \rho_{ABC}&].
\end{align}
We now have all we need to define the key quantity that we want to calculate (the largest visibility of $A$ that can be retrieved by optimizing a quantum erasure POM on $B$) and the largest predictability of the alternatives of $A$ that can be retrieved by optimizing a which-alternative POM on $B$ (which we deal with in the appendix):
\begin{align}
\label{PVDC}
\mathcal C_{A|B}&=\sup_{\mathrm{POM}_B}\sum_kp_k\mathcal {V}_k=2\,\mathrm{Tr}\left|\tilde\chi_B\right|\\
\mathcal D_{A|B}&=\sup_{\mathrm{POM}_B}\sum_kp_k\mathcal {P}_k=\mathrm{Tr}\left|\tilde \rho_{B|0}-\tilde \rho_{B|1}\right|.
\end{align}
The trace norm $\mathrm{Tr}|x|$ corresponds to the sum of the singular values of $x$, which are the eigenvalues of the positive matrix $|x|=\sqrt{x^\dagger x}$.
Note that we used a notation that resembles the coherence because $\mathcal C_{A|B}$ is the highest value of $\bar{\mathcal V}$ that can be retrieved on the qubit $A$ by accessing only $B$ (hence the subscripts). Had we the ability to access the whole environment, the value that we would achieve would be the true coherence $\mathcal C$.
In addressing this problem, we ask the question of how large $\bar{\mathcal V}$ can be, given the constraints imposed by correlations and measurements. As we said, we restrict the measurements to those that span $\mathcal H_B$, so $\bar{\mathcal V}\leq\mathcal C_{A|B}$ and we now calculate this upper bound.
At this point we use the assumption that $\mathrm{dim}(\mathcal H_B)=2$, so $\tilde{\chi}_B^\dagger\tilde{\chi}_B$ will have two positive eigenvalues. Calling them $a$ and $b$, we have $\mathrm{Tr}|\tilde\chi_B|=\sqrt{a}+\sqrt{b}$. We can express the sum of two square roots in terms of the elementary symmetric polynomials in two variables $s_1=a+b$ and $s_2=ab$:
\begin{align}
\label{key}
\mathrm{Tr}|\tilde\chi_B|=\sqrt{a}+\sqrt{b}=\sqrt{s_1+2\sqrt{s_2}}
\end{align}
The last thing to do is to express the symmetric polynomials in terms of traces, which can be done elegantly via Newton's identities:
\begin{subequations}
\label{newton}
\begin{align}
s_1&=\mathrm{Tr}(x)\\
2s_2&=\mathrm{Tr}(x)^2-\mathrm{Tr}(x^2)\\
6s_3&=\mathrm{Tr}(x)^3-3\mathrm{Tr}(x)\mathrm{Tr}(x^2)+2\mathrm{Tr}(x^3)\\
\dots\nonumber
\end{align}
\end{subequations}
In our case $x=\tilde\chi_B^\dagger \tilde\chi_B$.
After a bit of algebra (see appendix) we find
\begin{align}
\mathrm{Tr}|\tilde\chi_B|^2=E(\tilde \rho_{C|0},\tilde \rho_{C|1})
\end{align}
where $E(\tilde \rho_{C|0},\tilde \rho_{C|1})$ is the sub-fidelity of $\tilde \rho_{C|0}$ and $\tilde \rho_{C|1}$. The sub-fidelity is a lower bound of Uhlmann's fidelity $F(x,y)=\mathrm{Tr}(\sqrt{\sqrt{x}\,y\sqrt{x}})$ and is defined as
\begin{align}
E(x,y)=\mathrm{Tr}(xy)+\sqrt{2}\sqrt{[\mathrm{Tr}(xy)]^2-\mathrm{Tr}(xyxy)}.
\end{align}
This allows us to write the bound $\mathcal C_{A|B}$ as
\begin{align}
\label{result}
\mathcal C_{A|B}=2\sqrt{E(\tilde \rho_{C|0},\tilde \rho_{C|1})}=2\sqrt{p_0p_1E(\hat \rho_{C|0},\hat \rho_{C|1})}
\end{align}
where we re-introduced the normalization of the states and exploited the bilinearity of the sub-fidelity.
We have therefore found a fundamental link between sub-fidelity and the highest visibility achievable in quantum erasure with minimal access to the environment. Interestingly, under some conditions one can turn the argument around and \emph{define} the sub-fidelity of two states of a system of arbitrary dimension, as the highest visibility that can be reached by acting on one of two qubits that are correlated to it. This would also allow indirect measurements of the sub-fidelity of inaccessible states.
Following similar steps we can extend our analysis to the case $\mathrm{dim}(\mathcal H_B)=3$, i.e. the case where one can access a three-dimensional subspace of the environment. In this case, some simple algebra will tell us that
\begin{align}
\mathrm{Tr}|\tilde\chi_B|&=\sqrt{a}+\sqrt{b}+\sqrt{c}\\
&=\sqrt{s_1+2\sqrt{s_2+2\sqrt{s_3}\,\mathrm{Tr}|\tilde\chi_B|}}
\label{dim3}
\end{align}
where now the symmetric polynomials are in three variables: $s_1=a+b+c$, $s_2=ab+bc+ca$ and $s_3=abc$ and they still satisfy Eq.~\eqref{newton}. So one can solve Eq.~\eqref{dim3} for $\mathrm{Tr}|\tilde\chi_B|$ and still find the highest visibility analytically. It is in principle possible to extend this method to higher dimensions, but it becomes quickly intractable because the number of terms grows exponentially.
\section{average bound}
We now turn our attention to a very interesting problem: we want to find the \emph{average} performance of optimal quantum erasure, i.e. we want to compare the ``raw'' visibility of a random qubit with the visibility after performing optimal erasure on its environment. We can calculate the former analytically, and we will compare it with a numerical evaluation of the latter, making the observation that the ratio between the two is practically independent of the size of the environment.
Technically, we need to find the average of $\mathcal C_{A|B}$ over random states in $B$ with respect to the measure that is induced by tracing away a $2K$-dimensional environment (i.e. the 2-dimensional space $\mathcal H_B$ and a $K$-dimensional space $\mathcal H_C$).
One (slow) way to do this would be to uniformly generate random pure states in the whole space $\mathcal H_A\otimes\mathcal H_B\otimes \mathcal H_C$, then trace $\mathcal H_B$ away, find the operators $\tilde\rho_{C|0}$ and $\tilde\rho_{C|1}$ and calculate their sub-fidelity.
A much quicker way to do this is to generate random states directly through complex random Gaussian matrices, which is a quite remarkable method: generate an $m\times n$ matrix $\mu$, with entries sampled from the Gaussian distribution in the complex plane centered on the origin and with unit variance. Then, all $n\times n$ density matrices $\rho=\mu^\dagger\mu/\mathrm{Tr}(\mu^\dagger\mu)$ are distributed according to the induced trace measure obtained from tracing $m$ dimensions away from an $mn$-dimensional Hilbert space from which we are sampling uniformly \cite{KarolBook}. In our case $n=2$ and $m=2K$. We will perform this task numerically for environments of dimension $K$ up to $10^3$ within reasonable computation time.
\begin{figure}
\caption{\label{fig3}}
\end{figure}
\begin{figure}
\caption{\label{fit}}
\end{figure}
Interestingly, we can still find the average bound analytically for $K=1$, i.e. in the case of an environment entirely constituted by a 2-dimensional accessible space $\mathcal H_B$. In this case, $\mathcal C_{A|B}=2\sqrt{p_0p_1}$, and we can calculate the average of this quantity by sampling mixed states from all the Bloch ball of the qubit with a uniform measure. This cannot be done if $\mathrm{Dim}(\mathcal H_C)>1$, in which case the measure will be more biased towards the center of the Bloch ball. If we indicate with $z$ the vertical coordinate with origin at the centre of the sphere, we have $p_0=(1+z)/2$, $p_1=(1-z)/2$ and the uniform measure on the sphere is $\frac{3}{4} (1 - z^2)\,dz$, so the result is
\begin{align}
\langle\mathcal C_{A|B}\rangle&=\int_{-1}^1 2 \sqrt{\frac{1+z}{2}\frac{1 - z}{2}}\, \frac{3}{4} (1 - z^2)\,dz\nonumber\\
&=\frac{9\pi}{32}\approx0.88357\dots
\end{align}
We note that this result is quite remarkable in its own right: what it says is that given a known random state of two qubits, \emph{on average} one could prepare a single qubit state with an average visibility of almost 90\%. This figure is destined to decrease as the dimension of $\mathcal H_C$ grows, so we are interested in understanding how quickly it does so.
As we are interested in comparing this scaling with the raw visibility of the qubit alone, without any intervention in the space $\mathcal H_B$, we need to calculate $\langle\mathcal V\rangle$. We can do so analytically. We start from the eigenvalue distribution induced by the partial trace $P^\mathrm{trace}_{2,K}(\lambda)$, where $K$ is the dimension of the environment (which for us is going to be $2\mathrm{Dim(\mathcal H_C)}$, where the factor 2 is coming from the dimension of $\mathcal H_B$). For a qubit state, one eigenvalue is sufficient, as the other is determined by the fact that the trace of the density matrix has to be 1. We know from \cite{KarolBook} that
\begin{align}
P^\mathrm{trace}_{2,K}(\lambda)=\frac{\Gamma(2 K)}{2 \Gamma(K) \Gamma(K - 1)} (\lambda - \lambda^2)^{K-2} (2 \lambda - 1)^2
\end{align}
Therefore, given a diagonalized state with eigenvalues $\lambda$ and $1-\lambda$, we simply have to apply a uniform random rotation in $SU(2)$ and extract the off-diagonal element $\mu=(1-2 \lambda ) \sin (\theta ) (\cos (\psi )+i \sin (\psi ) \cos (\phi )) (\cos (\theta )+i \sin
(\theta ) \sin (\psi ) \sin (\phi ))$, written in 4-dimensional polar coordinates (considering $S^3$ as the manifold underlying $SU(2)$). So in summary, we average the visibility $2|\mu|$ over $SU(2)$ with the uniform Haar measure $dg$ and over the eigenvalue space with the induced trace measure to obtain:
\begin{align}
\langle \mathcal V\rangle_K&=\int_0^1d\lambda \int_{SU(2)}dg\,2|\mu|P^\mathrm{trace}_{2,K}(\lambda)\nonumber\\
&=\frac{\pi}{4^{K}}\frac{\Gamma(2K)}{K\Gamma(K)^2}
\label{avgVis}
\end{align}
In the limit of large $K$, this scales like $O(K^{-1/2})$. Recalling that in our case $K=2\mathrm{Dim}(\mathcal H_C)$, one readily obtains the blue curve in Fig.~\ref{fig3}. In case of a pure random two-qubit state (i.e. if there are no correlations with any environment), one obtains the value $\langle\mathcal V\rangle_2=3\pi/16\approx0.58905$. How does $\langle \mathcal C_{A|B}\rangle_K$ compare with $\langle \mathcal V\rangle_K$? In other words, what is the advantage of performing quantum erasure? We find that the advantage does not depend on the dimension of $\mathcal H_C$. In fact, as the dimensionality of the environment increases, the value of the average coherence becomes a \emph{constant} multiple of the average visibility, i.e.
\begin{align}
\langle \mathcal C_{A|B}\rangle_K\sim c\langle \mathcal V\rangle_K\qquad(K\rightarrow\infty).
\end{align}
Although this seems to imply that for small $K$ this relation is inaccurate, it actually has an error of less than 2\% already from $K=10$.
We ran a simulation and estimated $c=1.94382\pm0.00013$ to a very high degree of confidence (see Fig.~\ref{fit}). This means that if you were to pluck a random pure state of 2 qubits embedded in an inaccessible environment, you can expect to almost double the coherence of one of the qubits by optimally measuring the other.
\begin{comment}
\section{Examples}
A central concern in all quantum information applications is the control of systems that are coupled to an environment \cite{}. In particular, it is vital to understand how one can control realistic qubits that would inevitably have some interactions with other systems. As is well known, these extra interactions can cause decoherence, which significantly impacts on quantum information applications. It has been shown that if one can monitor the environment, then it is often possible to undo some of the effects of decoherence \cite{}. However, it is generally impractical to monitor the whole environment. As such, the situation considered in our modified which-alternative and erasure relations is important to realistic quantum information protocols. We will illustrate this by considering a simple physical example.
One way of encoding a qubit is on two levels of an atom, when we can suppress transitions to other levels. One can thus treat the atom as a two level system with a ground state $|g\rangle$ and excited state $|e\rangle$. Initially, the qubit will be prepared in a pure state, $|\psi\rangle_A=c_1|g\rangle+c_2|e\rangle$. If we could isolate this atom from other atoms or stray fields we could study spontaneous emission. After some time $t$, the evolution of the atom with decay rate $\gamma$ is described by a unitary $\hat U$ that acts as follows
\begin{align}
\hat U|g\rangle_A|0\rangle_E&=|g\rangle_A|0\rangle_E,\nonumber\\
\hat U|e\rangle_A|0\rangle_E&=\sqrt{1-e^{-2\gamma t}}|g\rangle_A|1\rangle_E+e^{i\phi}e^{-\gamma t}|e\rangle_A|0\rangle_E,\nonumber
\end{align}
where $E$ denotes the environment modes into which photons can be emitted. Hence, $|0\rangle_E$ is the vacuum and $|1\rangle_E$ corresponds to a single photon emitted into the environment.
The effect of spontaneous emission is to change the qubit from being in the pure state $|\psi\rangle_A$ to being in the mixed state $\rho_A=b(1-e^{-2\gamma t})|g\rangle\langle g|+|\varphi\rangle\langle\varphi|$, where $|\varphi\rangle=\sqrt{a}|g\rangle_A+\sqrt{b}e^{i\phi}e^{-\gamma t}|e\rangle_A$. The visibility and predictability for this new qubit are
\begin{align}
\label{vp}
\mathcal{V}&=2\sqrt{ab}\,e^{-\gamma t},\nonumber\\
\mathcal{P}&=\Big|(b-a)-2|\gamma|^2b\Big|.
\end{align}
Note that both of these quantities reduce to the standard result when $|\gamma|=0$.
One can monitor the environment by surrounding the atom with detectors.
In practice it would be very difficult to monitor all possible spatial modes. Nevertheless, we might be able to check a fraction of them. This would correspond to having access to a part of the environment, which we call our system $B$. The remainder of the environment will be our system $C$.
We view our measurement on system $B$ as only telling us whether we observe a photon within any of the monitored modes. In this way, we can view system $B$ as also being a qubit with basis states $|0\rangle_B$ and $|1\rangle_B$.
The tripartite state of our whole system will initially be $|\Psi_0\rangle_{ABC}=|\psi\rangle_A|0\rangle_B|0\rangle_C$. At some later time $t$, the state will have evolved to $|\Psi_t\rangle_{ABC}=\hat U|\psi\rangle_A|0\rangle_B|0\rangle_C$. From equation (\ref{udef}), we find that
\begin{align}
|\Psi_t\rangle_{ABC}=|g\rangle_A&\otimes\left(\sqrt{a}|00\rangle_{BC}+\sqrt{b}\gamma_B|10\rangle_{BC}+\sqrt{b}\gamma_C|01\rangle_{BC}\right)\nonumber\\&+b\sqrt{1-|\gamma|^2}|e\rangle_A|00\rangle_{BC},
\end{align}
where $|\gamma_B|^2+|\gamma_C|^2=|\gamma|^2$. The term $|\gamma_B|^2$ is the probability of an excited state decaying by emitting a photon into one of the modes that comprise system $B$. Similarly, $|\gamma_C|^2$ is the probability of an excited state emitting a photon into the unmonitored environmental modes, system $C$. A quick calculation shows that
\begin{align}
\label{cb}
\mathcal{C}_{A|B}^2&=4b\left(a+b|\gamma_B|^2\right)\sqrt{1-|\gamma|^2}
\end{align}
This quantity must equal the sub-fidelity of the states of the environment conditional on the atom being decayed or not. These states encode what the universe knows about the atom, and we can measure their sub-fidelity just by monitoring one single atom.
\end{comment}
\section{Conclusion}
In this work we have addressed the limitations of quantum erasure on a qubit when we have minimal access to its environment. We find that the highest visibility of the qubit is proportional to the sub-fidelity of the conditional states of the inaccessible part of the environment. This result provides an operational interpretation of sub-fidelity, an insight into correlated systems and it can also give us a way of measuring the sub-fidelity of inaccessible states. Finally, we found that optimal quantum erasure can almost double the visibility of a random qubit embedded in an arbitrarily large environment of which we control only a 2-dimensional subspace.
\section{Appendix}
We now provide our derivation of $\mathcal C_{A|B}$. We first expand $\hat\chi_B=\tilde\chi_B/\sqrt{p_0p_1}$ in its most general form:
\begin{align}
\hat\chi_B=\left(
\begin{array}{c@{}c}
\sqrt{r_0s_0}\langle c_{10}|c_{00}\rangle & e^{i\theta'}\sqrt{r_0s_1}\langle c_{11}|c_{00}\rangle\\
e^{-i\theta}\sqrt{r_1s_0}\langle c_{10}|c_{01}\rangle & e^{-i(\theta-\theta')}\sqrt{r_1s_1}\langle c_{11}|c_{01}\rangle\\
\end{array}\right),
\end{align}
where $|c_{ab}\rangle$ are the states of $C$ conditioned on the alternatives of $A$ and $B$ (being $\mathcal H_B$ 2-dimensional, $B$ is a qubit too). The positive numbers $r_b$ and $s_b$ are the relative probabilities of $|c_{0b}\rangle$ and $|c_{1b}\rangle$, respectively. $\theta$ and $\theta'$ are the phases of the states of $B$ conditioned on the alternatives of $A$. For simplicity, we will rewrite this as
\begin{align}
\hat\chi_B=\begin{pmatrix}
\alpha & \gamma\\
\delta & \beta\\
\end{pmatrix}.
\end{align}
Plugging this into Eq.~\eqref{key} gives us
\begin{align}
\mathrm{Tr}|\hat\chi_B|^2=|\alpha|^2+|\beta|^2+|\gamma|^2+|\delta|^2+2|\alpha\beta-\gamma\delta|.
\end{align}
Expanding $|\alpha|^2+|\beta|^2+|\gamma|^2+|\delta|^2$ we obtain
\begin{align}
\mathrm{Tr}(\tilde\rho_{C|00}\tilde\rho_{C|10})&+\mathrm{Tr}(\tilde\rho_{C|00}\tilde\rho_{C|11})+\nonumber\\\mathrm{Tr}(\tilde\rho_{C|01}\tilde\rho_{C|10})&+\mathrm{Tr}(\tilde\rho_{C|01}\tilde\rho_{C|11})\nonumber\\=\mathrm{Tr}(\hat\rho_{C|0}\hat\rho_{C|1})
\end{align}
where $\tilde\rho_{C|0b}=r_b|c_{0b}\rangle\langle c_{0b}|$ and $\tilde\rho_{C|1b}=s_b|c_{1b}\rangle\langle c_{1b}|$ are \emph{unnormalized} states. Consequently, $\hat\rho_{C|a}=\tilde\rho_{C|a0}+\tilde\rho_{C|a1}$ are the normalized states of $C$ conditioned on $A$ while ignoring (tracing away) $B$.
The final term is not as straightforward. We begin first by rewriting $|\alpha\beta-\gamma\delta|$ as $\sqrt{(\alpha\beta-\gamma\delta)(\alpha^*\beta^*-\gamma^*\delta^*)}$. We expand what is under the square root and then add and subtract to it the following term:
\begin{align}
\mathrm{Tr}(\tilde\rho_{C|00}\tilde\rho_{C|11})&\mathrm{Tr}(\tilde\rho_{C|01}\tilde\rho_{C|11})+\nonumber\\
\mathrm{Tr}(\tilde\rho_{C|00}\tilde\rho_{C|10})&\mathrm{Tr}(\tilde\rho_{C|01}\tilde\rho_{C|10}).
\end{align}
We then simplify the result with the identity $\mathrm{Tr}(XY)\mathrm{Tr}(XZ)=\mathrm{Tr}(XYXZ)$, which holds for $X$ rank-1. We obtain
\begin{align}
\mathrm{Tr}|\hat\chi_B|^2&=\mathrm{Tr}(\hat\rho_{C|0}\hat\rho_{C|1})\nonumber\\+&\sqrt{2}\sqrt{[\mathrm{Tr}(\hat\rho_{C|0}\hat\rho_{C|1})]^2-\mathrm{Tr}[(\hat\rho_{C|0}\hat\rho_{C|1})^2]}\nonumber\\
&=E(\hat\rho_{C|0},\hat\rho_{C|1}).
\end{align}
For completeness, we now look at the dual problem of optimizing a which-alternative sorting, i.e. the goal is to maximize the which-alternative information. Again, we are restricted in our measurements to those that span $\mathcal H_B$. We can still use Eq. \eqref{key}, only now we have $x=\tilde \rho_{B|0}-\tilde \rho_{B|1}$, which is hermitian. The hermiticity of $x$ allows us to simplify Eq.~\eqref{key} to
\begin{align}
\left(\mathrm{Tr}|x|\right)^2=
\begin{cases}
2\mathrm{Tr}(x^2)-\mathrm{Tr}(x)^2\quad&\mathrm{Tr}(x^2)\geq\mathrm{Tr}(x)^2\\
\mathrm{Tr}(x)^2&\mathrm{Tr}(x^2)\leq\mathrm{Tr}(x)^2
\end{cases}.
\end{align}
The first case implies that the accessible space $\mathcal H_B$ contains which-alternative information and we can access it.
The second case is trivial and implies that $\mathcal D_{A|B}^2$ reaches its minimum of $\mathcal P^2=(p_0-p_1)^2$, i.e. the accessible space $\mathcal H_B$ does not carry which-alternative information. So it is not surprising that the upper bound $\mathcal D_{A|B}^2$ depends on the conditional states of $\mathcal H_B$:
\begin{align}
\mathcal D_{A|B}^2=
\begin{cases}
2\mathrm{Tr}[(\tilde \rho_{B|0}-\tilde \rho_{B|1})^2]-\mathcal P^2\\
\mathcal P^2
\end{cases}\label{Db}
\end{align}
\bibliographystyle{unsrt}
\end{document}
|
\begin{document}
\title{Rationality does not specialize among terminal varieties}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
An algebraic variety is {\it rational }if it becomes isomorphic
to projective space after removing lower-dimensional
subvarieties from both sides. Little is known about
how rationality behaves in families. In particular,
given a family of projective varieties for which the geometric
generic fiber is rational, is every fiber geometrically rational?
(``Geometric'' refers to properties of a variety after
extending its base field to be algebraically closed.)
Matsusaka proved that the analogous
question for geometric ruledness has a positive answer
\cite[Theorem IV.1.6]{Kollarbook}.
(By definition, a variety is ruled if it is birational to the product
of the projective line with some variety.) That is, ruledness
specializes in families of varieties. For example,
Koll\'ar used Matsusaka's theorem
to show that a large class of Fano hypersurfaces
are not ruled and therefore not rational
\cite[Theorem V.5.14]{Kollarbook}. By contrast, rationality
does not specialize in this generality, as shown by a family
of cubic surfaces over the complex numbers $\text{\bf C}$
with most fibers smooth and one fiber the projective cone
over a smooth cubic curve. Every smooth cubic surface
is rational, but the cone over a smooth cubic curve $E$ is birational
to $E\times \text{\bf P}^1$, which is not rational because
it has a nonzero holomorphic 1-form.
Note, however, that the cone over a cubic curve has a fairly bad singularity:
it is log canonical but not klt (Kawamata log terminal). This suggests
the question of whether rationality specializes among
varieties with milder singularities. Indeed, it follows
from de Fernex and Fusi \cite[Theorem 1.3]{DFF} and Hacon
and M\textsuperscript{c}Kernan
\cite[Corollary 1.5]{HM}
that rationality specializes
among klt complex varieties of dimension at most 3.
Extending work of Voisin \cite{Voisin} and Colliot-Th\'el\`ene
and Pirutka \cite{CTP},
\cite[Theorem 2.1]{Totarohyper}
showed that a large class of Fano hypersurfaces $X$
are not stably rational. (That is,
no product of $X$ with projective space is rational.)
As an application, suggested by de Fernex,
\cite[Corollary 4.1]{Totarohyper} showed
that rationality does not specialize among klt varieties
of dimension 4 or higher.
In this paper, we find that the results of \cite{Totarohyper}
are strong enough to imply that rationality
does not specialize even among {\it terminal }varieties.
Terminal singularities
form the narrowest class of singularities that comes up
in the minimal model program.
The examples
are in any dimension at least 5.
Some natural remaining questions are:
Does rationality specialize among terminal 4-folds?
Does rationality specialize among smooth varieties?
This work was supported by NSF grant DMS-1303105.
\section{The example}
\begin{theorem}
\label{main}
There is a flat projective morphism $f\colon X\rightarrow C$
with $C$ a Zariski open subset of the complex affine line
such that $0$ is in $C$,
all fibers of $f$ have terminal singularities, all fibers of $f$
over $C-0$ are rational, and the fiber $F$ over $0$ is not rational.
Such examples exist with $F$ of any dimension at least 5.
There is also a family of 4-folds with canonical singularities over
a Zariski open subset $C$ of $A^1_{\text{\bf C}}$
such that all fibers over $C-0$ are rational and the fiber $F$
over $0$ is not rational.
\end{theorem}
In other words, rationality does not specialize among terminal varieties
of dimension at least 5, or among canonical varieties of dimension
at least 4. (Throughout, we are talking about families
of projective varieties.)
\begin{proof}[Proof of Theorem \ref{main}]
We start with the following old observation.
\begin{lemma}
\label{mult}
If $X$ is a hypersurface of degree $d$ in $\text{\bf P}^{n+1}$ over a field $k$
such that $X$ has multiplicity equal to $d-1$
at some $k$-rational point $p$,
and if the singular locus
of $X$ has codimension at least 2, then $X$ is rational over $k$.
\end{lemma}
\begin{proof}
The assumption on the singular
locus ensures that $X$ is irreducible. The assumption on the multiplicity
of $X$ at $p$ implies that a general line through $p$ meets $X$
in exactly one other point. That gives a birational map over $k$
from the projective space $\text{\bf P}^n$ of lines through $p$ to $X$.
\end{proof}
We return to the proof of Theorem \ref{main}.
By \cite[Theorem 2.1]{Totarohyper}, a very general
quartic 4-fold in $\text{\bf P}^5_{\text{\bf C}}$
is not stably rational. Choose one smooth quartic 4-fold $Y$ over $\text{\bf C}$
which is not stably rational. Let $X_0$ be the projective cone
over $Y$ in $\text{\bf P}^6$. Then $X_0$ is a quartic 5-fold, and $X_0$
is not rational because it is birational to $\text{\bf P}^1\times Y$.
Also, $X_0$ is terminal, because $Y$ has Fano index 2 which is greater than 1,
meaning that the anticanonical bundle $-K_Y$ is given
by $-K_{Y}\cong -(K_{\text{\bf P}^5}+Y)|_Y=O(6-4)|_Y=O(2)|_Y$
\cite[Lemma 3.1]{Kollarsing}.
Let $Y$ be defined by the equation
$f_4(x_0,\ldots,x_5)=0$. Then $X_0$
is defined by the same equation in $\text{\bf P}^6=\{[x_0,\ldots,x_6]\}$.
Let $g_3(x_0,\ldots,x_5)$ be a nonzero cubic form over $\text{\bf C}$.
Consider the pencil of quartics in $\text{\bf P}^6$ given
by the equation
$$f_4(x_0,\ldots,x_5)+ag_3(x_0,\ldots,x_5)x_6=0$$
for $a$ in the affine line $A^1_{\text{\bf C}}$. This gives a flat family $f\colon
X\rightarrow A^1$
of hypersurfaces, and the fiber over 0 is the cone $X_0$.
Since ``terminal'' is a Zariski-open condition in families
\cite[Corollary VI.5.3]{Nakayama},
there is a Zariski open neighborhood $C$ of $0$ in $A^1$ such that
all fibers of the restricted family $f\colon X_C\rightarrow C$
are terminal. In particular, the fibers are normal and hence
have singular locus of codimension at least 2.
Finally, for all $a\neq 0$ in $C$, the fiber $X_a$ is a hypersurface
of degree 4 in $\text{\bf P}^6$ with multiplicity equal to 3 at the point
$[0,\ldots,0,1]$. By Lemma \ref{mult},
it follows that $X_a$ is rational for all $a\neq 0$
in $C$. Since $X_0$ is not rational, this completes the proof
that rationality does not
specialize among terminal varieties.
The example given is a family of 5-folds. Multiplying the family
with any projective space $\text{\bf P}^m$ shows that rationality does
not specialize among terminal varieties of any dimension at least 5.
(Here again, it is important that $Y$ is not stably rational,
so that $X_0\times \text{\bf P}^m$ is not rational.)
Finally, replace the 4-fold $Y$ by a smooth quartic 3-fold (again
called $Y$) in $\text{\bf P}^4_{\text{\bf C}}$ which is not stably rational.
Such a variety exists, by
Colliot-Th\'el\`ene and Pirutka \cite{CTP}. It follows
that the projective
cone $X_0$ over $Y$ in $\text{\bf P}^5$ (rather than $\text{\bf P}^6$) is not rational.
Since $Y$ has Fano index 1, $X_0$
has canonical
but not terminal singularities. Also, ``canonical'' is a Zariski open
condition in families \cite{Kawamata}. A pencil of hypersurfaces in $\text{\bf P}^5$
given by the same formula as above
shows that rationality does not specialize
among 4-folds with canonical singularities.
\end{proof}
\small \sc UCLA Mathematics Department, Box 951555,
Los Angeles, CA 90095-1555
[email protected]
\end{document}
|
\begin{document}
\title{Bounds for Calder\'{o}n-Zygmund operators with matrix \(A_2\) weights}
\begin{abstract}
It is well-known that dyadic martingale transforms are a good model for Calder\'{o}n-Zygmund singular integral operators. In this paper we extend some results on weighted norm inequalities to vector-valued functions. We prove that, if \(W\) is an \(A_2\) matrix weight, then the weighted \(L^2\)-norm of a Calder\'{o}n-Zygmund operator with cancellation has the same dependence on the \(A_2\) characteristic of \(W\) as the weighted \(L^2\)-norm of an appropriate matrix martingale transform. Thus the question of the dependence of the norm of
matrix-weighted Calder{\'o}n-Zygmund operators on the \(A_2\) characteristic of the weight is reduced to the case of dyadic martingales and paraproducts. We also show a slightly different proof for the special case of Calder\'{o}n-Zygmund operators with even kernel, where only scalar martingale transforms are required. We conclude the paper by proving a version of the matrix-weighted Carleson Embedding Theorem.
Our method uses a Bellman function technique introduced by S. Treil to obtain the right estimates for the norm of dyadic Haar shift operators. We then apply the representation theorem of T. Hyt\"{o}nen to extend the result to general Calder\'{o}n-Zygmund operators.
\end{abstract}
\section{Introduction}
In the 1970's, R.A. Hunt, B. Muckenhoupt and R.L. Wheeden \cite{HuMuWh73} and R.R. Coifman and C. Fefferman \cite{CoFe74} showed that a Calder\'{o}n-Zygmund singular integral operator is bounded on the weighted space \(L^p(w)\) if and only if the scalar weight \(w\) belongs to the so-called \(A_p\) class. For the last two decades, an important open problem in Harmonic Analysis was to characterize the dependence of the operator norm on the \(A_p\) characteristic, \([w]_{A_p}\), of the weight. For \(p=2\) this dependence was conjectured to be linear in \([w]_{A_2}\); the problem has become known as the \(A_2\) conjecture. The first step was taken by J. Wittwer \cite{Wi00}, who proved the \(A_2\) conjecture for dyadic martingale transforms. Using Bellman function techniques, S. Petermichl and A. Volberg \cite{PeVo02} showed the conjecture for the Beurling-Ahlfors transform. It took a few more years until the \(A_2\) conjecture was proved for the Hilbert transform by S. Petermichl (see \cite{Pe07}). The conjecture was finally settled for general Calder\'{o}n-Zygmund operators in 2010 by T. Hyt\"{o}nen \cite{Hy12a}. The main ingredient in his proof is the pointwise representation of a general Calder\'{o}n-Zygmund operator as a weighted average over an infinite number of randomized dyadic systems of some simpler operators (called dyadic Haar shifts) in such a way that the estimates for the dyadic Haar shifts depend polynomially on the complexity.
A natural problem is to try to extend these results to vector-valued functions. S. Treil and A. Volberg introduced the correct definition of a matrix \(A_p\) weight (see \cite{TrVo97}). M. Goldberg \cite{Go03}, F. Nazarov and S. Treil \cite{hunt} and A. Volberg \cite{Vo97} showed that certain Calder\'{o}n-Zygmund operators are bounded on \(L^p(W)\) when \(1<p<\infty\) if \(W\) is a matrix \(A_p\) weight. However, the sharp dependence of the norm of a Calder\'{o}n-Zygmund operator on the \(A_2\) characteristic of \(W\) is unknown even for the martingale transform. In a recent paper, K. Bickel, S. Petermichl and B. Wick \cite{BiPeWi14} modified a scalar argument to obtain that for the Hilbert and martingale transforms this dependence is no worse than \([W]_{A_2}^{3/2}\log{[W]_{A_2}}\). This has very recently been improved to
\([W]_{A_2}^{3/2}\), or more precisely, \([W]_{A_2}^{1/2} [W]_{A_\infty}^{1/2} [W^{-1}]_{A_\infty}^{1/2}\), for all Calder\'{o}n-Zygmund operators \cite{ntvp}.
Even more recently, T. Hyt\"onen, S. Petermichl and A. Volberg \cite{hpv} proved
the sharp linear upper bound
\([W]_{A_2} \) for the matrix-weighted square function, which can be understood as an average of the matrix martingale transforms
we consider. This raises the hope that the expected sharp linear bound for matrix martingale transforms in terms of $[W]_{A_2}$
may now come into reach.
In this paper we prove that the norms of all Calder\'{o}n-Zygmund singular integrals with cancellation have the same dependence on \([W]_{A_2}\) as the matrix martingale transforms
(we denote this dependence by \(N([W]_{A_2})\)). The \(A_2\) conjecture for matrix-weighted spaces is thus reduced to the case of dyadic martingale transforms and of the paraproducts. The proof follows S. Treil's approach for the proof of the linear \(A_2\) bound in the scalar case (see \cite{Tr11}). The main challenge here is the adaptation of the Bellman function to the matrix case, where convexity properties are much more difficult than in the scalar setting. Using Hyt\"{o}nen's representation of a Calder\'{o}n-Zygmund operator, it is enough to obtain the right estimate for the dyadic Haar shift operators. Since we want to obtain the same bound in terms of \([W]_{A_2}\) for the norm of dyadic Haar shifts, we have to use the martingale transform only once. We will decompose a dyadic Haar shift of complexity \(k\) into \(k\) ``slices'' that can be seen as martingale transforms. The main idea is to linearize the norm of these slices and then use the Bellman function to estimate each summand. In order to do this, we start with a standard dyadic martingale of points from the domain of the Bellman function, where at each point we have two choices with equal probability. We will then modify the martingale, but preserving the initial point and the endpoints, and probabilities. From the starting point, instead of going to the next level in the standard martingale, we move with probabilities \(1/2\) to two new points that are ``far enough'' from the initial point, but also ``almost averages'' of the endpoints. We can still move from these new points to the endpoints, this time using a modified dyadic martingale, where at each point we have two choices with ``almost equal'' probability. This new martingale is constructed in such a way that the probabilities of moving from the starting point to the endpoints are still equal, as in the case of the standard martingale. Although we have used probabilistic terms, the formal proof involving the Bellman function is elementary.
The paper is organized as follows: in Section 2 we recall the necessary definitions and results that we are using. Then we state our main result (Theorem \ref{mainthm}) and show that it is enough to obtain a corresponding estimate for dyadic Haar shift operators, which is the content of Theorem \ref{mainshift}. In Section 3 we use the boundedness of the martingale transform to relate the norm of a dyadic Haar shift to an expression that will be controlled by the Bellman function. Section 4 contains the definition of the Bellman function associated to our problem and the description of its properties. In Section 5 we formulate and prove the main technical result of the paper, which is inspired by \cite{Tr11}. In Section 6 we show how the main estimate from the previous section is used to conclude the proof of Theorem \ref{mainshift}. In the following section we prove a similar result for Calder\'{o}n-Zygmund singular integrals with even kernel, this time using the same martingale transform as in the scalar case. We finish with a further application of our Bellman function argument, namely a matrix-weighted Carleson Embedding Theorem which holds with constants independent of the dimension and the weight. This is, however, not the simple generalization of the usual weighted Carleson Embedding Theorem in \cite{NaTrVo99}.
\section{Definitions and statement of the main results}
In this section, we recall some well-known notions and results that we are going to use later on.
\subsection{Calder\'{o}n-Zygmund operators}
Let \(\Delta=\{(x,x): x \in \mathbb{R}^p\}\) be the diagonal of \(\mathbb{R}^p \times \mathbb{R}^p\). We say that a function \(K: \mathbb{R}^p \times \mathbb{R}^p \setminus \Delta \to \mathbb{C}\) is a standard Calder\'{o}n-Zygmund kernel if there exists \(\delta>0\) such that
\[|K(x,y)| \leq \frac{C}{|x-y|^p},\]
\[|K(x,y)-K(x,z)|+|K(y,x)-K(z,x)| \leq C_{\delta} \frac{|y-z|^{\delta}}{|x-y|^{p+\delta}},\]
for all \(x,y,z \in \mathbb{R}^p\) with \(|x-y|>2|y-z|\).
An operator \(T\), defined on the class of step functions (which is dense in \(L^2(\mathbb{R}^p)\)), is called a Calder\'{o}n-Zygmund operator on \(\mathbb{R}^p\) associated to \(K\), if it satisfies the kernel representation
\[Tf(x)=\int_{\mathbb{R}^p} K(x,y)f(y)\, \mathrm{d}y, \qquad x \notin {\mathrm{supp}}\,f .\]
\subsection{Matrix \(A_2\) weights}
For \(d \geq 1\), the non-weighted Lebesgue space \(L^2(\mathbb{R}^p)\) consists of all measurable functions \(f:\mathbb{R}^p \to \mathbb{C}^d\) such that
\[
\|f\|_{L^2(\mathbb{R}^p)} := \Big( \int_{\mathbb{R}^p}\,\|f(t)\|^2_{\mathbb{C}^d}\, \mathrm{d}t \Big)^{1/2} < \infty.
\]
We will also use the space \(C^1_c(\mathbb{R}^p)\) of compactly supported, continuously differentiable functions \(f:\mathbb{R}^p \to \mathbb{C}^d\).
Let \(\mathcal{M}_d(\mathbb{C})\) be the space of \(d \times d\) complex matrices. A matrix weight on \(\mathbb{R}^p\) is a measurable locally integrable function \(W: \mathbb{R}^p \to \mathcal{M}_d(\mathbb{C}) \) whose values are almost everywhere positive definite. We define \(L^2(W)\) to be the space of measurable functions \(f:\mathbb{R}^p \to \mathbb{C}^d\) with norm
\[\|f\|^2_{L^2(W)} = \int_{\mathbb{R}^p} \|W^{1/2}(t)f(t)\|^2_{\mathbb{C}^d}\, \mathrm{d}t = \int_{\mathbb{R}^p} \langle W(t)f(t), f(t) \rangle \, \mathrm{d}t < \infty.\]
It is well-known that the dual of \(L^2(W)\) can be identified with \(L^2(W^{-1})\), where the duality between these two spaces is given by the unweighted standard inner product.
We say that a matrix weight \(W\) satisfies the matrix \(A_2\) Muckenhoupt condition if
\begin{equation}\label{muckenhoupt}
[W]_{A_2} := \sup_{Q} \bigg \| \Big(\frac{1}{|Q|} \int_{Q} W(t)\, \mathrm{d}t \Big)^{1/2} \Big(\frac{1}{|Q|} \int_{Q} W^{-1}(t)\, \mathrm{d}t \Big)^{1/2} \bigg \| < \infty,
\end{equation}
where the supremum is taken over all cubes \(Q \subset \mathbb{R}^p\), and \(\|\cdot\|\) denotes the norm of the matrix acting on \(\mathbb{C}^d\).
The number \([W]_{A_2}\) is called the \(A_2\) characteristic of the weight \(W\). We say that a matrix weight \(W\) satisfies the dyadic matrix Muckenhoupt condition \(A_2^d\) on \(\mathbb{R}^p\) or \(\mathbb{R}\), if \eqref{muckenhoupt} is satisfied, but with the supremum now being taken only over dyadic cubes or intervals, respectively (see \cite{TrVo97}).
\subsection{Dyadic setting}
Since we will reduce the proof of our main result to the case of functions defined on \(\mathbb{R}\), we will only introduce the required notions in this setting. For the analogous definitions in the case of functions on \(\mathbb{R}^p\), we refer the readers to \cite{Hy11}.
The standard dyadic system in \(\mathbb{R}\) is
\[\mathcal D^0:=\bigcup_{j \in \mathbb{Z}} \mathcal D^0_j, \qquad \mathcal D^0_j:=\{2^{-j}([0,1)+k): k \in \mathbb{Z}\}.\]
Given a binary sequence \(\omega=(\omega_j)_{j \in \mathbb{Z}} \in (\{0,1\})^{\mathbb{Z}}\), a general dyadic system on \(\mathbb{R}\) is defined by
\[\mathcal D^{\omega}:=\bigcup_{j \in \mathbb{Z}} \mathcal D^{\omega}_j, \qquad \mathcal D^{\omega}_j:= \mathcal D^0_j + \sum_{i>j} 2^{-i} \omega_i.\]
When the particular choice of \(\omega\) is not important, we will use the notation \(\mathcal D\) for a generic dyadic system. We equip the set \(\Omega:=(\{0,1\})^{\mathbb{Z}}\) with the canonical product probability measure \(\mathbb{P}_{\Omega}\) which makes the coordinates \(\omega_j\) independent and identically distributed with \(\mathbb{P}_{\Omega}(\omega_j=0)=\mathbb{P}_{\Omega}(\omega_j=1)=1/2\). We denote by \(\mathbb{E}_{\Omega}\) the expectation over the random variables \(\omega_j, j \in \mathbb{Z}\).
For an interval \(I \in \mathcal D\), let \(I^+\) and \(I^-\) be the left and right children of \(I\). The parent of \(I\) will be denoted by \(\tilde{I}\). We will also use the notation \[\mathcal D_n(I):=\{J \in \mathcal D: J \subset I, |J|=2^{-n}|I|\}\]
for the collection of \(n\)-th generation children of \(I\), where \(|J|\) stands for the length of the interval \(J\).
For any interval \(I \in \mathcal D\), there is an associated Haar function defined by
\[h_I=|I|^{-1/2}(\chi_{I^+}-\chi_{I^-}),\]
where \(\chi_I\) is the characteristic function of \(I\).
For an arbitrary dyadic system \(\mathcal D\), the Haar functions form an orthogonal basis of \(L^2(\mathbb{R})\). Hence any function \(f \in L^2(\mathbb{R})\) admits the orthogonal expansion
\[f=\sum_{I \in \dd} \langle f,h_I \rangle h_I.\]
We denote the average of a locally integrable function \(f\) on the interval \(I\) by \(\langle f \rangle_I :=|I|^{-1}\int_I f(t)\, \mathrm{d}t\).
Let \(W\) be a matrix weight. For a sequence of \(d \times d\) matrices \(\sigma=\{\sigma_I\}_{I \in \mathcal D}\), we introduce the notation \( \|\sigma\|_{\infty, W} = \sup_{I \in \mathcal D} \big \| \langle W \rangle_I ^{1/2} \sigma_I \langle W \rangle_I ^{-1/2} \big \| \).
For a sequence \(\sigma\) such that \( \|\sigma\|_{\infty, W} < \infty\), we define the martingale transform operator \(T_{\sigma}\) by
\[T_\sigma f = \sum_{I \in \dd} \sigma_I \langle f,h_I \rangle h_I.\]
If $W$ is a matrix $A_2$ weight, then the condition \( \|\sigma\|_{\infty, W} < \infty\) is equivalent to the boundedness of \(T_{\sigma}\) on \(L^2(W)\) (see, e.g.
Theorem 5.2 in \cite{BiPeWi14} for an explicit statement; it is also contained in \cite{TrVo97}). Such martingale transforms are considered a good model for Calder\'{o}n-Zygmund singular integral operators.
A (cancellative) dyadic Haar shift on \(\mathbb{R}\) of parameters \((m,n)\), with \(m,n \in \mathbb{N}_0\), is an operator of the form
\[S f = \sum_{L \in \mathcal D}
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \langle f, h_I \rangle h_J,\]
where \(\left |c^{L}_{I,J}\right| \leq \frac{\sqrt{|I|} \sqrt{|J|}}{|L|} =2^{-(m+n)/2} \) and \(f\) is any locally integrable function. The number \(k := \max\{m,n\}+1\) is called the complexity of the Haar shift.
For \(0 \leq j \leq k-1\) we introduce the notation \(\mathcal{L} _j := \{ I \in \mathcal{D} : |I|=2^{j+kt}, t \in \mathbb{Z} \},\) and define the slice \(S_j\) by \[S_j f = \sum_{L \in \mathcal{L}_j}
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \langle f, h_I \rangle h_J. \]
We can thus decompose \(S\) as \(S=\sum _{j=0} ^{k-1} S_j\). The key point is now that the operators \(S_j\) can be seen as martingale transforms when we are moving \(k\) units of time at once, so it is possible to apply the Bellman function for dyadic martingale transforms.
Following the approach in \cite{Tr11}, one can show that it is enough to consider only dyadic Haar shifts on a dyadic system in \(\mathbb{R}\). The following construction works for general dyadic systems, but for convenience we will assume that we are dealing with the standard one. This reduction is obtained by ``arranging'' the dyadic cubes on the real line.
More precisely, for a dyadic cube \(Q\) in \(\mathbb{R}^p\), we choose a dyadic interval \(I\) such that \(|I| = |Q|\) (this interval \(I\) will correspond to the cube \(Q\)). We then split \(Q\) into two congruent parallelepipeds by dividing one of its sides into two parts, and then pick a bijection between these two parallelepipeds and the children of \(I\). By dividing a long side, we split each parallelepiped into two congruent ones, and then choose a bijection between the four parallelepipeds and the children of the two intervals from the previous step. After \(p\) divisions we obtain a bijection between the children of \(Q\) and the intervals \(J \in \mathcal D_p(I)\). The intervals \(J \in \mathcal D_n(I), 1 \leq n < p\), correspond to some ``almost children'' \(R\) of \(Q\), where by an ``almost child'' of \(Q\) we mean a parallelepiped with some sides coinciding with the sides of \(Q\), and the other sides being half of the corresponding sides of \(Q\).
This construction can also be done in the opposite direction. If \(\tilde{I}\) is the parent of the interval \(I\), and \(\tilde{Q}\) is the grandparent of \(Q\) of order \(p\), by the above method we obtain a bijection \(\Phi\) between the children and ``almost children'' of \(\tilde{Q}\), and the intervals \(J \in \mathcal D_n(\tilde{I}), 1 \leq n \leq p\), such that \(\Phi(Q) = I\). To make sure that \(\Phi(Q) = I\), at each division we have to assign to the ``almost child'' containing \(Q\) the dyadic interval of appropriate length that contains \(I\).
A locally integrable function \(f\) on \(\mathbb{R}^p\) will thus be transferred to a locally integrable function \(g\) on \(\mathbb{R}\) such that \(\langle f \rangle_Q = \langle g \rangle_I\), for all \(Q\) and \(I\) with \(\Phi(Q) = I\).
We now look at the differences that arise when using this reduction. If \(S\) is a dyadic Haar shift (or one of its slices) of complexity \(k\) in \(\mathbb{R}^p\), then its model in \(\mathbb{R}\) will be a Haar shift of complexity \(kp\).
If \(W\) is a matrix \(A_2^d\) weight on \(\mathbb{R}^p\), then the \(A_2^d\) characteristic of the corresponding weight on \(\mathbb{R}\) is \(\sup_{R} \big \| \langle W \rangle_R^{1/2} \langle W^{-1} \rangle_R ^{1/2} \big \|\), where the supremum is taken over all dyadic cubes in \(\mathbb{R}^p\) and all their ``almost children''. If \(R\) is an ``almost child'' of a cube \(Q\), then
\[\int_{R} W(t)\, \mathrm{d}t \leq \int_{Q} W(t)\, \mathrm{d}t, \quad \int_{R} W^{-1}(t)\, \mathrm{d}t \leq \int_{Q} W^{-1}(t)\, \mathrm{d}t,\]
and \(|R| \geq 2^{-p+1} |Q|\). We thus have
\begin{align*}
\big \| \langle W \rangle_R^{1/2} \langle W^{-1} \rangle_R^{1/2} \big \|^2 & = \big \| \langle W \rangle_R^{1/2} \langle W^{-1} \rangle_R \langle W \rangle_R^{1/2} \big \| \leq \big \| \langle W \rangle_R^{1/2} 2^{p-1} \langle W^{-1} \rangle_Q \langle W \rangle_R^{1/2} \big \| \\
& = 2^{p-1} \big \| \langle W^{-1} \rangle_Q^{1/2} \langle W \rangle_R \langle W^{-1} \rangle_Q^{1/2} \big \| \leq 2^{p-1} \big \| \langle W^{-1} \rangle_Q^{1/2} 2^{p-1} \langle W \rangle_Q \langle W^{-1} \rangle_Q^{1/2} \big \| \\
& = 2^{2(p-1)} \big \| \langle W \rangle_Q^{1/2} \langle W^{-1} \rangle_Q^{1/2} \big \|^2.
\end{align*}
Thus, after the transfer to the real line, the \(A_2^d\) characteristic \([W]_{A_2^d}\) of the weight increases at most by a factor of \(2^{2(p-1)}\).
We are using the following representation of a Calder\'{o}n-Zygmund operator in terms of dyadic Haar shifts.
\begin{theorem} [\protect{Hyt\"{o}nen \cite{Hy11}}] \label{repr}
Let \(T\) be a Calder\'{o}n-Zygmund operator on \(\mathbb{R}^p\) which satisfies the standard kernel estimates, the weak boundedness property \(|\langle T \chi_Q, \chi_Q\rangle | \leq C|Q|\) for all cubes \(Q\), and the vanishing paraproduct conditions \(T(1)=T^*(1)=0\). Then it has an expansion, say for \(f,g \in C^1_c(\mathbb{R}^p)\),
\[\langle T f,g \rangle_{L^2(\mathbb{R}^p),L^2(\mathbb{R}^p)} =
C \cdot \mathbb{E}_{\Omega} \sum_{m,n=0}^{\infty} \tau(m,n) \langle S^{mn}_{\omega} f, g \rangle_{L^2(\mathbb{R}^p),L^2(\mathbb{R}^p)},\]
where \(C\) is a constant depending only on the constants in the standard estimates of the kernel \(K\) and the weak boundedness property, \(S^{mn}_{\omega}\) is a dyadic Haar shift in \(\mathbb{R}^p\) of parameters \((m,n)\) on the dyadic system \(\mathcal{D}^{\omega}\), and \(\tau(m,n) \lesssim P(\max\{m,n\}) 2^{-\delta \max\{m,n\}} \), with \(P\) a polynomial.
\end{theorem}
We define the function \(N:[1, \infty) \to [1, \infty)\) by
\[ N(X)= \sup \|T_{\sigma}\|_{L^2(W) \to L^2(W)}, \]
where the supremum is taken over all \( d \times d\) matrix \(A_2^d\) weights \(W\) with \([W]_{A_2^d} \leq X\) and all matrix sequences $\{\sigma_I\}_{I \in \mathcal{D}}$ with
\(\|\sigma\|_{\infty, W} \leq 1\). It was shown in \cite{BiPeWi14} that
\begin{equation} \label{cubicbound}
N(X) \lesssim (\log X ) X^{3/2}.
\end{equation}
Here is our main result:
\begin{theorem}\label{mainthm}
Let \(W\) be a \(d \times d\) matrix \(A_2\) weight on \(\mathbb{R}^p\).
Let \(K\) be a standard kernel and \(T\) be a Calder\'{o}n-Zygmund operator on \(\mathbb{R}^p\) associated to \(K\). Suppose that \(T\) satisfies the weak boundedness property \(|\langle T \chi_Q, \chi_Q \rangle | \leq C|Q|\) for all cubes \(Q\), and the vanishing paraproduct conditions \(T(1)=T^*(1)=0\). Then
\[\|T\|_{L^2(W) \to L^2(W)} \leq C \cdot p d N(2^{2(p-1)} [W]_{A_2}) \leq C_p \cdot d N([W]_{A_2}),\]
where \(C\) depends only on the constants in the standard estimates and the weak boundedness property, while \(C_p\) depends on \(C\) and \(p\).
\end{theorem}
The second inequality in the theorem is a simple consequence of (\ref{cubicbound}),
we therefore turn to the first inequality. It is enough to show a corresponding result for Haar shift operators and then use the representation theorem of T. Hyt\"{o}nen.
Let \(f,g \in C_c^1(\mathbb{R}^p)\) (if \(W\) is a matrix \(A_2\) weight, this space is dense in both \(L^2(W)\) and \(L^2(W^{-1})\)). Since the duality between \(L^2(W)\) and \(L^2(W^{-1})\) is the same as the standard duality on \(L^2(\mathbb{R}^p)\),
by Theorem \ref{repr} we have the representation
\[\langle Tf, g \rangle_{L^2(W),L^2(W^{-1})} = C \cdot \mathbb{E}_{\Omega} \sum_{m,n=0}^{\infty} \tau(m,n) \langle S^{mn}_{\omega} f, g \rangle_{L^2(W),L^2(W^{-1})}\]
and therefore
\[ \|T\|_{L^2(W) \to L^2(W)} \leq C \sum_{m,n=0}^{\infty} \tau(m,n) \|S^{mn}_{\omega}\|_{L^2(W) \to L^2(W)}.\]
We will show the estimate
\[
\|S^{mn}\|_{L^2(W) \to L^2(W)} \lesssim (\max\{m,n\}+1) p d N(2^{2(p-1)} [W]_{A_2})
\]
for all dyadic Haar shifts \(S^{mn}\) on \(\mathbb{R}^p\) with parameters \((m,n)\), which ensures the convergence of the series and completes the proof of Theorem \ref{mainthm}. Using the above transference result, we can restrict ourselves to Haar shifts in \(\mathbb{R}\). This is the content of the following theorem.
\begin{theorem}\label{mainshift}
Let \(S\) be a dyadic Haar shift on \(\mathbb{R}\) of complexity \(k \geq 1\) and \(W\) be a matrix \(A_2^d\) weight. Then
\[\|S\|_{L^2(W) \to L^2(W)} \leq c \cdot k d N([W]_{A_2^d}),\]
where \(c\) is an absolute, positive constant.
\end{theorem}
\section{Reduction of the proof of Theorem \ref{mainshift} }
Let \(W\) be a \(d \times d\) matrix \(A_2^d\) weight on \(\mathbb{R}\). For each \(I \in \mathcal D\), choose an orthonormal basis of eigenvectors \(B_I = \{e_I^1, e_I^2, \ldots, e_I^d\}\) of \( \langle W \rangle_I\), and let \(P_I^i, 1 \leq i \leq d\), be the corresponding orthogonal projection onto the span of \(e_I^i\).
Using the definition of the martingale transform operator \(T_{\sigma}\) and the fact that each \(\langle W\rangle_I\) commutes with the \(P_I^i\)'s, we have for $f \in L^2(W)$, $g \in L^2(W^{-1})$,
\begin{align} \label{est:projections}
& \sum_{i=1}^d \sum_{I \in \dd} \big| \big \langle P_I^i \langle f,h_I \rangle, P_I^i \langle g,h_I \rangle \big \rangle_{\mathbb{C}^d} \big| \\ \nonumber
& \le d \cdot \sup_{\sigma}
\sum_{I \in \dd} \big \langle \sigma_I \langle f,h_I \rangle, \langle g,h_I \rangle \big \rangle_{\mathbb{C}^d} \\ \nonumber
& = d \cdot \sup_{\sigma}
\langle T_\sigma f,g \rangle_{L^2(W),L^2(W^{-1})} \\\nonumber
&\le d \cdot \sup_{\sigma} \|T_{\sigma}\|_{L^2(W) \to L^2(W)} \|f \|_{L^2(W)} \|g\|_{L^2(W^{-1})} \\ \nonumber
&\le d \cdot N([W]_{A_2^d}) \|f \|_{L^2(W)} \|g\|_{L^2(W^{-1})}, \nonumber
\end{align}
where the supremum is now taken over all matrix sequences \(\sigma = \{\sigma_I\}_{I \in \mathcal D}\) such that \(\| \sigma\|_{\infty, W} \le 1\). Notice that it would suffice to just take the $\sigma_I$'s which are diagonal in the basis $B_I$.
We can thus rewrite the estimate (\ref{est:projections}) above as
\begin{multline} \label{linearisation}
\sum_{i=1}^d \sum_{I \in \dd} \left | \big \langle P_I^i \big( \langle f \rangle _{I^+} - \langle f \rangle_{I^-} \big), P_I^i \big( \langle g \rangle _{I^+} - \langle g\rangle_{I^-} \big) \big \rangle_{\mathbb{C}^d} \right | \cdot |I| \\
= 4 \sum_{i=1}^d \sum_{I \in \dd} \left | \big \langle P_I^i \langle f,h_I \rangle, P_I^i \langle g,h_I \rangle \big \rangle_{\mathbb{C}^d} \right |
\leq 4 \, d \cdot N([W]_{A_2^d}) \|f\|_{L^2(W)} \|g\|_{L^2(W^{-1})}
\end{multline}
for all \(f \in L^2(W)\) and \( g \in L^2(W^{-1}).\)
Since \(S\) is a Haar shift operator of complexity \(k\), it has the form
\[S f = \sum_{L \in \mathcal D}
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \langle f, h_I \rangle h_J,\]
where \(\left |c^{L}_{I,J}\right| \leq \frac{\sqrt{|I|} \sqrt{|J|}}{|L|} =2^{-(m+n)/2} \).
Let \(f \in L^2(W),\ g \in L^2(W^{-1})\) and \(0 \leq j \leq k-1\) be fixed. For the slice \(S_j\), we can write
\begin{align*}
& \left \langle S_j f,g \right \rangle_{L^2(W),L^2(W^{-1})} = \Bigg \langle \sum_{L \in \mathcal{L}_j}
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \langle f, h_I \rangle h_J, \sum_{I' \in \mathcal D} \langle g, h_{I'} \rangle h_{I'} \Bigg \rangle _{L^2(W),L^2(W^{-1})}\\
& \qquad = \sum_{L \in \mathcal{L}_j} \sum_{I' \in \mathcal D}
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \big \langle \langle f, h_I \rangle , \langle g, h_{I'} \rangle \big \rangle _{\mathbb{C}^d} \langle h_J,h_{I'} \rangle_{L^2(\mathbb{R}),L^2(\mathbb{R})} \\
& \qquad = \sum_{L \in \mathcal{L}_j}
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \big \langle \langle f, h_I \rangle , \langle g, h_J \rangle \big \rangle _{\mathbb{C}^d}
= \sum_{L \in \mathcal{L}_j} \sum_{i = 1}^d
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \big \langle P_L^i \langle f, h_I \rangle , P_L^i \langle g, h_J \rangle \big \rangle _{\mathbb{C}^d} \\
& \qquad = \sum_{L \in \mathcal{L}_j} \sum_{i = 1}^d
\sum_{\substack{
I \in \mathcal{D}_m(L) \\
J \in \mathcal{D}_n(L)}}
c^{L}_{I,J} \frac{|I|^{1/2}}{2^{k-m}} \frac{|J|^{1/2}}{2^{k-n}} \Bigg \langle
\sum_{\substack{
P \in \mathcal{D}_k(L) \\
P \subset I^{+}}}
P_L^i \big ( \langle f \rangle _P - \langle f \rangle _L \big ) +
\sum_{\substack{
P \in \mathcal{D}_k(L) \\
P \subset I^{-}}}
P_L^i \big ( \langle f \rangle _L - \langle f \rangle _P \big ) , \Bigg. \\
& \hspace{6 cm} \Bigg.
\sum_{\substack{
Q \in \mathcal{D}_k(L) \\
Q \subset J^{+}}}
P_L^i \big( \langle g \rangle _Q - \langle g \rangle _L \big ) +
\sum_{\substack{
Q \in \mathcal{D}_k(L) \\
Q \subset J^{-}}}
P_L^i \big ( \langle g \rangle _L - \langle g \rangle _Q \big )
\Bigg \rangle_{\mathbb{C}^d}.
\end{align*}
We therefore have
\begin{align} \label{est:shift}
& \Big | \left \langle S_j f,g \right \rangle_{L^2(W),L^2(W^{-1})} \Big| \\ \nonumber
& \leq \sum_{L \in \mathcal{L}_j} |L| \sum_{i = 1}^d
\sum_{P,Q \in \mathcal{D}_k(L)} \bigg | \bigg \langle P_L^i \bigg ( \frac{\langle f \rangle _P - \langle f \rangle _L}{2^k} \bigg ), P_L^i \bigg (\frac{\langle g \rangle _Q - \langle g \rangle _L}{2^k} \bigg ) \bigg \rangle_{\mathbb{C}^d} \bigg|. \nonumber
\end{align}
\section{The Bellman function}
We are now going to define the Bellman function associated to our problem. Let \(X >1\), fix a dyadic interval \(I_0\), and for \(\mathbf f \in \mathbb{C}^d, \mathbf F \in \mathbb{R}, \mathbf U \in \mathcal{M}_d(\mathbb{C}), \mathbf g \in \mathbb{C}^d, \mathbf G \in \mathbb{R}, \mathbf V \in \mathcal{M}_d(\mathbb{C})\) satisfying
\begin{equation}\label{domain}
\mathbf U, \mathbf V >0, I_d \leq \mathbf V^{1/2} \mathbf U \mathbf V^{1/2} \leq X \cdot I_d, \|\mathbf V^{-1/2} \mathbf f\|_{\mathbb{C}^d}^2 \leq \mathbf F,\ \| \mathbf U^{-1/2} \mathbf g\|_{\mathbb{C}^d}^2 \leq \mathbf G ,
\end{equation}
define the function \(\mathcal B_X=\mathcal B_X^{I_0}: \mathbb{C}^d \times \mathbb{R} \times \mathcal{M}_d(\mathbb{C}) \times \mathbb{C}^d \times \mathbb{R} \times \mathcal{M}_d(\mathbb{C}) \to \mathbb{R},\) by
\begin{equation} \label{bell}
\mathcal B_X(\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V):=
|I_0|^{-1} \sup \sum_{I \subseteq I_0} \left | \big \langle \sigma_I \big ( \langle f \rangle _{I^+} - \langle f \rangle_{I^-} \big ), \langle g \rangle _{I^+} - \langle g\rangle_{I^-} \big \rangle_{\mathbb{C}^d} \right | \cdot |I|,
\end{equation}
where the supremum is taken over all functions \(f,g :\mathbb{R} \to \mathbb{C}^d\) and matrix \(A_2\) weights \(W\) on \(I_0\) such that
\begin{equation}\label{supdomain}
\langle f \rangle _{I_0}=\mathbf f \in \mathbb{C}^d, \quad \big \langle \|W^{1/2} f\|^2_{\mathbb{C}^d} \big \rangle_{I_0} = \mathbf F \in \mathbb{R}, \quad \langle g \rangle _{I_0}=\mathbf g \in \mathbb{C}^d, \quad \big \langle \|W^{-1/2} g\|^2_{\mathbb{C}^d} \big \rangle_{I_0} = \mathbf G \in \mathbb{R},
\end{equation}
\begin{equation}\label{weightdomain}
\sup_{\substack {
I \in \mathcal{D} \\
I \subset I_0} }
\|\langle W \rangle_I ^{1/2} \langle W^{-1} \rangle_I ^{1/2} \|^2 \leq X, \quad \langle W \rangle _{I_0} = \mathbf U, \quad \langle W^{-1} \rangle_{I_0} = \mathbf V,
\end{equation}
and all sequences of $d \times d$ matrices $\sigma = \{\sigma_I\}_{I \in \mathcal{D}}$ with $\|\sigma\|_{\infty, W} \le 1$.
The Bellman function \(\mathcal B_X\) has the following properties:
\begin{enumerate}[(i)]
\item (Domain) The domain \(\mathfrak{D}_X:=\mathrm{Dom}\, \mathcal B_X\) is given by \eqref{domain}. This means that for every tuple \((\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V)\) that satisfies \eqref{domain}, there exist functions \(f, g\) and a matrix weight \(W\) such that \eqref{supdomain} holds, so the supremum is not \(-\infty\). Conversely, if the variables \(\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V\) are the corresponding averages of some functions \(f, g\) and \(W\), then they must satisfy condition \eqref{domain}. Since the set \(\{(\mathbf U,\mathbf V) \in \mathcal{M}_d(\mathbb{C}) \times \mathcal{M}_d(\mathbb{C}): \mathbf U,\mathbf V >0, I_d \leq \mathbf V^{1/2} \mathbf U \mathbf V^{1/2} \leq X \cdot I_d\}\) is not convex, the domain \(\mathfrak{D}_X\) is not convex either.
\item (Range) \(0 \leq \mathcal B_X(\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V) \leq 4 N(X) \mathbf F^{1/2} \mathbf G^{1/2}\) for all \((\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V) \in \mathfrak{D}_X.\)
\item (Concavity condition) Consider all tuples \(A=(\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V), A_+=(\mathbf f_+, \mathbf F_+, \mathbf U_+, \mathbf g_+, \mathbf G_+, \mathbf V_+)\) and \(A_-=(\mathbf f_-, \mathbf F_-, \mathbf U_-, \mathbf g_-, \mathbf G_-, \mathbf V_-)\) in \(\mathfrak{D}_X\) such that \(A=(A_+ + A_-)/2\).
For all such tuples, we have the following concavity condition:
\[\mathcal B_X(A) \geq \frac{\mathcal B_X(A_+)+\mathcal B_X(A_-)}{2} +
\sup_{\| \tau\|_{\mathbf U} \le 1 } \left| \left \langle \tau (\mathbf f_+ - \mathbf f_- ), \mathbf g_+ - \mathbf g_- \right \rangle_{\mathbb{C}^d} \right| .\]
\end{enumerate}
Here, the supremum is taken over all $d \times d$ matrices $\tau$ with $\|\tau\|_{\mathbf U} := \|\mathbf U^{1/2} \tau \mathbf U^{-1/2}\| \le 1$.
\par
Let us now explain these properties of the function \(\mathcal B_X\). For any matrix weight \(W\) and any interval \(I\) we have \(\langle W^{-1} \rangle_I ^{1/2} \langle W \rangle_I \langle W^{-1} \rangle_I ^{1/2} \geq I_d\), so \(\mathbf V^{1/2} \mathbf U \mathbf V^{1/2} \geq I_d\). The inequality \(\mathbf V^{1/2} \mathbf U \mathbf V^{1/2} \leq X \cdot I_d\) follows from the definition of the matrix \(A_2\) Muckenhoupt condition. Conversely, for any positive definite matrices \(\mathbf U, \mathbf V\) such that \(I_d \leq \mathbf V^{1/2} \mathbf U \mathbf V^{1/2} \leq X \cdot I_d\), we can find a matrix weight \(W\) that satisfies \eqref{weightdomain}. To see this, we construct a matrix weight \(W\) that is constant on the children of \(I_0\).\\
Given two matrices \(\mathbf U\) and \(\mathbf V\) as above, we want to find two positive definite matrices, \(W_1\) and \(W_2\), such that
\[\mathbf U=\frac{1}{2}(W_1+W_2) \quad \mbox{and} \quad \mathbf V=\frac{1}{2}(W_1^{-1}+W_2^{-1}).\]
We have \(\mathbf U = W_1 \mathbf V W_2 = W_2 \mathbf V W_1\), thus \(W_2^{-1} = \mathbf V W_1 \mathbf U^{-1} = \mathbf U^{-1} W_1 \mathbf V\). Let \(M:= \mathbf U^{-1/2} W_1 \mathbf U^{-1/2}\), \(\ N:= \mathbf U^{-1/2} \mathbf V^{-1} \mathbf U^{-1/2}\), and notice that \(N \leq I_d\). Then the matrices \(M\) and \(N\) commute:
\begin{align*}
N^{-1}M & = (\mathbf U^{1/2} \mathbf V \mathbf U^{1/2}) (\mathbf U^{-1/2} W_1 \mathbf U^{-1/2}) = \mathbf U^{1/2} (\mathbf V W_1 \mathbf U^{-1}) \mathbf U^{1/2} \\
& = \mathbf U^{1/2} (\mathbf U^{-1} W_1 \mathbf V) \mathbf U^{1/2} = (\mathbf U^{-1/2} W_1 \mathbf U^{-1/2}) (\mathbf U^{1/2} \mathbf V \mathbf U^{1/2}) = MN^{-1}.
\end{align*}
Furthermore, \(\mathbf U = \frac{1}{2}(W_1 + W_2) = \frac{1}{2}(W_1 + \mathbf U W_1^{-1} \mathbf V^{-1})\), so \(W_1 = \frac{1}{2}(W_1 \mathbf U^{-1} W_1 + \mathbf V^{-1})\). It follows that
\[M = \frac{1}{2}(\mathbf U^{-1/2} W_1 \mathbf U^{-1} W_1 \mathbf U^{-1/2} + \mathbf U^{-1/2} \mathbf V^{-1} \mathbf U^{-1/2}) = \frac{1}{2}(M^2+N),\]
hence \(M\) satisfies the quadratic equation \((M^2 - 2M + I_d) - (I_d-N) = 0\). Choosing \(M = I_d + (I_d-N)^{1/2}\), we obtain
\[W_1 = \mathbf U^{1/2} M \mathbf U^{1/2} = \mathbf U^{1/2} (I_d + (I_d-N)^{1/2}) \mathbf U^{1/2},\]
and
\[W_2 = 2\mathbf U - W_1 = \mathbf U^{1/2} (I_d - (I_d-N)^{1/2}) \mathbf U^{1/2}.\]
It is clear that both \(W_1\) and \(W_2\) are positive definite matrices. We now set \(W:= W_1 \chi_{I_0^+} + W_2 \chi_{I_0^-}\) and notice that \(W\) satisfies the required properties \eqref{weightdomain}.
The inequalities \(\|\mathbf V^{-1/2} \mathbf f\|_{\mathbb{C}^d}^2 \leq \mathbf F \) and \(\| \mathbf U^{-1/2} \mathbf g\|_{\mathbb{C}^d}^2 \leq \mathbf G\) follow from the Cauchy-Schwarz Inequality. To see this, choose a unit vector \(e \in \mathbb{C}^d\) such that \(\|\mathbf V^{-1/2} \mathbf f\|_{\mathbb{C}^d} = |\langle \mathbf V^{-1/2} \mathbf f ,e \rangle_{\mathbb{C}^d} |\). We then have
\begin{align*}
|\langle \mathbf V^{-1/2} \mathbf f ,e \rangle_{\mathbb{C}^d} | & = \big | \big \langle \mathbf V^{-1/2} \langle f \rangle _{I_0} ,e \big \rangle_{\mathbb{C}^d} \big | = \big | \big \langle \langle \mathbf V^{-1/2}f \rangle _{I_0},e \big \rangle_{\mathbb{C}^d} \big | \\
& = \bigg | \Big \langle \frac{1}{|I_0|} \int_{I_0} \mathbf V^{-1/2} W^{-1/2}(t) W^{1/2}(t) f(t) \, \mathrm{d}t ,e \Big \rangle_{\mathbb{C}^d} \bigg | \\
& = \bigg | \frac{1}{|I_0|} \int_{I_0} \big \langle W^{1/2}(t) f(t) ,W^{-1/2}(t) \mathbf V^{-1/2} e \big \rangle_{\mathbb{C}^d} \mathrm{d}t \bigg | \\
& \leq \bigg ( \frac{1}{|I_0|} \int_{I_0} \| W^{1/2}(t) f(t)\|_{\mathbb{C}^d}^2 \mathrm{d}t \bigg )^{1/2} \bigg ( \frac{1}{|I_0|} \int_{I_0} \| W^{-1/2}(t) \mathbf V^{-1/2} e\|_{\mathbb{C}^d}^2 \mathrm{d}t \bigg )^{1/2} \\
& = \mathbf F^{1/2} \bigg ( \frac{1}{|I_0|} \int_{I_0} \big \langle W^{-1}(t) \mathbf V^{-1/2} e, \mathbf V^{-1/2} e \big \rangle_{\mathbb{C}^d} \mathrm{d}t \bigg )^{1/2} \\
& = \mathbf F^{1/2} \Big \langle \frac{1}{|I_0|} \int_{I_0} W^{-1}(t) \mathbf V^{-1/2} e \, \mathrm{d}t , \mathbf V^{-1/2} e \Big \rangle_{\mathbb{C}^d}^{1/2} \\
& = \mathbf F^{1/2} \big \langle \mathbf V \mathbf V^{-1/2} e, \mathbf V^{-1/2} e \big \rangle_{\mathbb{C}^d}^{1/2} = \mathbf F^{1/2},
\end{align*}
since all matrices involved are positive definite. The other inequality follows in the same way.
On the other hand, given a tuple \((\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V) \in \mathfrak{D}_X\) and a matrix weight \(W\) satisfying (\ref{weightdomain}),
we can always find two functions \(f, g\) satisfying \eqref{supdomain}. We first choose a function \(\phi : \mathbb{R} \to \mathbb{C}^d\) such that
\[\int_{I_0} \phi(t)\, \mathrm{d}t =0, \quad \int_{I_0} W(t) \phi(t)\, \mathrm{d}t =0, \quad \frac{1}{|I_0|} \int_{I_0} \|W^{1/2}(t) \phi(t) \|^2_{\mathbb{C}^d}\, \mathrm{d}t = 1,\]
and then set \(f(t):= W^{-1}(t) \mathbf V^{-1} \mathbf f + (\mathbf F - \|\mathbf V^{-1/2}\mathbf f\|^2)^{1/2} \phi(t)\). It can be easily checked that this function has the required properties. A similar argument allows us to construct the function \(g\).
Property (ii) follows from the definition of \(\mathcal B_X\) and the inequality \eqref{linearisation}.
To prove the concavity condition, we consider three tuples \(A, A_+, A_- \in \mathfrak{D}_X\) such that \(A=(A_+ + A_-)/2\) and choose two functions \(f, g\) and a matrix weight \(W\) on \(I_0\) so that
\begin{equation}\label{avI0+-}
A_{\pm}= \Big ( \langle f \rangle _{I_0^{\pm}},\ \big \langle \|W^{1/2}f\|_{\mathbb{C}^d}^2 \big \rangle_{I_0^{\pm}},\ \langle W \rangle _{I_0^{\pm}},\ \langle g \rangle _{I_0^{\pm}},\ \big \langle \|W^{-1/2}g\|_{\mathbb{C}^d}^2 \big \rangle_{I_0^{\pm}},\ \langle W^{-1} \rangle _{I_0^{\pm}} \Big ).
\end{equation}
Then
\[A=\frac{A_+ + A_-}{2} = \Big ( \langle f \rangle _{I_0},\ \big \langle \|W^{1/2} f\|_{\mathbb{C}^d}^2 \big \rangle_{I_0},\ \langle W \rangle _{I_0},\ \langle g \rangle _{I_0},\ \big \langle \|W^{-1/2} g\|_{\mathbb{C}^d}^2 \big \rangle_{I_0},\ \langle W^{-1} \rangle _{I_0} \Big )\]
is the vector of corresponding averages over \(I_0\). The expression in the definition of \(\mathcal B_X(\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V)\), before taking the supremum, can be split into the average of the corresponding expressions for \(\mathcal B_X(\mathbf f_+, \mathbf F_+, \mathbf U_+, \mathbf g_+, \mathbf G_+, \mathbf V_+)\) and \(\mathcal B_X(\mathbf f_-, \mathbf F_-, \mathbf U_-, \mathbf g_-, \mathbf G_-, \mathbf V_-)\), plus the term
\[ \sup_{\sigma_{I_0}: \|\mathbf U^{1/2} \sigma_{I_0} \mathbf U^{-1/2} \| \le 1 } \big| \left \langle \sigma_{I_0} (\mathbf f_+ - \mathbf f_- ), (\mathbf g_+ - \mathbf g_- ) \right \rangle_{\mathbb{C}^d} \big |. \]
Taking now the supremum over all \(f, g\) and \(W\) that satisfy conditions \eqref{avI0+-} we conclude that
\[\frac{\mathcal B_X(A_+)+\mathcal B_X(A_-)}{2} + \sup_{\| \tau\|_{\mathbf U} \le 1 } \big| \left \langle \tau (\mathbf f_+ - \mathbf f_- ), (\mathbf g_+ - \mathbf g_- ) \right \rangle_{\mathbb{C}^d} \big| \leq \mathcal B_X(A).\]
This inequality is true because the set of functions over which we are taking the supremum is smaller than the one corresponding to \(\mathcal B_X(A)\), since we are excluding all those functions \(f, g\) and \(W\) whose averages on the children of \(I_0\) are not the prescribed values in \eqref{avI0+-}.
\begin{remark}
\normalfont
The concavity condition (iii) implies that the function \(\mathcal B_X\) is midpoint concave, that is \(\mathcal B_X \big(\frac{A_+ + A_-}{2}\big) \geq \frac{1}{2} \big(\mathcal B_X(A_+) + \mathcal B_X(A_-) \big)\), for all \(A_+, A_- \in \mathfrak{D}_X \) with \( \frac{A_+ + A_-}{2} \in \mathfrak{D}_X\).
It is well-known that locally bounded below midpoint concave functions are actually concave (see e.g.~\cite{RoVa73}, Theorem C, p. 215). Therefore \(\mathcal B_X\) is a concave function.
\end{remark}
We conclude this section with a result that allows us to overcome the non-convexity of the domain of the Bellman function.
\begin{lemma}\label{extdom}
Let \(A, A_+, A_- \in \mathfrak{D}_X\) such that \(A=(A_+ + A_-)/2\). Then the line segment with endpoints \(A_+\) and \(A_-\) belongs to \(\mathfrak{D}_{4X}\).
\end{lemma}
\begin{proof}
We start by proving that the set $\mathfrak{D}_\infty$ given by the inequalities
\begin{equation*}
\mathbf U, \mathbf V >0, I_d \leq \mathbf V^{1/2} \mathbf U \mathbf V^{1/2}, \|\mathbf V^{-1/2} \mathbf f\|_{\mathbb{C}^d}^2 \leq \mathbf F,\ \| \mathbf U^{-1/2} \mathbf g\|_{\mathbb{C}^d}^2 \leq \mathbf G
\end{equation*}
is convex.
We first prove that the inequality \(\|\mathbf V^{-1/2} \mathbf f\|_{\mathbb{C}^d}^2 \leq \mathbf F\) is convex (the other inequality, \(\ \| \mathbf U^{-1/2} \mathbf g\|_{\mathbb{C}^d}^2 \leq \mathbf G\), follows in a similar way). It is enough to show that if \(\|\mathbf V_1^{-1/2} \mathbf f_1\|_{\mathbb{C}^d}^2 \leq \mathbf F_1\) and \(\|\mathbf V_2^{-1/2} \mathbf f_2\|_{\mathbb{C}^d}^2 \leq \mathbf F_2\), then
\begin{equation} \label{est:convex}
\bigg \| \left( \frac{1}{2}(\mathbf V_1 + \mathbf V_2)\right)^{-1/2} \frac{1}{2}(\mathbf f_1 + \mathbf f_2)\bigg \|_{\mathbb{C}^d}^2 \leq \frac{1}{2}(\mathbf F_1 + \mathbf F_2).
\end{equation}
We have
\begin{align*}
\|(\mathbf V_1 + \mathbf V_2)^{-1/2} (\mathbf f_1 + \mathbf f_2)\|_{\mathbb{C}^d}^2 & = \left \langle (\mathbf V_1 + \mathbf V_2)^{-1}, (\mathbf f_1 + \mathbf f_2) \otimes (\mathbf f_1 + \mathbf f_2) \right \rangle _{HS} \\
& = \left \langle (\mathbf V_1 + \mathbf V_2)^{-1}, \mathbf f_1 \otimes \mathbf f_1 + (\mathbf f_1 \otimes \mathbf f_2 + \mathbf f_2 \otimes \mathbf f_1) + \mathbf f_2 \otimes \mathbf f_2 \right \rangle _{HS} \\
& =: T_1 + T_2 + T_3,
\end{align*}
where \(\langle \cdot, \cdot \rangle_{HS}\) denotes the Hilbert-Schmidt (trace) inner product. \\
Using the identities
\[(\mathbf V_1 + \mathbf V_2)^{-1} = \mathbf V_1^{-1} - \mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1} \quad \mbox{and} \quad (\mathbf V_1 + \mathbf V_2)^{-1} = \mathbf V_2^{-1} - \mathbf V_2^{-1} \mathbf V_1 (\mathbf V_1 + \mathbf V_2)^{-1}, \]
we get that
\[T_1 = \left \langle (\mathbf V_1 + \mathbf V_2)^{-1}, \mathbf f_1 \otimes \mathbf f_1 \right \rangle_{HS} = \|\mathbf V_1^{-1/2} \mathbf f_1\|_{\mathbb{C}^d}^2 - \left \langle \mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1}, \mathbf f_1 \otimes \mathbf f_1 \right \rangle_{HS},\]
and
\[T_3 = \left \langle (\mathbf V_1 + \mathbf V_2)^{-1}, \mathbf f_2 \otimes \mathbf f_2 \right \rangle_{HS} = \|\mathbf V_2^{-1/2} \mathbf f_2\|_{\mathbb{C}^d}^2 - \left \langle \mathbf V_2^{-1} \mathbf V_1 (\mathbf V_1 + \mathbf V_2)^{-1}, \mathbf f_2 \otimes \mathbf f_2 \right \rangle_{HS}.\]
Noting that $\mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1} = (\mathbf V_1 + \mathbf V_2)^{-1} \mathbf V_2 \mathbf V_1^{-1} >0$ and writing $\tilde{\mathbf f}_2 =\mathbf V_1 \mathbf V_2^{-1} \mathbf f_2$, we find that
\begin{align*}
T_1 + T_2 + T_3
& \leq - \left \langle \mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1}, \mathbf f_1 \otimes \mathbf f_1 \right \rangle_{HS} - \left \langle \mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1}, \tilde{\mathbf f}_2 \otimes \tilde{\mathbf f}_2 \right \rangle_{HS} \\
& \qquad + \left \langle \mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1}, \mathbf f_1 \otimes \tilde{\mathbf f}_2 \right \rangle_{HS}
+ \left \langle \mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1}, \tilde{\mathbf f}_2 \otimes \mathbf f_1 \right \rangle_{HS} + \mathbf F_1 + \mathbf F_2 \\
&= - \left \langle \mathbf V_1^{-1} \mathbf V_2 (\mathbf V_1 + \mathbf V_2)^{-1}, (\mathbf f_1 - \tilde{\mathbf f}_2 ) \otimes( \mathbf f_1 - \tilde{\mathbf f}_2 ) \right \rangle_{HS}+ \mathbf F_1 + \mathbf F_2
\leq \mathbf F_1 + \mathbf F_2.
\end{align*}
This concludes the proof of our claim.
We now check that the set \(C_0:=\{(\mathbf U,\mathbf V) \in \mathcal{M}_d(\mathbb{C}) \times \mathcal{M}_d(\mathbb{C}): \mathbf U,\mathbf V >0, I_d \leq \mathbf V^{1/2} \mathbf U \mathbf V^{1/2}\}\) is convex. As before, it is enough to show that it is midpoint convex. \\
Let \( (\mathbf U_1,\mathbf V_1), (\mathbf U_2,\mathbf V_2) \in C_0\). We have to prove that
\[I_d \leq \left( \frac{\mathbf V_1+\mathbf V_2}{2} \right)^{1/2} \left( \frac{\mathbf U_1+\mathbf U_2}{2} \right) \left( \frac{\mathbf V_1+\mathbf V_2}{2} \right)^{1/2},\]
which is equivalent to
\[(\mathbf V_1+\mathbf V_2)(\mathbf U_1+\mathbf U_2)(\mathbf V_1+\mathbf V_2) \geq 4(\mathbf V_1+\mathbf V_2).\]
Since \( (\mathbf U_1,\mathbf V_1), (\mathbf U_2,\mathbf V_2) \in C_0\), we have \(\mathbf U_1 \geq \mathbf V_1^{-1}\) and \(\mathbf U_2 \geq \mathbf V_2^{-1}\), so
\begin {align*}
(\mathbf V_1+\mathbf V_2)(\mathbf U_1+\mathbf U_2)(\mathbf V_1+\mathbf V_2) & \geq (\mathbf V_1+\mathbf V_2)(\mathbf V_1^{-1}+\mathbf V_2^{-1})(\mathbf V_1+\mathbf V_2) \\
& = 3 \mathbf V_1 + 3 \mathbf V_2 + \mathbf V_1 \mathbf V_2^{-1} \mathbf V_1 + \mathbf V_2 \mathbf V_1^{-1} \mathbf V_2 .
\end{align*}
It is therefore enough to check that
\[ \mathbf V_1 \mathbf V_2^{-1} \mathbf V_1 + \mathbf V_2 \mathbf V_1^{-1} \mathbf V_2 - \mathbf V_1 - \mathbf V_2 \geq 0,\]
which is the same as showing that
\[\mathbf V_1 ^{1/2}\mathbf V_2^{-1} \mathbf V_1^{1/2} + \mathbf V_1 ^{-1/2}\mathbf V_2 \mathbf V_1^{-1/2} \mathbf V_1 ^{-1/2}\mathbf V_2 \mathbf V_1^{-1/2} -I_d - \mathbf V_1 ^{-1/2}\mathbf V_2 \mathbf V_1^{-1/2} \geq 0.\]
Let \(T:=\mathbf V_1 ^{1/2}\mathbf V_2^{-1} \mathbf V_1^{1/2} >0\). The previous inequality becomes \(T+T^{-2} - I_d -T^{-1} \geq 0\), which is equivalent to \(T^3 +I_d -T^2 - T \geq 0\). But \(T^3 +I_d -T^2 - T = (T-I_d)(T+I_d)(T-I_d),\) and this is a positive semidefinite matrix since \(T+I_d \geq 0\). This concludes the proof of the convexity of \(C_0\).
To finish the proof of the lemma, we have to show that if \( (\mathbf U,\mathbf V), (\mathbf U_+,\mathbf V_+), (\mathbf U_-,\mathbf V_-)\) are in the set \(C_X:=\{(\mathbf U,\mathbf V) \in \mathcal{M}_d(\mathbb{C}) \times \mathcal{M}_d(\mathbb{C}): \mathbf U,\mathbf V >0, \mathbf V^{1/2} \mathbf U \mathbf V^{1/2} \leq X \cdot I_d\}\) and \((\mathbf U,\mathbf V) = \frac{1}{2}[(\mathbf U_+,\mathbf V_+) + (\mathbf U_-,\mathbf V_-)]\), then for all \(\theta \in [0,1]\), the points \((\mathbf U_{\theta}, \mathbf V_{\theta}) = (\theta \mathbf U_+ + (1-\theta)\mathbf U_-, \theta \mathbf V_+ + (1-\theta)\mathbf V_-)\) belong to the set \(C_{4X}\).
Since \(\theta \in [0,1]\), we have \(\theta \mathbf U_+ \leq \mathbf U_+\) and \( (1-\theta)\mathbf U_- \leq \mathbf U_-\), so \(\mathbf U_{\theta} \leq \mathbf U_+ + \mathbf U_-=2 \mathbf U\); we also have \(\mathbf V_{\theta} \leq 2 \mathbf V\). It is then sufficient to show that \(\mathbf V_{\theta}^{1/2} (\mathbf U_+ + \mathbf U_-) \mathbf V_{\theta}^{1/2} \leq 4X I_d\). But this is equivalent to \(\|\mathbf V_{\theta}^{1/2} (\mathbf U_+ + \mathbf U_-) \mathbf V_{\theta}^{1/2} \| \leq 4X\). All matrices that appear are positive definite, so \(\|\mathbf V_{\theta}^{1/2} (\mathbf U_+ + \mathbf U_-) \mathbf V_{\theta}^{1/2} \| = \| (\mathbf U_+ + \mathbf U_-) ^{1/2} \mathbf V_{\theta} (\mathbf U_+ + \mathbf U_-) ^{1/2}\|\). Then again \( \| (\mathbf U_+ + \mathbf U_-) ^{1/2} \mathbf V_{\theta} (\mathbf U_+ + \mathbf U_-) ^{1/2}\| \leq 4X\) if and only if \((\mathbf U_+ + \mathbf U_-) ^{1/2} \mathbf V_{\theta} (\mathbf U_+ + \mathbf U_-) ^{1/2} \leq 4XI_d\).
We finally have
\[(\mathbf U_+ + \mathbf U_-) ^{1/2} \mathbf V_{\theta} (\mathbf U_+ + \mathbf U_-) ^{1/2} = 2 \mathbf U^{1/2} \mathbf V_{\theta} \mathbf U^{1/2} \leq 4 \mathbf U^{1/2} \mathbf V \mathbf U^{1/2} \leq 4XI_d,\]
since \((\mathbf U,\mathbf V)\), and thus also \( (\mathbf V,\mathbf U)\), are in the set \(C_X\), so the proof of the lemma is complete.
\end{proof}
\section{The main estimate}
The following result is the main tool in the proof of Theorem \ref{mainshift}.
\begin{lemma}\label{mainlemma}
Let \(X>1\) and \(\mathcal B_X\) be a function satisfying properties (i)-(iii) from Section 6. Fix \(k \geq 1\) and a dyadic interval \(I_0\). For all \(I \in \mathcal{D}_n(I_0),\ 0 \leq n \leq k,\) let the points \(A_I= (\mathbf f_I, \mathbf F_I, \mathbf U_I, \mathbf g_I, \mathbf G_I, \mathbf V_I) \in \mathfrak{D}_X=\mathrm{Dom}\, \mathcal B_X\) be given. Assume that the points \(A_I\) satisfy the dyadic martingale dynamics, i.e. \(A_I=(A_{I^+}+A_{I^-})/2,\) where \(I^+\) and \(I^-\) are the children of \(I\). Let \(B_{I_0} = \{e_{I_0}^1, e_{I_0}^2, \ldots, e_{I_0}^d\}\) be an orthonormal basis of eigenvectors of \( \mathbf U_{I_0}\) and for $1 \le i \le d$, let \(P_{I_0}^i\) be the orthogonal projection onto the span of \(e_{I_0}^i\).
For \(1 \leq i \leq d\) and \(K,L \in \mathcal{D}_k(I_0)\), we define the coefficients \(\lambda_{KL}^i\) by
\[\lambda_{KL}^i := \bigg \langle P_{I_0}^i \bigg ( \frac{\mathbf f_K - \mathbf f_{I_0}}{2^k} \bigg ), P_{I_0}^i \bigg ( \frac{\mathbf g_L - \mathbf g_{I_0}}{2^k} \bigg ) \bigg \rangle_{\mathbb{C}^d} .\]
Then
\[\sum_{i = 1}^d \sum_{K,L \in \mathcal{D}_k({I_0})} |\lambda_{KL}^i| \leq c \cdot d \bigg( \mathcal B_{X'}(A_{I_0}) - 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \mathcal B_{X'}(A_I) \bigg),\]
where \(c\) is a positive absolute constant and \(X'=\frac{100}{9}X\).
\end{lemma}
\begin{proof}
For \(1 \leq i \leq d\), we introduce the notation
\(\Lambda^i := \left (\lambda_{KL}^i \right )_{K,L \in \mathcal{D}_k(I_0)}.\) Assume for the moment that for each \(i\), we can find a sequence \(\{\alpha^i_I\}_{I \in \mathcal{D}_k(I_0)}\) such that \(|\alpha^i_I| \leq 1/4\) for all \(I \in \mathcal{D}_k(I_0), \sum_{I \in \mathcal{D}_k(I_0)} \alpha^i_I =0,\) and
\begin{equation}\label{sequence}
\bigg | \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha^i_K \alpha^i_L \lambda_{KL}^i \bigg | \geq c \sum_{K,L \in \mathcal{D}_k({I_0})} |\lambda_{KL}^i|.
\end{equation}
For each \(i\), we define \(A_{I_0}^{i, \pm}=(\mathbf f^{i, \pm}, \mathbf F^{i, \pm}, \mathbf U^{i, \pm}, \mathbf g^{i, \pm}, \mathbf G^{i, \pm}, \mathbf V^{i, \pm})\) by
\begin{equation} \label{eq:dyn}
A_{I_0}^{i, \pm}:=2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} (1 \pm \alpha^i_I)A_I = A_{I_0} \pm 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \alpha^i_I A_I,
\end{equation}
so \(A_{I_0}=(A_{I_0}^{i, +} + A_{I_0}^{i, -})/2\).
The following notations and computations hold for every \(1 \leq i \leq d\), so we fix such an \(i\). For simplicity, we also drop the \(i\) superscript until further notice.
For each \(I \in \mathcal{D}_k(I_0)\), let \(a_I^{\pm}:=1 \pm \alpha_I\) and note that \(3/4 \leq a_I^{\pm} \leq 5/4.\)
For \(I \in \mathcal{D}_n(I_0),\ 1 \leq n \leq k,\) let us define
\[A_I^{\pm}:= \left (
\sum_{\substack{
J \in \mathcal{D}_k(I_0) \\
J \subseteq I}}
a_J^{\pm}A_J \right ) \left (
\sum_{\substack{
J \in \mathcal{D}_k(I_0) \\
J \subseteq I}}
a_J^{\pm} \right )^{-1}.\]
If \(I \in \mathcal{D}_k(I_0)\) we have \(A_I^+=A_I^-=A_I\), where the \(A_I\)'s are the points from the statement of the lemma. The points \(A_I^{\pm}\) are in the convex hull of the set \(\{A_J: J \in \mathcal{D}_k(I_0), \ J \subseteq I\}\). To address the lack of convexity of $\mathfrak{D}_X$, we need an additional lemma:
\begin{lemma} \label{lemm:convex}
\(A_I^{\pm} \in \mathfrak{D}_{25/9X}\) for all \(I \in \mathcal{D}_n(I_0),\ 1 \leq n \leq k\).
\end{lemma}
\begin{proof}[Proof of Lemma \ref{lemm:convex}]
Since the points \(A_I^{\pm}\) are in the convex hull of the set \(\{A_J \in \mathfrak{D}_X \subset \mathfrak{D}_{25/9X}\}\), and among the conditions that define \(\mathfrak{D}_{25/9X}\) only the constraint \(\mathbf V^{1/2} \mathbf U \mathbf V^{1/2} \leq \frac{25}{9}X \cdot I_d\) is not convex, we just have to check this condition.
Let us consider the \(\mathbf U\)-coordinate of the points \(A_I\). The maximal numerator is obtained when all coefficients \(a_J^{\pm}\) are equal to \(5/4\), and the minimal denominator is attained when \(a_J^{\pm}=3/4\) for all \(J \in \mathcal{D}_k(I_0), \ J \subseteq I\). This implies that \(\mathbf U_I^{\pm} \leq \frac{5}{4} (\frac{3}{4})^{-1} \mathbf U_I = \frac{5}{3} \mathbf U_I\). Similarly, we also have \(\mathbf V_I^{\pm} \leq \frac{5}{3} \mathbf V_I\). Using elementary properties of positive definite matrices, it follows that
\[(\mathbf V_I^{\pm})^{1/2} \mathbf U_I^{\pm} (\mathbf V_I^{\pm})^{1/2} \leq \frac{5}{3} (\mathbf V_I^{\pm})^{1/2} \mathbf U_I (\mathbf V_I^{\pm})^{1/2}\]
and
\begin{align*}
\big \|(\mathbf V_I^{\pm})^{1/2} \mathbf U_I^{\pm} (\mathbf V_I^{\pm})^{1/2} \big \| & \leq \frac{5}{3} \big \|(\mathbf V_I^{\pm})^{1/2} \mathbf U_I (\mathbf V_I^{\pm})^{1/2} \big \| = \frac{5}{3} \big \|\mathbf U_I^{1/2} \mathbf V_I^{\pm} \mathbf U_I^{1/2} \big \| \\
& \leq \left( \frac{5}{3} \right)^2 \big \|\mathbf U_I^{1/2} \mathbf V_I \mathbf U_I^{1/2} \big \| \leq \frac{25}{9}X,
\end{align*}
hence \((\mathbf V_I^{\pm})^{1/2} \mathbf U_I^{\pm} (\mathbf V_I^{\pm})^{1/2} \leq \frac{25}{9}X \cdot I_d\). This means that the points \(A_I^{\pm}\) belong to \(\mathfrak{D}_{25/9X}\).
Let \(\tilde{A}^\pm_{I}\) be the midpoints of the line segments with endpoints \(A_{I^+}^{\pm}\) and \(A_{I^-}^{\pm}\). We prove that \(\tilde{A}^\pm_{I} \in \mathfrak{D}_{25/9X}\).
As before, we have \(\mathbf U_{I^{\pm}}^{\pm} \leq \frac{5}{3} \mathbf U_{I^{\pm}}\) and \(\mathbf V_{I^{\pm}}^{\pm} \leq \frac{5}{3} \mathbf V_{I^{\pm}}\). Therefore,
\[\tilde{\mathbf U}_I^{\pm} = \frac{\mathbf U_{I^+}^{\pm} + \mathbf U_{I^-}^{\pm}}{2} \leq \frac{5}{3} \frac{\mathbf U_{I^+} +\mathbf U_{I^-}}{2} = \frac{5}{3} \mathbf U_I\]
and \(\tilde{\mathbf V}_I^{\pm} \leq \frac{5}{3} \mathbf V_I\). It follows that \((\tilde{\mathbf V}_I^{\pm})^{1/2} \tilde{\mathbf U}_I^{\pm} (\tilde{\mathbf V}_I^{\pm})^{1/2} \leq \frac{25}{9}X \cdot I_d\), so the points \(\tilde{A}^\pm_{I}\) belong to \(\mathfrak{D}_{25/9X}\).
Applying Lemma \ref{extdom}, we conclude that the line segments with endpoints \(A_{I^+}^{\pm}\) and \(A_{I^-}^{\pm}\) are in \(\mathfrak{D}_{X'}\), where \(X'=4 \frac{25}{9} X = \frac{100}{9} X\).
This finishes the proof of Lemma \ref{lemm:convex}.
\end{proof}
We continue with the proof of Lemma \ref{mainlemma}.
For \(I \in \mathcal{D}_n(I_0),\ 1 \leq n \leq k,\) we define
\[\theta_I^{\pm}:= \left (
\sum_{\substack{
J \in \mathcal{D}_k(I_0) \\
J \subseteq I}}
a_J^{\pm} \right ) \left (
\sum_{\substack{
J \in \mathcal{D}_k(I_0) \\
J \subseteq \tilde{I}}}
a_J^{\pm} \right )^{-1}.\]
\noindent
It is easy to see that \(3/10 \leq \theta_I^{\pm} \leq 5/6\) and
\begin{equation}\label{convexity}
\theta_{I^+}^{\pm} + \theta_{I^-}^{\pm} =1, \qquad A_I^{\pm}=\theta_{I^+}^{\pm} A_{I^+}^{\pm} + \theta_{I^-}^{\pm} A_{I^-}^{\pm}.
\end{equation}
\noindent
The last equality means that the point \(A_I^+\) is on the line segment with endpoints \(A_{I^+}^+\) and \(A_{I^-}^+\), and similarly for \(A_I^-\). \(\theta_{I^+}^{\pm}\) and \(\theta_{I^-}^{\pm}\) represent the probabilities of moving from the points \(A_I^{\pm}\) to \(A_{I^+}^{\pm}\) and \(A_{I^-}^{\pm}\), respectively.
Since by (\ref{eq:dyn})
\begin{align*}
\Big | \big \langle P_{I_0} \big (\mathbf f^+ - \mathbf f^- \big ), P_{I_0} \big (\mathbf g^+ - \mathbf g^- \big ) \big \rangle_{\mathbb{C}^d} \Big | & = \bigg| \bigg \langle 2 \cdot 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \alpha_I P_{I_0}(\mathbf f_I), 2 \cdot 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \alpha_I P_{I_0}(\mathbf g_I) \bigg \rangle_{\mathbb{C}^d} \bigg| \\
& = 4 \Big | \big \langle P_{I_0} \big (\mathbf f^{\pm} - \mathbf f_{I_0} \big) , P_{I_0} \big(\mathbf g^{\pm} - \mathbf g_{I_0} \big) \big \rangle_{\mathbb{C}^d} \Big |,
\end{align*}
we get by property (iii) of the Bellman function \(\mathcal B_{X'}\)
\begin{multline}\label{bellmandif}
\big | \left \langle P_{I_0} \big (\mathbf f^{\pm} - \mathbf f_{I_0} \big) , P_{I_0} \big(\mathbf g^{\pm} - \mathbf g_{I_0} \big) \right \rangle_{\mathbb{C}^d} \big |
\leq \sup_{\| \tau\|_{\mathbf U_{I_0} } \le 1 } \left| \left \langle \tau (\mathbf f^{\pm} - \mathbf f_{I_0} ), (\mathbf g^{\pm} - \mathbf g_{I_0} ) \right \rangle_{\mathbb{C}^d} \right| \\
\leq \frac{1}{4} \bigg ( \mathcal B_{X'}(A_{I_0}) - \frac{\mathcal B_{X'}(A_{I_0}^+)+\mathcal B_{X'}(A_{I_0}^-)}{2} \bigg ).
\end{multline}
From the concavity of the function \(\mathcal B_{X'}\) and \eqref{convexity} it follows that
\[\mathcal B_{X'}(A_I^{\pm}) \geq \theta_{I^+}^{\pm} \mathcal B_{X'}(A_{I^+}^{\pm}) + \theta_{I^-}^{\pm} \mathcal B_{X'}(A_{I^-}^{\pm}).\]
Applying now this inequality to \(I \in \mathcal{D}_n(I_0),\ 0 \leq n \leq k-1\), and taking into account that
\[\prod_{\substack{
J \in \mathcal{D} \\
I \subseteq J \subsetneq I_0 }}
\theta_J^{\pm} = a_I^{\pm} \bigg (\sum_{J \in \mathcal{D}_k(I_0)} a_J^{\pm} \bigg )^{-1} = 2^{-k} a_I^{\pm}\]
for all \(I \in \mathcal{D}_k(I_0),\) we obtain the estimate
\[\mathcal B_{X'}(A_{I_0}^{\pm}) \geq 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} a_I^{\pm} \mathcal B_{X'}(A_I).\]
Since \(a_I^+ + a_I^-=2\) when \(I \in \mathcal{D}_k(I_0),\) substituting the previous inequality in \eqref{bellmandif} gives
\begin{equation}\label{single_est}
\Big | \big \langle P_{I_0} \big (\mathbf f^{\pm} - \mathbf f_{I_0} \big) , P_{I_0} \big(\mathbf g^{\pm} - \mathbf g_{I_0} \big) \big \rangle_{\mathbb{C}^d}\Big | \leq \frac{1}{4} \bigg ( \mathcal B_{X'}(A_{I_0}) - 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \mathcal B_{X'}(A_I) \bigg ) .
\end{equation}
We are now ready to obtain the conclusion of the lemma. By \eqref{sequence} and the fact that \(\sum_{I \in \mathcal{D}_k(I_0)} \alpha^i_I =0,\) we have the estimate
\begin{align}\label{est1}
c \sum_{i = 1}^d \sum_{K,L \in \mathcal{D}_k({I_0})} \left| \lambda_{KL}^i \right| & \leq \sum_{i = 1}^d \bigg | \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha^i_K \alpha^i_L \lambda_{KL}^i \bigg | \\
& = \sum_{i = 1}^d \bigg | \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha^i_K \alpha^i_L \bigg \langle P_{I_0}^i \bigg ( \frac{\mathbf f_K - \mathbf f_{I_0}}{2^k} \bigg ), P_{I_0}^i \bigg ( \frac{\mathbf g_L - \mathbf g_{I_0}}{2^k} \bigg ) \bigg \rangle_{\mathbb{C}^d} \bigg | \nonumber \\
& = \sum_{i = 1}^d \bigg | \bigg \langle 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \alpha^i_I P_{I_0}^i (\mathbf f_I - \mathbf f_{I_0}), 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \alpha^i_I P_{I_0}^i (\mathbf g_I - \mathbf g_{I_0}) \bigg \rangle_{\mathbb{C}^d} \bigg | \nonumber \\
& = \sum_{i = 1}^d \Big | \big \langle P_{I_0}^i \big( \mathbf f^{i, \pm} - \mathbf f_{I_0} \big) , P_{I_0}^i \big(\mathbf g^{i, \pm} - \mathbf g_{I_0} \big) \big \rangle_{\mathbb{C}^d} \Big | \nonumber \\
& \leq \frac{d}{4} \bigg ( \mathcal B_{X'}(A_{I_0}) - 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \mathcal B_{X'}(A_I) \bigg ), \nonumber
\end{align}
where the last inequality follows from \eqref{single_est}. This completes the proof of Lemma \ref{mainlemma} under the assumption \eqref{sequence}.
For each \(1 \leq i \leq d\), the matrix \(\Lambda^i\) has complex rank 1. Dropping again the \(i\) superscript, there exist \(m = (m_K), n = (n_L) \in \mathbb{C}^{2^k}\) such that \(\lambda_{KL} = m_K n_L = (m^1_K + i m^2_K) (n^1_L + i n^2_L)\), for every \(K,L \in \mathcal{D}_k(I_0)\). We then have
\[ \sum_{K,L \in \mathcal{D}_k(I_0)} |\lambda_{KL}| \leq \sum_{K,L \in \mathcal{D}_k(I_0)} \big( |m^1_K n^1_L| + |m^1_K n^2_L| + |m^2_K n^1_L| + |m^2_K n^2_L| \big).\]
Without loss of generality, we may assume that \(\sum_{K,L \in \mathcal{D}_k(I_0)} |m^1_K n^1_L|\) is the maximum of the four sums in the above right hand side. By an application of K. Ball's ``multiple Hahn-Banach Theorem'' (\cite{Ba91}, Theorem 7), or alternatively an elementary functional analysis argument (see \cite{Tr11}, Theorem 6.2 and Lemma 6.3), we can find a real-valued sequence \(\{\alpha_I\}_{I \in \mathcal{D}_k(I_0)}\) such that \(|\alpha_I| \leq 1/4\) for all \(I \in \mathcal{D}_k(I_0), \sum_{I \in \mathcal{D}_k(I_0)} \alpha_I =0,\) and
\[\bigg| \sum_{K \in \mathcal{D}_k(I_0)} \alpha_K m^1_K \bigg| \geq \frac{1}{16} \sum_{K \in \mathcal{D}_k(I_0)} |m^1_K| , \qquad \bigg| \sum_{L \in \mathcal{D}_k(I_0)} \alpha_L n^1_L \bigg| \geq \frac{1}{16} \sum_{L \in \mathcal{D}_k(I_0)} |n^1_L|.\]
It follows that
\begin{align*}
\sum_{K,L \in \mathcal{D}_k(I_0)} |\lambda_{KL}| & \leq 4 \sum_{K,L \in \mathcal{D}_k(I_0)} |m^1_K n^1_L| \leq 4^5 \bigg| \sum_{K \in \mathcal{D}_k(I_0)} \alpha_K m^1_K \bigg| \cdot \bigg| \sum_{L \in \mathcal{D}_k(I_0)} \alpha_L n^1_L \bigg| \nonumber \\
& \leq 4^5 \bigg| \sum_{K \in \mathcal{D}_k(I_0)} \alpha_K (m^1_K + i m^2_K) \bigg| \cdot \bigg| \sum_{L \in \mathcal{D}_k(I_0)} \alpha_L (n^1_L + i n^2_L) \bigg| \nonumber \\
& = 4^5 \bigg| \sum_{K \in \mathcal{D}_k(I_0)} \alpha_K m_K \bigg| \cdot \bigg| \sum_{L \in \mathcal{D}_k(I_0)} \alpha_L n_L \bigg| = 4^5 \bigg| \sum_{K, L \in \mathcal{D}_k(I_0)} \alpha_K \alpha_L \lambda_{KL} \bigg|, \nonumber
\end{align*}
which is what we wanted to show. Therefore, the proof of the lemma is complete, with $c=4^5$.
\end{proof}
\section{Conclusion of the proof of Theorem \ref{mainshift}}
We are now ready to finish the proof of Theorem \ref{mainshift}.
Recall that for all slices \(S_j\) of \(S\) we have
\[ \Big | \left \langle S_j f,g \right \rangle_{L^2(W),L^2(W^{-1})} \Big| \leq \sum_{L \in \mathcal{L}_j} |L| \sum_{i = 1}^d
\sum_{P,Q \in \mathcal{D}_k(L)} \bigg | \bigg \langle P_L^i \bigg ( \frac{\langle f \rangle _P - \langle f \rangle _L}{2^k} \bigg ), P_L^i \bigg (\frac{\langle g \rangle _Q - \langle g \rangle _L}{2^k} \bigg ) \bigg \rangle_{\mathbb{C}^d} \bigg|. \]
Let \(X:= [W]_{A_2}\); fix \(0 \leq j \leq k-1 \) and for all \(I \in \mathcal{L}_j\) define
\[A_I:=\Big (\langle f \rangle _I, \big \langle \|W^{1/2}f\|^2_{\mathbb{C}^d} \big \rangle_I, \langle W \rangle _I, \langle g \rangle _I, \big \langle \|W^{-1/2}g\|^2_{\mathbb{C}^d} \big \rangle_I, \langle W^{-1} \rangle _I \Big).\]
Notice that all these points are in \(\mathrm{Dom}\, \mathcal B_X=\mathfrak{D}_X\). Lemma \ref{mainlemma} says that
\begin{align*}
& |L| \sum_{i = 1}^d
\sum_{P,Q \in \mathcal{D}_k(L)} \bigg | \bigg \langle P_L^i \bigg ( \frac{\langle f \rangle _P - \langle f \rangle _L}{2^k} \bigg ), P_L^i \bigg (\frac{\langle g \rangle _Q - \langle g \rangle _L}{2^k} \bigg ) \bigg \rangle_{\mathbb{C}^d} \bigg| \\
& \qquad \qquad \leq c \cdot d \bigg( |L| \mathcal B_{X'}(A_L) - \sum_{I \in \mathcal{D}_k(L)} |I| \mathcal B_{X'}(A_I) \bigg ),
\end{align*}
for all \(L \in \mathcal{L}_j.\) We write this estimate for each \(I \in \mathcal{D}_k(L)\) and then iterate the procedure \(\ell\) times to obtain
\begin{align*}
& \sum_{\substack{
I \in \mathcal{L}_j \\
I \subseteq L\\
|I| > 2^{-k \ell}|L| }}
|I| \sum_{i = 1}^d \sum_{P,Q \in \mathcal{D}_k(I)} \bigg | \bigg \langle P_I^i \bigg ( \frac{\langle f \rangle _P - \langle f \rangle _I}{2^k} \bigg), P_I^i \bigg ( \frac{\langle g \rangle _Q - \langle g \rangle _I}{2^k} \bigg) \bigg \rangle_{\mathbb{C}^d} \bigg| \\
& \qquad \qquad \qquad \leq c \cdot d \bigg( |L| \mathcal B_{X'}(A_L) - \sum_{I \in \mathcal{D}_{k \ell}(L)} |I| \mathcal B_{X'}(A_I) \bigg ) \\
& \qquad \qquad \qquad \leq c \cdot d |L| \mathcal B_{X'}(A_L) \\
& \qquad \qquad \qquad \leq c \cdot d N(X') |L| \big \langle \|W^{1/2}f\|^2_{\mathbb{C}^d} \big \rangle_L^{1/2} \big \langle \|W^{-1/2}g\|^2_{\mathbb{C}^d} \big \rangle_L^{1/2} \\
& \qquad \qquad \qquad \leq c \cdot d N(X') \|f \chi_L\|_{L^2(W)} \|g \chi_L\|_{L^2(W^{-1})},
\end{align*}
where the second inequality follows from property (ii) of the Bellman function.
\par Letting \(\ell \to \infty\), we have
\begin{align*}
& \sum_{\substack{
I \in \mathcal{L}_j \\
I \subseteq L }}
|I| \sum_{i = 1}^d \sum_{P,Q \in \mathcal{D}_k(I)} \bigg | \bigg \langle P_I^i \bigg ( \frac{\langle f \rangle _P - \langle f \rangle _I}{2^k} \bigg), P_I^i \bigg ( \frac{\langle g \rangle _Q - \langle g \rangle _I}{2^k} \bigg) \bigg \rangle_{\mathbb{C}^d} \bigg| \\
& \qquad \qquad \qquad \leq c \cdot d N(X') \|f \chi_L\|_{L^2(W)} \|g \chi_L\|_{L^2(W^{-1})}.
\end{align*}
We now cover the real line with intervals \(L \in \mathcal{L}_j\) of length \(2^M\) and apply the last inequality to each \(L\) to obtain that
\begin{align*}
& \sum_{\substack{
I \in \mathcal{L}_j \\
|I| \leq 2^M }}
|I| \sum_{i = 1}^d \sum_{P,Q \in \mathcal{D}_k(I)} \bigg | \bigg \langle P_I^i \bigg ( \frac{\langle f \rangle _P - \langle f \rangle _I}{2^k} \bigg), P_I^i \bigg ( \frac{\langle g \rangle _Q - \langle g \rangle _I}{2^k} \bigg) \bigg \rangle_{\mathbb{C}^d} \bigg| \\
& \qquad \qquad \qquad \leq c \cdot d N(X) \|f\|_{L^2(W)} \|g\|_{L^2(W^{-1})}.
\end{align*}
For \(M \to \infty\), we get that the norm of \(S_j\) is bounded by \(c \cdot d N(X)\). Since \(S\) was decomposed into \(k\) slices, it follows that the operator norm of \(S\) is bounded by \(c \cdot k d N([W]_{A_2^d})\), and therefore the proof of Theorem \ref{mainshift} is complete.
\qed
Using the bound for matrix-weighted dyadic martingale transforms proved in \cite{BiPeWi14} and the bound for matrix-weighted paraproducts in \cite{IHP}, page 7, together with Hyt\"onen's representation theorem in \cite{Hy11}, we obtain
the following consequence of Theorem \ref{mainthm}:
\begin{theorem}
Let \(W\) be a \(d \times d\) matrix \(A_2\) weight on \(\mathbb{R}^p\).
Let \(K\) be a standard kernel and \(T\) be a Calder\'{o}n-Zygmund operator on \(\mathbb{R}^p\) associated to \(K\). Suppose that \(T\) satisfies the weak boundedness property \(|\langle T \chi_Q, \chi_Q \rangle |
\leq C|Q|\) for all cubes \(Q\). Then
\[\|T\|_{L^2(W) \to L^2(W)} \leq C \cdot 2^{3(p-1)} p ([W]_{A_2})^{3/2} (2(p-1) +\log ([W]_{A_2}) ) ,\]
where \(C\) depends only on the constants in the standard estimates and the weak boundedness property, and the dimension $d$.
\end{theorem}
\begin{remark} \label{rmk1}
\normalfont
Obviously, we have not used the full power of the Bellman function here---the supremum in the Bellman function is taken over all $\tau$ with $\|\mathbf{U}^{1/2} \tau \mathbf{U}^{-1/2}\|\le1$, while we have only used the projections on the eigenspaces of $\mathbf{U}$. The setup actually allows to treat matrix-valued kernels as well, using the recent representation theorem for Calder\'{o}n-Zygmund operators with operator-valued kernels in \cite{HaHy14}, which again gives a decomposition into dyadic shifts. However, in the matrix-weighted setting, one needs to adapt the decay conditions on the Calder\'{o}n-Zygmund operator to the matrix weight $W$ (see \cite{Is15}, page 3). This approach is the subject of the paper \cite{PoSt17}.
\end{remark}
\begin{remark} \label{rmk2}
\normalfont
Following Remark \ref{rmk1}, we could also have used a smaller version of the function $N$ by choosing a smaller class of martingale transforms for our proof, namely for example
$$
N_1(X) = \sup\ \|T_{\sigma}\|_{L^2(W) \to L^2(W)},
$$
where the supremum is taken over all $ d \times d$ matrix $A_2$ weights $W$ with $ [W]_{A_2} \le X$ and all sequences of $d \times d$ matrices
$\sigma = \{\sigma_I\}_{I \in \mathcal{D}}$ with $\|\sigma_I\| \le 1$ and $ \sigma_I$ commuting with $\langle W \rangle_I $ for all $ I \in \mathcal{D}$.
One can then define the Bellman function with the projections $P_{I}^i$ from Lemma \ref{mainlemma} instead of the $\tau$, running exactly the same proof.
The reason we used the more general class of martingale transforms is that for both classes of $\sigma$'s, we have the pointwise estimate
$$
S_W(T_\sigma f)(t) \le S_W f(t),
$$
where $S_W$ is the matrix-weighted square function (see \cite{BiPeWi14}, \cite{PePo02}). Our expectation here was that the norm growth of the matrix-weighted square function controls the norm growth of the matrix-weighted martingale transforms in terms of $[W]_{A_2}$, and that both bounds are linear in $[W]_{A_2}$. This would, by Theorem \ref{mainthm},
imply the linear bound in $[W]_{A_2}$ for general Calder\'{o}n-Zygmund operators with cancellation. Indeed, the linear bound of the matrix-weighted square function has been proved after this paper was
refereed \cite{hpv}. The linear bound for martingale transforms remains currently open. An account on possible strategies and some of the obstacles can be found in Section 6 of \cite{BiPeWi14}.
\end{remark}
\section{More about Calder\'{o}n-Zygmund operators with even kernel}
One of the key aspects of the definition of the martingale transform operator in Section 2.2 is that the matrices \(\sigma_I\) interact well with the weight \(W\) (for the proof of our main result, we have essentially used the special case where
the \(\sigma_I\)'s are diagonal in some basis).
In the scalar-valued case, the definition of the martingale transform is simpler. More precisely, for a real sequence \(\sigma=\{\sigma_I\}_{I \in \mathcal D},\ \sigma_I= \pm 1,\) we define the martingale transform operator \(\tilde{T}_{\sigma}\) by
\[\tilde{T}_\sigma f = \sum_{I \in \dd} \sigma_I \langle f,h_I \rangle h_I.\]
Allowing this operator to act on vector-valued functions, we can prove a similar result to Theorem \ref{mainthm}, but this time, the bound will only apply to Calder\'{o}n-Zygmund operators
with even kernels and sufficient smoothness of the kernel. For this, we define the function \(\tilde{N}:[1, \infty) \to [1, \infty)\) by
\[ \tilde{N}(X)= \sup \|\tilde{T}_{\sigma}\|_{L^2(W) \to L^2(W)}, \]
where the supremum is taken over all real sequences \(\sigma\) as above and all \(d \times d\) matrix \(A_2^d\) weights \(W\) on \(\mathbb{R}\) with \([W]_{A_2^d} \leq X\).
\begin{theorem}\label{mainthm_even}
Let \(W\) be a \(d \times d\) matrix \(A_2\) weight on \(\mathbb{R}^p\). Let \(\tilde{K}\) be an even standard kernel with smoothness \(\delta > 1/2\) and \(T\) be a Calder\'{o}n-Zygmund operator on \(\mathbb{R}^p\) associated to \(\tilde{K}\). Suppose that \(T\) satisfies the weak boundedness property \(|\langle T \chi_Q, \chi_Q \rangle | \leq C|Q|\) for all cubes \(Q\), and the vanishing paraproduct conditions \(T(1)=T^*(1)=0\). Then
\[\|T\|_{L^2(W) \to L^2(W)} \leq C \cdot pd \tilde{N}(2^{2(p-1)} [W]_{A_2}) \leq C_p \cdot d \tilde{N}([W]_{A_2}),\]
where \(C\) depends only on the constants in the standard estimates and the weak boundedness property, while \(C_p\) depends on \(C\) and \(p\).
\end{theorem}
As before, the proof of this result follows from a corresponding inequality for self-adjoint Haar shift operators. More precisely, we will show the estimate
\begin{equation}
\|S^{mn}\|_{L^2(W) \to L^2(W)} \lesssim (\max\{m,n\}+1) 2^{\max\{m,n\}/2} \tilde{N} ([W]_{A_2})
\end{equation}
for all dyadic Haar shifts \(S^{mn}\) of parameters \((m,n)\), which ensures the convergence of the series (since \(\delta > 1/2\)) in the representation theorem. This is the content of the following theorem.
\begin{theorem}\label{mainshift_even}
Let \(S\) be a self-adjoint dyadic Haar shift on \(\mathbb{R}\) of complexity \(k \geq 1\) and \(W\) be a matrix \(A_2^d\) weight. Then
\[\|S\|_{L^2(W) \to L^2(W)} \leq c \cdot k 2^{k/2} \tilde{N} ([W]_{A_2^d}),\]
where \(c\) is an absolute, positive constant.
\end{theorem}
The reduction of the proof follows almost like in Section 3, except that the orthogonal projection operators \(P_I^i\) don't appear. Since the dyadic Haar shift \(S\) is self-adjoint, we obtain the following estimate:
\begin{align*}
& 2 \Big | \left \langle S_j f,g \right \rangle_{L^2(W),L^2(W^{-1})} \Big| = \Big | \left \langle S_j f,g \right \rangle_{L^2(W),L^2(W^{-1})} + \left \langle f,S_j g \right \rangle_{L^2(W),L^2(W^{-1})} \Big | \\
& \leq \sum_{L \in \mathcal{L}_j} |L|
\sum_{P,Q \in \mathcal{D}_k(L)} \bigg | \bigg \langle \frac{\langle f \rangle _P - \langle f \rangle _L}{2^k}, \frac{\langle g \rangle _Q - \langle g \rangle _L}{2^k} \bigg \rangle_{\mathbb{C}^d} + \bigg \langle \frac{\langle f \rangle _Q - \langle f \rangle _L}{2^k}, \frac{\langle g \rangle _P - \langle g \rangle _L}{2^k} \bigg \rangle_{\mathbb{C}^d} \bigg|.
\end{align*}
With the same notations as in Section 4, the Bellman function \(\mathcal B_X\) is defined by
\[\mathcal B_X(\mathbf f, \mathbf F, \mathbf U, \mathbf g, \mathbf G, \mathbf V):= |I_0|^{-1} \sup \sum_{I \subseteq I_0} \left | \big \langle \langle f \rangle _{I^+} - \langle f \rangle_{I^-}, \langle g \rangle _{I^+} - \langle g\rangle_{I^-} \big \rangle_{\mathbb{C}^d} \right | \cdot |I|.\]
The only differences between the properties of this function and those of the old Bellman function (\ref{bell}) are the replacement of \(N(X)\) by \(\tilde{N}(X)\) in (ii) and the absence of the operators \(P_I^i\) in (iii).
The proof of Theorem \ref{mainshift_even} is based on the following result, which is a similar version of Lemma \ref{mainlemma}.
\begin{lemma}\label{mainlemma_even}
Let \(X>1\) and \(\mathcal B_X\) be a function satisfying properties (i)-(iii) from Section 6. Fix \(k \geq 1\) and a dyadic interval \(I_0\). For all \(I \in \mathcal{D}_n(I_0),\ 0 \leq n \leq k,\) let the points \(A_I= (\mathbf f_I, \mathbf F_I, \mathbf U_I, \mathbf g_I, \mathbf G_I, \mathbf V_I) \in \mathfrak{D}_X=\mathrm{Dom}\, \mathcal B_X\) be given. Assume that the points \(A_I\) satisfy the dyadic martingale dynamics, i.e. \(A_I=(A_{I^+}+A_{I^-})/2,\) where \(I^+\) and \(I^-\) are the children of \(I\). For \(K,L \in \mathcal{D}_k(I_0)\), we define the coefficients \(\lambda_{KL}\) by
\[\lambda_{KL}:= \bigg \langle \frac{\mathbf f_K - \mathbf f_{I_0}}{2^k}, \frac{\mathbf g_L - \mathbf g_{I_0}}{2^k} \bigg \rangle_{\mathbb{C}^d} + \bigg \langle \frac{\mathbf f_L - \mathbf f_{I_0}}{2^k}, \frac{\mathbf g_K - \mathbf g_{I_0}}{2^k} \bigg \rangle_{\mathbb{C}^d}.\]
Then
\[\sum_{K,L \in \mathcal{D}_k({I_0})} |\lambda_{KL}| \leq c \cdot 2^{k/2} \bigg( \mathcal B_{X'}(A_{I_0}) - 2^{-k} \sum_{I \in \mathcal{D}_k(I_0)} \mathcal B_{X'}(A_I) \bigg),\]
where \(c\) is a positive absolute constant and \(X'=\frac{100}{9}X\).
\end{lemma}
The only difference between the proof of this result and that of Lemma \ref{mainlemma} is the way to obtain the existence of the real sequence
\( \{\alpha_I\}_{I \in \mathcal{D}_k(I_0)}\) such that \(|\alpha_I| \leq 1/4\) for all \(I \in \mathcal{D}_k(I_0),\ \sum_{I \in \mathcal{D}_k(I_0)} \alpha_I =0,\) and
\begin{equation}\label{sequence_even}
\bigg | \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha_K \alpha_L \lambda_{KL} \bigg | \geq c \cdot 2^{-k/2} \sum_{K,L \in \mathcal{D}_k({I_0})} |\lambda_{KL}|.
\end{equation}
We will use again the notation \(\Lambda := \left (\lambda_{KL} \right )_{K,L \in \mathcal{D}_k(I_0)}.\) Let us now define
\[\|\Lambda\|_1 := \sup_{\alpha} \bigg | \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha_K \alpha_L \lambda_{KL} \bigg|,\]
where the supremum is taken over all real sequences \(\alpha=\{\alpha_I\}_{I \in \mathcal{D}_k(I_0)}\) with \(\|\alpha\|_{\infty} \leq 1/4\) and \(\sum_{I \in \mathcal{D}_k(I_0)} \alpha_I =0.\) Since we are in a finite-dimensional space, we can find a sequence \(\alpha\) with \(|\alpha_I| \leq 1/4,\ I \in \mathcal{D}_k(I_0),\) and \(\sum_{I \in \mathcal{D}_k(I_0)} \alpha_I =0,\) such that
\begin{equation}\label{norm1}
\bigg | \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha_K \alpha_L \lambda_{KL} \bigg| = \|\Lambda\|_1.
\end{equation}
Using the symmetry of \(\Lambda\) and the fact that its row and column sums are all zero, it is easy to see that \(\|\Lambda\|_1\) is equivalent to
\[\|\Lambda\|_2 := \sup_{\substack{
\|\alpha\|_{\infty} \leq 1 \\
\|\beta\|_{\infty} \leq 1}}
\bigg| \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha_K \beta_L \lambda_{KL} \bigg|,\]
where we take the supremum over all real sequences \( \alpha=\{\alpha_I\}_{I \in \mathcal{D}_k(I_0)}\) and \(\beta=\{\beta_I\}_{I \in \mathcal{D}_k(I_0)}.\) More precisely, we have \(64 \|\Lambda\|_1 \leq \|\Lambda\|_2 \leq 192 \|\Lambda\|_1.\) Since we may assume that \(\Lambda\) is not the zero matrix (otherwise the lemma becomes trivially true), \(\|\Lambda\|_1,\) and hence \(\|\Lambda\|_2\), are not \(0.\)
We also need the notion of Schur multiplier. If \(A=(a_{ij}) \in \mathcal{M}_n(\mathbb{R})\), the Schur multiplier is the bounded operator \(S_A:\mathcal{M}_n(\mathbb{R}) \to \mathcal{M}_n(\mathbb{R})\) that acts on a matrix \(M=(m_{ij})\) by Schur multiplication: \(S_A(M)=(a_{ij}m_{ij})\). The Schur multiplier norm is
\[\|A\|_m:=\sup_{M \in \mathcal{M}_n(\mathbb{R})} \frac{\|S_A(M)\|_{op}}{\|M\|_{op}},\]
where \(\|M\|_{op}\) is the operator norm of the matrix \(M\) on \(\ell^2(\{1,2,\ldots,n\})\). If \(A=(a_{ij}) \in \mathcal{M}_n(\mathbb{R})\) is of the form \(a_{ij}=s_i t_j\), then \(A\) is called a rank one Schur multiplier. It is easy to see that if \(A\) is a rank one Schur multiplier, then \(\|A\|_m \leq \|(s_i)_{i=1}^n\|_{\infty} \|(t_j)_{j=1}^n\|_{\infty} \). A classical result due to A. Grothendieck says that the converse is essentially true (up to a constant called Grothendieck constant).
\begin{theorem} [\protect{\cite[Theorem 1.2]{DaDo07}, \cite[Theorem 3.2]{Pi12}}] \label{grot}
The closure of the convex hull of the rank one Schur multipliers of norm one in the topology of pointwise convergence contains the ball of all Schur multipliers of norm at most \(K_G^{-1}\), where \(K_G\) is a universal constant.
\end{theorem}
If \(\alpha\) and \(\beta\) are two real sequences as above, the matrix \(\Phi=(\Phi_{KL})=(\alpha_K \beta_L)\) is a rank one Schur multiplier of norm \(\|\Phi\|_m\) at most \(1\). The inequality \(\|\Lambda\|_2 \leq 192 \|\Lambda\|_1\) can thus be rewritten as
\[\sup \big |\langle \Phi, \Lambda \rangle_{HS} \big | \leq 192 \|\Lambda\|_1,\]
where $\langle \cdot, \cdot \rangle_{HS}$ denotes the inner product on the Hilbert-Schmidt class and the supremum is taken over all rank one Schur multipliers \(\Phi\) of norm at most \(1\).
Using Theorem \ref{grot}, we obtain that
\[\sup \big |\langle M, \Lambda \rangle_{HS} \big | \leq 192 K_G \|\Lambda\|_1,\]
where the supremum is now taken over all Schur multipliers \(M\) of norm at most \(1\), and \(K_G\) is the (real) Grothendieck constant.
By choosing either the real or the imaginary part of the matrix \(\Lambda\) (the one with greater \(\ell^1\)-norm), we have \( \sum_{K,L \in \mathcal{D}_k({I_0})} |\lambda_{KL}| \leq 2 \sup \big | \langle M, \Lambda \rangle_{HS} \big |,\) where the supremum is taken over all matrices \(M \in \mathcal{M}_{2^k}(\mathbb{R})\) with entries \(\pm 1\). For such a matrix \(M\) we have \(\|M\|_m \leq 2^{k/2}\), see \cite{DaDo07}, Lemma 2.5. Putting everything together, we get the inequality
\begin{equation}\label{sumbdd}
\sum_{K,L \in \mathcal{D}_k({I_0})} |\lambda_{KL}| \leq 384 K_G 2^{k/2} \|\Lambda\|_1.
\end{equation}
Using \eqref{norm1} and \eqref{sumbdd}, it follows that there exists a real sequence \(\alpha\) with \(|\alpha_I| \leq 1/4\) for all \(I \in \mathcal{D}_k(I_0),\) and \(\sum_{I \in \mathcal{D}_k(I_0)} \alpha_I =0,\) such that
\[\sum_{K,L \in \mathcal{D}_k(I_0)} |\lambda_{KL}| \leq 384 K_G 2^{k/2} \|\Lambda\|_1 = 384 K_G 2^{k/2} \bigg | \sum_{K,L \in \mathcal{D}_k({I_0})} \alpha_K \alpha_L \lambda_{KL} \bigg|,\]
which is what we wanted to show. Since the other arguments are the same as in Lemma \ref{mainlemma}, this completes the proof of Lemma \ref{mainlemma_even}.
The inequality in Theorem \ref{mainshift_even} is now obtained as in Section 6.
\section{A matrix version of the weighted Carleson Embedding Theorem}
In this section we will prove a version of the matrix-weighted Carleson Embedding Theorem. In the weighted setting, contrary to the unweighted case, the scalar-valued Carleson Embedding Theorem cannot be used to obtain the matrix version of the theorem. Here is the main result of this section.
\begin{theorem}[Matrix Carleson Embedding Theorem]\label{matrix_CET}
Let \(W\) be a \(d \times d\) matrix weight, and \(\{A_I\}_{I \in \mathcal D}\) be a sequence of \( d \times d\) positive definite matrices. Then for $0 < t\le 1$,
\[
t\sum_{I \in \dd} \big \langle (I_d + t \langle W \rangle_I ^{-1} \widetilde{\mathbf M}_I)^{-1} A_I (I_d + t \widetilde{\mathbf M}_I \langle W \rangle_I^{-1})^{-1} \langle W^{1/2} f \rangle_I, \langle W^{1/2} f \rangle_I \big \rangle_{\mathbb{C}^d} \leq 8 \|f\|^2_{L^2(\mathbb{R})}\]
if
\[\frac{1}{|I|} \sum_{J \subseteq I} \langle W \rangle_J A_J \langle W \rangle_J \leq \langle W \rangle_I, \quad \text{for all } I \in \mathcal D,\]
where \(\widetilde{\mathbf M}_J = \frac{1}{|J|} \sum_{K \subsetneq J} \langle W \rangle_K A_K \langle W \rangle_K\) and \(I_d\) is the \(d \times d\) identity matrix.
\end{theorem}
As we have said earlier, this version is not the simple generalization of the usual weighted Carleson Embedding Theorem in \cite{NaTrVo99}. This is due to the extra factor \((I_d + t \widetilde{\mathbf M}_I \langle W \rangle_I^{-1})^{-1}\) that appears (twice) in the left-hand side of the conclusion. However, the constants that appear in the theorem don't depend on the dimension \(d\) or on the weight \(W\). The proof of the result also uses arguments that were previously discussed in Section 4.
\begin{proof}
Let $t=1$.
We first have to introduce the Bellman function associated to the problem. For \( \mathbf f \in \mathbb{C}^d, \mathbf F \in \mathbb{R}, \mathbf W \in \mathcal{M}_d(\mathbb{C}), \mathbf M \in \mathcal{M}_d(\mathbb{C}) \) satisfying
\begin{equation}\label{domain2}
\langle \mathbf W^{-1} \mathbf f, \mathbf f \rangle_{\mathbb{C}^d} \leq \mathbf F \qquad \mbox{and} \qquad \mathbf M \leq \mathbf W,
\end{equation}
define the function \(\mathcal B : \mathbb{C}^d \times \mathbb{R} \times \mathcal{M}_d(\mathbb{C}) \times \mathcal{M}_d(\mathbb{C}) \to \mathbb{R}\) by
\[\mathcal B(\mathbf f, \mathbf F, \mathbf W, \mathbf M):= 4 \big( \mathbf F - \big \langle (\mathbf W + \mathbf M)^{-1} \mathbf f, \mathbf f \big \rangle_{\mathbb{C}^d} \big). \]
The Bellman function \(\mathcal B\) has the following properties:
\begin{enumerate}[(i)]
\item (Domain) The domain \(\mathfrak{D}:=\mathrm{Dom}\, \mathcal B\) is given by \eqref{domain2}.
\item (Range) \(0 \leq \mathcal B(\mathbf f, \mathbf F, \mathbf W, \mathbf M) \leq 4 \mathbf F \) for all \((\mathbf f, \mathbf F, \mathbf W, \mathbf M) \in \mathfrak{D}.\)
\item (Concavity condition) Consider all tuples \(A=(\mathbf f, \mathbf F, \mathbf W, \mathbf M), A_+=(\mathbf f_+, \mathbf F_+, \mathbf W_+, \mathbf M_+)\) and \(A_-=(\mathbf f_-, \mathbf F_-, \mathbf W_-, \mathbf M_-)\) in \(\mathfrak{D}\) such that \(\mathbf f=(\mathbf f_+ + \mathbf f_-)/2, \mathbf F=(\mathbf F_+ + \mathbf F_-)/2, \mathbf W=(\mathbf W_+ + \mathbf W_-)/2,\) and \(\mathbf M = m + (\mathbf M_+ + \mathbf M_-)/2 = m + \widetilde{\mathbf M},\) where \(m\) is a positive definite matrix. For all such tuples, we have the following concavity condition:
\[\mathcal B(A) - \frac{\mathcal B(A_+)+\mathcal B(A_-)}{2} \geq \frac{1}{2} \big \langle (\mathbf W + \widetilde{\mathbf M})^{-1} m (\mathbf W + \widetilde{\mathbf M})^{-1} \mathbf f, \mathbf f \big \rangle.\]
\end{enumerate}
\par
Let us now explain these properties of the function \(\mathcal B\).
The inequality \(\|\mathbf W^{-1/2} \mathbf f\|_{\mathbb{C}^d}^2 \leq \mathbf F \) follows from the Cauchy-Schwarz Inequality. The other inequality in \eqref{domain2} is related to the Carleson condition.
Property (ii) follows trivially from the definition of \(\mathcal B\).
To prove the concavity condition, we consider three tuples \(A, A_+, A_- \in \mathfrak{D}\) such that \(\mathbf f=(\mathbf f_+ + \mathbf f_-)/2, \mathbf F=(\mathbf F_+ + \mathbf F_-)/2, \mathbf W=(\mathbf W_+ + \mathbf W_-)/2,\) and \(\mathbf M = m + (\mathbf M_+ + \mathbf M_-)/2 = m + \widetilde{\mathbf M}\). Let \(\tilde{A} = (\mathbf f, \mathbf F, \mathbf W, \widetilde{\mathbf M})\). We prove the inequality in (iii) by splitting it into two inequalities. The first one,
\[\mathcal B(\tilde{A}) - \frac{1}{2} \big( \mathcal B(A_+) + \mathcal B(A_-) \big) \geq 0,\]
follows from the convexity of the first inequality in \eqref{domain2} (like in the first part of the proof of Lemma \ref{extdom}).
The second inequality,
\[\mathcal B(A) - \mathcal B(\tilde{A}) \geq \frac{1}{2} \langle (\mathbf W + \widetilde{\mathbf M})^{-1} m (\mathbf W + \widetilde{\mathbf M})^{-1} \mathbf f, \mathbf f \rangle,\]
is obtained by showing that
\[ (\mathbf W + \widetilde{\mathbf M})^{-1} - (\mathbf W + \widetilde{\mathbf M} + m)^{-1} \geq \frac{1}{2} (\mathbf W + \widetilde{\mathbf M})^{-1} m (\mathbf W + \widetilde{\mathbf M})^{-1}. \]
To see this, notice that the left-hand side of this inequality can be written as
\[(\mathbf W + \widetilde{\mathbf M})^{-1/2} \bigg ( I_d - \Big ( I_d + (\mathbf W + \widetilde{\mathbf M})^{-1/2} m (\mathbf W + \widetilde{\mathbf M})^{-1/2} \Big )^{-1} \bigg ) (\mathbf W + \widetilde{\mathbf M})^{-1/2}.\]
If \(E := (\mathbf W + \widetilde{\mathbf M})^{-1/2} m (\mathbf W + \widetilde{\mathbf M})^{-1/2}\), we have that \(0 < E \leq I_d\), since \(m \leq \mathbf W \leq \mathbf W + \widetilde{\mathbf M}\). Then the inequality \(I_d - (I_d + E)^{-1} \geq \frac{1}{2} E\) is equivalent to \(I_d + E - I_d \geq \frac{1}{2} E(I_d + E)\), which can be rewritten as \(E \geq E^2\). This last inequality is clearly true since \(E \leq I_d\). It follows that
\begin{align*}
(\mathbf W + \widetilde{\mathbf M})^{-1} - (\mathbf W + \widetilde{\mathbf M} + m)^{-1} & = (\mathbf W + \widetilde{\mathbf M})^{-1/2} \big(I_d - (I_d + E)^{-1}\big) (\mathbf W + \widetilde{\mathbf M})^{-1/2} \\
& \geq \frac{1}{2} (\mathbf W + \widetilde{\mathbf M})^{-1/2} E (\mathbf W + \widetilde{\mathbf M})^{-1/2} \\
& = \frac{1}{2} (\mathbf W + \widetilde{\mathbf M})^{-1} m (\mathbf W + \widetilde{\mathbf M})^{-1},
\end{align*}
which is the desired inequality.
To prove Theorem \ref{matrix_CET}, let \(W\) be a matrix weight, \(f \in L^2(\mathbb{R})\) and \(\{A_I\}_I\) be a sequence of \( d \times d\) positive definite matrices. For any \(I \in \mathcal D\), let
\[\mathbf f_I = \langle W^{1/2} f \rangle_I \in \mathbb{C}^d, \quad \mathbf F_I = \langle \|f\|^2 \rangle_I \in \mathbb{R}, \]
\[\mathbf W_I = \langle W \rangle_I \in \mathcal{M}_d(\mathbb{C}), \quad \mathbf M_I = \frac{1}{|I|} \sum_{J \subseteq I} \langle W \rangle_J A_J \langle W \rangle_J \in \mathcal{M}_d(\mathbb{C}).\]
Then
\[m_I = \frac{1}{|I|} \mathbf W_I A_I \mathbf W_I \quad \mbox{and} \quad \widetilde{\mathbf M}_I = \frac{1}{|I|} \sum_{J \subsetneq I} \langle W \rangle_J A_J \langle W \rangle_J .\]
For the interval \(I\), the concavity condition (iii) implies that
\begin{align*}
& \frac{|I|}{2} \big \langle (\mathbf W_I + \widetilde{\mathbf M}_I)^{-1} m_I (\mathbf W_I + \widetilde{\mathbf M}_I)^{-1} \mathbf f_I, \mathbf f_I \big \rangle \\
& \qquad \qquad \qquad \leq |I| \mathcal B(\mathbf f_I, \mathbf F_I, \mathbf W_I, \mathbf M_I) - |I^+| \mathcal B(\mathbf f_{I^+}, \mathbf F_{I^+}, \mathbf W_{I^+}, \mathbf M_{I^+}) - |I^-| \mathcal B(\mathbf f_{I^-}, \mathbf F_{I^-}, \mathbf W_{I^-}, \mathbf M_{I^-}).
\end{align*}
Iterating this inequality \(k\) times, we obtain
\begin{align*}
& \frac{1}{2} \sum_{\substack{
J \subseteq I\\
|J| > 2^{-k}|I| }} |J| \, \langle (\mathbf W_J+ \widetilde{\mathbf M}_J)^{-1} m_J (\mathbf W_J + \widetilde{\mathbf M}_J)^{-1} \mathbf f_J, \mathbf f_J \rangle \\
& \qquad \qquad \qquad \leq |I| \mathcal B(\mathbf f_I, \mathbf F_I, \mathbf W_I, \mathbf M_I) -
\sum_{\substack{
J \subseteq I\\
|J| = 2^{-k}|I| }}
|J| \mathcal B(\mathbf f_J, \mathbf F_J, \mathbf W_J, \mathbf M_J) \\
& \qquad \qquad \qquad \leq |I| \mathcal B(\mathbf f_I, \mathbf F_I, \mathbf W_I, \mathbf M_I) \leq 4 |I| \mathbf F_I.
\end{align*}
Using that
\[ \big \langle (\mathbf W_I + \widetilde{\mathbf M}_I)^{-1} m_I (\mathbf W_I + \widetilde{\mathbf M}_I)^{-1} \mathbf f_I, \mathbf f_I \big \rangle\\
= \frac{1}{|I|} \big \langle (I_d + \langle W \rangle_I ^{-1} \widetilde{\mathbf M}_I)^{-1} A_I (I_d + \widetilde{\mathbf M}_I \langle W \rangle_I^{-1} )^{-1} \langle W^{1/2} f \rangle_I, \langle W^{1/2} f \rangle_I \big \rangle,\]
and letting \(k \to \infty\), we get
\[\sum_{J \subseteq I} \big \langle (I_d + \langle W \rangle_J ^{-1} \widetilde{\mathbf M}_J)^{-1} A_J (I_d + \widetilde{\mathbf M}_J \langle W \rangle_J^{-1})^{-1} \langle W^{1/2} f \rangle_J, \langle W^{1/2} f \rangle_J \big \rangle_{\mathbb{C}^d} \leq 8 |I| \langle \|f\|^2 \rangle_I,\]
which is our desired conclusion for \(t = 1\).
For $0 < t < 1$, just replace $A_I$ by $t A_I$ and apply the inequality which we have just proved.
\end{proof}
\begin{remark}
\normalfont
While this paper was prepared for publication, A. Culiuc and S. Treil posted a result which appears to be the correct generalization of the scalar weighted Carleson Embedding Theorem to matrix weights in finite dimension $d$ (see \cite{CuTr15}).
In the notation
of Theorem \ref{matrix_CET}, it says that
\[
\sum_{I \in \dd} \big \langle A_I \langle W^{1/2} f \rangle_I, \langle W^{1/2} f \rangle_I \big \rangle_{\mathbb{C}^d} \leq C(d) \|f\|^2_{L^2(\mathbb{R})}\]
if
\[\frac{1}{|I|} \sum_{J \subseteq I} \langle W \rangle_J A_J \langle W \rangle_J \leq \langle W \rangle_I \quad \text{for all } I \in \mathcal D,\]
(Theorem 1.2 in \cite{CuTr15}). An important step in their proof, the estimate (2.5) in \cite{CuTr15}, is essentially identical with our Theorem \ref{matrix_CET}, obtained with a different proof.
\end{remark}
\begin{bibsection}
\begin{biblist}
\bib{Ba91}{article}{
author = {K. Ball},
title = {The plank problem for symmetric bodies},
journal = {Invent. Math.},
volume = {104},
year = {1991},
number = {3},
pages = {535-543},
owner = {Andrei},
timestamp = {2015.08.26}
}
\bib{BiPeWi14}{article}{
author = {K. Bickel},
author = {S. Petermichl},
author = {B. Wick},
title = {Bounds for the {H}ilbert transform with matrix {$A_2$} weights},
journal = {J. Funct. Anal.},
year = {2016},
volume = {270},
pages = {1719-1743},
number = {5},
owner = {Andrei},
timestamp = {2016.05.02}
}
\bib{CoFe74}{article}{
author = {R. R. Coifman},
author = {C. Fefferman},
title = {Weighted norm inequalities for maximal functions and singular integrals},
journal = {Studia Math.},
year = {1974},
volume = {51},
pages = {241-250},
owner = {Andrei},
timestamp = {2013.02.28}
}
\bib{CuTr15}{article}{
author={A. Culiuc},
author={S. Treil},
title={The Carleson Embedding Theorem with Matrix Weights},
year = {2015},
volume={Preprint, arXiv:1508.01716},
owner = {Andrei},
timestamp = {2015.06.29}
}
\bib{DaDo07}{article}{
author = {K. R. Davidson},
author = {A. P. Donsig},
title = {Norms of {S}chur multipliers},
journal = {Illinois J. Math.},
year = {2007},
volume = {51},
pages = {743-766},
number = {3},
owner = {Andrei},
timestamp = {2013.03.01}
}
\bib{Go03}{article}{
author = {M. Goldberg},
title = {Matrix {$A_p$} weights via maximal functions},
journal = {Pacific J. Math.},
year = {2003},
volume = {211},
pages = {201-220},
number = {2},
owner = {Andrei},
timestamp = {2015.06.29}
}
\bib{HaHy14}{article}{
author = {T. S. H{\"a}nninen},
author = {T. P. Hyt{\"o}nen},
title = {Operator-valued dyadic shifts and the {$T(1)$} theorem},
year = {2016},
volume = {180},
pages={213--253},
number={2},
journal={Monatsh. Math.},
owner = {Andrei},
timestamp = {2015.08.21}
}
\bib{HuMuWh73}{article}{
author = {R. A. Hunt},
author = {B. Muckenhoupt},
author = {R. L. Wheeden},
title = {Weighted norm inequalities for the conjugate function and {H}ilbert
transform},
journal = {Trans. Amer. Math. Soc.},
year = {1973},
volume = {176},
pages = {227-251},
owner = {Andrei},
timestamp = {2013.02.28}
}
\bib{Hy11}{article}{
author = {T. P. Hyt{\"o}nen},
title = {Representation of singular integrals by dyadic operators, and the
{$A_2$} theorem},
year = {2011},
volume = {Preprint, arXiv:1108.5119},
journal={Lecture notes of an intensive course at Universidad de Sevilla, Summer 2011},
owner = {Andrei},
timestamp = {2013.03.01}
}
\bib{Hy12a}{article}{
author = {T. P. Hyt{\"o}nen},
title = {The sharp weighted bound for general {C}alder\'{o}n-{Z}ygmund operators},
journal = {Ann. of Math. (2)},
year = {2012},
volume = {175},
pages = {1473-1506},
number = {3},
owner = {Andrei},
timestamp = {2013.03.01}
}
\bib{hpv}{article}{
author = {T. Hyt\"onen},
author = {S. Petermichl},
author = {A. Volberg},
title = {The sharp square function estimate with matrix weight},
journal = {},
year = {2017},
volume = {Preprint, arXiv:1702.04569},
pages = {},
number = {},
owner = {Andrei},
timestamp = {2015.06.29}
}
\bib{Is15}{article}{
author={J. Isralowitz},
title={A matrix weighted T$_1$ theorem for matrix kernelled CZOs and a matrix weighted John-Nirenberg theorem},
year={2015},
volume={Preprint, arXiv:1508.02474},
owner = {Andrei},
timestamp = {2015.08.21}
}
\bib{IHP}{article}{
author={J. Isralowitz},
author={H.-K. Kwon},
author={S.Pott},
title={Matrix-weighted norm inequalities for commutators and paraproducts with matrix symbols},
year={2015},
volume={Preprint, arXiv:1507.04032},
journal = {to appear in J. London Math. Soc.},
}
\bib{hunt}{article}{
author = {F. Nazarov},
author = {S. Treil},
title = {The hunt for a Bellman function: applications to estimates for singular integral operators and to other classical problems of harmonic analysis},
journal = {St. Petersburg Math. J.},
year = {1997},
volume = {8},
pages = {721--824},
number = {5},
owner = {Andrei},
timestamp = {}
}
\bib{NaTrVo99}{article}{
author = {F. Nazarov},
author = {S. Treil},
author = {A. Volberg},
title = {The {B}ellman functions and two-weight inequalities for {H}aar multipliers},
journal = {J. Amer. Math. Soc.},
year = {1999},
volume = {12},
pages = {909-928},
number = {4},
owner = {Andrei},
timestamp = {2015.06.29}
}
\bib{ntvp}{article}{
author = {F. Nazarov},
author = {S. Petermichl},
author = {S. Treil},
author = {A. Volberg},
title = {Convex body domination and weighted estimates with matrix weights },
volume = {arXiv:1701.01907},
pages = {},
number = {},
year={2017},
owner = {Andrei},
timestamp = {2015.06.29}
}
\bib{Pe07}{article}{
author = {S. Petermichl},
title = {The sharp bound for the {H}ilbert transform on weighted {L}ebesgue
spaces in terms of the classical {$A_p$} characteristic},
journal = {Amer. J. Math.},
year = {2007},
volume = {129},
pages = {1355-1375},
number = {5},
owner = {Andrei},
timestamp = {2013.02.28}
}
\bib{PePo02}{article}{
author = {S. Petermichl},
author = {S. Pott},
title = {An estimate for weighted {H}ilbert transform via square functions},
journal = {Trans. Amer. Math. Soc.},
year = {2002},
volume = {354},
pages = {1699-1703},
number = {4},
owner = {Andrei},
timestamp = {2015.08.21}
}
\bib{PeVo02}{article}{
author = {S. Petermichl},
author = {A. Volberg},
title = {Heating of the {A}hlfors-{B}eurling operator: weakly quasiregular
maps on the plane are quasiregular},
journal = {Duke Math. J.},
year = {2002},
volume = {112},
pages = {281-305},
number = {2},
owner = {Andrei},
timestamp = {2013.02.28}
}
\bib{Pi12}{article}{
author = {G. Pisier},
title = {Grothendieck's theorem, past and present},
journal = {Bull. Amer. Math. Soc. (N.S.)},
year = {2012},
volume = {49},
pages = {237-323},
number = {2},
owner = {Andrei},
timestamp = {2013.03.01}
}
\bib{PoSt17}{article}{
author={S. Pott},
author={A. Stoica},
title={Sharp bounds and \(T1\) theorem for Calder\'{o}n-Zygmund operators with matrix kernel on matrix weighted spaces},
year={2017},
volume={Preprint, arXiv:1705.06105},
journal = {},
}
\bib{RoVa73}{book}{
title = {Convex functions},
publisher = {Academic Press},
year = {1973},
author = {A. W. Roberts},
author = {D. E. Varberg},
volume = {57},
series = {Pure and Applied Mathematics},
address = {New York-London},
owner = {Andrei},
timestamp = {2013.03.01}
}
\bib{Tr11}{inproceedings}{
author = {S. Treil},
title = {Sharp {$A_2$} estimates of {H}aar shifts via {B}ellman function},
booktitle ={Recent trends in
Analysis, Theta Ser. Adv. Math.},
pages={187--208},
publisher={Theta, Bucharest},
year = {2013},
note = {arXiv:1105.2252},
owner = {Andrei},
timestamp = {2013.03.01}
}
\bib{TrVo97}{article}{
author = {S. Treil},
author = {A. Volberg},
title = {Wavelets and the {A}ngle between {P}ast and {F}uture},
journal = {J. Funct. Anal.},
year = {1997},
volume = {143},
pages = {269-308},
number = {2},
owner = {Andrei},
timestamp = {2015.06.29}
}
\bib{Vo97}{article}{
author = {A. Volberg},
title = {Matrix {$A_p$} weights via $S$-functions},
journal = {J. Amer. Math. Soc.},
year = {1997},
volume = {10},
pages = {445-466},
number = {2},
owner = {Andrei},
timestamp = {2015.06.29}
}
\bib{Wi00}{article}{
author = {J. Wittwer},
title = {A sharp estimate on the norm of the martingale transform},
journal = {Math. Res. Lett.},
year = {2000},
volume = {7},
pages = {1-12},
number = {1},
owner = {Andrei},
timestamp = {2013.02.28}
}
\end{biblist}
\end{bibsection}
\end{document}
|
\begin{document}
\title[Entire and Analytical Solutions of Certain Classes...]{Entire and Analytical Solutions of Certain Classes of Abstract Degenerate Fractional Differential Equations and Their Systems$^{\ast}$}
\author{Marko Kosti\' c}
\address{Faculty of Technical Sciences,
University of Novi Sad,
Trg D. Obradovi\' ca 6, 21125 Novi Sad, Serbia}
\email{[email protected]}
{\renewcommand{\thefootnote}{} \footnote{
$^{\ast}$The text was submitted by the author for the English version of the journal.
\\ \text{ } \ \ 2010 {\it Mathematics
Subject Classification.} 47D06, 47D60,
47D62, 47D99.
\\ \text{ } \ \ {\it Key words and phrases.} Abstract degenerate differential equations, Volterra integro-differential equations, fractional differential equations, entire and analytical solutions, well-posedness.
}}
\begin{abstract}
In this paper, we are primarily concerned with the study of entire and analytical solutions of abstract degenerate (multi-term)
fractional differential equations with Caputo time-fractional derivatives. We also analyze systems of such equations and furnish several illustrative
examples to demonstrate usage of obtained theoretical results.
\end{abstract}
\maketitle
\section{INTRODUCTION AND PRELIMINARIES}
Fractional calculus has gained considerable popularity and importance during the past four decades, mainly due to its applications in diverse fields of science and engineering.
Fairly complete information about fractional calculus and non-degenerate fractional
differential equations can be obtained
by consulting the references
\cite{bajlekova}, \cite{Diet}, \cite{kilbas}-\cite{knjigaho} and
\cite{Po}-\cite{samko}.
Various types of abstract degenerate Volterra integro-differential equations and abstract degenerate (multi-term) fractional differential equations have been recently considered in \cite{fedorov}-\cite{vlad-mar} and \cite{filomat}-\cite{R-L-bilten}
(cf. \cite{FK} for a comprehensive survey of results, as well as \cite{aliev1},
\cite{carol}, \cite{dem}, \cite{faviniyagi}, \cite{me152}, \cite{svir-fedorov}-\cite{svir3} and \cite{XL}-\cite{XL-HIGHER} for some other papers concerning the
abstract degenerate differential equations).
It is well known that
the study of entire solutions of abstract differential equations was initiated by R. deLaubenfels \cite{l1-forum} in 1991. Concerning the theory
of abstract differential equations with integer order derivatives,
further contributions
have been obtained by L. Autret \cite{auter0}, L. Autret-H. A. Emamirad \cite{auter}, T.-J. Xiao-J. Liang \cite{XL-entire}, Y. Mishura-Y. Tomilov \cite{mistom}, and the author \cite{knjigah}, \cite{sic}. The existence and uniqueness of entire and analytical solutions of the abstract non-degenerate time-fractional differential equations with Caputo derivatives have been investigated in \cite{fcaa}-\cite{systemss}.
In a joint research paper with V. Fedorov \cite{vlad-mar-prim}, the author has recently considered a class of abstract degenerate multi-term fractional differential equations in locally convex spaces, pointing out that the methods proposed in \cite{XL-entire} (cf. also \cite[Section 4.4, pp. 167-175]{x263}), \cite{fcaa} and \cite[Remark 2.2(x)-(xi)]{vlad-mar-prim} can serve one to prove some results on the
existence and uniqueness of entire solutions of degenerate multi-term differential equations
with integer order derivatives (cf. \cite[Chapter 4]{svir-fedorov} for some basic results on the entire groups of solving operators for abstract degenerate differential equations of first order). Motivated primarily by this fact, in the second section of this paper we consider the
existence and uniqueness of entire and analytical solutions to (systems) of degenerate multi-term fractional differential equations
with Caputo derivatives. It should also be noticed that in Subsection 2.1
we initiate the analysis of existence and uniqueness of entire and analytical solutions of some very important degenerate equations of mathematical physics in $L^{p}$ type spaces.
We use the standard notation throughout the paper.
Unless specified otherwise,
we assume
that $X$ is a Hausdorff sequentially complete
locally convex space over the field of complex numbers. We use the shorthand SCLCS to denote such a space.
By
$L(X)$ we denote the space consisting of all continuous linear mappings from $X$ into
$X.$ By $\circledast$ we denote the fundamental systems of seminorms which defines the topology of $X.$
The Hausdorff locally convex topology on
$L(X)$ is defined in the usual way (see \cite[Section 1.1]{knjigaho}).
Let us recall that the space $L(X)$ is sequentially
complete provided that $X$ is barreled\index{barreled space} (\cite{meise}).
If $A$ is a linear operator
acting on $X$,
then the domain, kernel space and range of $A$ will be denoted by
$D(A),$ $N(A)$ and $R(A),$
respectively. Since no confusion
seems likely, we will identify $A$ with its graph. The symbol $I$ stands for the identity operator on $X.$
If $C\in L(X)$ and $A$ is a closed linear operator acting on $X$, then we define the
$C$-resolvent set of $A,$
$\rho_{C}(A)$ for short, by $
\rho_{C}(A):=\{\lambda \in {\mathbb C} \ | \ \lambda -A \mbox{ is
injective and } (\lambda-A)^{-1}C\in L(X)\};
$ $\rho(A)\equiv \rho_{I}(A).$
If $V$ is a general topological vector space,
then a function $f :
\Omega \rightarrow V,$ where $\Omega$ is an open non-empty subset of ${\mathbb
C},$ is said to be analytic iff it is locally expressible in a
neighborhood of any point $z\in \Omega$ by a uniformly convergent
power series with coefficients in $V.$
We refer the reader to \cite{a43} and \cite[Section 1.1]{knjigaho} and references cited there for the basic information about vector-valued analytic functions. In our approach the space $X$ is sequentially complete, so that the analyticity of a mapping
$f: \Omega \rightarrow X$ is equivalent with its weak analyticity.
By ${\mathcal F}$ and ${\mathcal F}^{-1}$ we denote the Fourier transform on ${\mathbb R}^{n}$ and its inverse transform, respectively.
Given $\theta\in(0,\pi]$
in advance, define $\Sigma_{\theta}:=\{\lambda\in {\mathbb C}:\lambda\neq 0$, $|\arg(\lambda)|<\theta\}.$
Further on, $\lfloor\beta\rfloor:=\sup\{k\in {\mathbb Z}:\allowbreak k\leq\beta\},$
$\lceil\beta\rceil :=\inf\{k\in {\mathbb Z}:\beta\leq k\}$ ($\beta \in {\mathbb R}$), ${\mathbb N}_{n}:=\{1,\cdot \cdot \cdot,n\}$ and ${\mathbb N}_{n}^{0}:={\mathbb N}_{n} \cup \{0\}$ ($n\in {\mathbb N}$).
The Gamma function is denoted by $\Gamma(\cdot)$ and the principal branch is always used to take
the powers; the convolution like
mapping $\ast$ is given by $f\ast g(t):=\int_{0}^{t}f(t-s)g(s)\,
ds .$ Set $g_{\zeta}(t):=t^{\zeta-1}/\Gamma(\zeta),$
$0^{\zeta}:=0$ ($\zeta>0,$ $t>0$), and
$g_{0}(t):=$ the Dirac $\delta$-distribution.
For a number $\zeta>0$ given in advance,
the Caputo fractional derivative ${\mathbf
D}_{t}^{\zeta}u$ (\cite{bajlekova}, \cite{knjigaho}) is defined
for those functions $u\in C^{\lceil \zeta \rceil-1}([0,\infty) : X)$ for which
$g_{\lceil \zeta \rceil-\zeta} \ast (u-\sum_{j=0}^{\lceil \zeta \rceil-1}u^{(j)}(0)g_{j+1}) \in
C^{\lceil \zeta \rceil}([0,\infty) : X),$ by
$$
{\mathbf
D}_{t}^{\zeta}u(t):=\frac{d^{\lceil \zeta \rceil}}{dt^{\lceil \zeta \rceil}}\Biggl[g_{\lceil \zeta \rceil-\zeta}
\ast \Biggl(u-\sum_{j=0}^{\lceil \zeta \rceil-1}u^{(j)}(0)g_{j+1}\Biggr)\Biggr].
$$
The
Mittag-Leffler function $E_{\beta,\gamma}(z)$ ($\beta>0,$ $\gamma \in {\mathbb R}$) is defined by
$$
E_{\beta,\gamma}(z):=\sum_{k=0}^{\infty}\frac{z^{k}}{\Gamma(\beta
k+\gamma)},\quad z\in {\mathbb C}.
$$
In this place, we assume that
$1/\Gamma(\beta k+\gamma)=0$ if $\beta k+\gamma \in -{{\mathbb
N}_{0}}.$ Set, for short, $E_{\beta}(z):=E_{\beta,1}(z),$ $z\in
{\mathbb C}.$
The asymptotic behaviour of the entire function $E_{\beta,\gamma}(z)$
is given in the following auxiliary lemma (see e.g. \cite[Section 1.3]{knjigaho}):
\begin{lem}\label{1.1}
Let $0<\sigma<\frac{1}{2}\pi .$ Then, for every $z\in {\mathbb C}
\setminus \{0\}$ and $l\in {\mathbb N} \setminus \{1\},$
$$
E_{\beta,\gamma}(z)=\frac{1}{\beta}\sum
\limits_{s}Z_{s}^{1-\gamma}e^{Z_{s}}-\sum
\limits^{l-1}_{j=1}\frac{z^{-j}}{\Gamma(\gamma-\beta
j)}+O\bigl(|z|^{-l}\bigr),\quad |z|\rightarrow \infty ,
$$
where $Z_{s}$ is defined by $Z_{s}:=z^{1/\beta}e^{2\pi i s/\beta}$
and the first summation is taken over all those integers $s$
satisfying $|\arg (z) + 2\pi s|<\beta(\frac{\pi}{2}+\sigma).$
\end{lem}
For further information about the Mittag-Leffler functions and the abstract Volterra integro-differential equations in Banach and locally convex spaces, the reader may consult \cite{bajlekova}, \cite{prus}, \cite{knjigaho} and references cited there.
Assume
that
$n\in {\mathbb N}$ and $iA_{j},\ 1\leq j\leq n$ are commuting
generators of bounded $C_{0}$-groups on a Banach space $X.$ Set $A:=(A_{1},\cdot \cdot \cdot,A_{n})$ and
$A^{\eta}:=A_{1}^{\eta_{1}}\cdot \cdot \cdot A_{n}^{\eta_{n}}$ for
any $\eta=(\eta_{1},\cdot \cdot \cdot, \eta_{n})\in {{\mathbb
N}_{0}^{n}};$
denote by ${\mathcal D}({{\mathbb R}^{n}})$
and ${\mathcal S}({{\mathbb R}^{n}})$ the Schwartz space of $C^{\infty}({\mathbb R}^{n})$-functions with compact support and
the Schwartz space of rapidly decreasing functions on ${{\mathbb R}^{n}},$ respectively (in the sequel, the meaning of symbol $A$ will be clear from the context). Let $k=1+\lfloor
n/2\rfloor.$ For every $\xi=(\xi_{1},\cdot \cdot \cdot, \xi_{n}) \in
{{\mathbb R}^{n}}$ and $u\in {\mathcal F}L^{1}({\mathbb R}^{n})= \{
{\mathcal F}f : f \in L^{1}({{\mathbb R}^{n}}) \},$ we set
$|\xi|:=(\sum_{j=1}^{n}\xi_{j}^{2})^{1/2},$
$(\xi,A):=\sum_{j=1}^{n}\xi_{j}A_{j}$ and
$
u(A)x:=\int_{{\mathbb R}^{n}}{\mathcal
F}^{-1}u(\xi)e^{-i(\xi,A)}x\, d\xi,\ x\in X.
$
Then $u(A)\in
L(X),$ $u\in {\mathcal F}L^{1}({{\mathbb R}^{n}})$ and there exists a finite constant
$M\geq 1$ such that
$
\|u(A)\|\leq M \|{\mathcal
F}^{-1}u\|_{L^{1}({{\mathbb R}^{n}})},\ u\in {\mathcal
F}L^{1}({\mathbb R}^{n}).
$
Let $N\in {\mathbb N},$ and let
$p(x)=\sum_{|\eta|\leq N}a_{\eta}x^{\eta},$ $x\in {\mathbb R}^{n}$ be a complex polynomial. Then we define
$
p(A):=\sum_{|\eta|\leq N}a_{\eta}A^{\eta}\mbox{ and }
X_{0}:=\bigl\{\phi(A)x : \phi \in {\mathcal S}({{\mathbb R}^{n}}),\ x\in
X\bigr\}.
$
We know that the operator $p(A)$ is
closable and that the following holds:
\begin{itemize}
\item[($\triangleright$)]
$\overline{X_{0}}=X,$ $X_{0}\subseteq \bigcap _{\eta \in {{\mathbb
N}_{0}^{n}}}D(A^{\eta}),$ $\overline{p(A)_{|X_{0}}}=\overline{p(A)}$
and\\ $\phi(A)p(A)\subseteq p(A)\phi(A)=(\phi p)(A),$ $\phi \in
{\mathcal S}({{\mathbb R}^{n}}).$
\end{itemize}
Denote by ${\mathbb C}^{m,m}$ the ring of $m \times m$ matrices over ${\mathbb C}$; $I_{m}$ stands for the identity matrix of format $m\times m$ ($m\in {\mathbb N}$). If
$P(x)=[p_{ij}(x)]$ is an $m\times m$ matrix of polynomials of $x\in {\mathbb R}^{n},$ then
there exist $d\in {\mathbb N}$ and matrices $P_{\eta}\in {\mathbb C}^{m,m}$ such that $P(x)=\sum_{|\eta|\leq d}P_{\eta}x^{\eta},$ $x\in {\mathbb R}^{n}.$ Then the operator $P(A):=\sum_{|\eta|\leq d}P_{\eta}A^{\eta}
$ is closable on $X^{m}.$
For further information concerning the functional calculus for commuting generators of bounded $C_{0}$-groups, see \cite{l1}, \cite{knjigaho} and \cite{zheng-pacific}-\cite{quan-miao}.
The proof of the following auxiliary lemma, which is probably known in the existing literature, is included for the sake of completeness.
\begin{lem}\label{lap-inj}
Suppose that $1\leq p<\infty,$ $n\in {\mathbb N}$ and $X:=L^{p}({\mathbb R}^{n}).$ Denote by $\Delta_{p,n}$ the operator $\Delta$ acting on $X$ with its maximal distributional domain. Then $\Delta_{p,n}$ is injective.
\end{lem}
\begin{proof}
If $1<p<\infty,$ then the statement immediately follows from the fact that the operator $-\Delta_{p,n}$ is non-negative, with dense domain and range (cf. \cite[pp. 256, 266]{MSP}). Suppose now that $p=1$ and $\Delta_{p,n}f=0$ for some $f\in X.$ Then \cite[Lemma 3.2]{MSP} implies that, for every $\varphi \in {\mathcal D}({\mathbb R}^{n})$ and for every multi-index $\eta \in {\mathbb N}_{0}^{n},$ the function
$\varphi \ast f$ belongs to the space ${\mathcal T}$ consisting of those $C^{\infty}({\mathbb R}^{n})$-functions whose any partial
derivative belongs to $L^{1}({\mathbb R}^{n}) \cap L^{\infty}({\mathbb R}^{n}).$ Since $\Delta_{p,n}(\varphi \ast f)=\varphi \ast \Delta_{p,n}f=0,$ $\varphi \in {\mathcal D}({\mathbb R}^{n})$ and the operator $\Delta_{{\mathcal T}}$ is injective by \cite[Remark 3.3]{MSP}, we have that $\varphi \ast f=0,$ $\varphi \in {\mathcal D}({\mathbb R}^{n}).$ Hence, $f=0.$
\end{proof}
\section{FORMULATION AND PROOF OF MAIN RESULTS. EXAMPLES AND APPLICATIONS}
Before stating our first main result, we need to repeat some notations and preliminaries from \cite{vlad-mar-prim}.
Suppose that $n\in {\mathbb N},$ $0<\zeta \leq 2,$
$q_{0},q_{1},\cdot \cdot \cdot ,q_{n}$ are given non-negative integers satisfying $q_{0}=0$ and $0<q_{1}\leq q_{2}\leq \cdot \cdot \cdot \leq q_{n}.$ Let $A_{0},A_{1},\cdot \cdot \cdot, A_{n-1},A_n$ be closed linear operators acting on $X.$ Set $A_{n}:=B,$ $T_{i}u(t):=A_{i}({\mathbf D}_{t}^{\zeta})^{q_{i}}u(t),$ $t\geq 0,$ $i\in {\mathbb N}_{n}^{0}$ and
$$
P_{\lambda}:=\lambda^{q_{n}\zeta}B+\sum \limits_{i=0}^{n-1}\lambda^{q_{i}\zeta}A_{i},\quad \lambda \in {\mathbb C} \setminus \{0\}.
$$
Of concern is the following abstract degenerate multi-term Cauchy problem:
\begin{equation}\label{snarky}
\sum \limits_{i=0}^{n}T_{i}u(t)=0,\quad t\geq 0,
\end{equation}
accompanied with the following initial conditions:
\begin{align}
\notag
& \Bigl(\bigl ({\mathbf D}_{t}^{\zeta}\bigr)^{j}u(t)\Bigr)_{t=0}=u_{j},\ j\in {\mathbb N}_{q_{n}-1}^{0},\mbox{ if }\zeta \in (0, 1],\mbox{ resp., }\\\label{puppy}&
\Bigl(\bigl ({\mathbf D}_{t}^{\zeta}\bigr)^{j}u(t)\Bigr)_{t=0}=u_{j},\ j\in {\mathbb N}_{q_{n}-1}^{0}; \ \Bigl(\frac{d}{dt}\bigl ({\mathbf D}_{t}^{\zeta}\bigr)^{j}u(t)\Bigr)_{t=0}=v_{j},\ j\in {\mathbb N}_{q_{n}-1}^{0},\mbox{ if }\zeta \in (1,2].
\end{align}
In \cite{vlad-mar-prim}, we have considered the abstract Cauchy problem [(\ref{snarky})-(\ref{puppy})]
with $0<\zeta \leq 1.$ The notion of a strong solution of
problem [(\ref{snarky})-(\ref{puppy})], introduced in the first part of the following definition, coincides with the corresponding notion introduced in \cite[Definition 1]{vlad-mar-prim} in the case that $0<\zeta \leq 1.$
\begin{defn}\label{RES}
\begin{itemize}
\item[(i)] A function $u\in C([0,\infty): X)$ is said to be a strong solution of
problem [(\ref{snarky})-(\ref{puppy})]
iff the term $T_{i}u(t)$ is well defined and continuous for any $t\geq 0,$ $i\in {\mathbb N}_{n}^{0}$,
and [(\ref{snarky})-(\ref{puppy})] holds identically on $[0,\infty).$
\item[(ii)] A function $u\in C([0,\infty): X)$ is said to be an entire solution of
problem [(\ref{snarky})-(\ref{puppy})] iff $u(\cdot)$ is a strong solution of [(\ref{snarky})-(\ref{puppy})] and it can be analytically extended to the whole complex plane, as well as any of the terms $A_{i}u^{(p)}(\cdot)$ ($0\leq i\leq n,$ $p\in {\mathbb N}_{0}$) can be analytically extended to the whole complex plane.
\item[(iii)] A function $u\in C([0,\infty): X)$ is said to be an analytical solution of
problem [(\ref{snarky})-(\ref{puppy})] on the region ${\mathbb C} \setminus (-\infty,0]$ iff $u(\cdot)$ is a strong solution of [(\ref{snarky})-(\ref{puppy})] and it can be extended to the whole complex plane, analytically on the region ${\mathbb C} \setminus (-\infty,0]$ and continuously on the region ${\mathbb C} \setminus (-\infty,0)$, as well as any of the terms $A_{i}({\mathbf D}_{t}^{\zeta})^{p}u(t)$ ($0\leq i\leq n,$ $p\in {\mathbb N}_{0},$ $t\geq 0$) is well defined and can be extended to the whole complex plane, analytically on the region ${\mathbb C} \setminus (-\infty,0]$ and continuously on the region ${\mathbb C} \setminus (-\infty,0).$
\end{itemize}
\end{defn}
Set $S_{\omega}:=\{ j\in {{\mathbb N}_{n}^{0}} : q_{j}-1\geq \omega\}$ ($\omega \in {{\mathbb N}_{q_{n}-1}^{0}}$). We need to introduce the following condition:
\begin{equation}\label{818}
-\infty <\nu ' <\min \limits_{\omega \in {{\mathbb N}_{q_{n}-1}^{0}}} \Bigl[ -\Bigl(q_{n}-1-\omega +\max \bigl\{q_{j} : j\in {\mathbb N}_{n}^{0} \setminus S_{\omega}\bigr\} \Bigr) \Bigr].
\end{equation}
Then $n\in S_{\omega}$ for all $\omega \in {{\mathbb N}_{q_{n}-1}^{0}},$ and (\ref{818}) holds provided that $-\infty <\nu'<1-q_{n}.$
Now we are ready to formulate the following theorem.
\begin{thm}\label{GZA}
Suppose that the operator $C\in L(X)$ is injective, $CA_{i}\subseteq A_{i}C,$ $i\in {\mathbb N}_{n}^{0},$
$0<\zeta \leq 2,$
$\phi \in (-\pi,\pi],$ $\theta \in (\pi-\pi \zeta,\pi-(\pi \zeta)/2),$ $a>r>0$ and $\nu'$ satisfies (\ref{818}). Assume, further, that the following holds:
\begin{itemize}
\item[(i)] The operator family $\{(1+|\lambda|)^{-\nu'}P_{\lambda^{1/\zeta}}^{-1}C : \lambda \in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta},\ |\lambda|\geq r \}\subseteq L(X)$ is both equicontinuous and strongly continuous.
\item[(ii)] For every $x\in X$ and $i\in {\mathbb N}_{n-1}^{0},$ the mapping $\lambda \mapsto A_{i}P_{\lambda^{1/\zeta}}^{-1}Cx,$ $\lambda \in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta},\ |\lambda|\geq r$ is continuous and there exists $v_{i}\in {\mathbb N}$ such that the operator family $\{(1+|\lambda|)^{-v_{i}}A_{i}P_{\lambda^{1/\zeta}}^{-1}C : \lambda \in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta},\ |\lambda|\geq r\}\subseteq L(X)$ is equicontinuous.
\end{itemize}
Denote by ${\mathfrak W}$ (${\mathfrak W}_{e}$)
the subspace of $X^{q_{n}},$ resp. $X^{2q_{n}}$, consisting of all initial values $(u_{0},\cdot \cdot \cdot,u_{q_{n}-1})\in X^{q_{n}},$
resp. $(u_{0},\cdot \cdot \cdot,u_{q_{n}-1}; v_{0},\cdot \cdot \cdot,v_{q_{n}-1})\in X^{2q_{n}},$ subjected to some analytical solution $u(\cdot)$ of problem [(\ref{snarky})] on the region ${\mathbb C} \setminus (-\infty,0]$ (entire solution $ u(\cdot)$ of problem [(\ref{snarky})]). Then ${\mathfrak W}$
is dense in
$(C(
\bigcap_{j=0}^{n}D(A_{j})))^{q_{n}}$ for the topology of $X^{q_{n}},$ provided that $0<\zeta<1,$ resp. $(C(
\bigcap_{j=0}^{n}D(A_{j})))^{2q_{n}}$ for the topology of $X^{2q_{n}},$ provided that $1<\zeta<2$; furthermore, ${\mathfrak W}_{e}$
is dense in
$(C(
\bigcap_{j=0}^{n}D(A_{j})))^{q_{n}}$ for the topology of $X^{q_{n}},$ provided that $\zeta=1,$ resp. $(C(
\bigcap_{j=0}^{n}D(A_{j})))^{2q_{n}}$ for the topology of $X^{2q_{n}},$ provided that $\zeta=2.$
\end{thm}
To prove Theorem \ref{GZA}, we need the following lemma (cf. also \cite[Lemma 1.1, Theorem 1.1]{svir3}).
\begin{lem}\label{tuga-jesenja}
Let $x\in X.$ Then the mapping
$$
\lambda \mapsto P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}Cx,\quad \lambda \in \Sigma_{(\zeta \pi/2)+\theta},\ |\lambda|> r
$$
is analytic.
\end{lem}
\begin{proof}
Without loss of generality, we may assume that $q_{i}=i$ ($i\in {\mathbb N}_{n}^{0}$), $\zeta=1$ and $\phi=0.$
Clearly, (ii) holds
for every $x\in X$ and $i\in {\mathbb N}_{n}^{0}.$ Furthermore, the following analogue of the Hilbert resolvent equation holds:
\begin{align*}
& P_{\lambda}^{-1}C^{2}x -P_{z}^{-1}C^{2}x
=(z-\lambda)P_{\lambda}^{-1}C\\& \times \Biggl[ \sum_{k=1}^{n-1}{n \choose k}\bigl(z-\lambda \bigr)^{k-1}\lambda^{n-k}B+\sum_{k=1}^{n-2}{n-1 \choose k}\bigl(z-\lambda \bigr)^{k-1}\lambda^{n-1-k}A_{n-1}+\cdot \cdot \cdot +A_{1} \Biggr]
\\ & \times P_{z}^{-1}Cx,\mbox{ provided } \lambda,\ z\in \Sigma_{(\zeta \pi/2)+\theta}\mbox{ and } |\lambda|,\ |z|> r.
\end{align*}
This implies that the mapping $\lambda \mapsto P_{\lambda}^{-1}C^{2}x,$ $\lambda \in \Sigma_{(\zeta \pi/2)+\theta},$ $|\lambda|>r$
is weakly analytic and therefore analytic, as well as that
\begin{align*}
&\frac{d}{d\lambda}\Bigl \langle x^{\ast}, P_{\lambda}^{-1}C^{2}x \Bigr \rangle
\\ & =-\Bigl \langle x^{\ast} , P_{\lambda}^{-1}\bigl[ n \lambda^{n-1}B+(n-1)\lambda^{n-2}A_{n-1}+\cdot \cdot \cdot +A_{1}\bigr]P_{\lambda}^{-1}Cx\Bigr \rangle,
\end{align*}
provided $x^{\ast} \in X^{\ast}, $ $\ \lambda
\in \Sigma_{(\zeta \pi/2)+\theta}$ and $|\lambda|>r.$ Using the Morera theorem and the observation from \cite[Remark 2.7]{ralf}, the above implies that the mapping $ \lambda \mapsto P_{\lambda}^{-1}Cx,$ $\lambda \in \Sigma_{(\zeta \pi/2)+\theta},\ |\lambda|> r $ is analytic, as claimed.
\end{proof}
Now we can proceed to the proof of Theorem \ref{GZA}.
\begin{proof}
Suppose first $0<\zeta\leq 1.$ Clearly, $(\zeta \pi/2)+\theta<\pi,$ $\pi \zeta /2 >\pi-(\zeta \pi/2)-\theta$ and we can find a number $b\in {\mathbb R}$ satisfying
$$
1<b<\frac{\pi \zeta /2}{\pi -(\zeta \pi/2)-\theta}.
$$
Denote by $\Gamma$ the upwards oriented boundary of the region $\{\lambda \in \Sigma_{(\zeta \pi/2)+\theta} : |\lambda|\geq r\}.$ Let $\Omega$ be the open region on the left of $\Gamma.$ Then there exists a sufficiently large number $R>0$ such that
$a-\lambda \in \Sigma_{\pi-(\zeta \pi/2)-\theta}$ for all $\lambda \in \Omega \cup \Gamma$ with $|\lambda| \geq R.$
This implies $|e^{-\varepsilon (a-\lambda)^{b/\zeta}}|=e^{-\varepsilon \Re ((a-\lambda)^{b/\zeta})}\leq
e^{-\varepsilon |a-\lambda|^{b/\zeta}\cos(b\zeta^{-1}(\pi-(\pi \zeta/2)-\theta))}
,$ provided $\varepsilon>0,$ $\lambda \in \Omega \cup \Gamma$ and $|\lambda|\geq R.$
Keeping in mind Lemma \ref{1.1}, we obtain the existence of a constant $c_{\zeta}'>0$ such that $|E_{\zeta}( z^{\zeta}\lambda e^{i\phi})|\leq E_{\zeta}(|z|^{\zeta} |\lambda|)\leq c_{\zeta}'e^{|z||\lambda|^{1/\zeta}}$ for all $z\in {\mathbb C}$ and $\lambda \in {\mathbb C}.$ Hence, there exists a constant $c_{\zeta}>0$ such that
\begin{equation}\label{estimate}
\Bigl| e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( z^{\zeta}\lambda e^{i\phi}\bigr) \Bigr| \leq c_{\zeta}e^{-\varepsilon |a-\lambda|^{b/\zeta}\cos(b\zeta^{-1}(\pi-(\pi \zeta/2)-\theta))+|z||\lambda|^{1/\zeta}},
\end{equation}
for any $z\in {\mathbb C},\ \varepsilon>0$ and $\lambda \in \Omega.$
Suppose now that $x_{w}\in \bigcap_{j=0}^{n}D(A_{j})$ for all $w\in {\mathbb N}_{q_{n}-1}^{0}.$ Then (i) and the estimate (\ref{estimate}) enable us to define the function $z\mapsto u_{\varepsilon}(z),$ $z\in {\mathbb C},$ for any $\varepsilon>0,$ by
$$
u_{\varepsilon}(z):=\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( z^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{q_{j}-1-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda .
$$
It can be simply verified that the mapping $z\mapsto u_{\varepsilon}(z),$ $z\in {\mathbb C} \setminus (-\infty,0)$ is continuous ($\varepsilon>0$).
Using Lemma \ref{1.1} and the proof of \cite[Theorem 2]{vlad-mar-prim}, it readily follows that the mapping $z\mapsto u_{\varepsilon}(z),$ $z\in {\mathbb C} \setminus (-\infty,0]$ is analytic ($\varepsilon>0$), with
\begin{equation}\label{nujabes}
u^{\prime}_{\varepsilon}(z)=\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}z^{\zeta-1}E_{\zeta,\zeta}\bigl( z^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{q_{j}-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda ,
\end{equation}
for any $\varepsilon>0$ and $z\in {\mathbb C} \setminus (-\infty,0];$ furthermore, the mapping $z\mapsto u_{\varepsilon}(z),$ $z\in {\mathbb C}$ is entire provided $\varepsilon>0,$ $\zeta=1$ and, in this case, (\ref{nujabes}) holds
for any $\varepsilon>0$ and $z\in {\mathbb C}.$ The proof of \cite[Theorem 2]{vlad-mar-prim} also shows that the term $({\mathbf D}_{t}^{\zeta})^{p}u_{\varepsilon}(t),$ $t\geq 0$ is well defined for any $p\in {\mathbb N}_{0}$ and $\varepsilon>0,$ with
\begin{align}
\notag
({\mathbf D}_{t}^{\zeta})^{p}& u_{\varepsilon}(t)
\\\label{london-acid} &=\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( t^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{p+q_{j}-1-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda ;
\end{align}
cf. also the formula \cite[(1.25)]{bajlekova}. Combined with the Cauchy theorem, (ii) and Lemma \ref{1.1},
the above implies that the term
$A_{i}({\mathbf D}_{t}^{\zeta})^{p}u_{\varepsilon}(t)$ is well defined for $t\geq 0,$ $i\in {\mathbb N}_{n}^{0},$ $p\in {\mathbb N}_{0}$ and $\varepsilon>0,$
with
\begin{align*}
& A_{i}\bigl({\mathbf D}_{t}^{\zeta}\bigr)^{p}u_{\varepsilon}(t)
\\& =\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( t^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{p+q_{j}-1-w}A_{i}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda.
\end{align*}
This implies that, for every $\varepsilon>0,$ any of the terms $A_{i}({\mathbf D}_{t}^{\zeta})^{p}u_{\varepsilon}(\cdot)$ ($0\leq i\leq n,$ $p\in {\mathbb N}_{0}$) can be extended to the whole complex plane, analytically on the region ${\mathbb C} \setminus (-\infty,0]$ and continuously on the region ${\mathbb C} \setminus (-\infty,0);$ we only need to replace the variable $t\geq 0,$ appearing in the above formula, with the variable $z\in {\mathbb C}.$
Furthermore,
\begin{align*}
& \sum \limits_{i=0}^{n}A_{i}({\mathbf D}_{t}^{\zeta})^{q_{i}}u_{\varepsilon}(t)
\\ & =\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\sum \limits_{i=0}^{n}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( t^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{q_{i}+q_{j}-1-w}A_{i}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda
\\ & =\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}} \frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( t^{\zeta}\lambda e^{i\phi}\bigr)\bigl(\lambda e^{i\phi}\bigr)^{q_{j}-1-w}CA_{j}x_{\omega}\, d\lambda=0,\ t\geq 0,\ \varepsilon>0,
\end{align*}
so that for each $\varepsilon>0$ the mapping $t\mapsto u_{\varepsilon}(t),$ $t\geq 0$
is an analytical solution of problem (\ref{snarky}) on the region ${\mathbb C} \setminus (-\infty,0]$ (entire solution of problem (\ref{snarky}), provided that $\zeta=1$). Let $u_{l}^{\varepsilon}=( ({\mathbf D}_{t}^{\zeta})^{l}u_{\varepsilon}(t))_{t=0},$ $l \in {\mathbb N}_{q_{n}-1}^{0}$ ($\varepsilon>0$).
Now we will prove that $(u_{0}^{\varepsilon},\cdot \cdot \cdot,u_{q_{n}-1}^{\varepsilon})$ converges to $e^{-i\phi}(Cx_{0},\cdot \cdot \cdot , Cx_{q_{n}-1})$ as $\varepsilon \rightarrow 0+,$ for the topology of $X^{q_{n}}$ (cf. also \cite[Remark 1(x)]{vlad-mar-prim}). Let $\omega \in {\mathbb N}_{q_{n}-1}^{0}$ and $l\in {\mathbb N}_{q_{n}-1}^{0}$ be fixed. Keeping in mind (\ref{london-acid}), it suffices to prove that the following holds:
$$
\lim \limits_{\varepsilon \rightarrow 0+}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}\bigl(\lambda e^{i\phi}\bigr)^{l+q_{j}-1-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda =e^{-i\phi}\delta_{ \omega l}Cx_{\omega},
$$
i.e., that
\begin{align}
\notag \lim \limits_{\varepsilon \rightarrow 0+}& \frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}\bigl(\lambda e^{i\phi}\bigr)^{l-1-w}
\\\label{AJ} & \times \Biggl[ Cx_{\omega}-\sum \limits_{j\in {\mathbb N}_{n}^{0} \setminus S_{\omega}}\bigl(\lambda e^{i\phi}\bigr)^{q_{j}}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\Biggr]\, d\lambda =e^{-i\phi}\delta_{ \omega l}Cx_{\omega},
\end{align}
where $\delta_{ \omega l}$ denotes the Kronecker delta. Since $|e^{-\varepsilon (a-\lambda)^{b/\zeta}}|\leq 1,$ $\lambda \in \Gamma ,$ $\varepsilon>0,$ (\ref{818}) and (i) hold, we have that there exists $\sigma>0$ such that
$$
\Biggl | e^{-\varepsilon (a-\lambda)^{b/\zeta}}\bigl(\lambda e^{i\phi}\bigr)^{l-1-w}\bigl(\lambda e^{i\phi}\bigr)^{q_{j}}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w} \Biggr |\leq \mbox{Const. } |\lambda|^{-1-\sigma},
$$
for any $\lambda \in \Gamma ,$ $\varepsilon>0$ and $j\in {\mathbb N}_{n}^{0} \setminus S_{\omega}.$ Applying the dominated convergence theorem, Lemma \ref{tuga-jesenja} and the Cauchy theorem, we get that
\begin{align*}
\lim \limits_{\varepsilon \rightarrow 0+}&\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}\bigl(\lambda e^{i\phi}\bigr)^{l-1-w}\sum \limits_{j\in {\mathbb N}_{n}^{0} \setminus S_{\omega}}\bigl(\lambda e^{i\phi}\bigr)^{q_{j}}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda
\\ & = \frac{1}{2\pi i}\int_{\Gamma}\bigl(\lambda e^{i\phi}\bigr)^{l-1-w}\sum \limits_{j\in {\mathbb N}_{n}^{0} \setminus S_{\omega}}\bigl(\lambda e^{i\phi}\bigr)^{q_{j}}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda =0.
\end{align*}
Taking into account the last formula on p. 170 of \cite{x263}, it readily follows that (\ref{AJ}) holds good.
The proof of the theorem is thereby complete in the case that $0<\zeta \leq 1.$ Suppose now $1<\zeta \leq 2.$ Then it is not difficult to show that there exists a finite constant $d_{\zeta}>0$ such that the function $F_{\lambda}(z):=zE_{\zeta,2}(z^{\zeta}\lambda e^{i\phi}),$ $z\in {\mathbb C}$ ($\lambda \in {\mathbb C}$) satisfies
$F_{\lambda}^{\prime}(z)=E_{\zeta} (z^{\zeta}\lambda e^{i\phi}),$ $z\in {\mathbb C} \setminus (-\infty,0]$
and $|F_{\lambda}(z)|\leq d_{\zeta}(1+|z|)e^{|z||\lambda|^{1/\zeta}},$ $z\in {\mathbb C}$ ($\lambda \in {\mathbb C}$). Since for any function $u\in C^{1}([0,\infty) :X)$ with $u^{\prime}(0)=0$ we have ${\mathbf D}_{t}^{\zeta}(g_{1}\ast u)(t)=(g_{1}\ast {\mathbf D}_{\cdot}^{\zeta}u)(t),$ $t\geq 0,$ provided in addition that the term $ {\mathbf D}_{t}^{\zeta}u(t)$ is defined for $t\geq 0,$ it readily follows that
${\mathbf D}_{t}^{\zeta}F_{\lambda}(t)=(g_{1}\ast {\mathbf D}_{t}^{\zeta}E_{\zeta}(\cdot^{\zeta}\lambda e^{i\phi}))(t)=\lambda e^{i\phi}(g_{1}\ast E_{\zeta}(\cdot^{\zeta}\lambda e^{i\phi}))(t)=\lambda e^{i\phi}F_{\lambda}(t),$ $t\geq 0$ ($\lambda \in {\mathbb C}$).
Let $x_{w},\ y_{w}\in \bigcap_{j=0}^{n}D(A_{j})$ for all $w\in {\mathbb N}_{q_{n}-1}^{0}.$ Define now the solution $u_{\varepsilon}(\cdot)$ by
\begin{align*}
& u_{\varepsilon}(z):=\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( z^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{q_{j}-1-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda
\\ & + \sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}} \frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}F_{\lambda}(z) \bigl(\lambda e^{i\phi}\bigr)^{q_{j}-1-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}y_{w}\, d\lambda,
\end{align*}
for any $z\in {\mathbb C}$ and $\varepsilon>0.$
It is not difficult to prove that, for every $p\in {\mathbb N}_{0},$ $t\geq 0$ and $\varepsilon>0,$ the following holds:
\begin{align*}
& \bigl({\mathbf D}_{t}^{\zeta}\bigr)^{p}u_{\varepsilon}(t)
\\ & =\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( t^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{p+q_{j}-1-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda
\\ & + \sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}} \frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}F_{\lambda}(t) \bigl(\lambda e^{i\phi}\bigr)^{p+q_{j}-1-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}y_{w}\, d\lambda
\end{align*}
and
\begin{align*}
& \frac{d}{dt}\bigl({\mathbf D}_{t}^{\zeta}\bigr)^{p}u_{\varepsilon}(t)
\\ &=\sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}t^{\zeta-1}E_{\zeta,\zeta}\bigl( t^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{p+q_{j}-w}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}x_{w}\, d\lambda
\\ & + \sum \limits_{w=0}^{q_{n}-1}\sum \limits_{j\in S_{\omega}}\frac{1}{2\pi i}\int_{\Gamma}e^{-\varepsilon (a-\lambda)^{b/\zeta}}E_{\zeta}\bigl( t^{\zeta}\lambda e^{i\phi}\bigr) \bigl(\lambda e^{i\phi}\bigr)^{p+q_{j}-w-1}P_{(\lambda e^{i\phi})^{1/\zeta}}^{-1}CA_{j}y_{w}\, d\lambda .
\end{align*}
The remaining part of the proof of the theorem can be deduced by repeating almost literally
the arguments that we have already used in the case that $0<\zeta \leq 1.$
\end{proof}
\begin{rem}\label{unexp}
\begin{itemize}
\item[(i)] Theorem \ref{GZA} seems to be new and not considered elsewhere provided that $B\neq I$ or $\zeta \neq 1.$
\item[(ii)] As explained in \cite[Remark 1(iv)]{vlad-mar-prim}, the operator $({\mathbf D}_{s}^{\zeta})^{p}$ and the operator ${\mathbf D}_{s}^{\zeta p}$ can be completely different provided that $\zeta \in (0,2) \setminus \{1\}$ and $p\in {\mathbb N} \setminus \{1\},$ which clearly implies that we have to make a strict distinction between them. Observe also that Theorem \ref{GZA} can be reformulated in the case that $\zeta>2$ (cf. also \cite[Theorem 2.1, Theorem 2.2]{fcaa}) and that we can prove a similar result on the existence and uniqueness of entire and analytical solutions of problem (DFP)$_{R}$ considered in \cite{vlad-mar-prim}; we leave the reader to make this precise.
\item[(iii)] The notion of an entire solution of the abstract Cauchy problem $(ACP_{n}),$ introduced in \cite[Definition 1.1]{XL-entire}, is slightly different from the corresponding notion introduced in Definition \ref{RES}(ii). Strictly speaking, if $u(\cdot)$ is an entire solution of the abstract Cauchy problem $(ACP_{n})$ in the sense of Definition \ref{RES}(ii), then $u(\cdot)$ is an entire solution of problem $(ACP_{n})$ in the sense of \cite[Definition 1.1]{XL-entire}. The converse statement holds provided that for each index $i\in {\mathbb N}_{n-1}$ the initial values $u_{0},\cdot \cdot \cdot,u_{i-1}$ belong to $D(A_{i}).$
\item[(iv)] The uniqueness of analytical solutions of problem [(\ref{snarky})-(\ref{puppy})] on the region ${\mathbb C} \setminus (-\infty,0]$ can be proved as follows. Let $u(\cdot)$ be an analytical solution of problem [(\ref{snarky})-(\ref{puppy})] on the region ${\mathbb C} \setminus (-\infty,0],$ with the initial values $u_{j},$ resp. $u_{j},\ v_{j},$ being zeroes ($0\leq j\leq q_{n}-1$).
Then the choice of initial values in (\ref{puppy}) enables us to integrate the equation (\ref{snarky})
$(q_{n}\zeta)$-times by using the formula \cite[(1.21)]{bajlekova}. Keeping in mind the analyticity of $u(\cdot)$, we easily infer that for each $i\in {\mathbb N}_{n-1}^{0}$ the
mappings $z\mapsto Bu(z),$ $z\in {\mathbb C} \setminus (-\infty,0)$ ($z\mapsto Bu(z),$ $z\in {\mathbb C} \setminus (-\infty,0]$) and $z\mapsto (g_{(q_{n}-q_{i})\zeta} \ast A_{i}u)(z),$ $z\in {\mathbb C} \setminus (-\infty,0)$ ($z\mapsto (g_{(q_{n}-q_{i})\zeta} \ast A_{i}u)(z),$ $z\in {\mathbb C} \setminus (-\infty,0]$) are well defined and continuous (analytical), as well as that
$$
Bu\bigl(te^{i\gamma}\bigr)+\sum \limits_{i=0}^{n-1}\int^{te^{i\gamma}}_{0}g_{(q_{n}-q_{i})\zeta}(s) A_{i}u\bigl( t e^{i\gamma}-s \bigr)\, ds=0,\quad t\geq 0,\ \gamma \in (-\pi,\pi),
$$
i.e., that
\begin{equation}\label{jaco}
Bu\bigl(te^{i\gamma}\bigr)+\sum \limits_{i=0}^{n-1}\bigl(e^{i\gamma}\bigr)^{(q_{n}-q_{i})\zeta}\int^{t}_{0}g_{(q_{n}-q_{i})\zeta}(t-s) A_{i}u\bigl( se^{i\gamma} \bigr)\, ds=0,\quad t\geq 0,\ \gamma \in (-\pi,\pi).
\end{equation}
It is clear that there exists $\gamma \in (-\pi,\pi)$ such that $2re^{-i\gamma \zeta}\in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta}.$ Setting $\phi':=\gamma \zeta,$
$u_{\gamma}(t):=u(te^{i\gamma}),$ $t\geq 0,$ we obtain that $u_{\gamma} \in C([0,\infty) : X)$ and
$$
e^{-iq_{n}\phi'}Bu_{\gamma}(t)+\sum \limits^{n-1}_{i=0}e^{-iq_{i}\phi'}A_{i}\bigl(g_{(q_{n}-q_{i})\zeta}\ast u_{\gamma}\bigr)(t)=0,\quad t\geq 0.
$$
On the other hand,
$$
P_{(\lambda e^{-i\phi'})^{1/\zeta}}=\lambda^{q_{n}}e^{-iq_{n}\phi'}B+\sum \limits^{n-1}_{i=0}\lambda^{q_{i}}e^{-iq_{i}\phi'}A_{i},\quad \lambda \in {\mathbb C} \setminus \{0\}.
$$
Using the previous two equalities and \cite[Theorem 2.2]{publi} (applied to the operators $e^{-iq_{i}\phi'}A_{i}$ in place of the operators
$A_{i}$ appearing in the formulation of this theorem), we get that $u_{\gamma}(t)=0,$ $t\geq 0,$ which clearly implies that $u(z)=0,$ $z\in {\mathbb C} \setminus (-\infty,0).$
\end{itemize}
\end{rem}
It is also worth noting that Theorem \ref{GZA} is an extension of \cite[Theorem 2.1]{XL-entire} (cf. also \cite[Theorem 4.2, p. 168]{x263}), where it has been assumed that $B=C=I,$ $\zeta=1,$ $X$ is a Banach space and $\bigcap_{j=0}^{n}D(A_{j})$ is dense in $X$ (in our opinion, the strong continuity in (ii) is very important for the validity of Theorem \ref{GZA} and cannot be so simply neglected here (cf. \cite[(4.8), p. 169]{x263}); also, it ought to be observed that Lemma \ref{tuga-jesenja} is very important for
filling some absences in the proof of \cite[Theorem 4.2]{x263}, appearing on the lines 1-6, p. 171 in \cite{x263}, where the Cauchy formula has been used by assuming the analyticity of mapping $\lambda \mapsto R_{e^{i\phi}\lambda},$ $\lambda \in \Sigma_{(\pi/2)+\theta},$ $|\lambda|>r$ a priori); observe also that, in the concrete situation of abstract Cauchy problem $(ACP_{n}),$ our estimate on the growth rate of $P_{e^{i\phi}\cdot}^{-1}$ (cf. the equation (\ref{818}) with $\nu'<-(n-1)$) is slightly better than the corresponding estimate \cite[(4.2)]{x263}, where it has been required that $\nu'\leq -n.$
If $B=C=I$ and $\zeta=2,$ then Theorem \ref{GZA} strengthens \cite[Theorem 2.1]{XL-entire} in a drastic manner. As a matter of fact, our basic requirement in (i) is that the operator $P_{\lambda}^{-1}=(\lambda^{2n}+\lambda^{2n-2}A_{n-1}+\cdots +A_{0})^{-1}$ exists on the region $\{\lambda^{1/2} : \lambda \in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta} : |\lambda|\geq r\},$ which can be contained in an arbitrary acute angle at vertex $(0,0);$ on the other hand, in the formulation of \cite[Theorem 2.1]{XL-entire}, T.-J. Xiao and J. Liang require the existence of the operator $P_{\lambda}^{-1}$ for any complex number $\lambda$ having the modulus greater than or equal to $r$ and belonging to the obtuse angle
$e^{i\phi}\Sigma_{(\pi/2)+\theta}$.
In the following slight modification of \cite[Example 2]{vlad-mar-prim}, we will focus our attention on the possible applications of Theorem \ref{GZA} in which $C$ is not the identity operator on $X;$
this is a very important example because of its universality and covering a wide range of concrete applications (here, our attempt is to reconsider and relax, in a certain sense, the very restrictive condition \cite[(4.2), p. 168]{x263}).
\begin{example}\label{not-ultra-entire}
Suppose that $0< \zeta\leq 2,$ $\theta \in (-\pi,\pi],$ $r>0,$ $q_{n}>q_{n-1},$ $\emptyset \neq \Omega \subseteq {\mathbb C},$ $N\in {\mathbb N},$ $A$ is a densely defined closed linear operator in $X$ satisfying that $\Omega \subseteq \rho(A)$ and the operator family $\{(1+|\lambda|)^{-N}(\lambda-A)^{-1} : \lambda \in \Omega \}\subseteq L(X)$ is equicontinuous (here we can also assume that the operator $A$ is not densely defined or that the operator family $\{(1+|\lambda|)^{-N}(\lambda-A)^{-1}C : \lambda \in \Omega \}\subseteq L(X)$ is equicontinuous, thus providing some applications of Theorem \ref{GZA} to the abstract degenerate fractional equations involving non-elliptic differential operators with the empty resolvent set; see \cite{quan-miao}). Assume, further, that $P_{i}(z)$ is a complex polynomial ($i\in {\mathbb N}_{n}^{0}$), $P_{n}(z)\not \equiv 0,$
$\lambda_{0}\in \rho(A) \setminus \{z\in {\mathbb C} : P_{n}(z)=0\},$ dist$(\lambda_{0},\Omega)>0,$
as well as that
for each $\lambda \in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta}$ with $|\lambda|\geq r$ all roots of the polynomial
$$
z\mapsto \lambda^{q_{n}}P_{n}(z)+\sum_{i=0}^{n-1}\lambda^{q_{i}}P_{i}(z),\quad z\in {\mathbb C}
$$
belong to $\Omega.$ Set $B:=P_{n}(A)$ and $A_{i}:=P_{i}(A)$ ($i\in {\mathbb N}_{n-1}^{0}$).
Then it can be proved that there exist two sufficiently large numbers $Q'\geq N+2,$ $Q'\in {\mathbb N}$ and $r'>r$ such that the operator families
$\{(1+|\lambda|)^{q_{n}}(\lambda^{q_{n}} P_{n}(A)+\sum_{i=0}^{n-1}\lambda^{q_{i}}P_{i}(A))^{-1}(\lambda_{0}-A)^{-Q'} :\lambda \in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta},\ |\lambda|\geq r' \}\subseteq L(X)$ and $\{(1+|\lambda|)^{q_{n}}P_{j}(A)(\lambda^{q_{n}} P_{n}(A)+\sum_{i=0}^{n-1}\lambda^{q_{i}}P_{i}(A))^{-1}(\lambda_{0}-A)^{-Q'} :\lambda \in e^{i\phi}\Sigma_{(\zeta \pi/2)+\theta},\ |\lambda|\geq r \}\subseteq L(X)$ are both equicontinuous and strongly continuous ($j\in {\mathbb N}_{n-1}^{0}$).
Hence, Theorem \ref{GZA} can be applied with the regularizing operator $C\equiv (\lambda_{0}-A)^{-Q'}.$
\end{example}
In the following theorem, we will reconsider the assertion of \cite[Theorem 2.3.3]{knjigaho} for systems of abstract degenerate differential equations.
\begin{thm}\label{kragujevac}
Let $(X,\|\cdot \|)$ be a complex Banach space and let $iA_{j},\
1\leq j\leq n$ be commuting generators of bounded $C_{0}$-groups on
$X.$ Suppose $\alpha>0,$ $d\in {\mathbb N}$ and $P_{i}(x)= \sum_{|\eta|\leq
d}P_{\eta,i}x^{\eta}$ ($P_{\eta,i}\in {\mathbb C}^{m,m},$ $x\in {\mathbb R}^{n},$ $i=1,2$) are
two given polynomial matrices. Suppose that for each $x\in {\mathbb R}^{n}$ the matrix $P_{2}(x)$ is regular. Then there exists a
dense subset $X_{\alpha,m}$ of $X^{m}$ such that, for every $\vec{x}\in X_{\alpha,m},$ there exists a unique solution (defined in the very obvious way) of the following abstract Cauchy problem:
\[\hbox{(DFP)}: \left\{
\begin{array}{l}
{\mathbf D}_{t}^{\alpha}\overline{P_{2}(A)}\vec{u}(t)=\overline{P_{2}(A)}{\mathbf D}_{t}^{\alpha}\vec{u}(t)=\overline{P_{1}(A)}\vec{u}(t),\quad t\geq 0,\\
\vec{u}(0)=\vec{x};\quad \vec{u}^{(j)}(0)=0,\ 1\leq j \leq \lceil \alpha \rceil -1.
\end{array}
\right.
\]
Furthermore, for every $\vec{x}\in X_{\alpha,m},$ the mapping $t\mapsto \vec{u}(t),$ $t\geq 0$ can be extended to the whole complex plane (the extension of this mapping will be denoted by the same symbol in the sequel), and the following holds:
\begin{itemize}
\item[(i)] The mapping $z\mapsto \vec{u}(z),$ $z\in {\mathbb C} \setminus (-\infty,0]$ is analytic.
\item[(ii)] The mapping $z\mapsto \vec{u}(z),$ $z\in {\mathbb C}$ is entire provided that $\alpha \in {\mathbb N}.$
\end{itemize}
\end{thm}
\begin{proof}
Let us recall that $k=1+\lfloor n/2 \rfloor.$
Suppose that $P_{1}(x)=[p_{ij;1}(x)]_{1\leq i,j\leq m}$ and $P_{2}(x)=[p_{ij;2}(x)]_{1\leq i,j\leq m}$ ($x\in {\mathbb R}^{n}$), and $d$ is the maximal degree of all non-zero polynomials $p_{ij;1}(x)$
and $p_{ij;2}(x)$ ($1\leq i,j\leq m$).
Then $\sup_{x\in {\mathbb R}^{n}}|\mbox{det}(P_{2}(x))|^{-1}<\infty$ and
we can inductively prove that there exist numbers $M_{1} \geq 1$ and $M_{2}\geq 1$ such that
for each $l\in {\mathbb N}_{0}$ there exist polynomials $R_{ij;l}(x)$ ($1\leq i,j\leq m$) of degree $\leq lmd$ satisfying that
$$
\Bigl( P_{2}(x)^{-1}P_{1}(x) \Bigr)^{l}=\frac{\bigl[ R_{ij;l}(x) \bigr]_{1\leq i,j\leq m}}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}},\quad x\in {\mathbb R}^{n}
$$
and that the following holds:
\begin{align}
\notag \Biggl| D^{\eta}\Biggl( & \frac{R_{ij;l}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}}\Biggr) \Biggr| +
\Biggl| D^{\eta}\Biggl( p_{ij;1}(x) \frac{R_{ij;l}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}}\Biggr) \Biggr|
\\\label{larry} &+
\Biggl| D^{\eta}\Biggl(p_{ij;2}(x) \frac{R_{ij;l}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}}\Biggr) \Biggr|
\leq M_{1}^{l}(1+|x|)^{lmdM_{2}},
\end{align}
provided $l\in {\mathbb N}_{0},\ x\in {\mathbb R}^{n},\ 0\leq |\eta|\leq k$ and $ 1\leq i,j\leq m.$
It is very simple to prove that there exists a sufficiently large natural number $k'$ satisfying $2 | k'$ and
\begin{equation}\label{entire-even}
\lim \limits_{l\rightarrow +\infty}\frac{\Bigl(\Gamma\bigl(\frac{2M_{2}(l+1)md+n}{k'd}\bigr)\Bigr)^{1/2l}}{\bigl(\Gamma(\alpha l+1)\bigr)^{1/l}}=0.
\end{equation}
Let $a>0$ be fixed. Set
$C:=(e^{-a|x|^{k'd}})(A)$ and $X_{\alpha,m}:=(R(C))^{m}.$ Then $C\in L(X),$ $C$ is injective and
$D_{\infty}(A_{1}^{2}+\cdot \cdot \cdot +A_{n}^{2})\supseteq R(C) $
is dense in $X$ (\cite{l1}). Define
\begin{equation}\label{WR}
W_{\alpha}(z):=\Biggl[\sum \limits_{l=0}^{\infty}\frac{z^{\alpha
l}}{\Gamma(\alpha l+1)} \Biggl(\frac{R_{ij;l}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}}e^{-a|x|^{k'd}}\Biggr)(A)\Biggr]_{1\leq i,j\leq m},\ z\in {\mathbb C}.
\end{equation}
Using (\ref{larry})-(\ref{entire-even}) and the proof of \cite[Theorem 2.3.3]{knjigaho}, it readily follows that $W_{\alpha}(z)\in L(X^{m})$ for all $z\in {\mathbb C}$, as well as that the expressions
$$
\Biggl[\sum \limits_{l=0}^{\infty}\sum_{v=1}^{m}\frac{z^{\alpha
l}}{\Gamma(\alpha l+1)} \Biggl(p_{iv;2}(x)\frac{R_{vj;l+1}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l+1}}e^{-a|x|^{k'd}}\Biggr)(A)\Biggr]_{1\leq i,j\leq m}
$$
and
$$
\Biggl[\sum \limits_{l=0}^{\infty}\sum_{v=1}^{m}\frac{z^{\alpha
l}}{\Gamma(\alpha l+1)} \Biggl(p_{iv;1}(x)\frac{R_{vj;l}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}}e^{-a|x|^{k'd}}\Biggr)(A)\Biggr]_{1\leq i,j\leq m}
$$
define the bounded linear operators on $X^{m}$ ($z\in {\mathbb C}$). Furthermore,
the mapping $z \mapsto W_{\alpha}(z),$ $z\in {\mathbb C} \setminus (-\infty,0]$ is analytic, and
the mapping $z\mapsto W_{\alpha}(z),$ $z\in {\mathbb C}$ is entire provided that $\alpha \in {\mathbb N}.$
Suppose now $\vec{x}\in X_{\alpha,m}.$ Then there exists $\vec{x'}\in X^{m}$ such that $\vec{x}=C_{m}\vec{x'},$ where $C_{m}=CI_{m}.$ Setting $\vec{u}(z):=W_{\alpha}(z)\vec{x'},$ $z\in {\mathbb C},$ we immediately obtain that
(i) and (ii) hold. It is not difficult to prove that ${\mathbf D}_{t}^{\alpha}\vec{u}(t)$ is well-defined, as well as that
$$
{\mathbf D}_{t}^{\alpha}\vec{u}(t)=\Biggl[\sum \limits_{l=1}^{\infty}\frac{t^{\alpha (
l-1)}}{\Gamma(\alpha (l-1)+1)} \Biggl(\frac{R_{ij;l}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}}e^{-a|x|^{k'd}}\Biggr)(A)\Biggr]_{1\leq i,j\leq m}\vec{x'},\quad t\geq 0,
$$
and $\vec{u}(0)=\vec{x},\ \vec{u}^{(j)}(0)=0,\ 1\leq j \leq \lceil \alpha \rceil -1.$
Since $\overline{P_{1}(A)}$
and $\overline{P_{2}(A)}$ are closed, we can prove with the help of ($\triangleright$)
that $\vec{u}(t) \in D(\overline{P_{1}(A)}) \cap D(\overline{P_{2}(A)}),$
$
{\mathbf D}_{t}^{\alpha}\vec{u}(t) \in D(\overline{P_{2}(A)}),$ the term ${\mathbf D}_{t}^{\alpha}\overline{P_{2}(A)}\vec{u}(t)$ is well defined, with
\begin{align*}
& \overline{P_{2}(A)}{\mathbf D}_{t}^{\alpha}\vec{u}(t)={\mathbf D}_{t}^{\alpha}\overline{P_{2}(A)}\vec{u}(t)
\\ & =\Biggl[\sum \limits_{l=0}^{\infty}\sum_{v=1}^{m}\frac{t^{\alpha
l}}{\Gamma(\alpha l+1)} \Biggl(p_{iv;2}(x)\frac{R_{vj;l+1}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l+1}}e^{-a|x|^{k'd}}\Biggr)(A)\Biggr]_{1\leq i,j\leq m}\vec{x'}
\end{align*}
and
$$
\overline{P_{1}(A)}\vec{u}(t)=\Biggl[\sum \limits_{l=0}^{\infty}\sum_{v=1}^{m}\frac{t^{\alpha
l}}{\Gamma(\alpha l+1)} \Biggl(p_{iv;1}(x)\frac{R_{vj;l}(x)}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}}e^{-a|x|^{k'd}}\Biggr)(A)\Biggr]_{1\leq i,j\leq m}\vec{x'},
$$
for any $t\geq 0.$ Since
$$
P_{2}(x)\frac{\bigl[ R_{ij;l+1}(x) \bigr]_{1\leq i,j\leq m}}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l+1}}=P_{1}(x)\frac{\bigl[ R_{ij;l}(x) \bigr]_{1\leq i,j\leq m}}{\bigl(\mbox{det}(P_{2}(x))\bigr)^{l}},\quad l\in {\mathbb N}_{0},\ x\in {\mathbb R}^{n},
$$
a simple matricial computation shows that the function $t\mapsto \vec{u}(t),$ $t\geq 0$ is a solution of problem (DFP). Now we will prove the uniqueness of solutions of problem (DFP). Let $t\mapsto \vec{u}(t),$ $t\geq 0$ be a solution of (DFP) with $\vec{x}=0.$
Integrating $\alpha$-times (DFP), we get that $\overline{P_{2}(A)}\vec{u}(t)=\int^{t}_{0}g_{\alpha}(t-s)\overline{P_{1}(A)}\vec{u}(s)\, ds,$ $t\geq 0.$
Using this equality, as well as the fact that $\overline{P_{2}(A)}W_{\alpha}(t)-\overline{P_{2}(A)}C_{m}=\overline{P_{1}(A)}(g_{\alpha} \ast W_{\alpha}(\cdot))(t)\in L(X^{m}),$ $t\geq 0,$ and the proof of \cite[Proposition 1.1]{prus}, we obtain that $0=(W_{\alpha}\ast 0)(t)=(\overline{P_{2}(A)}C_{m}\ast \vec{u})(t),$ $t\geq 0,$ so that it suffices to prove that the operator $\overline{P_{2}(A)}$ is injective. Suppose that $\overline{P_{2}(A)}\vec{x}=\vec{0}$
for some $\vec{x}\in X^{m}.$
By \cite[Lemma 1.1(a)]{zheng-pacific}, we may assume without loss of generality that $\vec{x}\in X_{0}^{m}$ (cf. ($\triangleright$)).
It is clear that there exist polynomials $q_{ij}(x)$ ($1\leq i,j\leq m$) such that $P_{2}(x)^{-1}=(\mbox{det}(P_{2}(x)))^{-1}[q_{ij}(x)]_{1\leq i,j\leq m}.$ Using ($\triangleright$), one can prove that $[(\mbox{det}(P_{2}(x)))(A)I_{m}][\phi(A)I_{m}]\vec{x}=[(\phi(x)q_{ij}(x))(A)]_{1\leq i,j\leq m}P_{2}(A)\vec{x}=\vec{0},$ $\phi \in {\mathcal S}({\mathbb R}^{n}).$
By \cite[Remark 4.4(i)]{filomat}, the operator $(\mbox{det}(P_{2}(x)))(A)$ is injective, whence we may conclude that
$[\phi(A)I_{m}]\vec{x}=\vec{0},$ $\phi \in {\mathcal S}({\mathbb R}^{n}).$ This, in turn, implies $\vec{x}=\vec{0}$ and completes the proof of the theorem.
\end{proof}
\begin{rem}\label{denseD}
\begin{itemize}
\item[(i)]
It can be simply proved that for each $\vec{x}\in X_{\alpha,m}$ the solution $t\mapsto \vec{u}(t),$ $t\geq 0$ possesses some expected properties from Definition \ref{RES}: If $\alpha \notin {\mathbb N},$ then the terms $({\mathbf D}_{t}^{\alpha})^{p}\vec{u}(t),$ $\overline{P_{1}(A)}({\mathbf D}_{t}^{\alpha})^{p}\vec{u}(t)$ and $\overline{P_{2}(A)}({\mathbf D}_{t}^{\alpha})^{p}\vec{u}(t)$
are well-defined and can be extended to the whole complex plane, analytically on the region ${\mathbb C} \setminus (-\infty,0]$ and continuously on the region ${\mathbb C} \setminus (-\infty,0);$ if $\alpha \in {\mathbb N},$ then the terms $(d^{\alpha p}/dt^{\alpha p})\vec{u}(t),$ $\overline{P_{1}(A)}(d^{\alpha p}/dt^{\alpha p})\vec{u}(t)$ and $\overline{P_{2}(A)}(d^{\alpha p}/dt^{\alpha p})\vec{u}(t)$
are well-defined and can be entirely extended to the whole complex plane ($p\in {\mathbb N}_{0}$).
The assertion of \cite[Theorem 2.3.5]{knjigaho} can be reformulated in degenerate case, as well.
\item[(ii)]
If $m=1,$ $P_{1}(x)=\sum_{|\alpha|\leq d}a_{\alpha}x^{\alpha},$ $P_{2}(x)=\sum_{|\alpha|\leq d}b_{\alpha}x^{\alpha},$
$x\in {{\mathbb R}^{n}}$ ($a_{\alpha},\ b_{\alpha} \in {\mathbb C}$),
$P_{2}(x)\neq 0,$ $x\in {{\mathbb R}^{n}}$ and
$X$ is a
function space on which translations are uniformly bounded and
strongly continuous (for example, $L^{p}({\mathbb R}^{n})$ with
$p\in [1,\infty),$ $C_{0}({\mathbb R}^{n})$ or $BUC({\mathbb
R}^{n});$ notice also that $X$ can be consisted of functions defined
on some bounded domain \cite{l1}, \cite{quan-miao}), then
the obvious choice for $A_{j}$ is $i\partial/\partial x_{j}$ ($1\leq
j\leq n$). In this case, $\overline{P_{1}(A)}$ and $\overline{P_{2}(A)}$ are just the operators
$\sum_{|\alpha|\leq d}a_{\alpha}i^{|\alpha|}(\partial/\partial
x)^{\alpha}$ and $\sum_{|\alpha|\leq d}b_{\alpha}i^{|\alpha|}(\partial/\partial
x)^{\alpha},$ respectively, acting with their maximal distributional domains. Making use of Theorem
\ref{kragujevac} and a slight modification of the formula appearing on l. 1, p. 49 of \cite{knjigaho}, we can prove that for each $\alpha>0$ there exists a
dense subset $X_{\alpha,1}$ of $L^{p}({\mathbb R}^{n})$ such that
the abstract Cauchy problem:
\begin{align*}
\sum_{|\alpha|\leq
d}b_{\alpha}i^{|\alpha|} (\partial/\partial x)^{\alpha}{\mathbf D}_{t}^{\alpha}u(t,x) &=
{\mathbf D}_{t}^{\alpha}\sum_{|\alpha|\leq
d}b_{\alpha}i^{|\alpha|}(\partial/\partial x)^{\alpha}u(t,x)
\\ & =\sum_{|\alpha|\leq
d}a_{\alpha}i^{|\alpha|}(\partial/\partial x)^{\alpha}u(t,x),\
t>0,\ x\in {{\mathbb R}^{n}};
\end{align*}
$$
\frac{\partial^{l}}{\partial t^{l}}u(t,x)_{|t=0}=f_{l}(x),\ \ x\in {{\mathbb R}^{n}},\ l=0,1,\cdot \cdot \cdot, \lceil
\alpha \rceil -1,
$$
has a unique solution (obeying the properties clarified in the part (i) of this remark) provided $f_{l}(\cdot) \in X_{\alpha,1},$
$l=0,1,\cdot \cdot \cdot, \lceil \alpha \rceil -1.$ A similar
assertion can be formulated in $X_{l}$-type
spaces (\cite{x263}); we can also move to the spaces $L^{\infty}({\mathbb
R}^{n}),$ $C_{b}({\mathbb R}^{n})$ or $C^{\sigma}({\mathbb R}^{n})$
($0<\sigma<1$) by using distributional techniques, but then we cannot expect the density of the corresponding subspace $X_{\alpha,1}.$
\item[(iii)] In \cite{filomat}-\cite{publi}, we have recently considered
the $C$-wellposedness of the abstract degenerate Cauchy problem
\[\hbox{(DFP)}: \left\{
\begin{array}{l}
{\mathbf D}_{t}^{\alpha}\overline{P_{2}(A)}u(t)=\overline{P_{2}(A)}{\mathbf D}_{t}^{\alpha}u(t)=\overline{P_{1}(A)}u(t),\quad t\geq 0,\\
u(0)=Cx;\quad u^{(j)}(0)=0,\ 1\leq j \leq \lceil \alpha \rceil -1,
\end{array}
\right.
\]
where $0<\alpha \leq 2,$ $P_{1}(x)$ and $P_{2}(x)$ are complex polynomials, $P_{2}(x)\neq 0,$ $x\in {\mathbb R}^{n},$ $iA_{j},\ 1\leq j\leq n$ are commuting
generators of bounded $C_{0}$-groups on a Banach space $X,$ thus continuing the research studies of T.-J. Xiao-J. Liang \cite{XL}-\cite{XL-HIGHER}. Denote $\Omega(\omega)=\{ \lambda^{2}
: \Re \lambda >\omega\},$ if $\omega
>0,$ and $\Omega(\omega)={\mathbb C} \ \setminus \ (-\infty,-\omega^{2}],$ if $\omega \leq 0.$ The basic assumption in \cite{filomat}-\cite{publi} was that
$$
\sup_{x\in {\mathbb R}^{n}}\Re \Biggl(\Biggl(\frac{P_{1}(x)}{P_{2}(x)}\Biggr)^{1/\alpha}\Biggr)\leq
\omega,
$$
provided $0<\alpha<2,$ and $P_{1}(x)/P_{2}(x) \notin \Omega(\omega),$ $x\in {{\mathbb R}^{n}},$ provided $\alpha=2.$ Observe that our results from the part (i) of this remark can be applied
in the analysis of problem $\hbox{(DFP)}$ in the general case $\alpha>0,$ and also in the case that $0<\alpha \leq 2$ and the above-mentioned requirements are not satisfied.
\end{itemize}
\end{rem}
\subsection{Final conclusions and remarks}
We feel duty bound to say that Theorem \ref{kragujevac} and the conclusions from the parts of (ii) and (iii) of Remark \ref{denseD} cannot be applied in the analysis of $L^{p}$-wellposedness of a great number of very important degenerate equations of mathematical physics, like (cf. the monograph by G. V. Demidenko-S. V. Uspenskii \cite{dem} for further information):
\begin{itemize}
\item[(a)] (The Rossby wave equation, 1939)
$$
\Delta u_{t}+\beta u_{y}=0 \ \ (n=2), \ \ u(0,x,y)=u_{0}(x,y);
$$
\item[(b)] (The Sobolev equation, 1940)
\begin{align*}
\Delta u_{tt}&+\omega^{2} u_{zz}=0 \ \ (n=3),\\ & u(0,x,y,z)=u_{0}(x,y,z),\ u_{t}(0,x,y,z)=u_{1}(x,y,z),
\end{align*}
here $\omega/2$ is the angular velocity;
\item[(c)] (The internal wave equation in the Boussinesq approximation, 1903)
\begin{align*}
\Delta u_{tt}& +N^{2} \bigl( u_{xx}+u_{yy} \bigr)=0 \ \ (n=3),\\ & u(0,x,y,z)=u_{0}(x,y,z),\ u_{t}(0,x,y,z)=u_{1}(x,y,z);
\end{align*}
\item[(d)] (The gravity-gyroscopic wave equation, cf. \cite{gabov})
\begin{align*}
\Delta u_{tt}& +N^{2}\bigl( u_{xx}+u_{yy} \bigr) +\omega^{2}u_{zz}=0 \ \ (n=3),\\ & u(0,x,y,z)=u_{0}(x,y,z),\ u_{t}(0,x,y,z)=u_{1}(x,y,z);
\end{align*}
\item[(e)] (Small amplitude oscillations of a rotating viscous fluid)
\begin{align*}
\Delta u_{tt}& -2\nu \Delta^{2}u_{t}+v^{2}\Delta^{3}u+\omega^{2}u_{zz}=0 \ \ (n=3),\\ & u(0,x,y,z)=u_{0}(x,y,z),\ u_{t}(0,x,y,z)=u_{1}(x,y,z).
\end{align*}
Here $\omega/2$ is the angular velocity and $\nu >0$ is the viscosity coefficient.
\end{itemize}
Before including some details on the existence and uniqueness of entire solutions of the equations (a)-(e) in $L^{p}$ spaces, we need to explain how one can reformulate
the assertion of Theorem \ref{kragujevac} in the case that there exist a vector $x_{0}\in {\mathbb R}^{n}$ and a non-empty compact subset $K$ of ${\mathbb R}^{n}$
such that the matrix $P_{2}(x_{0})$ is singular and $\{ x\in {\mathbb R}^{n} : \mbox{det}(P_{2}(x))=0 \}\subseteq K;$ the analysis of some fractional analogues of (a)-(e) can be carried out similarly and is therefore omitted. Denote by ${\mathcal A}$ the class consisting of those $C^{\infty}({\mathbb R}^{n})$-functions $\phi(\cdot)$ satisfying that there exist two open relatively compact neighborhoods $\Omega$ and $\Omega'$ of $K$ in ${\mathbb R}^{n}$ such that $\phi(x)=0$ for all $x\in \Omega$ and $\phi(x)=1$ for all $x \in {\mathbb R}^{n} \setminus \Omega'.$ Since the estimate (\ref{larry}) holds for all $x \in {\mathbb R}^{n} \setminus \Omega , $ we can define for each $z\in {\mathbb C}$ the matricial operator $W_{\alpha}(z)$ (cf. the proof of Theorem \ref{kragujevac}) by replacing the function $e^{-a|x|^{k'd}}$ in (\ref{WR}) with the function
$\phi(x)e^{-a|x|^{k'd}}.$ Setting $C_{\phi}:=(\phi(x) e^{-a|x|^{k'd}})(A)$ for $\phi \in {\mathcal A}$ (then we do not know any longer whether the set $\bigcup_{\phi \in {\mathcal A}}R(C_{\phi})$ is dense in $X,$ and we cannot clarify whether the operator $C_{\phi}$ is injective or not) and $X_{\alpha,m}':=(\bigcup_{\phi \in {\mathcal A}}R(C_{\phi}))^{m},$ and assuming additionally
the injectivity of matricial operator $\overline{P_{2}(A)}$ on $X^{m},$ then for each $\vec{x}\in X_{\alpha,m}'$ there exists a unique solution $t\mapsto \vec{u}(t),$ $t\geq 0$ of the abstract Cauchy problem (DFP), which can be extended to the whole complex plane, and (i)-(ii) from the formulation of Theorem \ref{kragujevac} continues to hold. Rewriting any of the equations (a)-(e) in the matricial form, and using Lemma \ref{lap-inj}, we obtain that there exists a non-trivial subspace $X_{1,1}'$ of $L^{p}({\mathbb R}^{2}),$ resp. $X_{1,2}'$ of $L^{p}({\mathbb R}^{3}) \times L^{p}({\mathbb R}^{3}),$ such that the equation (a), resp. any of the equations (b)-(e), has a unique entire solution provided $u_{0}(x,y)\in X_{1,1}',$ resp. $(u_{0}(x,y,z),u_{1}(x,y,z)) \in X_{1,2}'$ (similar conclusions can be established for the wellposedness of the equations \cite[(5), (6); Section 4]{fala-prim-0} in $L^{p}(\Omega)$, with $\tau >0,$ $\lambda_{2}> 0$ and $\lambda_{3}> 0$ and $\emptyset \neq \Omega \subseteq {\mathbb R}^{n}$ being an open bounded domain with smooth boundary, which are important in the study of fluctuations of thermoelastic plates and non-stationary processes in thermal fields; the uniqueness of entire solutions of the equation \cite[(5)]{fala-prim-0}, resp. \cite[(6)]{fala-prim-0}, in the case that $\lambda_{2}<0,$ resp. $\lambda_{3}<0,$ cannot be proved here by using the injectivity of associated polynomial matrix operator $\overline{P_{2}(A)},$ and we will only refer the reader to the assertions of \cite[Theorem 4]{fala-prim-0} and \cite[Theorem 2.2]{publi} for further information in this direction). It should be finally noted that we do not know, in the present situation, whether
the subspace $X_{1,1}',$ resp. $X_{1,2}',$ of initial values $\vec{x}=u_{0},$ resp. $\vec{x}=(u_{0},u_{1}),$ for which there exists a unique entire solution $t\mapsto \vec{u}(t),$ $t\geq 0$ of the equation (a), resp. any of the equations (b)-(e), can be chosen to be dense in $L^{p}({\mathbb R}^{2}),$ resp. $L^{p}({\mathbb R}^{3}) \times L^{p}({\mathbb R}^{3}).$
{\begin{center}
{\sc ACKNOWLEDGEMENTS}
\end{center}
\ \ \ This research was supported in part by grant 174024 of Ministry
of Science and Technological Development, Republic of Serbia.
\end{document}
|
\begin{document}
\begin{center}{\large \bfseries Incompressible flows with piecewise constant density}\end{center}
\begin{center}
{Rapha\"el Danchin$^1$ and Piotr Bogus\l aw Mucha$^2$}
{1. Universit\'e Paris-Est, LAMA, UMR 8050 and Institut Universitaire de France,}
{ 61 avenue du G\'en\'eral de Gaulle,
94010 Cr\'eteil Cedex, France.}
{E-mail: [email protected]}
{2. Instytut Matematyki Stosowanej i Mechaniki,
Uniwersytet Warszawski, }
{ul. Banacha 2, 02-097 Warszawa, Poland.}
{E-mail: [email protected]}
\end{center}
\date{\today}
\begin{abstract} We investigate the incompressible Navier-Stokes equations with variable density.
The aim is to prove existence and uniqueness results in the case of \emph{discontinuous} initial density.
In dimension $n=2,3,$ assuming only that the initial density is bounded and bounded away from zero, and that the initial velocity is smooth enough, we get the local-in-time existence of unique solutions. Uniqueness holds in any dimension and for a wider class of
velocity fields. Let us emphasize that all those results are true for piecewise constant densities with arbitrarily large jumps.
Global results are established in dimension two if the density is close enough to a positive constant,
and in $n$-dimension if, in addition, the initial velocity is small.
The Lagrangian formulation for describing the flow plays a key role in the analysis that is proposed in the present paper.
\end{abstract}
\noindent
{\it MSC:} 35Q30, 76D05
\noindent
{\it Key words:} Inhomogeneous Navier-Stokes equations, critical regularity, piecewise constant density, large jumps, Besov spaces, Lagrangian coordinates, discontinuous data.
\section*{Introduction}
Incompressible flows are often modeled by the \emph{homogeneous} Navier-Stokes equations~:
that is the density of the fluid is assumed to be a constant. However in many applications as blood flows or models of rivers,
although the fluid is practically incompressible, the density can not be considered
as a constant quantity, as a consequence of the complex structure of the flow due to e.g. a mixture of fluids or
pollution (see e.g. \cite{AS,Ar,Ku,LZWL,San}).
This makes us look at the density as a nonnegative unknown function which has constant values along the stream lines. The simplest model which can capture such a physical property
is the so-called \emph{inhomogeneous Navier-Stokes system}:
\begin{equation}\label{NSE}
\begin{array}{lcr}
\rho_t +v \cdot \nabla \rho =0 & \mbox{in} & \Omega \times (0,T), \\
\rho v_t + \rho v \cdot \nabla v - \nu \Delta v + \nabla Q =0& \mbox{in} & \Omega \times (0,T), \\
\hbox{\rm div}\, v =0& \mbox{in} & \Omega \times (0,T), \\
v=0 & \mbox{on} & \partial\Omega \times (0,T), \\
v|_{t=0}=v_0, \qquad \rho|_{t=0}=\rho_0 & \mbox{in} & \Omega.
\end{array}
\end{equation}
The unknown functions are: $\rho$ -- the density of the fluid, $v$ -- its velocity field and $Q$ -- its pressure. The constant positive viscosity coefficient is denoted by $\nu$.
We consider the cases where $\Omega$ is a bounded domain of ${\mathbb R}^n,$ or the whole space ${\mathbb R}^n,$
and we focus mainly on the physically relevant space dimensions $n=2,3.$
The goal of the present paper is to revisit results concerning the well-posedness issue of \eqref{NSE}. We
concentrate our analysis on the regularity of density. In our recent work \cite{DM}, we established
the existence and uniqueness of solutions to \eqref{NSE} in a critical regularity framework
which allowed the initial density to be discontinuous. However, a smallness condition over
the jumps was required there.
In the present work, we want to discard this smallness condition.
At the same time, to simplify the presentation, we do not
strive for optimal assumptions as concerns the velocity and assume
the viscosity coefficient $\nu$ to be constant.
Let us recall (see in particular \cite{Desjardins} and the textbooks \cite{AKM,Lions}) that, roughly, from the qualitative viewpoint the classical
results for the homogeneous Navier-Stokes equations carry over to \eqref{NSE}:
on the one hand global (possibly non unique) weak solutions with finite energy may be constructed
and on the other hand, if the density is smooth enough, bounded and bounded away from zero,
then global-in-time existence and uniqueness results are available in dimension two
for arbitrarily large data, and if the velocity is small in dimension three.
These latter results require relatively high regularity of the density, though. In particular it has
to be at least continuous, and to have some fractional derivatives in suitable Lebesgue spaces
(see e.g. \cite{LS} or \cite{D1}). It is worthwhile to emphasize that for smooth densities one may show the existence of unique solutions
even for vacuum states \cite{CK} provided the initial data satisfy some compatibility condition.
{}From the viewpoint of applications such results are not so satisfactory: we wish to consider fluids with
e.g. piecewise constant densities, a pattern which is of interest to model a mixture of two fluids.
\medbreak
The results of the paper are split into two groups:
\begin{itemize}
\item
The first group concerns uniqueness and local-in-time existence results in the case
where the initial density is just an $L_\infty$ positive function bounded away from zero. In particular, one may consider
piecewise constant densities \emph{with arbitrarily large jumps.} As regards the existence issue, we have to restrict ourselves
to the (physically relevant) dimensions $n=2,3$.
\item The second group concerns the global-in-time existence issue.
Here we have to make a smallness assumption over
the density which, in the case of piecewise constant initial density, implies that the jumps have to be small.
Assuming enough smoothness over the velocity, this enables us to prove global existence for (possibly) large
velocity if $n=2,$ and for small velocity if $n \geq 3$ (an assumption which is also required for the homogeneous Navier-Stokes equations, anyway).
\end{itemize}
As explained above, in the present paper, we aim at doing minimal assumptions over the density but
we do not strive for optimal regularity of the velocity function. As our method relies on estimates for
the Stokes system with merely bounded coefficients, the (rather high) regularity of the velocity is somehow prescribed by the technique.
An approach to the issue
of sharp regularity has been done in \cite{DM} in the critical Besov spaces
setting. However in \cite{DM} we were able to capture discontinuous density with small jumps only.
The rest of the paper unfolds as follows. The main results are presented in the first section.
Then, some preliminary estimates involving the evolutionary Stokes system are proved.
Section \ref{s:lagrangian} is devoted to the derivation of System \eqref{NSE} in Lagrangian coordinates.
In Section \ref{s:uniqueness}, we concentrate on the proof of uniqueness results
whereas existence results are proved in the last two sections. Technical estimates involving the divergence equation are presented in the Appendix.
\section{Main results}\label{s:main}
Let us first recall the basic energy equality for System \eqref{NSE}
which may be (formally) derived by testing $(\ref{NSE})_2$ by $v$:
\begin{lem}\label{l:ene}
Let $(\rho,v)$ be a sufficiently smooth solution to \eqref{NSE} over $\Omega\times[0,T].$ Then there holds
\begin{equation}\label{energy}
\int_\Omega (\rho|v|^2)(t,x)\,dx+2\nu\! \int_0^t\!\!\int_\Omega|\nabla v(\tau,x)|^2\,dx\,d\tau=\int_\Omega (\rho|v|^2)(0,x)\,dx\!\!\quad\hbox{for all }
\ t\in[0,T].
\end{equation}
\end{lem}
Subsequently if $\rho_0$ is positive and bounded away from zero
and $v_0$ is in $L_2(\Omega)$ then we get a control over
$v$ in $L_\infty(0,T;L_2(\Omega))$ and $\nabla v$ in $L_2(\Omega \times (0,T)).$
Under very rough regularity assumptions (much less than assumed here),
the (formal) energy equality \eqref{energy} provides us with an information about low norms of the velocity, which turns out to be
crucial for the proof of global results (see in particular the monograph by \cite{Lions} and the references therein,
as regards the proof of global weak solutions with finite energy).
Note that \eqref{energy} gives some regularity information
over the velocity even for very rough density. We shall see further in the paper
a way to get even more regularity information over the velocity without assuming more on the density.
Before listing the main results of the paper, let us introduce some notation.
Concerning the derivatives of functions $f$ depending on both
the time variable $t$ and the space variable $x,$ we denote by $f_t$ the time derivative and
by $Df$ the Jacobian matrix of $f$ with respect to the space variable, namely
$(Df)_{i,j}=\partial_j f^i.$
The notation $\nabla f$ is reserved for ${}^T\!(Df).$
The Lebesgue spaces
of measurable functions with integrable $p$-th power is denoted by $L_p(\Omega)$.
More generally, if $m\in{\mathbb N}$ then $W_p^m(\Omega)$ denotes the
set of $L_p(\Omega)$ functions with derivatives of order less than or equal to $m$ in $L_p(\Omega).$
Since the Navier-Stokes equations are of parabolic type, it is also natural
to introduce \emph{parabolic} Sobolev spaces $W^{2,1}_{q,p}(\Omega \times (0,T))$ that is the closure
of smooth functions for the norm
\begin{equation}
\|u\|_{W^{2,1}_{q,p}(\Omega \times (0,T))} = \|u,\partial_t u\|_{L_p(0,T;L_q(\Omega))}
+\|u\|_{L_p(0,T;W^2_q(\Omega))}.
\end{equation}
Granted with parabolic spaces, one may now define Besov spaces over $\Omega$ as
the following trace space:
\begin{equation}\label{xx1}
B^{2-2/p}_{q,p}(\Omega)= \Bigl\{ f:\Omega\rightarrow{\mathbb R}\ \hbox{measurable s.t.} \ f=\bar f|_{t=0}\!\ \hbox{ for some }\ \!\bar f \!\in\! W^{2,1}_{q,p}(\Omega \times (0,1))\Bigr\}\cdotp
\end{equation}
The norm can be defined from the above definition as a suitable infimum (for more details concerning the Besov spaces we refer to \cite{BIN,Triebel}).
\smallbreak
Our first result states the uniqueness of solutions with merely bounded density, provided the initial velocity
is smooth enough.
\begin{theo}\label{th:uniq} Let $n \geq 2$.
Assume that $\Omega$ is ${\mathbb R}^n$ or a $C^2$ bounded domain of ${\mathbb R}^n.$
Let $(\rho^1,v^1,Q^1)$ and $(\rho^2,v^2,Q^2)$ be two solutions to \eqref{NSE}
with the same initial data, and density bounded and bounded away from $0.$
Suppose moreover that for $k=1,2,$
\begin{itemize}
\item Case $n=2$: there exists $q>2$ such that $v^k \in W^{2,1}_{q,2}(\Omega\! \times\! (0,T))$ and $\nabla Q^k \in L_2(0,T;L_q(\Omega))$,
\item Case $n \geq 3$: $v^k \in W^{2,1}_{n,2}(\Omega \times (0,T))$, $\nabla Q^k \in L_2(0,T;L_n(\Omega))$
and, in addition, $\nabla v^k\in L_2(0,T;L_\infty(\Omega)).$
\end{itemize}
Then $v^1\equiv v^2,$ $\nabla Q^1\equiv\nabla Q^2$ and $\rho^1\equiv \rho^2$.
\end{theo}
\begin{rem}
As regards the inhomogeneous incompressible Navier-Stokes equations, to our knowledge, the ``best'' uniqueness
result with no smallness condition over the density is due to P. Germain in \cite{Germain}.
It does not apply to solutions with piecewise constant densities, though.
\end{rem}
The second result complements Theorem \ref{th:uniq}. It delivers existence of local-in-time regular and unique solutions
in dimensions $2$ and $3.$ Again, the initial density just has to be bounded and bounded away from vacuum.
\begin{theo}\label{th:exist}
Let $n=2,3$ and $\Omega$ be a $C^2$ bounded domain or be ${\mathbb R}^n$.
Let $\rho_0$ satisfy
\begin{equation}\label{eq:rho}
m<\rho_0<M
\end{equation}
for some positive constants, and $v_0\in W^2_2(\Omega)$ be such that $\hbox{\rm div}\, v_0=0$ and
$v_0|_{\partial\Omega}=0$. Let $n^*=2\bigl(\frac{n+2}n\bigr).$
There exists a unique solution $(\rho,v)$ to System \eqref{NSE} on a time interval $[0,T]$ for some $T>0$ such that
$\rho(t,\cdot)$ satisfies \eqref{eq:rho} for all $t\in[0,T]$ and
$$
v \in W^{2,1}_{n^*,n^*}(\Omega \times (0,T)), \quad v_t\in L_\infty(0,T;L_2(\Omega))\mbox{ \ \ and \ \ } \nabla v_t \in L_2(\Omega \times (0,T)).
$$
\end{theo}
\begin{rem} The critical Sobolev embedding ensures that $W^2_2(\Omega)$ is continuously embedded
in the Besov space $B^{2-2/n^*}_{n^*,n^*}(\Omega).$ Keeping in mind the definition of this space given in \eqref{xx1},
the appearance of the parabolic Sobolev space $W^{2,1}_{n^*,n^*}(\Omega \times (0,T))$ in the above statement does not
come up as a surprise. The $W^2_2(\Omega)$ assumption for $v_0$ is needed to ensure that
$(\partial_tv+v\cdot\nabla v)|_{t=0}$ is in $L_2(\Omega).$
At the same time, owing to the low regularity of the density,
we do not know how to propagate the $W^2_2(\Omega)$ regularity for the velocity.
\end{rem}
Proofs of Theorems \ref{th:uniq} and \ref{th:exist} are based on the analysis of \eqref{NSE} in the Lagrangian coordinates defined by the stream lines. Since the density is merely bounded
there is an obstacle to applying any bootstrap method in order to improve the regularity of the velocity. The main difficulty is located in the term with the time derivative. To obtain a better
information about $v_t,$ we adopt techniques from the compressible Navier-Stokes system \cite{Mu-CM,MZ2} (concerning uniqueness criteria for the compressible Navier-Stokes system in Lagrangian formulation, see also the recent work by D. Hoff).
Roughly speaking, we differentiate the (Lagrangian) velocity equation once with respect to time, then
apply an energy method.
This approach via the Lagrangian coordinates requires only $L_\infty$ bounds (by above and by below) for the density, provided the velocity has high regularity. That the density is time-independent in the Lagrangian setting, hence is just a given function, is of course fundamental.
In comparison, in \cite{D1} where the Eulerian framework is used, the initial density has to be in the Besov space $B^{n/2}_{2,1}({\mathbb R}^n)$ (which, roughly, means that it has $n/2$ derivatives in $L_2({\mathbb R}^n)$)
but the initial velocity therein has only \emph{critical regularity}, namely it is in $B^{n/2-1}_{2,1}({\mathbb R}^n)$ (to be compared with
$B^2_{2,1}({\mathbb R}^n)$ and $n=2,3$ here).
To highlight consequences of Theorems \ref{th:uniq} and \ref{th:exist}, let us consider the case where the initial
divergence-free velocity field is in $W^2_2(\Omega)$ (and vanishes at the boundary), and the initial
density $\rho_0$ is\footnote{Here
$\chi_{A_0}$ stands for the characteristic function of the set $A_0$.}
\begin{equation}\label{rem1}
\rho_0=m+\sigma \chi_{A_0},
\end{equation}
where $m,\sigma$ are positive constants and $A_0$ is a set with a $C^1$ boundary.
The velocity field $v$ given by Theorem \ref{th:exist} is Lipschitz with respect to the space
variable hence generates a unique $C^1$ flow $X$ defined by
$$
X(t,y)=y+\int_0^tv(\tau,X(\tau,y))\,d\tau.
$$
Therefore, the density at time $t$ is given by
\begin{equation}\label{rem2}
\rho(t,\cdot)=m+\sigma \chi_{A(t)},
\quad\hbox{with }\ A(t):=X(t,A_0).
\end{equation}
As the flow $X$ is at least $C^1$, the initial regularity of the boundary
of $A(t)$ is preserved and any geometrical catastrophe (e.g. breaking down or self-intersections
of the boundary) will not appear: if $A_0$ is diffeomorphic to a ball, then $A(t)$ is diffeomorphic to a ball, too.
The above case shows that the system \eqref{NSE} can model an interaction of two fluids separated by a free interface. Although tracking the regularity of the boundary $\partial A(t)$ is not the main topic of this paper, we see that Theorem \ref{th:exist} ensures
that the $C^1$ or $C^{1,\alpha}$ regularity (with $\alpha$ small enough) of $\partial A(t)$ is preserved during the evolution.
In other words, we have partially solved in an indirect way a complex free boundary
problem which has been left as an open question by P.-L. Lions in his book \cite{Lions}.
Let us emphasize that the standard approach for solving problems of such type requires very technical considerations
(see e.g. \cite{Ables,Shimizu,TW}).
Furthermore, with our approach, there are no requirements concerning the regularity of the boundary of the set $A_0$:
our results hold for any measurable set $A_0$.
\medbreak
The above results concern local-in-time analysis. In order to obtain global-in-time solutions, we have to
assume that the jumps of the initial density are small enough.
The following theorem states that under this sole assumption over the density, and for sufficiently smooth (possibly large) initial velocity fields,
global existence holds true.
\begin{theo}\label{th:lar}
Let $\Omega$ be a $C^2$ bounded two-dimensional set, or be ${\mathbb R}^2$.
There exists a constant $c$ depending only on $\Omega$ and such that if
$\rho_0 \in L_\infty(\Omega)$ satisfies
\begin{equation}\label{den-str}
\frac{\displaystyle \sup_{x \in \Omega} \rho_0(x) - \inf_{x\in \Omega} \rho_0(x)}{\displaystyle \inf_{x\in \Omega} \rho_0(x)} \leq c
\end{equation}
then for all $v_0 \in B^{1}_{4,2}(\Omega)\cap L_2(\Omega)$ with $\hbox{\rm div}\, v_0=0$ and $v_0|_{\partial\Omega}=0,$
there exists a unique global-in-time solution to System \eqref{NSE} such that \eqref{energy} is satisfied and that, for all $T>0,$
\begin{equation}\label{l2}
v \in W^{2,1}_{4,2}(\Omega \times (0,T)),\ \ \nabla Q\in L_2(0,T;L_4(\Omega)) \mbox{ \ and \ } \rho \in L_\infty(\Omega \times (0,T)).
\end{equation}
\end{theo}
In dimension $n\geq3,$ getting global-in-time strong solutions requires also the initial velocity to be small,
(an assumption which is needed in the homogeneous case, anyway). Here is our statement:
\begin{theo}\label{th:bdd}
Let $\Omega$ be a bounded $n$-dimensional $C^2$ domain. Let $\rho_0 \in L_\infty(\Omega)$
be positive and bounded away from $0,$ and
$v_0 \in B^{2-\frac 2p}_{q,p}(\Omega)$ with $1<p<\infty,$ $n<q<\infty$ and $2-2/p\not=1/q.$
There exist two constants $c$ and $c'$ depending only on $\Omega,$ $p$ and $q$ and such that if
\begin{equation}\label{den-str-1}
\frac{\displaystyle \sup_{x \in \Omega} \rho_0(x) - \inf_{x\in \Omega} \rho_0(x)}{\displaystyle \inf_{x\in \Omega} \rho_0(x)} < c
\quad\hbox{and}\quad
\|v_0\|_{B^{2-\frac 2p}_{q,p}(\Omega)} \leq c'\nu,
\end{equation}
then there exists a unique global-in-time solution to the inhomogeneous Navier-Stokes system \eqref{NSE} such that
$$
v \in W^{2,1}_{q,p}(\Omega \times{\mathbb R}_+),\ \ \nabla Q\in L_p({\mathbb R}_+;L_q(\Omega)) \mbox{ \ and \ } \rho \in L_\infty(\Omega \times {\mathbb R}_+).
$$
Furthermore, there exist two positive constants $\alpha$ and $C$ depending only on $\Omega,$ $p,$ $q$ and of
the lower and upper bounds for $\rho_0$ so that for all $t>0,$
$$
\|v\|_{W^{2,1}_{q,p}(\Omega \times(t,t+1))}+\|\nabla Q\|_{L_p(t,t+1;L_q(\Omega))}\leq Ce^{-\alpha t} \|v_0\|_{B^{2-\frac 2p}_{q,p}(\Omega)}.
$$
\end{theo}
Theorems \ref{th:lar} and \ref{th:bdd} follow from classical maximal regularity techniques.
The smallness conditions \eqref{den-str} and \eqref{den-str-1} allow us
to treat the oscillations of the density as a perturbation that may be put in the right-hand side of the estimates.
At the end we would like to underline that most of our results hold for bounded domains and ${\mathbb R}^n$.
The case of the whole space is easier: there is no boundary condition and solving the divergence equation is simpler, too.
One exception is Theorem \ref{th:bdd} where the
boundedness of the domain is essential here as it provides exponential decay of the energy norm
(the whole space case is tractable under stronger conditions over the density,
see our recent work in \cite{DM}).
\section{Some linear estimates}\label{s:linear}
A fundamental role will be played by the Stokes system, that is the following
linearization of the velocity equation in \eqref{NSE}:
\begin{equation}\label{stokes}
\begin{array}{lcr}
mu_t - \nu \Delta u + \nabla P= f & \mbox{in} & \Omega \times (0,T), \\[1ex]
\hbox{\rm div}\, u= \hbox{\rm div}\, R & \mbox{in} & \Omega \times (0,T), \\[1ex]
u=0 & \mbox{on} &\partial \Omega \times (0,T), \\[1ex]
u|_{t=0}=u_0 & \mbox{in} & \Omega,
\end{array}
\end{equation}
where $m$ and $\nu$ are positive constants.
\smallbreak
We shall make an extensive use of
the following solvability result for the Stokes system
in the $L_p(0,T; L_q(\Omega))$ framework:
\begin{theo}\label{th:stokes} Let $\Omega$ be a $C^2$ bounded subset of ${\mathbb R}^n.$ Let
$1<p,q<\infty,$ $u_0\in B^{2-\frac 2p}_{q,p}(\Omega),$ $f\in L_p(0,T;L_q(\Omega))$, $R\in W^1_p(0,T;L_q(\Omega))$
so that
$\hbox{\rm div}\, R \in L_p(0,T;W^1_q(\Omega)).$
Suppose that
$$\hbox{\rm div}\, u_0=\hbox{\rm div}\, R|_{t=0}\quad\hbox{and}\quad\vec n \cdot R|_{\partial \Omega \times (0,T)} =0.$$
If $2-2/p> 1/q,$ assume in addition that $u_0=0$ at the boundary, otherwise
we assume only $u_0\cdot \vec n =0$ at $\partial\Omega$\footnote{For simplicity we exclude the case $2-2/p=1/q$.}.
Then there exists a unique solution to \eqref{stokes} such that $u\in W^{2,1}_{q,p}(\Omega \times (0,T))$, $\nabla P \in L_p(0,T;L_q(\Omega)),$ and the following estimate is valid:
\begin{multline}\label{est-sto}
\|mu_t, \nu \nabla^2 u, \nabla P\|_{L_p(0,T;L_q(\Omega))}+ m^{\frac1p}\nu^{\frac1{p'}}\sup_{0\leq t \leq T}\|u(t)\|_{B^{2-\frac 2p}_{q,p}(\Omega)} \\
\leq C
(\|f,m R_t\|_{L_p(0,T;L_q(\Omega))} + \|\nu \hbox{\rm div}\, R\|_{L_p(0,T;W^1_q(\Omega))} + m^{\frac1p}\nu^{\frac1{p'}}\|u_0\|_{B^{2-\frac 2p}_{q,p}(\Omega)}),
\end{multline}
where $C$ is independent of $m$, $\nu$ and $T$.
\end{theo}
\begin{p}
In the case $R\equiv0,$ this result is classical (see e.g.
\cite{GS,MS} and the appendix of \cite{D}).
The general case follows from this particular case once a suitable vector-field $w:\Omega\times (0,T) \to
{\mathbb R}^n$ has been constructed fulfilling
\begin{equation}\label{eqdiv}
\hbox{\rm div}\, w=\hbox{\rm div}\, R \mbox{~~in }\Omega, \qquad w=0 \mbox{~~ at } \partial \Omega.
\end{equation}
Taking for granted the existence of such a vector-field, the strategy is simple:
we set $v=u-w$ and we gather that $v$ has to satisfy
$$\begin{array}{lcr}
mv_t - \nu \Delta v + \nabla Q= f-mw_t+\nu\Delta w & \mbox{in} & \Omega \times (0,T), \\[1ex]
\hbox{\rm div}\, v= 0 & \mbox{in} & \Omega \times (0,T), \\[1ex]
v=0 & \mbox{on} &\partial \Omega \times (0,T), \\[1ex]
v|_{t=0}=u_0-w_0 & \mbox{in} & \Omega.
\end{array}
$$
Therefore, in order to reduce our study to the case $R\equiv0,$ the vector-field $w$ is required to
satisfy $w_t,D^2w\in L_p(0,T;L_q(\Omega))$ (note that this will imply that
$w_0\in B^{2-2/p}_{q,p}(\Omega),$ see \eqref{xx1}).
The fact that such a solution to \eqref{eqdiv} does exist
is granted by the following lemma, the proof of which is postponed in Appendix (see Proposition \ref{p:bog}):
\end{p}
\begin{lem}\label{l:bog}
Let $R(t,\cdot)$ be a family of vector-fields defined over the $C^2$ bounded domain $\Omega,$ parameterized by $t\in (0,T).$
Assume that, for some $1<q<\infty$ and $1\leq p\leq\infty$ we have
$\hbox{\rm div}\, R \in L_p(0,T;W^1_q(\Omega))$, $R,R_t \in L_p(0,T;L_q(\Omega))$ and $R\cdot \vec n =0$ at the boundary.
Then there exists a vector-field $w$ in $L_p(0,T;W^2_q(\Omega))$ vanishing on $\partial\Omega,$ fulfilling
$$
\hbox{\rm div}\, w=\hbox{\rm div}\, R\quad\hbox{and}\quad \hbox{\rm div}\, w_t=\hbox{\rm div}\, R_t\quad\hbox{in }\ \Omega
$$
and the following estimates:
\begin{eqnarray}\label{eq:bog1}
&\|w\|_{L_p(0,T;W^2_q(\Omega))} \leq C\|\hbox{\rm div}\, R\|_{L_p(0,T;W^1_q(\Omega))},\\ \label{eq:bog2}
&\|w_t\|_{L_p(0,T;L_q(\Omega))} \leq C\|R_t\|_{L_p(0,T;L_q(\Omega))}
\end{eqnarray}
for some constant $C$ depending only on $q$ and $\Omega.$
\end{lem}
\begin{rem}\label{r:stokes} The whole space case is easier to deal with for we do not have to take care of boundary conditions (apart
from suitable decay at infinity given by the functional setting). Indeed, in order to solve \eqref{eqdiv}, one may
set
$$
w=-\nabla(-\Delta)^{-1}\hbox{\rm div}\, R.
$$
As the corresponding Fourier multiplier is homogenous of degree $0,$ we readily get \eqref{eq:bog1} and \eqref{eq:bog2}.
Therefore, arguing as above and using the standard maximal regularity result for the Stokes system in ${\mathbb R}^n,$
we deduce
Theorem \ref{th:stokes} in the case $\Omega={\mathbb R}^n$ if the Besov space $B_{q,p}^{2-\frac2p}(\Omega)$
is replaced with the \emph{homogeneous Besov space} $\dot B^{2-\frac2p}_{q,p}({\mathbb R}^n)$ and
$W^k_q(\Omega),$ by its homogeneous version $\dot W^k_q({\mathbb R}^n).$
\end{rem}
Theorem \ref{th:stokes} can be viewed as a classical result. In order to prove Theorem \ref{th:exist} we need
to adapt it to the case of \emph{variable} coefficients. Below, we focus
on the $L_2$ case where only the boundedness of coefficients is needed.
\begin{lem}\label{l:l2stokes} Let $\Omega$ be a bounded domain of ${\mathbb R}^n,$ or ${\mathbb R}^n.$
Let $\eta \in L_\infty(\Omega)$ be a time independent positive function, bounded away from zero, and
$R$ satisfy the above boundary conditions. Then the solution $(u,\nabla P)$ with $u\in W_{2,2}^{2,1}(\Omega\times(0,T))$
and $\nabla P\in L_2(\Omega\times(0,T))$ to
the system
\begin{equation}\label{u2}
\begin{array}{lcr}
\eta u_t -\nu \Delta u + \nabla P =f & \hbox{ in } & \Omega\times (0,T),\\
\hbox{\rm div}\, u = \hbox{\rm div}\, R & \mbox{ in } & \Omega\times (0,T),\\
u=0 & \mbox{ on } & \partial \Omega\times (0,T),\\
u|_{t=0}=u_0 & \mbox{ on } & \Omega,
\end{array}
\end{equation}
fulfills
\begin{multline}\label{u3}
\sqrt\nu\sup_{0\leq t \leq T} \|\nabla u(t)\|_{L_2(\Omega)}+
\|u_t,\nu\nabla^2u,\nabla P\|_{L_2(\Omega \times(0,T))}
\leq C\big( \|f,R_t\|_{L_2(\Omega \times (0,T))} \\
+\nu\|\hbox{\rm div}\, R\|_{L_2(0,T;W^1_2(\Omega))} +\sqrt\nu\|\nabla u_0\|_{L_2(\Omega)}\big),
\end{multline}
where $C$ depends on $\inf\eta$ and $\sup\eta,$ but is independent of $T$ and $\nu.$
\end{lem}
\begin{p}
First we remove the right-hand side of $\eqref{u2}_2$ by means of Lemma \ref{l:bog} (or the remark that follows if $\Omega={\mathbb R}^n$):
we introduce a vector-field $w$ fulfilling \eqref{eqdiv} such that $w\in W^{2,1}_{2,2}(\Omega \times(0,T))$
with the following bound
\begin{equation}\label{u5}
\|\partial_tw,\nu D^2w\|_{L_2(0,T;L_2(\Omega))} \leq C(\|\hbox{\rm div}\, R\|_{L_2(0,T;W^1_2(\Omega))} + \|R_t\|_{L_2(\Omega \times (0,T))}).
\end{equation}
Hence we may reduce the proof to the case $R \equiv 0$.
Now, we observe that testing by $u_t$ gives:
$$
\int_\Omega \eta |u_t|^2\, dx + \frac\nu2\frac{d}{dt} \int_\Omega |\nabla u|^2\, dx
=\int_\Omega f\cdot u_t\,dx.
$$
Therefore, integrating in time yields
\begin{equation}\label{u6}
\nu\|\nabla u(t)\|_{L_2(\Omega)}^2+\int_0^t\|\sqrt\eta\,u_t\|_{L_2(\Omega)}^2\,d\tau\leq
\nu\|\nabla u_0\|_{L_2(\Omega)}^2+\int_0^t\|f/\sqrt\eta\|_{L_2(\Omega)}^2\,d\tau.
\end{equation}
Since $\eta$ is a positive time independent function which is pointwise bounded from below and above, we obtain
\begin{equation}\label{u7}
\|u_t\|_{L_2(\Omega \times (0,T))}+\sup_{0\leq t \leq T} \sqrt\nu\|Du(t)\|_{L_2(\Omega)} \leq C(\|f\|_{L_2(\Omega \times (0,T))}
+\sqrt\nu\|Du_0\|_{L_2(\Omega)}).
\end{equation}
In order to estimate $D^2u$ and $DP,$ we rewrite \eqref{u2} as
\begin{equation}\label{u8}
\begin{array}{lcr}
-\nu \Delta u + \nabla P =f - \eta u_t \qquad \qquad & \mbox{ in } & \Omega\times (0,T),\\
\hbox{\rm div}\, u = 0 & \mbox{ in } & \Omega\times (0,T),\\
u=0 & \mbox{ at } & \partial \Omega\times (0,T).
\end{array}
\end{equation}
If $\Omega$ is a $C^2$ bounded domain then the solvability of \eqref{u8} in the $L_2$ framework is clear (see e.g. \cite{Galdi}, Th. 6.1, page 231), thus taking into account bounds \eqref{u5} and \eqref{u7} we get \eqref{u3}. Lemma \ref{l:l2stokes} is proved.
In the ${\mathbb R}^n$ case, one may just notice that
$\nabla P=-\nabla(-\Delta)^{-1}\hbox{\rm div}\, (f-\eta u_t).$ As $f-\eta u_t$ is in $L_2(\Omega\times(0,T)),$ we still
get the result, first for $\nabla P,$ and next for $\nabla^2u.$
\end{p}
\section{The Lagrangian coordinates}\label{s:lagrangian}
A fundamental point of our analysis is the use of Lagrangian coordinates. In order to define them we solve the following
ordinary differential equation (treating $y$ as a parameter):
\begin{equation}\label{i2}
\frac{d X(t,y)}{dt} = v( t,X(t,y)), \qquad X(t,y)|_{t=0}=y.
\end{equation}
This leads to the following relation between the Eulerian coordinates $x$ and the Lagrangian coordinates $y$:
\begin{equation}\label{i3}
X(t,y)=y+\int_{0}^t v(\tau,X(\tau,y))\, d\tau.
\end{equation}
Let us list a few basic properties for the Lagrangian change of variables:
\begin{prop}\label{p:lag}
Suppose that $v\in L_1(0,T;W^1_\infty(\Omega))$ with $v \cdot \vec n|_{\partial \Omega}=0.$ Then the solution to System \eqref{i2} exists on the time interval $[0,T]$, $X(t,\Omega)=\Omega$ for all $t\in[0,T),$ and $D_yX\in L_\infty(0,T;L_\infty(\Omega))$ with in addition
\begin{equation}\label{i5}
\|D_yX(t)\|_{L_\infty(\Omega)}\leq \exp\biggl(\int_0^t\|D_xv\|_{L_\infty(\Omega)}\,d\tau\biggr).
\end{equation}
Furthermore
\begin{equation}\label{i5a}
X(t,y)=y +\int^t_0 u(t',y)\,dt' \mbox{ \ \ with \ } u(t,y):=v(t,X(t,y))
\end{equation}
so that $DX$ satisfies
\begin{equation}\label{i6}
D_yX(t,y)=\hbox{\rm Id} +\int^t_0 D_yu(t',y)\,dt'.
\end{equation}
Let $Y(t,\cdot)$ be the inverse diffeomorphism of $X(t,\cdot).$
Then
\begin{equation}\label{i6b}
D_xY(t,x)=(D_yX(t,y))^{-1}\quad\hbox{with }\ x=X(t,y)
\end{equation}
and, if $\displaystyle \int_0^t |D_yu(t',y)|\,dt'\leq 1/2$ then
\begin{equation}\label{i7}
\left| D_xY(t,x) - \hbox{\rm Id}\right| \leq 2\int_0^t |D_yu(t',y)|\,dt'.
\end{equation}
Finally, if $v\in L_1(0,T;W^2_\infty(\Omega))$ then $D_yX\in L_\infty(0,T;W^1_\infty(\Omega))$ and
\begin{equation}\label{i6a}
\left|D^2_yX(t,y)\right|\leq e^{\int_0^t|D_xv(t',X(t',y))|\,dt'}
\int_0^t|D^2_xv(t',X(t',y))|e^{\int_0^{t'}|D_xv(t'',X(t'',y))|\,dt''}dt'.
\end{equation}
and if $v \in L_1(0,T;W^s_p(\Omega))$ with $s>\frac np +1$, then $D_yX-\hbox{\rm Id}\in L_\infty(0,T;W^{s-1}_p(\Omega))$.
\end{prop}
\begin{p}
The existence of $X$ for $(t,y) \in (0,T)\times\Omega$ follows from
the standard ODE theory, a consequence of Picard's theorem.
Inequalities \eqref{i5} and \eqref{i6a} follow from \eqref{i3} by differentiation and Gronwall lemma.
The higher regularity stems from the fact that, under our
assumptions, $W^{s-1}_p$ is an algebra (the reader may refer to the appendix of \cite{DM} for the proof
of similar results in a slightly different context).
Equation \eqref{i6} follows from \eqref{i5a}, by differentiation. Then \eqref{i7}
comes from \eqref{i6b} provided $D_yX - \hbox{\rm Id}$ is small enough: indeed, we have
$$
D_xY=(\hbox{\rm Id}+(D_yX-\hbox{\rm Id}))^{-1}=\sum_{k=0}^{+\infty}(-1)^k\biggl(\int_0^tD_yu(t',y)\,dt'\biggr)^k.
$$
This yields \eqref{i7}.
\end{p}
\medbreak
Let us now derive the Navier-Stokes equations \eqref{NSE} in \emph{ the Lagrangian coordinates}: we set
\begin{equation}\label{i4}
\eta(t,y):=\rho(t,X(t,y)), \quad u(t,y):=v(t,X(t,y))\ \hbox{ and }\ P(t,y):=Q(t,X(t,y)).
\end{equation}
We claim that System \eqref{NSE} recasts in
\begin{equation}\label{NSL}
\begin{array}{lcr}
\eta_t=0 & \mbox{in} & \Omega \times (0,T), \\
\eta u_t - \nu \Delta_u u +\nabla_u P =0 & \mbox{in} & \Omega \times (0,T), \\
\hbox{\rm div}_u u=0 & \mbox{in} & \Omega \times (0,T), \\
u=0 & \mbox{on} & \partial \Omega \times (0,T), \\
u|_{t=0}=v|_{t=0}, \qquad \eta|_{t=0}=\rho|_{t=0}\ & \mbox{in} & \Omega,
\end{array}
\end{equation}
where operators $\Delta_u,\nabla_u, \hbox{\rm div}_u$ correspond to the original operators $\Delta,\nabla,\hbox{\rm div}\, ,$ respectively,
after performing the change to the Lagrangian coordinates. Index $u$ underlines the dependence with respect to $u$.
Let us also notice that, as $v$ and $u$ vanish at the boundary, we do have $X(t,\Omega)=\Omega$ for all $t$.
So let us now give a formal derivation of \eqref{NSL}.
First, given the definition of $X,$ it is obvious from the chain rule that
$$
\partial_t\eta(t,y)=(\partial_t\rho+ v \cdot\nabla\rho)(t,x)
\quad\hbox{and}\quad
\partial_t u(t,y)=(\partial_t v+v\cdot\nabla v)(t,x)\quad\hbox{with }\ x:=X(t,y).
$$
The chain rule also yields
\begin{equation}
D_y P(t,y)=D_xQ(X(t,y))\cdot D_yX(t,y)
\quad\hbox{with }\ (D_yX)_{ij}:=\partial_{y_j}X^i.
\end{equation}
Hence we have
\begin{equation}\label{eq:A}
D_x Q(t,x)=D_y P(t,y)\cdot A(t,y)\quad\hbox{with}\quad A(t,y):=(D_yX(t,y))^{-1}=D_xY(t,x).
\end{equation}
Next, we notice that if the transform $X$ is volume preserving
then for any smooth enough vector-field $H$ we have
\begin{equation}\label{eq:div}
\hbox{\rm div}_x H(x)=\hbox{\rm div}_y(A\bar H)(y)\quad\hbox{with }\ x=X(y)\ \hbox{ and }\ \bar H(y)=H(x).
\end{equation}
This stems from the following series of computations which uses
the fact that $\det A\equiv1$ and the change of variable
$x=X(y)$: for any smooth $q$ with compact support, we have
$$
\begin{array}{lll}
\displaystyle \int q(x)\hbox{\rm div}_x H(x)\,dx&=&-\displaystyle \int D_xq(x)\cdot H(x)\,dx,\\[1ex]
&=&-\displaystyle \int D_y\bar q(y)\cdot A(y)\cdot \bar H(y)\,dy,\\[1ex]
&=&\displaystyle \int \bar q(y)\hbox{\rm div}_y(A\bar H)(y)\,dy.\end{array}
$$
Combining \eqref{eq:A} and \eqref{eq:div}, we thus deduce that, in Lagrangian coordinates
operators $\nabla,$ $\hbox{\rm div}\, $ and $\Delta$ become
\begin{equation}
\nabla_u := {}^T\!\!A \cdot \nabla_y, \quad \hbox{\rm div}_u :=\hbox{\rm div}\, (A\cdot) \mbox{ ~~ and ~~ } \Delta_u:=\hbox{\rm div}_u \nabla_u.
\end{equation}
In consequence, we have the following relations that will be of constant use:
\begin{eqnarray}\label{eq:duP}
&&(\nabla-\nabla_u)P=(\hbox{\rm Id}-{}^T\!A)\nabla P,\\\label{eq:Deltauu}
&&(\Delta-\Delta_u)u=\hbox{\rm div}\, ((\hbox{\rm Id}-A{}^T\!A)\nabla u).
\end{eqnarray}
Let us finally emphasize that, owing to the chain rule, we have
\begin{equation}\label{eq:magic}
\hbox{\rm div}_y (A\cdot)=\hbox{\rm div}_u=A:D_y.
\end{equation}
This algebraic relation will be of fundamental importance in our analysis.
\medbreak
The following statement ensures the full equivalence between \eqref{NSE} and \eqref{NSL} under
the assumptions of our results stated in Section \ref{s:main}.
\begin{prop}\label{p:change}
Let $1<p,q<\infty.$ Let $\rho_0 \in L_\infty(\Omega)$ and
$(u,P)$ be a solution to \eqref{NSL} such that $u \in W^{2,1}_{q,p}(\Omega \times (0,T)),$ $\nabla P\in L_p(0,T;L_q(\Omega))$
and
\begin{equation}
\int_0^T\|\nabla u\|_{L_\infty(\Omega)}\,dt\leq1/2.
\end{equation}
Then
$$
v(t,x)=u(t,y),\quad Q(t,x)=P(t,y) \mbox{ \ \ and \ \ } \rho(t,x)=\rho_0(y)
$$
with $x=X(t,y)$ given by \eqref{i3}
defines a $W^{2,1}_{q,p}$-solution to \eqref{NSE}.
\medbreak
Conversely, if $\rho \in L_\infty(\Omega \times (0,T))$ and
$(v,Q)$ with $v\in W^{2,1}_{q,p}(\Omega \times (0,T)),$ $\nabla v\in L_1(0,T;L_\infty(\Omega)),$ and
$\nabla Q\in L_p(0,T;L_q(\Omega))$
is a solution to \eqref{NSE} then
$$
u(t,y)=v(t,X(t,y)), \quad P(t,y)=Q(t,X(t,y)) \mbox{ \ \ and \ \ } \eta=\rho|_{t=0}
$$
defines a $W^{2,1}_{q,p}$-solution to \eqref{NSL}.
\end{prop}
\begin{p}
The proof goes along the lines of the corresponding one in the appendix of \cite{DM}.
Having $Du$ small enough in $L_1(0,T;L_\infty(\Omega))$ is of course fundamental.
\end{p}
\section{Proof of Theorem \ref{th:uniq} -- uniqueness}\label{s:uniqueness}
In this part we prove the uniqueness of solutions to System \eqref{NSL} under the assumptions of
Theorem \ref{th:uniq}.
Here $\Omega$ is a $C^2$ bounded domain, or the whole space.
The proof is a
straightforward application of Lemma \ref{l:l2stokes} to the equations in the Lagrangian form.
The important fact is that we have
$$\nabla v^i \in L_1(0,T;L_\infty(\Omega)) \qquad i=1,2.$$
Hence, taking $T$ small enough, one may assume with no loss of generality that
\begin{equation}\label{eq:smallDu}\int_0^T\|\nabla v^i\|_{L_\infty(\Omega)}\,dt < \frac 12,\end{equation} so that
Propositions \ref{p:lag} and \ref{p:change} apply.
In particular the regularity properties of
those solutions in Lagrangian coordinates are the same as those of Theorem \ref{th:uniq}.
Hence it suffices to consider two solutions $(u^1,P^1)$ and $(u^2,P^2)$ to System \eqref{NSL} with the same initial data
and satisfying the conditions of Theorem \ref{th:uniq}.
Then, denoting $A^i:=A(u^i)$ (see \eqref{eq:A}), $\delta\! u:=u^1-u^2$ and so on, we get
\begin{equation}\label{NSL-uniq}
\begin{array}{lcr}
\eta\delta\! u_t - \nu \Delta\delta\! u + \nabla\delta\! P=
-\nu[(\Delta -\Delta_{u^1}) u^1 - (\Delta -\Delta_{u^2})u^2]\qquad&& \\
\qquad\qquad\qquad\qquad\qquad+
[(\nabla-\nabla_{u^1}) P^1 - (\nabla -\nabla_{u^2})P^2] & \mbox{in} & \Omega \times (0,T),\\[4pt]
\hbox{\rm div}\, \delta\! u=\hbox{\rm div}\, [( \hbox{\rm Id} -A^1)u^1 -(\hbox{\rm Id} -A^2) u^2] & \mbox{in} & \Omega \times (0,T),\\[4pt]
\delta\! u=0 & \mbox{ at } & \partial \Omega \times (0,T),\\[4pt]
\delta\! u|_{t=0}=0 & \mbox{ in } & \Omega.
\end{array}
\end{equation}
Let us underline that the boundary condition on $R$ from Lemma \ref{l:l2stokes} is fulfilled, since by definition $u^1$ and $u^2$ are zero at the boundary.
Therefore, keeping \eqref{eq:magic} in mind, we obtain for some constant $C$ depending only on $\nu,$ $\inf \eta,$ $\sup\eta$ and $\Omega$
the inequality
\begin{equation}\label{uu8}
\|\delta\! u\|_{L_\infty(0,T;W^1_2(\Omega))}+ \|\delta\! u_t,\nabla^2\delta\! u,\nabla\delta\! P\|_{L_2(\Omega \times (0,T))} \\\leq
C(I_1+I_2+I_3+I_4)
\end{equation}
with
$$\begin{array}{lll}
I_1&:=& \| (\nabla-\nabla_{u^1})P^1 - (\nabla-\nabla_{u^2})P^2\|_{L_2(\Omega \times (0,T))}, \\[1ex]
I_2&:=&\| (\hbox{\rm Id}-A^1):Du^1 - (\hbox{\rm Id} -A^2):Du^2\|_{L_2(0,T;W^1_2(\Omega))},\\[1ex]
I_3&:=&\| (\Delta -\Delta_{u^1})u^1 - (\Delta -\Delta_{u^2})u^2\|_{L_2(\Omega \times (0,T))}, \\[1ex]
I_4&:=&\| \partial_t[(\hbox{\rm Id}-A^1)u^1 - (\hbox{\rm Id}-A^2)u^2]\|_{L_2(\Omega \times (0,T))}.
\end{array}
$$
In the following computations, we shall use repeatedly the fact that $\delta\! A:=A^2-A^1$ satisfies
\begin{equation}\label{eq:dA}
\delta\! A(t)=\biggl(\int_0^tD\delta\! u\,d\tau\biggr)\cdot
\biggl(\sum_{k\geq1}\sum_{0\leq j<k} C_1^jC_2^{k-1-j}\biggr)
\quad\hbox{with}\quad
C_i(t):=\int_0^tDu^i\,d\tau.
\end{equation}
We concentrate on the case $n\geq3.$ We shall indicate how our arguments have to be modified
if $n=2,$ at the end of the section.
In order to bound $I_1,$ we write
\begin{equation}\label{u9}
I_1(t)\leq
\|(A^1-A^2) \nabla P^1\|_{L_2(\Omega \times (0,t))} +
\|(\hbox{\rm Id} - A^2) \nabla (P^1 - P^2)\|_{L_2(\Omega \times (0,t))}.
\end{equation}
It is clear that
\begin{equation}
\|(\hbox{\rm Id} - A^2) \nabla (P^1 - P^2)\|_{L_2(\Omega \times (0,t))} \leq Ct^{1/2}
\|\nabla\partialP\|_{L_2(\Omega \times (0,t))}\|Du^2\|_{L_2(0,t;L_\infty(\Omega))}.
\end{equation}
Let us notice that, according to \eqref{i7},\eqref{eq:dA} and to the critical Sobolev embedding
of $W^1_2(\Omega)$ in $L_{2^*}(\Omega)$ (that is $1/2^*+1/n=1/2$), we have
$$\begin{array}{lll}
\|\delta\! A\|_{L_\infty(0,t;L_{2^*}(\Omega))} &\leq& C
\|\displaystyle \int_0^t |\nabla\delta\! u|dt'\|_{L_\infty(0,t;L_{2^*}(\Omega))}\\[1.5ex]
&\leq& Ct^{1/2}\|D^2 \delta\! u\|_{L_2(\Omega \times (0,t))}
\end{array}$$
with $C$ depending only on the norm of the two solutions on $[0,T].$
Therefore,
$$
\begin{array}{lll}
\|(A^1-A^2) \nabla P^1\|_{L_2(\Omega \times (0,t))} &\leq& C
\|\delta\! A\|_{L_\infty(0,t;L_{2^*}(\Omega))}
\| \nabla P^1\|_{L_2(0,t;L_n(\Omega))}\\[1.5ex]
&\leq& C t^{1/2} \|D^2 \delta\! u\|_{L_2(\Omega\times(0,t))}\| \nabla P^1\|_{L_2(0,t;L_n(\Omega))}.
\end{array}
$$
Let us now bound $I_2.$ Note that it suffices to bound the norm in
$L_2(\Omega\times(0,T))$ of the gradient of the corresponding term. If $\Omega$ is
bounded this is a consequence of the Poincar\'e-Wirtinger inequality
as $\hbox{\rm div}\, \delta\! u$ has $0$ average over $\Omega,$ and if $\Omega={\mathbb R}^n$ this stems from
the fact that only the norm in $\dot W^1_2({\mathbb R}^n)$ is involved (see Remark \ref{r:stokes}).
Now, we notice that
$$
(\hbox{\rm Id}-A^1):Du^1 - (\hbox{\rm Id} -A^2):Du^2=-\delta\! A:D u^1+(A^2-\hbox{\rm Id}):D\delta\! u.
$$
First, using the embedding of $W^1_2(\Omega)$ in $L_{2^*}(\Omega),$
and keeping in mind \eqref{eq:smallDu} and that
\begin{equation}
Du^i\in L_2(0,T;L_\infty(\Omega))\ \hbox{ and }\
D^2u^i\in L_2(0,T;L_n(\Omega))\quad\hbox{for }\ i=1,2,
\end{equation}
we get for all $t\in[0,T],$
$$
\begin{array}{lll}
\|D(\delta\! A \!:\! Du^1)\|_{L_2(\Omega\times(0,t))}
&\!\!\!\!\lesssim\!\!\!\!&\!\!\!\! \Bigl\| |Du^1|\!\displaystyle \int_0^\tau \!|D^2\delta\! u|\,d\tau' \Bigr\|_{L_2(\Omega\times (0,t))}
\!+\! \Bigl\||D^2u^1|\!\int_0^\tau \!|D\delta\! u|\,d\tau' \Bigr\|_{L_2(\Omega\times (0,t))}
\\[1.5ex]
&\!\!\!\!\lesssim\!\!\!\!& t^{1/2}\Bigl(\|D^2\delta\! u\|_{L_2(\Omega\times(0,t))} \|Du^1\|_{L_2(0,t; L_\infty(\Omega))}
\\&&\qquad\qquad+ \|D\delta\! u\|_{L_2(0,t;L_{2^*}(\Omega))} \|D^2u^1\|_{L_2(0,t;L_n(\Omega))}\Bigr)
\\[1ex] &\!\!\!\!\lesssim\!\!\!\! &t^{1/2} \|D^2\delta\! u\|_{L_2(\Omega\times(0,t))}.
\end{array}
$$
Second, we have
$$
\begin{array}{lll}
\|D((A^2-\hbox{\rm Id})\!:\!D\delta\! u)\|_{L_2(\Omega\times(0,t))}
&\!\!\!\!\lesssim\!\!\!\!&\!\!
\|D A^2\!\otimes\! D\delta\! u\|_{L_2(\Omega\times(0,T))}+ \|(A^2-\hbox{\rm Id})\!\otimes\! D^2\delta\! u\|_{L_2(\Omega\times(0,t))}
\\[1ex]
&\!\!\!\!\lesssim\!\!\!\!& t^{1/2}\Bigl(\|D^2u^2\|_{L_2(0,t;L_n(\Omega))}
\|D\delta\! u\|_{L_2(0,t; L_{2^*}(\Omega))} \\&&+\|Du^2\|_{L_1(0,t;L_\infty(\Omega))} \|D^2\delta\! u\|_{L_2(\Omega\times(0,t))}\Bigr).
\end{array}
$$
So finally for all $t\in[0,T],$
\begin{equation}\label{eq:I4}
I_2(t)\leq Ct^{1/2} \|\nabla^2\delta\! u\|_{L_2(\Omega\times(0,t))}
\end{equation}
with $C$ depending only on the norm of the solutions over $[0,T].$
The term $I_3$ may be handled
along the same lines. Indeed we have
$$
I_3(t)=\Bigl\|\hbox{\rm div}\, \Bigl(\bigl(\hbox{\rm Id}-A^1\,{}^T\!A^1\bigr)\nabla u^1-\bigl(\hbox{\rm Id}-A^2\,{}^T\!A^2\bigr)\nabla u^2\Bigr)\Bigr\|_{L_2(\Omega\times(0,t))}.
$$
Finally, we examine $I_4.$
Using again \eqref{eq:dA}, we get (with the convention that $Du^{1,2}$ denotes the components
of $Du^1$ and $Du^2$):
$$\begin{array}{lll}
\!\!\|\partial_t[\delta\! A \,u^1]\|_{L_2(\Omega\times(0,t))}
&\!\!\!\!\lesssim\!\!\!& \|D\delta\! u\, u^1\|_{L_2(\Omega\times(0,t))}
+\Bigl\|\displaystyle \int_0^\tau |D\delta\! u|\,d\tau'|Du^{1,2}|\,|u^1|\Bigr\|_{L_2(\Omega\times(0,t))}\\[1.5ex]
&&\qquad\qquad\qquad\qquad+ \Bigl\|\displaystyle \int_0^\tau |D\delta\! u|\,d\tau' \, |u^1_t| \Bigr\|_{L_2(\Omega\times(0,t))}\\[3ex]
&\!\!\!\!\lesssim\!\!\!& \|D\delta\! u\|_{L_\infty(0,t;L_2(\Omega))} \|u^1\|_{L_2(0,t;L_\infty(\Omega))}
\\&&\!\!\!\!\!\!\!\!\!\qquad\!+t^{1/2}\|D\delta\! u\|_{L_2(0,t;L_{2^*}(\Omega))}
\bigl(\|u_t^1\|_{L_2(0,t;L_n(\Omega))}\!+\!\||u^1||Du^{1,2}|\|_{L_2(0,t;L_n(\Omega))}\bigr)
\\[2ex]
&\!\!\!\!\lesssim\!\!\!& t^{1/2} \|D^2\delta\! u\|_{L_2(\Omega\times(0,t))}+\varepsilon(t) \|D\delta\! u\|_{L_\infty(0,t;L_2(\Omega))},
\end{array}
$$
with $\lim_{t\rightarrow0}\varepsilon(t)=0$ because
\begin{equation}\label{eq:u8}
\partial_tu^1,\ u^1\otimes Du^1\hbox{ and } u^2\otimes Du^1\ \hbox{ are in }\
L_2(0,T;L_n(\Omega)).
\end{equation}
At the same time, we have, for all $t\in[0,T],$
$$
\begin{array}{lll}
\|\partial_t((\hbox{\rm Id}-A^2)\delta\! u)\|_{L_2(\Omega\times(0,t))}&\!\!\!\lesssim\!\!\!&\|Du^2\delta\! u\|_{L_2(\Omega\times(0,t))}+\|(\hbox{\rm Id}-A^2)\partial_t\delta\! u\|_{L_2(\Omega\times(0,t))}\\[1ex]
&\!\!\!\lesssim\!\!\!&\|\delta\! u\|_{L_\infty(0,t;L_{n^*}(\Omega))}
\|Du^2\|_{L_2(0,t;L_n(\Omega))}\\&&\qquad\qquad
+\|Du^2\|_{L_1(0,t;L_\infty(\Omega))}\|\partial_t\delta\! u\|_{L_2(\Omega\times(0,t))}.
\end{array}
$$
So one may conclude that
$$
I_4(t)\leq t^{1/2} \|D^2\delta\! u\|_{L_2(\Omega\times(0,t))}
+\varepsilon(t)\bigl( \|D\delta\! u\|_{L_\infty(0,t;L_2(\Omega))}+\|\partial_t\delta\! u\|_{L_2(\Omega\times(0,t))}\bigr).
$$
So finally in the case $n\geq 3$, putting together all the previous
inequalities yields for all $t\in (0,T),$
$$
\displaylines{\|\delta\! u\|_{L_\infty(0,t;W^1_2(\Omega))}+\|\delta\! u_t,\nabla^2\delta\! u,\nabla\delta\! P\|_{L_2(\Omega \times (0,t))} \\
\cr
\leq \varepsilon(t)\Bigl(\|\delta\! u\|_{L_\infty(0,t;W^1_2(\Omega))}+\|\delta\! u_t,\nabla^2\delta\! u,\nabla\delta\! P\|_{L_2(\Omega \times (0,t))}\Bigr)}
$$
for some positive function $\varepsilon$ going to $0$ at $0.$
Uniqueness follows on a sufficiently small time interval, then on the whole interval $[0,T]$
thanks to a standard connectivity (or bootstrap) argument.
\medbreak
Let us now explain how the arguments have to be modified
in the two-dimensional case. One cannot follow exactly
the above approach owing to the failure of the embedding of $W^1_2(\Omega)$
in $L_\infty(\Omega)$. So we have
to assume slightly higher regularity, namely $\nabla P^1,\nabla P^2 \in L_2(0,T;L_q(\Omega))$ with $q>2,$
and so on. For instance, setting $m\in(2,\infty)$ such
that $1/m+1/q=1/2,$ we may write
$$
\begin{array}{lll}
\|(A^1-A^2)\nabla P^1\|_{L_2(\Omega \times (0,t))} &\leq& Ct^{1/2} \|\nabla\delta\! u\|_{L_2(0,t;L_m(\Omega))} \|\nabla P^2\|_{L_2(0,t;
L_q(\Omega))}\\[1.5ex]
&\leq& C t^{1/2} \|D^2 \delta\! u\|_{L_2(\Omega\times(0,t))} \|\nabla P^2\|_{L_2(0,t;
L_q(\Omega))}.
\end{array}
$$
The other terms of \eqref{uu8} may be handled similarly. The details are left to the reader.
Theorem \ref{th:uniq} is thus proved.
\begin{rem}
Here we would like to explain the reason why we use the $W^{2,1}_{2,2}$ regularity for the velocity
to establish uniqueness.
Let us concentrate our attention on $n=3$. A direct $L_2$-energy method (i.e.
testing \eqref{NSL-uniq} by $\delta\! u$) requires our bounding
$(\nabla - \nabla_{u^1})P^1-(\nabla-\nabla_{u^2})P^2$ in $L_1(0,T;L_2(\Omega))$,
hence the following computation:
$$
\begin{array}{lll}
\left| \displaystyle \int_0^T \int_\Omega \delta\! A\nabla P^1 \,\delta\! u \,dx\, dt \right|
&\leq&
C\|\delta\! A\|_{L_\infty(0,T;L_2(\Omega))} \|\nabla P^1\|_{L_2(0,T;L_3(\Omega))} \|\delta\! u\|_{L_2(0,T;L_6(\Omega))}
\\ &\leq&
CT^{1/2} \|\nabla P^1\|_{L_2(0,T;L_3(\Omega))} \|\nabla \delta\! u \|_{L_2(\Omega\times(0,T))}.
\end{array}
$$
So we need $\nabla P^1 \in L_2(0,T;L_3(\Omega))$ which is naturally related to $u^1 \in W^{2,1}_{3,2}$. In addition integrating by parts
in the left-hand side of the above inequality, we need to keep track of
$\nabla^2 \delta\! u$ as well as of $\nabla\delta\! P$ in $L_2(\Omega \times (0,T)).$
Those two terms are out of control if resorting only to the basic energy inequality.
\end{rem}
\section{Proof of Theorem \ref{th:exist} -- existence} \label{s:existence}
The uniqueness property of the system is important, but to have the full picture
of the well-posedness issue, we now have to show that there exist solutions \emph{with merely bounded density}
for which Theorem~\ref{th:uniq} applies. With the method that is proposed below,
much more regularity is needed for the initial velocity. However, the assumption on the initial density stays the same: it just has to be bounded and bounded away from zero.
\subsection{A priori estimates}
We first concentrate on the proof of a priori estimates for
a smooth solution $(u,P)$ to \eqref{NSL}.
To simplify the presentation, we consider the case where $\Omega$ is a $C^2$ bounded
domain of ${\mathbb R}^n.$ The whole space case may be achieved by similar arguments: this is just
a matter of using homogeneous norms $\|\cdot\|_{\dot W^1_2({\mathbb R}^n)}$ and
$\|\cdot\|_{\dot B^{2-2/n^*}_{n^*,n^*}({\mathbb R}^n)}$ and resorting to Remark \ref{r:stokes}.
\smallbreak
In order to prove a priori estimates for $(u,P),$ let us assume in addition that
$T$ has been chosen so that (say)
\begin{equation}\label{eq:smallu}
\int_0^T\|\nabla u\|_{L_\infty(\Omega)}\,dt\leq1/2.
\end{equation}
This enables us to go from \eqref{NSE} to \eqref{NSL} (and conversely).
For any (possibly large) initial velocity $v_0\in B^{2-2/n^*}_{n^*,n^*}(\Omega),$
and $\rho_0\in L_\infty(\Omega)$ bounded away from zero,
we want to find a bound for a solution $(u,P)$ given by Theorem \ref{th:exist}.
In other words, we want to control the following quantity:
\begin{equation}
\Xi_{(u,P)}(T):= \|u_t\|_{L_\infty(0,T;L_2(\Omega))} + \|\nabla u_t\|_{L_2(\Omega \times (0,T))} +
\|u_t,\nabla^2u,\nabla P\|_{L_{n^*}(\Omega \times (0,T))},
\end{equation}
with $n^*=2\bigl(\frac{n+2}{n}\bigr),$ if $T$ is small enough.
\medbreak
Let us first notice that, by standard Sobolev embedding,
\begin{equation}\label{eq:smallu1}
\int_0^T\|\nabla u\|_{L_\infty(\Omega)}\,dt\leq CT^{1-\frac{1}{n^*}} \Xi_{(u,P)}(T),
\end{equation}
which guarantees \eqref{eq:smallu} for small times.
\medbreak
In order to use Lemma \ref{l:l2stokes} we restate System \eqref{NSL} as follows
(of course $\eta=\rho_0$ and $u_0=v_0$):
\begin{equation}\label{NSL-1}
\begin{array}{lcr}
\eta u_t - \nu \Delta u +\nabla P =-\nu(\Delta -\Delta_u)u+(\nabla-\nabla_u)P \qquad & \mbox{in} & \Omega \times (0,T), \\[4pt]
\hbox{\rm div}\, u=\hbox{\rm div}\, \bigl((\hbox{\rm Id}-A)u\bigr) & \mbox{in} & \Omega \times (0,T), \\[4pt]
u=0 & \mbox{on} & \partial \Omega \times (0,T), \\[4pt]
u|_{t=0}=u_0 & \mbox{in} & \Omega.
\end{array}
\end{equation}
Then keeping \eqref{eq:duP}, \eqref{eq:Deltauu}, \eqref{eq:magic}, and Proposition \ref{p:lag} in mind, we get
for some constant $C=C(\nu,\Omega),$
\begin{multline}\label{p1}
\sup_{0\leq t \leq T}\|u(t)\|_{W^1_2(\Omega)}+ \|u_t,\nabla^2u,\nabla P \|_{L_2(\Omega\times(0,T))} \\
\leq C\big( \|\hbox{\rm Id}-A\|_{L_\infty(\Omega \times (0,T))} \|u_t,\nabla^2u,\nabla P \|_{L_2(\Omega \times (0,T))} \\
+ \|\nabla A \, \nabla u, A_t\, u \|_{L_2(\Omega\times(0,T))} + \|u_0\|_{W^1_2(\Omega)}\big).
\end{multline}
The $W^{2,1}_{2,2}(\Omega \times (0,T))$ regularity of the velocity, coming from \eqref{p1}, is not sufficient to control the Lagrangian coordinates, namely the terms containing $A$ in the right-hand side of \eqref{p1},
because $\nabla W^{2,1}_{2,2}(\Omega \times (0,T))$ \emph{is not} embedded in $L_1(0,T;L_\infty(\Omega)).$
Hence, to close the estimates, higher regularity is needed.
Differentiating \eqref{NSL-1} once with respect to time is the easiest way to achieve it, because it does not affect
the irregular density which is time independent in the Lagrangian setting. We get
\begin{equation}\label{NSL-2}
\begin{array}{lcr}
\eta u_{tt} - \nu \Delta u_t +\nabla_u P_t = && \\
\qquad\qquad -\nu(\Delta -\Delta_u)u_t+ \nu(\Delta_u)_tu-(\nabla_u)_t P \qquad \qquad & \mbox{in} & \Omega \times (0,T), \\[4pt]
\hbox{\rm div}_u u_t= -\hbox{\rm div}\, A_t u & \mbox{in} & \Omega \times (0,T), \\[4pt]
u_t=0 & \mbox{on} & \partial \Omega \times (0,T).
\end{array}
\end{equation}
At this stage the question of the regularity of $u_t|_{t=0}$ arises.
This information can be found out only from the equations. At time $t=0$ the Eulerian and Lagrangian coordinates coincide (that is $A=\hbox{\rm Id}$),
so the regularity of $u_t|_{t=0}$ is just that of $\eta^{-1}(\nabla P-\nu\Delta u)|_{t=0}.$
However the regularity of $\nabla P|_{t=0}$ is unknown, so we rather have to use the fact that
differentiating $\eqref{NSL-1}_2$ with respect to $t$ implies that
\begin{equation}\label{in1}
\eta u_t|_{t=0} + \nabla P|_{t=0}= \nu \Delta u_0, \qquad \qquad \hbox{\rm div}\, u_t|_{t=0} = -\hbox{\rm div}\, (A_t|_{t=0} u_0).
\end{equation}
Note that
$A_t|_{t=0}$ need not be trivial so in order to bound $u_t|_{t=0}$ in $L_2(\Omega),$
we first have to remove its potential part. For that, we
use the Bogovski\u{\i} operator ${\mathcal B}$ (see Lemma \ref{l:bogovskii}) setting
$$
\phi = {\cal B}[-\hbox{\rm div}\, (A_t|_{t=0} u_0)] \mbox{ ~~~ so that ~~~ } \hbox{\rm div}\, \phi = - \hbox{\rm div}\, (A_t|_{t=0} u_0) \mbox{ in } \Omega, \quad
\phi =0 \mbox{ at } \partial \Omega.
$$
Let us notice that, because
$$
A(t,y)\cdot DX(t,y)=\hbox{\rm Id}\ \hbox{ and }\ DX|_{t=0}=\hbox{\rm Id}\quad\hbox{with}\quad
X(t,y)=\hbox{\rm Id}+\int_0^t u(\tau,y)\,d\tau,
$$
we have $A_t|_{t=0}=-Du_0,$
hence $A_t|_{t=0} u_0 = -(u_0\cdot\nabla u_0)=-\hbox{\rm div}\, (u_0\otimes u_0).$
\smallbreak
Now, $W^2_2(\Omega)$ is an algebra if $n=2,3.$ Hence $A_t|_{t=0} u_0$ is in $W^1_2(\Omega)$
and the function $\phi$ defined above
is in $W^1_2(\Omega)$\footnote{In fact, the function $\phi$ is in $W^2_2(\Omega)$
but we shall not take advantage of this in what follows.} and satisfies:
\begin{equation}\label{in4}
\|\phi \|_{W^1_2(\Omega)} \leq C\|u_0\|^2_{W^2_2(\Omega)}.
\end{equation}
Therefore System \eqref{in1} recasts in
$$\begin{array}{l}
\eta (u_t|_{t=0}-\phi) + \nabla P|_{t=0}= \nu \Delta u_0-\eta \phi\ \mbox{ in }\ \Omega\\[1ex]
\hbox{\rm div}\, (u_t|_{t=0} -\phi)=0\ \mbox{ in }\ \Omega\\[1ex]
(u_t|_{t=0} -\phi)|_{\partial\Omega}=0\ \mbox{ on }\ \partial\Omega.
\end{array}
$$
Now, testing the first equation by $(u_t|_{t=0} - \phi)$ we get:
\begin{equation}\label{in6}
\int_\Omega \eta \bigl|u_t|_{t=0} -\phi\bigr|^2\, dx \leq
\int_\Omega \eta^{-1}\bigl|\nu\Delta u_0-\eta\phi\bigr|^2\,dx.
\end{equation}
Thus, due to \eqref{in4}, we discover that $u_t|_{t=0}$ is in $L_2(\Omega)$ and that
\begin{equation}\label{in7}
\|u_t|_{t=0} \|_{L_2(\Omega)} \leq C_{\nu,\eta}(\|u_0\|_{W^2_2(\Omega)} + \|u_0\|^2_{W^2_2(\Omega)}).
\end{equation}
At this point, we would like to apply an energy method to \eqref{NSL-2}. However, as $\hbox{\rm div}_u u_t$ may be nonzero,
one cannot
eliminate the term coming from $P_t$ (which is out of control).
So we modify $\eqref{NSL-2}_2$ by introducing a vector-field $\xi$ so that
\begin{equation}\label{in8}
\begin{array}{lr}
\hbox{\rm div}_u \xi = -\hbox{\rm div}\, (A_t u) \;\; &\hbox{in }\ \Omega, \\
\xi =0 &\hbox{on }\ \partial \Omega.
\end{array}
\end{equation}
We need $\xi$ to satisfy suitable estimates (in terms of the right-hand side)
in $L_\infty(0,T;L_2(\Omega))\cap L_2(0,T;W^1_2(\Omega))$
and $\xi_t$ to be bounded in $L_2(\Omega\times(0,T)).$
This may be done by means of a Bogovski\u{\i} type operator construction as in \cite{DM-luminy}.
Here we shall define $\xi$ (treating $t$ as parameter) according to Lemma \ref{l:divA} in the Appendix.
\smallbreak
Let us start with the bound in $L_\infty(0,T;L_2(\Omega))$:
we have
$$\|\xi\|_{L_\infty(0,T;L_2(\Omega))}\lesssim \|A_tu\|_{L_\infty(0,T;L_2(\Omega))}.
$$
Therefore, using the fact that
\begin{equation}\label{eq:At}
A_t=\biggl(\sum_{k\geq0}(k+1)(-1)^{k+1}\biggl(\int_0^tD_yu\,d\tau\biggr)^k\biggr)\cdot D_yu,
\end{equation}
we get (remember \eqref{eq:smallu})
\begin{eqnarray}\label{in8a}
&&\|\xi\|_{L_\infty(0,T;L_2(\Omega))}\lesssim\| u \otimes \nabla u \|_{L_\infty(0,T;L_2(\Omega))}\nonumber \\
&&\phantom{\|\xi\|_{L_\infty(0,T;L_2(\Omega))}}\lesssim\|u\|_{L_\infty(0,T;L_\infty(\Omega))} \|\nabla u \|_{L_\infty(0,T;L_2(\Omega))}.
\end{eqnarray}
In order to bound the right-hand side of \eqref{in8a}, we apply
the following classical parabolic estimate (which is related to our definition of Besov
spaces in \eqref{xx1}):
\begin{equation}\label{eq:parabolic}
\|u\|_{L_\infty(0,T;B^{2-2/p}_{p,p}(\Omega))} \leq C( \|u_0\|_{B^{2-2/p}_{p,p}(\Omega)}
+\|u_t,\nabla^2 u\|_{L_p(\Omega \times (0,T))}).
\end{equation}
Now, owing to Sobolev embedding, it is clear that the left-hand side of \eqref{eq:parabolic} controls the $L_\infty$ norm
whenever $2-2/p> n/p,$ that is $p>(n+2)/2.$ The constant in \eqref{eq:parabolic} is time independent.
Therefore, for any $m\in((n+2)/2,n^*),$ we have
\begin{eqnarray}\label{in9}
&&\|u\|_{L_\infty(\Omega \times (0,T))} \leq C_m\bigl(
\|u_0\|_{B^{2-2/m}_{m,m}(\Omega)} + \|u_t,\nabla^2 u\|_{L_{m}(\Omega \times (0,T))}\bigr)\nonumber
\\ &&\phantom{\|u\|_{L_\infty(\Omega \times (0,T))}}\leq C_m\bigl(\|u_0\|_{B^{2-2/{n^*}}_{n^*,n^*}(\Omega)} + T^{\frac 1m - \frac{1}{n^*}} \,\Xi_{(u,P)}(T)\bigr).
\end{eqnarray}
Inequality \eqref{eq:parabolic} with $p=2$ also yields
\begin{eqnarray}\label{in10}
&& \|\nabla u\|_{L_\infty(0,T;L_2(\Omega))} \lesssim
\|u_0\|_{W^1_2(\Omega)} + \|u_t,\nabla^2 u\|_{L_2(\Omega \times (0,T))}\nonumber\\
&&\phantom{ \|\nabla u\|_{L_\infty(0,T;L_2(\Omega))} }\lesssim
\|u_0\|_{W^1_2(\Omega)} + T^{\frac 12 - \frac{1}{n^*}} \Xi_{(u,P)}(T).
\end{eqnarray}
So, putting \eqref{in8a}, \eqref{in9} and \eqref{in10} together, we get for some $\delta>0,$
\begin{equation}\label{in10a}
\|\xi\|_{L_\infty(0,T;L_2(\Omega))}\lesssim \|u_0\|^2_{W^2_2(\Omega)} + T^\delta \, \Xi_{(u,P)}^2(T).
\end{equation}
Next, in order to bound $\xi$ in $L_2(0,T;W^1_2(\Omega)),$ we use the fact that
$$\hbox{\rm div}_u\xi=-A_t:Du.$$
In effect, owing to \eqref{eq:div}, one may write
$\hbox{\rm div}\, (Au_t)=A:Du_t,$
hence the above relation may be obtained by taking the time derivative of
$\hbox{\rm div}\, (Au)=A:Du.$
So, using Lemma \ref{l:divA} and
remembering that $n^* >n,$ we get, for some $\delta>0,$
$$\begin{array}{lll}
\|\xi \|_{L_2(0,T;W^1_2(\Omega))} &\!\!\!\leq\!\!\!& C\| |\nabla u |^2 \|_{L_2(\Omega\times(0,T))}\\[1ex]
&\!\!\!\leq\!\!\!& C \|\nabla u\|_{L_\infty(0,T;L_2(\Omega))}\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))}\\[1ex]
&\!\!\!\leq\!\!\!& C(\|\nabla u_0\|_{L_2(\Omega)} + T^{1/2}\|\nabla u_t\|_{L_2(\Omega\times(0,T))})
\|\nabla u\|_{L_2(0,T; W^1_{n^*}(\Omega))}\\[1ex]
&\!\!\!\leq\!\!\!& C T^{\delta}\Xi_{(u,P)}(T)(\|u_0\|_{W^1_2(\Omega)} + T^{\delta}\Xi_{(u,P)}(T)).
\end{array}
$$
Finally, let us bound $\xi_t$ in $L_2(\Omega\times(0,T)).$
For that, we apply the last part of Lemma \ref{l:divA} which yields
\begin{equation}\label{in11}
\|\xi_t \|_{L_2(\Omega\times(0,T))} \leq C \|A_t \xi, A_{tt} u, A_t u_t\|_{L_2(\Omega\times(0,T))}.
\end{equation}
Using \eqref{in8a}, \eqref{in9} and \eqref{in10a}, we get
$$
\begin{array}{lll}
\|A_t \xi\|_{L_2(\Omega\times(0,T))} &\leq &C\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))} \|\xi\|_{L_\infty(0,T;L_2(\Omega))}\\[1ex]&\leq&
C\bigl(\|u_0\|_{W^{2}_{2}(\Omega)}^2 +T^\delta \Xi^2_{(u,P)}(T)\bigr)
\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))},
\end{array}
$$
$$
\begin{array}{lll}
\|A_{tt} u \|_{L_2(\Omega\times(0,T))}&\leq& \|\nabla u_t\|_{L_2(\Omega\times(0,T))} \|u\|_{L_\infty(0,T;L_\infty(\Omega))}\\[1ex]
&\leq& C\|\nabla u_t\|_{L_2(\Omega\times(0,T))} (\|u_0\|_{W^{2-2/n^*}_{n^*}(\Omega)} +T^\delta \Xi_{(u,P)}(T)),
\end{array}
$$
$$\displaylines{\quad
\|A_t u_t\|_{L_2(\Omega\times(0,T))} \leq C\|\nabla u\otimes u_t\|_{L_2(\Omega\times(0,T))}
\cr
\leq
C\|\nabla u \|_{L_2(0,T;L_\infty(\Omega))} (\|u_t-\xi\|_{L_\infty(0,T;L_2(\Omega))} +
\|\xi\|_{L_\infty(0,T;L_2(\Omega))}).\quad}
$$
So one may conclude that
\begin{multline}\label{in15}
\|\xi_t\|_{L_2(\Omega\times(0,T))}\lesssim (\|u_0\|_{W^2_2(\Omega)}^2 + T^{\delta}\Xi_{(u,P)}^2(T))
\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))} +(\|u_0\|_{W^{2-2/n^*}_{n^*}(\Omega)}\\ + T^{\delta}\Xi_{(u,P)}(T))\|\nabla u_t\|_{L_2(0,T;L_2(\Omega))}+
\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))} \|u_t-\xi\|_{L_\infty(0,T;L_2(\Omega))}.
\end{multline}
Note that in \eqref{in15}, there is no factor $T^\delta$ for the leading order
terms $\|u_t-\xi\|_{L_\infty(0,T;L_2(\Omega))}$ and $\|\nabla u_t\|_{L_2(\Omega\times(0,T))}.$
\medbreak
Once the vector-field $\xi$ has been constructed, one may recast System \eqref{NSL-2} in
\begin{equation}\label{NSL-2a}
\begin{array}{lcr}
\eta (u_{t}-\xi)_t - \nu \Delta (u_t-\xi) +\nabla_u P_t \qquad&& \\
\qquad=-\nu(\Delta -\Delta_u)u_t+ \nu( \Delta_u)_tu-(\nabla_u)_t P
-\eta \xi_t + \nu \Delta \xi & \mbox{in} & \Omega \times (0,T), \\[4pt]
\hbox{\rm div}_u (u_t-\xi)= 0 & \mbox{in} & \Omega \times (0,T), \\[4pt]
u_t-\xi =0 & \mbox{on} & \partial \Omega \times (0,T), \\[4pt]
(u_t-\xi)|_{t=0} \in L_2(\Omega)
& \mbox{in} & \Omega.
\end{array}
\end{equation}
Note that, now, $\hbox{\rm div}_u(u_t-\xi)=0$ and that $(u_t-\xi)|_{t=0}$ is in $L_2(\Omega)$ with
\begin{equation}\label{in13}
\|(u_t-\xi)|_{t=0}\|_{L_2(\Omega)}\leq C\bigl(\|u_0\|_{W^2_2(\Omega)}+\|u_0\|_{W^2_2(\Omega)}^2\bigr).
\end{equation}
So taking the $L_2(\Omega)$-inner product of $(\ref{NSL-2a})_1$ with $u_t-\xi,$ there is no term
generated by $\nabla_uP_t$ and we thus get
$$
\frac 12 \int_\Omega \eta |u_t -\xi|^2 dx\Big|_{t=T} + \nu \int_0^T\!\!\!\int_\Omega |\nabla (u_t -\xi)|^2 dxdt \leq
\frac 12 \int_\Omega \eta |u_t -\xi|^2\,dx\Big|_{t=0}+\sum_{j=1}^5I_j
$$
with
$$
\begin{array}{lll}
I_1&:=& C\nu\displaystyle \int_0^T\!\!\! \int_\Omega|(A -\hbox{\rm Id})|\,|\nabla u_t |\, |\nabla (u_t-\xi) |\,dx\,dt,\\[2ex]
I_2&:=&C \nu\displaystyle \int_0^T \!\!\!\int_\Omega |\nabla u|^2| \nabla (u_t-\xi)| \,dx\,dt,\\[2ex]
I_3&:=& C\displaystyle \int_0^T \!\!\!\int_\Omega |\nabla u | \,|\nabla P |\, |u_t-\xi|\,dx\,dt,\\[2ex]
I_4&:=& \displaystyle \int_0^T \!\!\!\int_\Omega \eta|\xi_t \cdot (u_t-\xi)|\,dx\,dt,\\[2ex]
I_5&:=& \nu\displaystyle \int_0^T \!\!\!\int_\Omega |\nabla \xi \cdot \nabla (u_t -\xi)| \, dx\,dt.
\end{array}
$$
In order to bound terms $I_1,$ $I_2,$ $I_3$ and $I_5,$ we use
H\"older and Young inequalities. We get for all $\varepsilon>0,$
\begin{eqnarray}\label{in17}
&&I_1 \leq \varepsilon\nu \|\nabla(u_t-\xi)\|_{L_2(\Omega \times(0,T))}^2
+C_{\varepsilon,\nu}\|A-\hbox{\rm Id}\|_{L_\infty(\Omega \times(0,T))}^2 \|\nabla u_t\|_{L_2(\Omega\times(0,T))}^2,\quad \\
\label{in18}
&&I_2 \leq \varepsilon \nu \|\nabla(u_t-\xi)\|_{L_2( \Omega \times (0,T))}^2
+C_{\varepsilon,\nu} \| |\nabla u|^2 \|_{L_2(\Omega\times(0,T))}^2,\\\label{in19}
&&I_3\leq \varepsilon\|u_t-\xi\|_{L_\infty(0,T;L_2(\Omega))}^2 +C_\varepsilon\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))}^2 \|\nabla P\|^2_{L_2(\Omega\times(0,T))},\\
&&I_5\leq \varepsilon \nu \|\nabla (u_t -\xi)\|_{L_2(\Omega \times (0,T))}^2+C_{\varepsilon,\nu} \|\nabla \xi\|_{L_2(\Omega \times(0,T))}^2.
\end{eqnarray}
Inequality \eqref{in19} deserves a remark: in order to ``close the estimates'', we have to factor out the last term in the right-hand side by a quantity which is small enough
when $T$ goes to $0.$ Here this follows from the embedding
$W^{2,1}_{n^*,n^*} \subset L_2(0,T;L_\infty(\Omega))$ which gives,
because $n^*>2,$
\begin{equation}
\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))} \leq CT^{ \frac 12 - \frac{1}{n^*} } \|u\|_{W^{2,1}_{n^*,n^*}(\Omega \times (0,T))}.
\end{equation}
Finally, taking $m\in(1,2)$ so that $1=\frac 1m + \frac{1}{n^*},$ and $\delta := \frac{2}{m} -1,$
we may write
$$
\begin{array}{lll}
I_4&\leq& \|u_t-\xi\|_{L_{n^*}(\Omega \times (0,T))}\|\xi_t\|_{L_{m}(\Omega \times (0,T))},\\[1ex]
& \leq& \varepsilon \|u_t-\xi\|_{L_{n^*}(\Omega \times (0,T))}^2 +C_\varepsilon T^\delta \|\xi_t\|_{L_{2}(\Omega \times (0,T))}^2.
\end{array}
$$
Combining interpolation and Sobolev embedding, we may write for all $p\in(2,\infty),$
$$
\|u_t-\xi\|_{L_p(0,T;L_q(\Omega))}
\lesssim \|u_t-\xi\|_{L_\infty(0,T;L_2(\Omega))}^{1-2/p}\|D(u_t-\xi)\|_{L_2(\Omega\times(0,T))}^{2/p},
$$
with $n/q=n/2-2/p.$ So taking $p=q=n^*:=2(n+2)/n,$ we get
\begin{equation}\label{in20a}
\|u_t-\xi\|_{L_{n^*}(\Omega \times (0,T))}\leq C(\|u_t-\xi\|_{L_\infty(0,T;L_2(\Omega))}
+\|\nabla (u_t-\xi)\|_{L_2(\Omega \times (0,T))}).
\end{equation}
Therefore, the above estimates for $I_1$ to $I_5$ (with $\varepsilon$ small enough) eventually imply that
$$
\displaylines{
\|u_t-\xi\|_{L_\infty(0,T;L_2(\Omega))}^2+\nu\|\nabla(u_t-\xi)\|_{L_2(\Omega\times(0,T))}^2
\leq 2\|(u_t-\xi)|_{t=0}\|_{L_2(\Omega)}^2
\cr
+C\Bigl(\|\nabla u\|_{L_2(0,T;L_\infty(\Omega))}^2\bigl(\|\nabla u\|_{L_\infty(0,T;L_2(\Omega))}^2
+\|\nabla P\|_{L_2(\Omega \times (0,T))}^2\bigr)
\cr
+\|\nabla u\|_{L_1(0,T;L_\infty(\Omega))}^2\|\nabla u_t\|_{L_2(\Omega \times (0,T))}^2
+T^\delta \|\xi_t\|_{L_2(\Omega \times (0,T))}^2+\|\nabla\xi\|^2_{L_2(\Omega \times (0,T))}\Bigr),}
$$
whence, using also the estimates for $\xi,$ $\xi_t$ and for $\nabla u$ in $L_2(0,T;L_\infty(\Omega)),$
we end up with \begin{equation}\label{in20}
\|u_t\|_{L_\infty(0,T;L_2(\Omega))}+ \|\nabla u_t \|_{L_2(\Omega \times (0,T))}
\leq C_{u_0}\bigl(1+T^\delta \Xi_{(u,P)}(T)\bigr)^3.
\end{equation}
Here $\delta>0.$ Let us also stress that $C_{u_0}$ depends only on $\|u_0\|_{W^2_2(\Omega)},$
$\|\rho_0\|_{L_\infty(\Omega)},$ $\|\rho_0^{-1}\|_{L_\infty(\Omega)},$
$\Omega$ and $\nu.$ In particular, it is time-independent.
\begin{rem}
At this stage, we find a limitation on the dimension of the domain:
as we need to have $\nabla u\in L_{2}(0,T;L_\infty(\Omega))$, embedding requires that $n^*>n$.
This is fulfilled if $n=2$ (because $2^*=4$) or $n=3$ (because $3^*=10/3$) but this is no
longer satisfied in higher dimension. So we see that our method cannot
be directly applied for $n \geq 4$ unless we differentiate the system more times with respect to time.
The physical motivation for considering dimension $n\geq4$ is
unclear, though.
\end{rem}
Keeping \eqref{in20a} in mind, we see that in order to close the estimates,
it suffices to bound the terms $\|\nabla^2u\|_{L_{n^*}(\Omega \times (0,T))}$
and $\| \nabla P\|_{L_{n^*}(\Omega \times (0,T))}$ which appear in the right-hand side of \eqref{in20}.
For that, we rewrite System \eqref{NSL-1} as a \emph{stationary} Stokes system, treating $\eta u_t$ as a source term,
and the time variable as a parameter. So we consider
\begin{equation}\label{NSL-3}
\begin{array}{lcr}
- \nu \Delta u +\nabla P = - \eta u_t -\nu(\Delta -\Delta_u)u+(\nabla-\nabla_u)P & \mbox{in} & \Omega \times (0,T), \\[4pt]
\hbox{\rm div}\, u=\hbox{\rm div}\, ((\hbox{\rm Id}-A)u)=-A:Du & \mbox{in} & \Omega \times (0,T), \\[4pt]
u=0 & \mbox{on} & \partial \Omega \times (0,T).
\end{array}
\end{equation}
Note that one may use Proposition \ref{p:bog} so as to handle the potential part
of $u.$ Therefore using standard results for the stationary Stokes equation (see \cite{Galdi})
enables us to get
\begin{multline}\label{p4}
\|\nu\nabla^2 u,\nabla P\|_{L_{n^*}(\Omega \times (0,T))} \leq C\big( \|u_t\|_{L_{n^*}(\Omega \times (0,T))} \\
+\|\nu(\Delta-\Delta_u)u,(\nabla-\nabla_u)P,\nu \nabla (A : Du)\|_{L_{n^*}(\Omega \times (0,T))}\big).
\end{multline}
The key to bounding the right-hand side is that, because $n^*>n,$
we have by embedding and H\"older's inequality
$$
\begin{array}{lll}
\|\displaystyle \int_0^t Du(t',y)\,dt'\|_{L_\infty(\Omega \times (0,T))} &\leq&
C T^{1\!-\!\frac{1}{n^*}} \|\nabla u\|_{ L_{n^*}(0,T;L_\infty(\Omega))}\\[1ex]
&\leq& CT^{1\!-\!\frac{1}{n^*}} \|\nabla^2 u\|_{ L_{n^*}(\Omega \times (0,T))}.
\end{array}
$$
In particular, this allows us to write that
$$
\begin{array}{lll}
\|(\Delta-\Delta_u)u\|_{L_{n^*}(\Omega \times (0,T))}&\!\!\!\lesssim\!\!\!&
\|D(A{}^T\!A)\|_{L_\infty(0,T;L_{n^*}(\Omega))}\|Du\|_{L_{n^*}(0,T;L_\infty(\Omega))}\\&&\qquad\qquad
+\|\hbox{\rm Id}-A{}^T\!A\|_{L_\infty(\Omega \times (0,T))}\|D^2u\|_{L_{n^*}(\Omega \times (0,T))},\\[1.5ex]
&\!\!\!\lesssim\!\!\!&
\|D^2u\|_{L_1(0,T;L_{n^*}(\Omega))}\|Du\|_{L_{n^*}(0,T;L_\infty(\Omega))}\\
&&\qquad\qquad+\|Du\|_{L_1(0,T;L_\infty(\Omega))}\|D^2u\|_{L_{n^*}(\Omega\times(0,T))},\\[1.5ex]
&\!\!\!\lesssim\!\!\!&T^{1-1/n^*} \Xi_{(u,P)}^2(T).
\end{array}
$$
Similar estimates hold true for the other terms of the right-hand side of \eqref{p4}.
So finally, putting together all the above inequalities leads to
\begin{equation}
\Xi_{(u,P)}(T) \leq C_{u_0}\bigl(1+T^{\delta}\Xi_{(u,P)}(T)\bigr)^3
\end{equation}
for some $\delta=\delta(n)\in(0,1)$ which may be computed explicitly.
\smallbreak
Then we are able to close the estimate, namely to write that
\begin{equation}\label{p8}
\Xi_{(u,P)}(T)\leq 8C_{u_0}
\end{equation}
whenever $T$ has been chosen so that
\begin{equation}\label{eq:time}
8C_{u_0}T^\delta\leq1.
\end{equation}
\subsection{The proof of existence}
In this short subsection, we explain how the proof of existence may be achieved
from the above a priori estimates.
Taking for granted the proof of the existence of a solution in the smooth case is the shortest way.
Under the assumptions of Theorem \ref{th:exist}, one may for instance smooth out
the initial density $\rho_0$ by convolution by a positive mollifier.
This provides us with a family of smooth approximate densities $(\rho_0^\varepsilon)_{\varepsilon>0}$
satisfying the same lower and upper bound as $\rho_0.$
Then applying the local existence and uniqueness statement of e.g. \cite{CK} (bounded domain
case) or \cite{D3} (whole space case), one obtains a family of
solutions $(\rho^\varepsilon,v^\varepsilon,\nabla Q^\varepsilon)$ for System \eqref{NSE} with data $(\rho_0^\varepsilon,v_0).$
This family of solutions has the required regularity.
In addition, the possible blow-up of $(\rho^\varepsilon,v^\varepsilon,\nabla Q^\varepsilon)$ at time $T$ is controlled
by the norm of $Dv^\varepsilon$ in $L_\infty(0,T;L_2(\Omega))\cap L_1(0,T;L_\infty(\Omega)).$
Note that Proposition \ref{p:change} ensures
that $(\rho^\varepsilon,v^\varepsilon,\nabla Q^\varepsilon)$ corresponds to a solution $(\eta^\varepsilon,u^\varepsilon,\nabla P^\varepsilon)$ of \eqref{NSL}
with the same regularity.
Now, the computations that have been performed in the previous section, combined with the
aforementioned blow-up criterion ensure that the lifespan of $(\eta^\varepsilon,u^\varepsilon,\nabla P^\varepsilon)$ (or
of $(\rho^\varepsilon,v^\varepsilon,\nabla Q^\varepsilon)$) may be bounded by below as in \eqref{eq:time}, and
that \eqref{p8} is satisfied. The important point is that all those bounds depend on the density
\emph{only through its infimum and supremum}.
So eventually, $(\rho^\varepsilon,v^\varepsilon,\nabla Q^\varepsilon)$ is uniformly bounded in
$$
L_\infty(\Omega\times(0,T))\times W^{2,1}_{n^*,n^*}(\Omega\times(0,T))
\times L_{n^*}(\Omega\times(0,T)),
$$
and in addition, $\partial_tv^\varepsilon$ is bounded in $L_\infty(0,T;L_2(\Omega))\cap L_2(0,T;W^1_2(\Omega)).$
\medbreak
By resorting to a standard compactness argument, it is now
easy to conclude that this family converges, up to extraction,
to some $(\rho,v,\nabla Q)$ with the same regularity and satisfying the same bounds.
The regularity is so high that it is clear that it
satisfies \eqref{NSE}.
Uniqueness then follows from Theorem \ref{th:uniq}.
\begin{rem}
An alternative approach to the issue of existence can be done by an iterative scheme performed in the same way as in our recent work \cite{DM}, or as in \cite{MZ,Sol} for the homogeneous Navier-Stokes equations in the Lagrangian coordinates.
\end{rem}
\section{Global existence}
This section is dedicated to the proof of \emph{global-in-time} solutions.
As pointed out in the introduction, in the case of smooth data, this is a classical
issue that has been solved by different authors in the Eulerian framework:
if there is no vacuum initially then global existence may be achieved
for general (smooth) data in the two-dimensional case and
if the velocity is small in the three-dimensional case (see e.g. \cite{D3,LS}).
As for us, in order to show the global existence, one may adopt
the Eulerian approach, too. However the very low regularity of the density
will force us to treat the inhomogeneity of the fluid as a perturbation
(hence to assume \eqref{den-str} or \eqref{den-str-1})
and to use the Lagrangian framework to prove the uniqueness.
\subsection{The two-dimensional case}
Here we prove Theorem \ref{th:lar}.
We concentrate on the proof of global a priori estimates.
Indeed, existence can be established by an elementary approximation with smooth density
exactly as in the previous section: for smooth enough densities,
the existence of global solutions with velocity (locally) in $W^{2,1}_{4,2}(\Omega\times{\mathbb R}_+)$
is ensured by \cite{D} (bounded case) or by \cite{D3} (whole space case).
In addition, let us emphasize that, in dimension two, this regularity guarantees that we are allowed to
change coordinates between the Eulerian and Lagrangian ones.
So the uniqueness follows from Theorem \ref{th:uniq}.
\medbreak
For getting the global existence, the computations are simpler
in the Eulerian framework. We aim at getting a control
over $v_t$ in $L_\infty(0,T;L_2(\Omega))$ in terms of the data and of $T$ only.
Even though it is classical (see e.g. \cite{AKM,D3,LS}) we here
recall how to proceed.
First, we test the momentum equation of System \eqref{NSE} by $v_t.$ We get:
$$
\int_\Omega\rho|v_t|^2\,dx+\frac\nu2\frac d{dt}\int_\Omega|\nabla v|^2\,dx+\int_\Omega \sqrt\rho v_t\cdot(\sqrt\rho v\cdot\nabla v)\,dx=0.
$$
Hence H\"older and Young inequalities imply that
\begin{equation}\label{H11}
\|\sqrt\rho v_t\|_{L_2(\Omega)}^2+\nu\frac d{dt}\|\nabla v\|_{L_2(\Omega)}^2
\leq \|\sqrt\rho v\|_{L_4(\Omega)}^2
\|\nabla v\|_{L_4(\Omega)}^2.
\end{equation}
On the other hand, using maximal regularity for
the stationary Stokes equation
$$\begin{array}{cl}
-\nu\Delta v+\nabla Q=\sqrt\rho\Bigl(\sqrt\rho v_t+\sqrt\rho
v\cdot\nabla v\Bigr)&\qquad\hbox{in }\ \Omega\\
\hbox{\rm div}\, v=0 &\qquad\hbox{in }\ \Omega\\
v=0&\qquad\hbox{on }\ \partial\Omega,
\end{array}
$$
gives (omitting the time-dependency)
\begin{equation}\label{H12}
\nu\|\nabla^2 v\|_{L_2(\Omega)}
+\|\nabla Q\|_{L_2(\Omega)}\lesssim \|\sqrt\rho\|_{L_\infty(\Omega)}
\Bigl(\|\sqrt\rho v_t\|_{L_2(\Omega)}
+\|\sqrt\rho v\|_{L_4(\Omega)}\|\nabla v\|_{L_4(\Omega)}\Bigr).
\end{equation}
Now applying Ladyzhenskaya inequality
$\|\nabla v\|_{L_4(\Omega)}^2\lesssim\|\nabla v\|_{L_2(\Omega)}
\|\nabla^2v\|_{L_2(\Omega)},$
yields
\begin{eqnarray}\label{H13}
&\nu\|\nabla^2 v\|_{L_2(\Omega)}+\|\nabla Q\|_{L_2(\Omega)}\lesssim
\|\sqrt\rho\|_{L_\infty(\Omega)}\|\sqrt\rho v_t\|_{L_2(\Omega)}\qquad\qquad\qquad\nonumber\\
&\qquad\qquad\qquad\qquad+\displaystyle \frac{\|\rho\|_{L_\infty(\Omega)}\|\sqrt\rho v\|_{L_4(\Omega)}^2\|\nabla v\|_{L_2(\Omega)}}\nu\cdotp
\end{eqnarray}
Making use of Ladyzhenskaya inequality in \eqref{H11}, also leads to
\begin{equation}\label{H14}
\|\sqrt\rho v_t\|_{L_2(\Omega)}^2+\nu\frac d{dt}
\|\nabla v\|_{L_2(\Omega)}^2\leq \nu^2\frac{\|\nabla^2 v\|_{L_2(\Omega)}^2}{\|\rho\|_{L_\infty(\Omega)}}
+C\frac{\|\rho\|_{L_\infty(\Omega)}}{\nu^2}\|\sqrt\rho v\|_{L_4(\Omega)}^4\|\nabla v\|_{L_2(\Omega)}^2.
\end{equation}
Finally, adding up \eqref{H14} and \eqref{H13}, using that
$\|\rho(t)\|_{L_\infty(\Omega)}=\|\rho_0\|_{L_\infty(\Omega)}$
and performing a time integration yields
$$
\displaylines{
\|\nabla v(t)\|_{L_2(\Omega)}^2+\int_0^t\biggl(\frac{\|\sqrt{\rho}v_t\|_{L_2(\Omega)}^2}{2\nu}+\frac{\|\nabla Q\|_{L_2(\Omega)}^2}{\nu\|\rho_0\|_{L_\infty(\Omega)}}
+\nu\frac{\|\nabla^2 v\|_{L_2(\Omega)}^2}{\|\rho_0\|_{L_\infty(\Omega)}}\biggr)\,d\tau
\cr
\leq \|\nabla v_0\|_{L_2(\Omega)}^2
+\frac{C\|\rho_0\|_{L_\infty(\Omega)}}{\nu^3}\int_0^t\|\sqrt\rho v\|_{L_4(\Omega)}^4\|\nabla v\|_{L_2(\Omega)}^2\,d\tau,}
$$
and Gronwall lemma implies that
\begin{multline}\label{l14}
\nu\|\nabla v(t)\|_{L_2(\Omega)}^2\!+\!\int_0^t\!\bigl(\|\sqrt\rho v_t\|_{L_2(\Omega)}^2
+\|\rho_0\|_{L_\infty(\Omega)}^{-1}\|\nabla Q\|_{L_2(\Omega)}^2
+\nu^2\|\rho_0\|_{L_\infty(\Omega)}^{-1}\|\nabla^2v\|_{L_2(\Omega)}^2\bigr)d\tau\\
\leq \nu \|\nabla v_0\|_{L_2(\Omega)}^2\:e^{C\nu^{-3}\|\rho_0\|_{L_\infty(\Omega)}
\int_0^t\|\sqrt\rho v\|_{L_4(\Omega)}^4\,d\tau}.
\end{multline}
Note that the exponential term is controlled thanks to the basic energy equality \eqref{energy}
(combined with Ladyzhenskaya's inequality).
Since $W^1_2({\mathbb R}^2)$ is not embedded into $L_\infty({\mathbb R}^2)$ we still do not control the change of coordinates
so that we cannot apply Theorem \ref{th:uniq} to get uniqueness.
So we
are required to improve the regularity of the solution to \eqref{NSE}.
In fact, it turns out to be possible to obtain $W^{2,1}_{q,p}$ smoothness for any $1<p<\infty$ and $n<q<\infty$
via bootstrap method. To avoid technicality,
we focus on the case $p=2$ and $q=4$ which suffices both to perform the change of coordinates
and to apply the uniqueness result stated in Theorem~\ref{th:uniq}.
We rewrite System \eqref{NSE} as
\begin{equation}\label{l15}
\begin{array}{lcr}
m v_t -\nu \Delta v + \nabla Q=(m-\rho)v_t -\rho v \cdot \nabla v & \mbox{ in } & \Omega \times (0,T), \\[3pt]
\hbox{\rm div}\, v=0 & \mbox{ in } & \Omega \times (0,T), \\
v=0 & \mbox{ on }& \partial \Omega \times (0,T).
\end{array}
\end{equation}
where $m =\inf_{y\in \Omega} \rho_0(y)$. Note that the method of characteristics
ensures that the initial density controls lower and upper pointwise bounds of the density over
$\Omega \times (0,T)$.
Then using Theorem \ref{th:stokes} we get:
\begin{multline}\label{l16}
\sup_{0\leq t\leq T}\sqrt{m\nu}\|v(t)\|_{B^1_{4,2}(\Omega)}+
\|m v_t, \nu\nabla^2 v,\nabla Q \|_{L_2(0,T;L_4(\Omega))} \\
\leq C\bigl(\|(\rho-m)v_t\|_{L_2(0,T;L_4(\Omega))} +\|\rho v \cdot \nabla v\|_{L_2(0,T;L_4(\Omega))} +\sqrt{m\nu}\|v_0\|_{B^1_{4,2}(\Omega)}\bigr).
\end{multline}
Now, we have
\begin{equation}\label{l17}
\|\rho v \cdot \nabla v\|_{L_2(0,T;L_4(\Omega))} \leq C \|v\|_{L_4(0,T;L_\infty(\Omega))}\|\nabla v\|_{L_4(\Omega\times(0,T))}.
\end{equation}
The right-hand side of \eqref{l16} is bounded by means of \eqref{l14} as
$$W^{2,1}_{2,2} \subset L_4(0,T;L_\infty(\Omega)\cap W^1_4 (\Omega)).$$
The first term of the right-hand side of \eqref{l16} can be absorbed by the left-hand side
provided $c$ is sufficiently small in \eqref{den-str}.
This enables us to justify that the velocity $v$ remains in $W^{2,1}_{4,2}(\Omega\times(0,T))$ for all $T>0.$
Finally, as $4\geq2={\rm dim} \, \Omega$, we are allowed to apply Theorem \ref{th:uniq} in order
to get the uniqueness of our constructed solutions. Theorem \ref{th:lar} is proved.
\subsection{Global existence in the $n$-dimensional case}
In this part we address the global solvability issue in bounded $n$-dimensional
domains with $n\geq3$. We adopt the Lagrangian framework (however the Eulerian framework
may be used as well, as regards the existence theory). The result is based on the technique for the homogeneous system
performed in \cite{Mu}.
In contrast with the other sections, working in bounded domains is important: this is due to
the following result which ensures the exponential decay of the energy norm.
\begin{lem}
Let $u$ be a sufficiently smooth solution to \eqref{NSL}. Then
\begin{equation}\label{e1}
\frac{1}{2}\frac{d}{dt}\int_\Omega \eta |u|^2 \,dy +\nu \int_\Omega |\nabla_uu|^2 \,dy =0,
\end{equation}
as long as the Lagrangian coordinates are defined.
In addition if $\Omega$ is bounded then
\begin{equation}\label{e2}
\|u(t)\|_{L_2(\Omega)}^2 \leq e^{-\frac{\nu\lambda_1}{\eta^*} t}\|u_0\|_{L_2(\Omega)}^2,
\end{equation}
where $\lambda_1$ stands for the first eigenvalue of the Laplace operator, and $\eta^*=\|\eta\|_{L_\infty(\Omega)}.$
\end{lem}
\begin{p}
The proof is similar to that of Lemma \ref{l:ene}: testing $(\ref{NSL})_2$ by $u$ we get \eqref{e1}.
In order to get \eqref{e2}, it suffices to notice that, owing to incompressibility, we have
$$
\int_\Omega \eta |u|^2 \,dy=\int_\Omega \rho |v|^2 \,dx\quad\hbox{and}\quad
\int_\Omega |\nabla_uu|^2\,dy=\int_\Omega |\nabla v|^2\,dx.
$$
Hence using \eqref{energy} and Poincar\'e's inequality,
we readily get \eqref{e2}.
\end{p}
\subsubsection*{Proof of Theorem \ref{th:bdd}:} We focus on the proof of global a priori
estimates for smooth solutions to \eqref{NSL}.
Indeed, from those estimates, it is easy to proceed as in Section \ref{s:existence} so as to prove the
existence of a global solution under the assumptions of Theorem \ref{th:bdd}:
this is only a matter of smoothing out the initial density so as to construct
a sequence of smooth solutions (given by e.g. \cite{D}) with uniform norms.
\smallbreak
So given a global solution $(\eta,u,\nabla P)$ to \eqref{NSL} with data $\rho_0\in L_\infty(\Omega)$ satisfying
\eqref{den-str-1} and $u_0\in B^{2-2/p}_{q,p}(\Omega)$ with $\hbox{\rm div}\, u_0=0$ and $u_0|_{\partial\Omega}=0,$
we introduce the following quantities:
$$\displaylines{
M_{-1}:= m^{1/p}\nu^{1/p'} \|u_0\|_{B^{2-2/p}_{q,p}(\Omega)}+m\|u_0\|_{L_2(\Omega)},\cr
M_k:= m^{1/p}\nu^{1/p'} \|u\|_{L_\infty(k,k+1;B^{2-2/p}_{q,p}(\Omega))}+ \|mu_t, \nu \nabla^2 u, \nabla P\|_{L_p(k,k+1;L_q(\Omega))}}
$$
where $m:=\inf \rho_0$ and $k\in{\mathbb N}.$ Recall $1<p<\infty$, $n<q<\infty$.
\medbreak
Let us notice that setting
$$
u(t,x)=\nu \tilde u(\nu t,x)\quad\hbox{and}\quad
P(t,x)=\nu^2\tilde P(\nu t,x)
$$
reduces our study to the case $\nu=1.$
Hence we shall assume from now on that $\nu=1.$
\medbreak
Define a smooth function $\zeta: {\mathbb R} \to [0,1]$ such that
\begin{equation}\label{b2}
\zeta(t)=\left\{
\begin{array}{cl}
1 &\hbox{if }\ t \geq 0, \\
0 & \hbox{if }\ t\leq -1,
\end{array}
\right.
\end{equation}
and set $\zeta^k(t):=\zeta(t-k)$ for $k\geq0,$ and $I_k:=[k-1,k+1]$ for $k\geq1.$
\smallbreak
We recast System \eqref{NSL} with $t_0=k-1$ as follows:
\begin{equation}\label{b3}
\begin{array}{lcr}
m[\zeta^k u]_t - \Delta [\zeta^ku] +\nabla [\zeta^k P] = \zeta^k(m-\eta)u_t && \\[3pt] \qquad\qquad-(\Delta -\Delta_u)[\zeta^ku] +
(\nabla -\nabla_u) [\zeta^k P] + m(\zeta^k)_t u & \mbox{in} & \Omega \times (0,T),\\[5pt]
\hbox{\rm div}\, [\zeta^ku]=\hbox{\rm div}\, [\zeta^ku] -\hbox{\rm div}\, u [\zeta^k u] & \mbox{in} & \Omega \times (0,T),\\[5pt]
\zeta^ku=0 & \mbox{on} & \partial \Omega \times (0,T),\\[5pt]
\zeta^ku|_{t=k-1}=0 & \mbox{in} & \Omega.
\end{array}
\end{equation}
Let $m^*:=\sup\rho_0.$
We claim that there exist two positive constants $K$ and $\alpha$ depending only on $m^*,n,\Omega,p,q,$ so that, under Condition \eqref{den-str-1}, we have
\begin{equation}\label{b4}
M_k \leq KM_{-1} e^{-\alpha k}\quad\hbox{for all }\ k\in{\mathbb N}.
\end{equation}
Let us observe that, by Sobolev embedding (here we use that $q>n$), we have
\begin{equation}\label{b11}
\int_0^{k+1} \|Du\|_{L_\infty(\Omega)}\, ds \leq C \sum_{\ell=0}^{k} \|D^2u\|_{L_p(I_\ell;L_q(\Omega))}\leq
C \sum_{\ell=0}^{k} M_\ell.
\end{equation}
So, given that
$$
\sum_{k\geq0} KM_{-1} e^{-\alpha k}=\frac{KM_{-1}}{1-e^{-\alpha}},
$$
if we assume that $M_{-1}$
is small enough --a condition which is equivalent to the smallness of $c'$ in \eqref{den-str-1}--
then \eqref{eq:smallu} is satisfied on $[0,k+1]$ if \eqref{b4} is satisfied up to $k.$
\medbreak
Proving \eqref{b4} will be done by induction on $k.$
The first step, $k=0,$ is clear. This is a direct consequence of Theorem \ref{th:stokes}
applied to \eqref{NSL} on the time interval $[0,1],$
and of estimates for $A.$
\medbreak
Let us now take for granted Inequality \eqref{b4} up to $k-1.$ In order to prove it for $k,$
we shall estimate $(\zeta^ku,\zeta^kP)$ on the interval $I_k.$
For that, one may resort once again to Theorem \ref{th:stokes}.
First, we bound the right-hand side of \eqref{b3} in $L_p(I_k;L_q(\Omega))$: we readily have
$$
\begin{array}{lll}
\|\zeta^k(m-\eta) u_t\|_{L_p(I_k;L_q(\Omega))} &\!\!\!\!\leq\!\!\!\!& (m^*-m)\|u_t\|_{L_p(I_k;L_q(\Omega))},\\[1.5ex]
\|(\Delta -\Delta_{u})[\zeta^ku]\|_{L_p(I_k;L_q(\Omega))} &\!\!\!\!\leq\!\!\!\!& \|A{}^T\!A-\hbox{\rm Id}\|_{L_\infty(\Omega\times I_k)}
\|\nabla^2u\|_{L_p(I_k;L_q(\Omega))}\\&&\qquad
+\|\nabla(A{}^T\!A)\|_{L_\infty(I_k;L_q(\Omega))}\|\nabla u\|_{L_p(I_k;L_\infty(\Omega))},\\[1.5ex]
\|(\nabla -\nabla_{u}) [\zeta^k P]\|_{L_p(I_k;L_q(\Omega))}&\!\!\!\!\leq\!\!\!\!& \|A-\hbox{\rm Id}\|_{L_\infty(\Omega\times I_k)}
\|\nabla P\|_{L_p(I_k;L_q(\Omega))},\\[1.5ex]
\|m(\zeta^k)_t u\|_{ L_p(I_k;L_q(\Omega))}& \!\!\!\!\leq\!\!\!\!& m^*\|u\|_{ L_p(I_k;L_q(\Omega))}.
\end{array}
$$
Let us notice that, by interpolation and because $\Omega$ is bounded, we have for some $\theta\in(0,1),$
$$
\|u\|_{ L_p(I_k;L_q(\Omega))}\leq C \|D^2u\|_{ L_p(I_k;L_q(\Omega))}^\theta\|u\|_{L_p(I_k;L_2(\Omega))}^{1-\theta}.
$$
Therefore, taking advantage of \eqref{e2} and of the definition of $M_{k-1}$ and of $M_k,$ we get for some $\beta>0$ (depending only
on $\Omega,$ $p,$ $q$ and $m_*$) and
for all $\varepsilon\in(0,1),$
\begin{equation}\label{eq:decay}
m^*\|u\|_{ L_p(I_k;L_q(\Omega))}\leq \varepsilon(M_{k-1}+M_k) +C_\varepsilon M_{-1}e^{-\beta k}.
\end{equation}
Next, we have to bound the left-hand side of $\eqref{b3}_2$: we have
$$
\begin{array}{lll}
\|\hbox{\rm div}\, ((\hbox{\rm Id} \!-\! A) [\zeta^k u])\|_{ L_p(I_k;W^1_q(\Omega))}&\!\!\!\!\leq\!\!\!\!& \|A-\hbox{\rm Id}\|_{L_\infty(\Omega\times I_k)}
\|\nabla^2u\|_{L_p(I_k;L_q(\Omega))}\\&&\qquad
+\|\nabla A\|_{L_\infty(I_k;L_q(\Omega))}\|\nabla u\|_{L_p(I_k;L_\infty(\Omega))} ,\\[1.5ex]
\|\partial_t\left((\hbox{\rm Id}-A)\zeta^k u\right)\|_{ L_p(I_k;L_q(\Omega))}&\!\!\!\!\leq\!\!\!\!&
\|A-\hbox{\rm Id}\|_{L_\infty(\Omega \times (0,T))}\|(\zeta^ku)_t\|_{L_p(I_k;L_q(\Omega))}\\&&\qquad\qquad+\|A_t\|_{L_p(I_k;L_\infty(\Omega))}
\|u\|_{ L_\infty(I_k;L_q(\Omega))}.
\end{array}
$$
Let us look at the quantities depending on the matrix $A$. Recall that
$$A^{-1}=\hbox{\rm Id} + \int_{0}^tDu(s)\,ds,$$ so taking advantage of \eqref{b11}
and of the hypothesis that follows,
one may write that
$$\|\hbox{\rm Id}-A\|_{L_\infty(\Omega\times(0,k+1))} \leq 2\|Du\|_{L_1(0,k+1;L_\infty(\Omega))},$$
and a similar inequality for $\hbox{\rm Id}-A{}^T\!A.$
Likewise, we have
\begin{equation}\label{b13}
\|DA\|_{L_\infty(I_k;L_q(\Omega))} \leq C\sum_{\ell=0}^{k} \|D^2 u\|_{L_q(I_\ell;L_q(\Omega))} \leq
C\sum_{\ell=0}^{k} M_\ell
\end{equation}
and
\begin{equation}\label{b14a}
\|A_{t}\|_{L_p(I_k;L_\infty(\Omega))} \leq C \|Du\|_{L_p(I_k;L_\infty(\Omega))}\leq C(M_{k-1}+M_k).
\end{equation}
So finally, putting together all the previous inequalities and applying Theorem \ref{th:stokes}
to \eqref{b3}, we end up with
$$\displaylines{
\|m(\zeta^ku_t), \nabla^2(\zeta^ku), \nabla(\zeta^kP)\|_{L_p(I_k;L_q(\Omega))}
\cr
\leq
C(M_{k-1}+M_k) \biggl(\Bigl(\frac{m^*-m}{m}\Bigr)+ \Bigl(\sum_{\ell=0}^{k} M_\ell\Bigr) +\varepsilon\biggr)
+C_\varepsilon M_{-1}e^{-\beta k}.}
$$
At this point, it is clear that one has to take $\alpha=\beta.$ Note also that if $M_{-1}$
and the oscillations of the density are small enough then, taking $\varepsilon$ small enough too,
the above inequality implies, up to a change of $C,$
$$
M_k\leq C(c M_{k-1} + M_{-1}e^{-\alpha k}).
$$
\medbreak
Now, using the induction hypothesis \eqref{b4} for $M_{k-1},$ we deduce that
$$
M_k\leq KM_{-1}e^{-\alpha k}\biggl(\frac{cCe^{\alpha}}K+\frac CK\biggr).
$$
Therefore, we see that if we take $K=2C$ and assume that $c$ has been chosen so that $cCe^{\alpha}\leq1/2$
then we get \eqref{b4} for $M_k.$
\medbreak
Note that our proof is not quite rigorous as
we did use \eqref{b4} at rank $k$ in the above inequalities.
To make the argument work, it is just a matter of replacing the interval
$I_k$ with $[k-1,T].$ By continuity of the norms with respect to time, it is clear that
\eqref{b4} at rank $\ell\leq k-1$ ensures that the desired inequality is satisfied on $[k,T]$ for any $T$
close enough to $k.$ Then resorting to a standard bootstrap argument allows us to conclude the desired
inequality for $M_k.$
This completes the proof of Theorem \ref{th:bdd}.
\section{Appendix}
Throughout this paper, we used repeatedly the following
well-known result
for the divergence equation (see e.g. \cite{Galdi} and the references therein):
\begin{lem}\label{l:bogovskii} Let $\Omega$ be a bounded Lipschitz domain of ${\mathbb R}^n.$
There exists a linear operator ${\mathcal B}$ which is bounded from $L_q(\Omega)$ to $W^1_q(\Omega)$ for all $q\in(1,\infty)$ and
such that for any
$f\in L_q(\Omega)$ the vector-field
$u:={\mathcal B}(f)$ satisfies
\begin{equation}\label{eq:div0}
\hbox{\rm div}\, u=f \quad\hbox{in }\ \Omega\qquad\hbox{and}\qquad
u|_{\partial\Omega}=0\quad\hbox{on }\ \partial\Omega.
\end{equation}
\end{lem}
This result may be proved by means of an explicit formula -- the \emph{Bogovski\u{\i} formula} -- that provides a solution to the above
divergence equation in the case where $\Omega$ is star-shaped.
In our paper, we had to use a more elaborate version of the above lemma, namely the following statement that has
been established in \cite{DM-luminy}:
\begin{prop}\label{p:bog}
Let $\Omega$ be a $C^2$ bounded domain.
There exists a linear operator $B$ acting on couples $(R,\zeta)$ with
$R:\Omega\rightarrow{\mathbb R}^n$ and $\zeta:\partial\Omega\rightarrow{\mathbb R}$ which is
continuous from $L_q(\Omega;{\mathbb R}^n)\times W^{-1/q}_q(\partial \Omega,{\mathbb R})$ to $L_q(\Omega,{\mathbb R}^n)$ for
all $q\in(1,+\infty)$ and such that $u:=B(R,\zeta)$ satisfies the generalized divergence equation:
\begin{equation}\label{eq:gdiv}
-\int_\Omega u \cdot \nabla \phi\, dx= -\int_\Omega R \cdot \nabla \phi\, dx
+ \int_{\partial \Omega} \zeta\phi\, d\sigma \
\hbox{ for all }\ \phi \in C^\infty(\overline{\Omega}).
\end{equation}
If in addition $\hbox{\rm div}\, R\in L_q(\Omega)$ and $R\cdot\vec n=0$ then $u:=B(R,0)$ satisfies
$$
\hbox{\rm div}\, u=\hbox{\rm div}\, R\quad\hbox{in }\ \Omega\qquad\hbox{and}\qquad
u|_{\partial\Omega}=0\quad\hbox{on }\ \partial\Omega,
$$
and the following inequality holds true:
\begin{equation}\label{eq:div1}
\|u\|_{W^{1}_q(\Omega)} \leq C\|\hbox{\rm div}\, R\|_{L_{q}(\Omega)}.
\end{equation}
Furthermore, if we also have $\hbox{\rm div}\, R\in W_q^1(\Omega)$ then $u$ is in $W^2_q(\Omega)$ and we have
\begin{equation}\label{eq:div2}
\|u\|_{W^{2}_q(\Omega)} \leq C\|\hbox{\rm div}\, R\|_{W^1_{q}(\Omega)}.
\end{equation}
\end{prop}
We claim that this statement implies Lemma \ref{l:bog}. Indeed, we set
$u:=B(R,0).$ Then it is clear that \eqref{eq:bog1} holds true.
Then differentiating $u$ with respect to time yields
$$
u_t=B(R_t,0).
$$
Hence applying the first part of the above statement yields \eqref{eq:bog2}.
\bigbreak
In Section \ref{s:existence}, owing to our use of Lagrangian coordinates,
it was natural to extend Lemma \ref{l:bog} and Proposition \ref{p:bog}
to the \emph{twisted} divergence equation, namely
$$
\hbox{\rm div}\, A u=\hbox{\rm div}\, R \ \hbox{ in }\ \Omega\qquad\hbox{and }\
u=0\ \hbox{ on }\ \partial\Omega
$$
with $\hbox{\rm div}\, A u:=\hbox{\rm div}\, (A u).$
In particular, we used the following statement:
\begin{lem}\label{l:divA}
Let $\Omega$ be a $C^2$ bounded domain.
Let $A\in L_\infty (\Omega; {\mathbb R}^{n\times n})$ be such that
$\det A\equiv1.$
There exists a positive constant $c$ such that if
\begin{equation}\label{eq:smallA}
\|A-\hbox{\rm Id}\|_{L_\infty(\Omega)}\leq c
\end{equation}
then there exists a map $B_A$ acting on couples $(R,\zeta)$ with
$R:\Omega\rightarrow{\mathbb R}^n$ and $\zeta:\partial\Omega\rightarrow{\mathbb R}$ which is
continuous from $L_q(\Omega;{\mathbb R}^n)\times W^{-1/q}_q(\partial \Omega,{\mathbb R})$ to $L_q(\Omega,{\mathbb R}^n)$ for
all $q\in(1,+\infty)$ and such that $u:=B_A(R,\zeta)$ satisfies the
generalized twisted divergence equation:
\begin{equation}\label{eq:gdivA}
-\int_\Omega Au \cdot \nabla \phi\, dx= -\int_\Omega R \cdot \nabla \phi\, dx
+ \int_{\partial \Omega} \zeta\phi\, d\sigma \
\hbox{ for all }\ \phi \in C^\infty(\overline{\Omega}).
\end{equation}
If in addition $\hbox{\rm div}\, R\in L_q(\Omega)$ and $R\cdot\vec n=0$ then $u:=B_A(R,0)$ satisfies
\begin{equation}\label{eq:divA}
\hbox{\rm div}\, A u=A:Du=\hbox{\rm div}\, R \ \hbox{ in }\ \Omega\qquad\hbox{and }\
u=0\ \hbox{ on }\ \partial\Omega,
\end{equation}
and \eqref{eq:div1}
with a constant independent of $A.$
\medbreak
Finally, in the smooth case, if the data $R$ and $A$ depend on a parameter
$t$ in some interval of ${\mathbb R}$ with $R_t$ in $L_q(\Omega)$ and \eqref{eq:smallA} satisfied
for almost all $t,$ then $u$ fulfills:
\begin{equation}\label{eq:ut}
\|u_t\|_{L_q(\Omega)}\leq C\bigl(\|A_tu\|_{L_q(\Omega)}+\|R_t\|_{L_q(\Omega)}\bigr).
\end{equation}
\end{lem}
\begin{p}
The proof follows from Proposition \ref{p:bog}: we consider the linear operator $T$ defined by
\begin{equation} T(\bar \xi)=\xi, \mbox{ where }
\xi := B\bigl((\hbox{\rm Id}- A) \bar \xi + R,\zeta\bigr).
\end{equation}
Note that this definition and the fact that $\det A\equiv1$ imply that
any fixed point of $T$ satisfies \eqref{eq:gdivA} (or \eqref{eq:divA} in the smooth case
with $\zeta=0$).
Next, under Condition \eqref{eq:smallA} with $c$ small enough,
one may apply the Banach fixed point theorem to $T$ so as to get a solution to our problem.
The reader may refer to \cite{DM-luminy} for more details.
\smallbreak
Concerning the proof of inequality \eqref{eq:ut}, it suffices to differentiate once
the equality
$$
u =B\bigl((\hbox{\rm Id}- A)u + R,0\bigr).
$$
We get
$$
u_t=B\bigl(-A_tu+(\hbox{\rm Id}-A)u_t+R_t,0\bigr).
$$
So it is a mere consequence of the first part of Proposition \ref{p:bog}.
\end{p}
{\bf Acknowledgement}. The second author (PBM) thanks Vladimir \v{S}verak for a fruitful discussion. The second author has been supported by
the MN grant IdP2011 000661.
\end{document}
|
\begin{document}
\title[Compact Quantum Semigroups]{A Kind of Compact Quantum Semigroups}
\subjclass{Primary 46L05; Secondary 16W30}
\keywords{Compact quantum semigroup, quantum family of all maps}
\author[M. M. Sadr]{Maysam Maysami Sadr}
\address{Department of Mathematics\\
Institute for Advanced Studies in Basic Sciences (IASBS of Zanjan) \\
P.O. Box 45195-1159, Zanjan 45137-66731, Iran}
\email{[email protected]}
\begin{abstract}
We show that the quantum family of all maps from a finite space to a finite dimensional
compact quantum semigroup has a canonical quantum semigroup structure.
\end{abstract}
\maketitle
\section{Introduction}
According to the Gelfand duality, the category of compact Hausdorff spaces and
continuous maps and the category of commutative unital C*-algebras and unital *-homomorphisms are dual.
In this duality, any compact space $X$ corresponds to $\mathcal{C}(X)$, the C*-algebra of all continuous complex valued maps on $X$,
and any commutative unital C*-algebra corresponds to its maximal ideal space. Thus as the fundamental concept in
\emph{Non-Commutative Topology}, a non-commutative unital C*-algebra $A$ is considered as the algebra of
continuous functions on a \emph{symbolic} compact non-commutative space $\mathfrak{Q} A$.
In this correspondence, *-homomorphisms $\Phi:A\longrightarrow B$ interpret as symbolic
continuous maps $\mathfrak{Q}\Phi:\mathfrak{Q} B\longrightarrow\mathfrak{Q} A$. Since the coordinates observable of a quantum mechanical systems are non-commutative,
some times non-commutative spaces are called quantum spaces.
Woronowicz \cite{W1} and So{\l}tan \cite{S1} have defined a quantum space $\mathfrak{Q} C$ of \emph{all maps}
from $\mathfrak{Q} B$ to $\mathfrak{Q} A$ and showed that $C$ exists under appropriate conditions on $A$ and $B$.
In \cite{S}, we considered the functorial properties of this notion.
In this short note, we show that if $\mathfrak{Q} A$ is a compact finite dimensional
(i.e. $A$ is unital and finitely generated) quantum semigroup,
and if $\mathfrak{Q} B$ is a finite commutative quantum space (i.e. $B$ is a finite dimensional commutative
C*-algebra), then $\mathfrak{Q} C$ has a canonical quantum semigroup structure. In other words, we construct
the non-commutative version of the semigroup $\mathcal{F}(X,S)$ described as follows:
\emph{Let $X$ be a finite space and $S$ be a compact semigroup. Then the space $\mathcal{F}(X,S)$
of all maps from $X$ to $S$, is a compact semigroup with compact-open topology and pointwise multiplication.}
In Section 2, we define quantum families of all maps and compact quantum semigroups.
In Section 3, we state and prove our main result; also we consider a result about quantum semigroups with counits.
Finally, in Section 4, we consider some examples.
\section{Quantum families of maps and quantum semigroups}
All C*-algebras in this paper have unit and all C*-algebra homomorphisms preserve the units.
For any C*-algebra $A$, $\mathrm{I}_A$ and $1_A$ denote the identity homomorphism from $A$ to $A$, and the unit of $A$, respectively.
For C*-algebras $A,B$, $A\otimes B$ denotes the spatial tensor product of $A$ and $B$.
If $\Phi:A\longrightarrow B$ and $\Phi':A'\longrightarrow B'$ are *-homomorphisms, then $\Phi{\otimes}\Phi':A\otimes A'\longrightarrow B\otimes B'$ is the
*-homomorphism defined by $\Phi{\otimes}\Phi'(a{\otimes}a')=\Phi(a){\otimes}\Phi'(a')$ ($a\in A,a'\in A'$).
Let $X,Y$ and $Z$ be three compact Hausdorff spaces and $\mathcal{C}(Y,X)$ be the space of all continuous maps from $Y$ to $X$ with compact open topology. Consider a continuous map $f:Z\longrightarrow\mathcal{C}(Y,X)$. Then the pair $(Z,f)$ is
a continuous \emph{family of maps} from $Y$ to $X$ indexed by $f$ with parameters in $Z$. On the other hand, by the topological \emph{exponential law} we know that $f$ is characterized by a continuous map $\tilde{f}:Y\times Z\longrightarrow X$
defined by $\tilde{f}(y,z)=f(z)(y)$. Thus $(Z,\tilde{f})$ can be considered as a family of maps from $Y$ to $X$.
Now, by Gelfand's duality we can simply translate this system to non-commutative language:
\begin{definition}\label{def1}
(\cite{W1},\cite{S1})
Let $A$ and $B$ be unital C*-algebras. By a quantum family of maps from $\mathfrak{Q} B$ to $\mathfrak{Q} A$, we mean a pair $(C,\Phi)$,
containing a unital C*-algebra $C$ and a unital *-homomorphism $\Phi:A\longrightarrow B{\otimes} C$.
\end{definition}
Now, suppose instead of parameter space $Z$ we use $\mathcal{C}(Y,X)$ (note that in general this space is
not locally compact). Then the family
$$\mathrm{Id}:\mathcal{C}(Y,X)\longrightarrow\mathcal{C}(Y,X)\hspace{3mm}(\tilde{\mathrm{Id}}:\mathcal{C}(Y,X)\times Y\longrightarrow X)$$
of \emph{all} maps from $Y$ to $X$ has the following universal property:\\
For every family $\tilde{f}:Z\times Y\longrightarrow X$ of maps from $Y$ to $X$, there is a unique map $f:Z\longrightarrow\mathcal{C}(Y,X)$
such that the following diagram is commutative.
\[\xymatrix{Z\times Y\ar[rr]^-{\tilde{f}}\ar[d]^{f\times\mathrm{Id}_Y}&& X\ar@{=}[d]\\
\mathcal{C}(Y,X)\times Y\ar[rr]^-{\tilde{\mathrm{Id}}}&& X}\]
Thus, we can make the following definition in the non-commutative setting:
\begin{definition}\label{def2}
(\cite{W1},\cite{S1})
With the assumptions of Definition \ref{def1}, $(C,\Phi)$ is called a quantum family of all maps from $\mathfrak{Q} B$ to $\mathfrak{Q} A$
if for every unital C*-algebra $D$ and any unital *-homomorphism $\Psi:A\longrightarrow B{\otimes} D$, there is a unique unital
*-homomorphism $\Gamma:C\longrightarrow D$ such that the following diagram is commutative.
\[\xymatrix{A\ar[rr]^-{\Phi}\ar@{=}[d]&& B\otimes C\ar[d]^{\mathrm{I}_B{\otimes}\Gamma}\\
A\ar[rr]^-{\Psi}&& B\otimes D}\]
\end{definition}
By the universal property of Definition \ref{def2}, it is clear that if $(C,\Phi)$ and $(C',\Phi')$
are two quantum families of all maps from $\mathfrak{Q} B$ to $\mathfrak{Q} A$, then there is a *-isometric isomorphism
between $C$ and $C'$.
\begin{proposition}
Let $A$ be a unital finitely generated C*-algebra and $B$ be a finite dimensional C*-algebra. Then
the quantum family of all maps from $\mathfrak{Q} B$ to $\mathfrak{Q} A$ exists.
\end{proposition}
\begin{proof}
See \cite{W1} or \cite{S1}.
\end{proof}
\begin{definition}
(\cite{W2},\cite{MurphyTuset1},\cite{S1},\cite{S2}) A pair $(A,\Delta)$ consisting of a unital C*-algebra $A$ and a unital
*-homomorphism $\Delta:A\longrightarrow A{\otimes} A$ is called a compact quantum semigroup if $\Delta$ is a coassociative
comultiplication: $(\Delta{\otimes} \mathrm{I}_A) \Delta=(\mathrm{I}_A{\otimes}\Delta) \Delta$.
\end{definition}
A *-homomorphism $\Delta:A\longrightarrow A{\otimes} A$ induces a binary operation on the dual space $A^*$ defined by
$\tau\sigma=(\tau\otimes\sigma)\Delta$ for $\tau,\sigma\in A^*$.
Now, suppose that $S$ is a compact Hausdorff topological semigroup. Using the canonical identity
$\mathcal{C}(S)\otimes\mathcal{C}(S)\cong\mathcal{C}(S\times S)$, we define a *-homomorphism
$\Delta:\mathcal{C}(S)\longrightarrow\mathcal{C}(S)\otimes\mathcal{C}(S)$ by
$\Delta(f)(s,s')=f(ss')$ for $f\in\mathcal{C}(S)$ and $s,s'\in S$.
Then $\Delta$ is a coassociative comultiplication on $\mathcal{C}(S)$ and thus $(\mathcal{C}(S),\Delta)$ is a compact quantum
semigroup. Conversely, if $(A,\Delta)$ is a compact quantum semigroup such that $A$ is abelian, then the character space
of $A$, with the binary operation induced by $\Delta$, is a compact Hausdorff topological semigroup (\cite{VV}).
It is well known that a compact semigroup with cancelation property is a compact group (\cite[Proposition 3.2]{MaesVandaele1}).
Analogous cancelation properties for quantum semigroups are defined as follows.
\begin{definition}
Let $(A,\Delta)$ be a compact quantum semigroup.
\begin{enumerate}
\item [(i)] (\cite{MurphyTuset1}) $(A,\Delta)$ has left (resp. right) cancelation property if the linear span of $\{(b\otimes1)\Delta(a):a,b\in A\}$
(resp. $\{(1\otimes b)\Delta(a):a,b\in A\}$) is dense in $A\otimes A$.
\item [(ii)] (\cite{MurphyTuset1}) $(A,\Delta)$ has weak left cancelation property if, whenever $\tau,\sigma\in A^*$ are such that
$(\tau a)\sigma=0$ for all $a\in A$, we must have $\tau=0$ or $\sigma=0$. Similarly, $(A,\Delta)$ has weak right cancelation property if,
whenever $\tau(\sigma a)=0$ for all $a\in A$, we must have $\tau=0$ or $\sigma=0$.
\item [(iii)] (\cite{S1}) A left (resp. right) counit for $(A,\Delta)$, is a character $\epsilon$ on $A$ (a unital *-homomorphism
$\epsilon:A\longrightarrow\mathbb{C}$), satisfying $(\epsilon\otimes\mathrm{I}_A)\Delta=\mathrm{I}_A$ (resp. $(\mathrm{I}_A\otimes\epsilon)\Delta=\mathrm{I}_A$). A left and right counit is called
a (two-sided) counit.
\end{enumerate}
\end{definition}
In the above Definition the functionals $\tau a$ and $a\tau$ are defined by $\tau a(x)=\tau(ax)$ and $a\tau(x)=\tau(xa)$.
\begin{remark}
In \cite{W2}, counits are characters on special dense subalgebras of compact quantum groups. These subalgebras are constructed from
finite dimensional unitary representations of compact quantum groups. In this paper we mainly deal with quantum semigroups and since it is not
natural to define unitary representations for (quantum) semigroups, we use the above notion for counits.
\end{remark}
It is clear that the left (resp. right) cancelation property implies weak left (resp. weak right) cancelation property.
The converse is partially satisfied (\cite[Theorem 3.2]{MurphyTuset1}):
\begin{theorem}
Let $(A,\Delta)$ be a compact quantum semigroup. Then $(A,\Delta)$ has both left and right cancelation properties if and only if
it has both weak left and weak right cancelation properties.
\end{theorem}
\begin{definition}
(\cite{W2},\cite{MurphyTuset1},\cite{MaesVandaele1}) A compact quantum semigroup with both left and right cancelation properties
is called compact quantum group.
\end{definition}
Again consider compact semigroup $S$ and its corresponding compact quantum semigroup $(\mathcal{C}(S),\Delta)$
defined above. Using Proposition 3.2 of \cite{MaesVandaele1}, it is easily proved that $S$ is a compact group
if and only if $(\mathcal{C}(S),\Delta)$ is a compact quantum group.
\section{The results}
In this section, we state and prove the main result.
\begin{theorem}\label{t1}
Let $(A,\Delta)$ be a compact quantum semigroup with finitely generated $A$, $B$ be a finite dimensional
commutative C*-algebra, and $(C,\Phi)$ be the quantum family of all maps from $\mathfrak{Q} B$ to $\mathfrak{Q} A$. Consider
the unique unital *-homomorphism $\Gamma:C\longrightarrow C\otimes C$
such that the diagram
\begin{equation}\label{d1}
\xymatrix{A\ar[rr]^-{\Phi}\ar[d]^{\Delta}&& B\otimes C\ar[d]^{\mathrm{I}_B{\otimes}\Gamma}\\
A\otimes A\ar[d]^{\Phi{\otimes}\Phi}&& B\otimes C\otimes C\\
B\otimes C\otimes B\otimes C\ar[rr]^-{\mathrm{I}_B\otimes F\otimes \mathrm{I}_C}&&B\otimes B\otimes C\otimes C\ar[u]^{m{\otimes} \mathrm{I}_{C{\otimes} C}}}
\end{equation}
is commutative, where $F:C{\otimes} B\longrightarrow B{\otimes} C$ is the flip map, i.e.
$c{\otimes} b\longmapsto b{\otimes} c$ ($b\in B, c\in C$), and
$m:B{\otimes} B\longrightarrow B$ is the multiplication *-homomorphism of $B$, i.e. $m(b{\otimes} b')=bb'$ ($b,b'\in B)$.
Then $(C,\Gamma)$ is a compact quantum semigroup.
\end{theorem}
\begin{proof}
We must prove that $(\mathrm{I}_C{\otimes}\Gamma) \Gamma=(\Gamma{\otimes}\mathrm{I}_C) \Gamma$, and for this, by the universal property of quantum
families of maps, it is enough to prove that
\begin{equation}\label{e1}
(\mathrm{I}_B{\otimes}\mathrm{I}_C{\otimes}\Gamma) (\mathrm{I}_B{\otimes}\Gamma) \Phi=(\mathrm{I}_B{\otimes}\Gamma{\otimes}\mathrm{I}_C) (\mathrm{I}_B{\otimes}\Gamma) \Phi.
\end{equation}
Note that by the commutativity of (\ref{d1}), we have
$$(\mathrm{I}_B{\otimes}\Gamma) \Phi=(m{\otimes} \mathrm{I}_{C{\otimes} C}) (\mathrm{I}_B{\otimes} F{\otimes} \mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta.$$
Let us begin from the left hand side of (\ref{e1}):
\begin{equation*}
\begin{split}
&(\mathrm{I}_B{\otimes}\mathrm{I}_C{\otimes}\Gamma) (\mathrm{I}_B{\otimes}\Gamma) \Phi\\
=&(\mathrm{I}_B{\otimes}\mathrm{I}_C{\otimes}\Gamma) (m{\otimes} \mathrm{I}_{C{\otimes} C}) (\mathrm{I}_B{\otimes} F{\otimes} \mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_C{\otimes}\Gamma) (\mathrm{I}_B{\otimes} F{\otimes} \mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C{\otimes}B}{\otimes}\Gamma)(\Phi{\otimes}\Phi)\Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C{\otimes}B}{\otimes}\Gamma)(\Phi{\otimes}\mathrm{I}_{B{\otimes}C})
(\mathrm{I}_A{\otimes}\Phi)\Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\Phi{\otimes}\mathrm{I}_{B{\otimes}C{\otimes}C})(\mathrm{I}_A{\otimes}\mathrm{I}_B{\otimes}\Gamma)
(\mathrm{I}_A{\otimes}\Phi)\Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\Phi{\otimes}\mathrm{I}_B{\otimes}\Gamma)(\mathrm{I}_A{\otimes}\Phi)\Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\Phi{\otimes}[(\mathrm{I}_B{\otimes}\Gamma)\Phi])\Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})
(\Phi{\otimes}[(m{\otimes} \mathrm{I}_{C{\otimes} C}) (\mathrm{I}_B{\otimes} F{\otimes} \mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta])\Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C}{\otimes}m{\otimes} \mathrm{I}_{C{\otimes} C})
(\mathrm{I}_{B{\otimes}C{\otimes}B}{\otimes} F{\otimes} \mathrm{I}_C)(\Phi{\otimes}\Phi{\otimes}\Phi)(\mathrm{I}_A{\otimes}\Delta)\Delta
\end{split}
\end{equation*}
For the right hand side of (\ref{e1}), we have
\begin{equation*}
\begin{split}
&(\mathrm{I}_B{\otimes}\Gamma{\otimes}\mathrm{I}_C) (\mathrm{I}_B{\otimes}\Gamma) \Phi\\
=&(\mathrm{I}_B{\otimes}\Gamma{\otimes}\mathrm{I}_C)(m{\otimes} \mathrm{I}_{C{\otimes} C}) (\mathrm{I}_B{\otimes} F{\otimes} \mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\Gamma{\otimes}\mathrm{I}_C)(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_C)(\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_C{\otimes}\mathrm{I}_C)(\mathrm{I}_B{\otimes}\mathrm{I}_C{\otimes}F{\otimes}\mathrm{I}_C)
(\mathrm{I}_B{\otimes}\Gamma{\otimes}\mathrm{I}_B{\otimes}\mathrm{I}_C)(\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C}{\otimes}F{\otimes}\mathrm{I}_C)([(\mathrm{I}_B{\otimes}\Gamma) \Phi]{\otimes}\Phi)\Delta\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C}{\otimes}F{\otimes}\mathrm{I}_C)
([(m{\otimes} \mathrm{I}_{C{\otimes} C}) (\mathrm{I}_B{\otimes} F{\otimes} \mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta]{\otimes}\Phi)\Delta,
\end{split}
\end{equation*}
and thus if $W=(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C}{\otimes}F{\otimes}\mathrm{I}_C)$, then
\begin{equation*}
\begin{split}
&(\mathrm{I}_B{\otimes}\Gamma{\otimes}\mathrm{I}_C) (\mathrm{I}_B{\otimes}\Gamma) \Phi=\\
&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})W
(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}B{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}B{\otimes}C})(\Phi{\otimes}\Phi{\otimes}\Phi)(\Delta{\otimes}\mathrm{I}_A)\Delta.
\end{split}
\end{equation*}
Thus, since $(\mathrm{I}_A{\otimes}\Delta)\Delta=(\Delta{\otimes}\mathrm{I}_A)\Delta$, to prove (\ref{e1}), it is enough to show that
\begin{equation}\label{e2}
\begin{split}
&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C}{\otimes}m{\otimes} \mathrm{I}_{C{\otimes} C})
(\mathrm{I}_{B{\otimes}C{\otimes}B}{\otimes} F{\otimes} \mathrm{I}_C)=\\
&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})W(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}B{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}B{\otimes}C}).
\end{split}
\end{equation}
Let $b_1,b_2,b_3\in B$ and $c_1,c_2,c_3\in C$. Then for the left hand side of (\ref{e2}), we have,
\begin{equation*}
\begin{split}
&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C}{\otimes}m{\otimes} \mathrm{I}_{C{\otimes} C})
(\mathrm{I}_{B{\otimes}C{\otimes}B}{\otimes} F{\otimes} \mathrm{I}_C)(b_1{\otimes}c_1{\otimes}b_2{\otimes}c_2{\otimes}b_3{\otimes}c_3)\\
&=(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(\mathrm{I}_{B{\otimes}C}{\otimes}m{\otimes} \mathrm{I}_{C{\otimes} C})
(b_1{\otimes}c_1{\otimes}b_2{\otimes}b_3{\otimes}c_2{\otimes}c_3)\\
&=(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}C})(b_1{\otimes}c_1{\otimes}(b_2b_3){\otimes}c_2{\otimes}c_3)\\
&=(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(b_1{\otimes}(b_2b_3){\otimes}c_1{\otimes}c_2{\otimes}c_3)\\
&=b_1(b_2b_3){\otimes}c_1{\otimes}c_2{\otimes}c_3\\
&=(b_1b_2b_3){\otimes}c_1{\otimes}c_2{\otimes}c_3,
\end{split}
\end{equation*}
and for the right hand side of (\ref{e2}),
\begin{equation*}
\begin{split}
&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})W(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}B{\otimes}C})(\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C{\otimes}B{\otimes}C})
(b_1{\otimes}c_1{\otimes}b_2{\otimes}c_2{\otimes}b_3{\otimes}c_3)\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})W(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}B{\otimes}C})(b_1{\otimes}b_2{\otimes}c_1{\otimes}c_2{\otimes}b_3{\otimes}c_3)\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})W(b_1b_2{\otimes}c_1{\otimes}c_2{\otimes}b_3{\otimes}c_3)\\
=&(m{\otimes}\mathrm{I}_{C{\otimes}C{\otimes}C})(b_1b_2{\otimes}b_3{\otimes}c_1{\otimes}c_2{\otimes}c_3)\\
=&(b_1b_2b_3){\otimes}c_1{\otimes}c_2{\otimes}c_3
\end{split}
\end{equation*}
Therefore, (\ref{e2}) is satisfied and the proof is complete.
\end{proof}
\begin{theorem}\label{t2}
Let $(A,\Delta)$ be a compact quantum semigroup with a left counit. Suppose that $B,C,\Phi$ and $\Gamma$ are as in Theorem \ref{t1}.
Then the compact quantum semigroup $(C,\Gamma)$ has a left counit.
\end{theorem}
\begin{proof}
Let $\epsilon:A\longrightarrow\mathbb{C}$ be a left counit for $(A,\Delta)$. Define the unital *-algebra homomorphism $\omega:A\longrightarrow B\otimes\mathbb{C}=B$ by
$\omega(a)=1_B\otimes\epsilon(a)=\epsilon(a)1_B$ ($a\in A$). Then the universal property of $(C,\Phi)$ shows that there is a character
$\hat{\epsilon}:C\longrightarrow\mathbb{C}$ such that the following diagram is commutative:
\[\xymatrix{A\ar[rr]^-{\Phi}\ar@{=}[d]&& B\otimes C\ar[d]^{\mathrm{I}_B{\otimes}\hat{\epsilon}}\\
A\ar[rr]^-{\omega}&& B\otimes \mathbb{C}}\]
We show that $(\hat{\epsilon}\otimes\mathrm{I}_C)\Gamma=\mathrm{I}_C$, and thus $\hat{\epsilon}$ is a left counit for $(C,\Gamma)$. By the universal property of $(C,\Phi)$,
it is enough to show that
\begin{equation}\label{e3}
(\mathrm{I}_B{\otimes}[(\hat{\epsilon}\otimes\mathrm{I}_C)\Gamma])\Phi=\Phi.
\end{equation}
We have
\begin{equation*}
\begin{split}
(\mathrm{I}_B{\otimes}[(\hat{\epsilon}\otimes\mathrm{I}_C)\Gamma])\Phi=&(\mathrm{I}_B{\otimes}\hat{\epsilon}{\otimes}\mathrm{I}_C) (\mathrm{I}_B{\otimes}\Gamma)\Phi\\
=&(\mathrm{I}_B{\otimes}\hat{\epsilon}{\otimes}\mathrm{I}_C) (m{\otimes}\mathrm{I}_{C{\otimes}C}) (\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C}) (\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\hat{\epsilon}{\otimes}\mathrm{I}_C) (\mathrm{I}_B{\otimes}F{\otimes}\mathrm{I}_{C}) (\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_C) (\mathrm{I}_B{\otimes}\hat{\epsilon}{\otimes}\mathrm{I}_{B}{\otimes}\mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_C) ([(\mathrm{I}_B\otimes\hat{\epsilon})\Phi]\otimes\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_C) (\omega\otimes\Phi) \Delta\\
=&(m{\otimes}\mathrm{I}_C) (\mathrm{I}_B\otimes\Phi) (\omega\otimes\mathrm{I}_A) \Delta
\end{split}
\end{equation*}
Since $\epsilon$ is a left counit for $(A,\Delta)$, we have
$$(\omega\otimes\mathrm{I}_A)\Delta(a)=1_B\otimes a,$$
for every $a\in A$. This implies that
\begin{equation*}
\begin{split}
(m{\otimes}\mathrm{I}_C) (\mathrm{I}_B\otimes\Phi) (\omega\otimes\mathrm{I}_A) \Delta(a)&=(m{\otimes}\mathrm{I}_C) (\mathrm{I}_B\otimes\Phi)(1_B\otimes a)\\
&=\Phi(a),
\end{split}
\end{equation*}
for every $a$ in $A$. This completes the proof.
\end{proof}
The analogue of Theorem \ref{t2} holds for quantum semigroups that have right and (two-sided) counits.
Some natural questions about the structure of the compact quantum semigroup $(C,\Gamma)$ arise:
\begin{question}
Let $(A,\Delta)$ and $(C,\Gamma)$ be as in Theorem \ref{t1}.
\begin{enumerate}
\item [(i)] Suppose that $(A,\Delta)$ has one of the left or weak left cancelation properties.
Does this hold for $(C,\Gamma)$? In particular:
\item [(ii)] Suppose that $(A,\Delta)$ is a compact quantum group. Is $(C,\Gamma)$ a compact quantum group?
\item [(iii)] Are the converses of (i) and (ii) satisfied?
\end{enumerate}
\end{question}
We consider some parts of these questions for a simple example in the next section.
\section{Some examples}
In this section, we consider a class of examples.
Let $A=\mathbb{C}^n$ be the C*-algebra of functions on the
commutative finite space $\{1,\cdots,n\}$, and let $(C,\Phi)$ be the quantum
family of all maps from $\mathfrak{Q} A$ to $\mathfrak{Q} A$. A direct computation shows that $C$
is the universal C*-algebra generated by $n^2$ elements $\{c_{ij}: 1\leq i,j\leq n\}$ that satisfy the
relations
\begin{enumerate}
\item [(1)] $c_{ij}^2=c_{ij}=c_{ij}^*$ for every $i,j=1,\cdots,n$,
\item [(2)] $\sum_{j=1}^nc_{ij}=1$ for every $i=1,\cdots,n$, and
\item [(3)] $c_{ij}c_{ik}=0$ for every $i,j,k=1,\cdots,n$ with $j\neq k$.
\end{enumerate}
Also, $\Phi:A\longrightarrow A\otimes C$ is defined by
$\Phi(e_k)=\sum_{i=1}^ne_i\otimes c_{ik}$,
where $e_1,\cdots,e_n$ is the standard basis for $A$. Suppose that
$$\xi:\{1,\cdots,n\}\times\{1,\cdots,n\}\longrightarrow\{1,\cdots,n\}$$
is a semigroup multiplication. Then $\xi$ induces a comultiplication $\Delta: A\longrightarrow A\otimes A$
$$\Delta(e_k)=\sum_{r,s=1}^n\Delta_k^{rs}e_r\otimes e_s,$$
defined by $\Delta_k^{rs}=\delta_{k\xi(r,s)}$, where $\delta$ is the Kronecker delta.
We compute the comultiplication $\Gamma:C\longrightarrow C\otimes C$, induced by $\Delta$ as in
Theorem \ref{t1}. We have
\begin{equation*}
\begin{split}
(\Phi\otimes\Phi)\Delta(e_k)&=(\Phi\otimes\Phi)(\sum_{r,s=1}^n\Delta_k^{rs}e_r\otimes e_s)=\sum_{r,s=1}^n\Delta_k^{rs}\Phi(e_r)\otimes \Phi(e_s)\\
&=\sum_{r,s=1}^n\sum_{j=1}^n\sum_{i=1}^n\Delta_k^{rs}e_j\otimes c_{jr}\otimes e_i\otimes c_{is},
\end{split}
\end{equation*}
and therefore
\begin{equation*}
\begin{split}
&(m{\otimes} \mathrm{I}_{C{\otimes} C}) (\mathrm{I}_B{\otimes} F{\otimes} \mathrm{I}_C) (\Phi{\otimes}\Phi) \Delta(e_k)\\
=&(m{\otimes} \mathrm{I}_{C{\otimes} C})(\sum_{j=1}^n\sum_{i=1}^n\sum_{r,s=1}^n\Delta_k^{rs}e_j\otimes e_i\otimes c_{jr}\otimes c_{is})\\
=&\sum_{l=1}^n\sum_{r,s=1}^n\Delta_k^{rs}e_l\otimes c_{lr}\otimes c_{ls}=\sum_{l=1}^ne_l\otimes(\sum_{r,s=1}^n\Delta_k^{rs}c_{lr}\otimes c_{ls}).
\end{split}
\end{equation*}
This equals
$(\mathrm{I}_A\otimes\Gamma)\Phi(e_k)=(\mathrm{I}_A\otimes\Gamma)\sum_{i=1}^ne_i\otimes c_{ik}=\sum_{l=1}^ne_l\otimes\Gamma(c_{lk})$.
Thus $\Gamma$ is defined by
$$\Gamma(c_{lk})=\sum_{r,s=1}^n\Delta_k^{rs}c_{lr}\otimes c_{ls}.$$
We now consider the special case $n=2$, in more details.
There are only five semigroup structures (up to isomorphism) on the set $\{1,2\}$:
\begin{enumerate}
\item [$\xi_1:$] $11=1,\hspace{2mm}12=2,\hspace{2mm}21=2,\hspace{2mm}22=1$.
\item [$\xi_2:$] $11=1,\hspace{2mm}12=2,\hspace{2mm}21=2,\hspace{2mm}22=2$.
\item [$\xi_3:$] $11=1,\hspace{2mm}12=1,\hspace{2mm}21=1,\hspace{2mm}22=1$.
\item [$\xi_4:$] $11=1,\hspace{2mm}12=1,\hspace{2mm}21=2,\hspace{2mm}22=2$.
\item [$\xi_5:$] $11=1,\hspace{2mm}12=2,\hspace{2mm}21=1,\hspace{2mm}22=2$.
\end{enumerate}
For every semigroup $(\{1,2\},\xi_i)$, let $(C,\Gamma_i)$ be the corresponding quantum semigroup, as above.
A simple computation shows that:
$\Bigg\{
\begin{array}{cc}
\Gamma_1(c_{11})=c_{11}\otimes c_{11}+c_{12}\otimes c_{12} & \Gamma_1(c_{12})=c_{11}\otimes c_{12}+c_{12}\otimes c_{11}\\
\Gamma_1(c_{21})=c_{21}\otimes c_{21}+c_{22}\otimes c_{22} & \Gamma_1(c_{22})=c_{21}\otimes c_{22}+c_{22}\otimes c_{21}\\
\end{array}
$
$\Bigg\{
\begin{array}{cc}
\Gamma_2(c_{11})=c_{11}\otimes c_{11} & \Gamma_2(c_{12})=c_{12}\otimes c_{12}+c_{11}\otimes c_{12}+c_{12}\otimes c_{11}\\
\Gamma_2(c_{21})=c_{21}\otimes c_{21} & \Gamma_2(c_{22})=c_{22}\otimes c_{22}+c_{21}\otimes c_{22}+c_{22}\otimes c_{21}\\
\end{array}
$
$\Bigg\{
\begin{array}{cc}
\Gamma_3(c_{11})=1 & \Gamma_3(c_{12})=0 \\
\Gamma_3(c_{21})=1 & \Gamma_3(c_{22})=0 \\
\end{array}
$
$$\Gamma_4(c)=c\otimes1\hspace{5mm}(\forall c\in C)$$
$$\Gamma_5(c)=1\otimes c\hspace{5mm}(\forall c\in C)$$
Note that the semigroup structure $\xi_1$ is a group structure, and $\xi_4$ and $\xi_5$ have right and left cancelation
properties, respectively. Thus $(\mathbb{C}^2,\Delta_1)$ is a compact quantum group, and $(\mathbb{C}^2,\Delta_4)$ and $(\mathbb{C}^2,\Delta_5)$
are compact quantum semigroups with right and left cancelation properties, respectively. From above computations, it is clear that
the compact quantum semigroups $(C,\Gamma_4)$ and $(C,\Gamma_5)$ have right and left cancelation properties, respectively. Now, we show that
$(C,\Gamma_1)$ is also a compact quantum group: The unital C*-algebra $C$ is generated by the two unitary elements $x=c_{11}-c_{12}$ and
$y=c_{21}-c_{22}$ (see the following Remark for more details). A simple computation shows that
$$\Gamma_1(x)=x\otimes x\hspace{5mm}\text{and}\hspace{5mm}\Gamma_1(y)=y\otimes y.$$
This easily implies that $(C,\Gamma_1)$ has left and right cancelation properties, and therefore $(C,\Gamma_1)$
is a compact quantum group.
\begin{remark}
\begin{enumerate}
\item [(1)] The algebra $A=\mathbb{C}^2$ is the universal C*-algebra
generated by a unitary self-adjoint element, say $(1,-1)$.
It follows from the proof of Theorem 3.3 of \cite{S1}, that $C$ becomes the universal
C*-algebra generated by two unitary self-adjoint elements.
A model for $C$, is the C*-algebra of all continuous maps from closed unit interval to $2\times2$ matrix algebra,
which get diagonal matrices at the endpoints of the interval, equivalently
$$C=\Bigg\{
\left(
\begin{array}{cc}
f_{11} & f_{12} \\
f_{21} & f_{22} \\
\end{array}
\right)
:f_{ij}\in\mathcal{C}[0,1],f_{12}(0)=f_{12}(1)=f_{21}(0)=f_{21}(1)=0\Bigg\},$$
with unitary self-adjoint generators
$$x=
\left(
\begin{array}{cc}
\cos(\pi t) & \sin(\pi t) \\
\sin(\pi t) & -\cos(\pi t) \\
\end{array}
\right)\hspace{5mm}\text{and}\hspace{5mm}
y=
\left(
\begin{array}{cc}
-\cos(\pi t) & \sin(\pi t) \\
\sin(\pi t) & \cos(\pi t) \\
\end{array}
\right).
$$
In this representation of $C$, the generators $c_{ij}$'s become:
$c_{11}=\frac{1+x}{2}$, $c_{12}=\frac{1-x}{2}$, $c_{21}=\frac{1+y}{2}$ and $c_{22}=\frac{1-y}{2}$.
Also, the homomorphism $\Phi:\mathbb{C}^2\longrightarrow \mathbb{C}^2\otimes C=C\oplus C$ is defined by $\Phi(1,-1)=(x,y)$.
This representation of the C*-algebra $C$ is one of the elementary examples of non-commutative spaces, see Section
II.2.$\beta$ of \cite{C}.
\item [(2)] There is another quantum semigroup structure on quantum families of all maps from any
finite quantum space to itself introduced by So{\l}tan \cite{S1}.
\end{enumerate}
\end{remark}
\textbf{Acknowledgement:}
The author is grateful to the referee for his/her valuable suggestions.
\end{document}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.